# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import NetworkManagementClientConfiguration
from .operations import ApplicationGatewaysOperations
from .operations import ApplicationSecurityGroupsOperations
from .operations import AvailableDelegationsOperations
from .operations import AvailableResourceGroupDelegationsOperations
from .operations import AvailableServiceAliasesOperations
from .operations import AzureFirewallsOperations
from .operations import AzureFirewallFqdnTagsOperations
from .operations import BastionHostsOperations
from .operations import NetworkManagementClientOperationsMixin
from .operations import DdosCustomPoliciesOperations
from .operations import DdosProtectionPlansOperations
from .operations import AvailableEndpointServicesOperations
from .operations import ExpressRouteCircuitAuthorizationsOperations
from .operations import ExpressRouteCircuitPeeringsOperations
from .operations import ExpressRouteCircuitConnectionsOperations
from .operations import PeerExpressRouteCircuitConnectionsOperations
from .operations import ExpressRouteCircuitsOperations
from .operations import ExpressRouteServiceProvidersOperations
from .operations import ExpressRouteCrossConnectionsOperations
from .operations import ExpressRouteCrossConnectionPeeringsOperations
from .operations import ExpressRouteGatewaysOperations
from .operations import ExpressRouteConnectionsOperations
from .operations import ExpressRoutePortsLocationsOperations
from .operations import ExpressRoutePortsOperations
from .operations import ExpressRouteLinksOperations
from .operations import FirewallPoliciesOperations
from .operations import FirewallPolicyRuleGroupsOperations
from .operations import LoadBalancersOperations
from .operations import LoadBalancerBackendAddressPoolsOperations
from .operations import LoadBalancerFrontendIPConfigurationsOperations
from .operations import InboundNatRulesOperations
from .operations import LoadBalancerLoadBalancingRulesOperations
from .operations import LoadBalancerOutboundRulesOperations
from .operations import LoadBalancerNetworkInterfacesOperations
from .operations import LoadBalancerProbesOperations
from .operations import NatGatewaysOperations
from .operations import NetworkInterfacesOperations
from .operations import NetworkInterfaceIPConfigurationsOperations
from .operations import NetworkInterfaceLoadBalancersOperations
from .operations import NetworkInterfaceTapConfigurationsOperations
from .operations import NetworkProfilesOperations
from .operations import NetworkSecurityGroupsOperations
from .operations import SecurityRulesOperations
from .operations import DefaultSecurityRulesOperations
from .operations import NetworkWatchersOperations
from .operations import PacketCapturesOperations
from .operations import ConnectionMonitorsOperations
from .operations import Operations
from .operations import PrivateEndpointsOperations
from .operations import AvailablePrivateEndpointTypesOperations
from .operations import PrivateLinkServicesOperations
from .operations import PublicIPAddressesOperations
from .operations import PublicIPPrefixesOperations
from .operations import RouteFiltersOperations
from .operations import RouteFilterRulesOperations
from .operations import RouteTablesOperations
from .operations import RoutesOperations
from .operations import BgpServiceCommunitiesOperations
from .operations import ServiceEndpointPoliciesOperations
from .operations import ServiceEndpointPolicyDefinitionsOperations
from .operations import ServiceTagsOperations
from .operations import UsagesOperations
from .operations import VirtualNetworksOperations
from .operations import SubnetsOperations
from .operations import ResourceNavigationLinksOperations
from .operations import ServiceAssociationLinksOperations
from .operations import VirtualNetworkPeeringsOperations
from .operations import VirtualNetworkGatewaysOperations
from .operations import VirtualNetworkGatewayConnectionsOperations
from .operations import LocalNetworkGatewaysOperations
from .operations import VirtualNetworkTapsOperations
from .operations import VirtualRoutersOperations
from .operations import VirtualRouterPeeringsOperations
from .operations import VirtualWansOperations
from .operations import VpnSitesOperations
from .operations import VpnSiteLinksOperations
from .operations import VpnSitesConfigurationOperations
from .operations import VpnServerConfigurationsOperations
from .operations import VirtualHubsOperations
from .operations import HubVirtualNetworkConnectionsOperations
from .operations import VpnGatewaysOperations
from .operations import VpnConnectionsOperations
from .operations import VpnSiteLinkConnectionsOperations
from .operations import VpnLinkConnectionsOperations
from .operations import P2SVpnGatewaysOperations
from .operations import VpnServerConfigurationsAssociatedWithVirtualWanOperations
from .operations import WebApplicationFirewallPoliciesOperations
from .. import models
class NetworkManagementClient(NetworkManagementClientOperationsMixin):
"""Network Client.
:ivar application_gateways: ApplicationGatewaysOperations operations
:vartype application_gateways: azure.mgmt.network.v2019_08_01.aio.operations.ApplicationGatewaysOperations
:ivar application_security_groups: ApplicationSecurityGroupsOperations operations
:vartype application_security_groups: azure.mgmt.network.v2019_08_01.aio.operations.ApplicationSecurityGroupsOperations
:ivar available_delegations: AvailableDelegationsOperations operations
:vartype available_delegations: azure.mgmt.network.v2019_08_01.aio.operations.AvailableDelegationsOperations
:ivar available_resource_group_delegations: AvailableResourceGroupDelegationsOperations operations
:vartype available_resource_group_delegations: azure.mgmt.network.v2019_08_01.aio.operations.AvailableResourceGroupDelegationsOperations
:ivar available_service_aliases: AvailableServiceAliasesOperations operations
:vartype available_service_aliases: azure.mgmt.network.v2019_08_01.aio.operations.AvailableServiceAliasesOperations
:ivar azure_firewalls: AzureFirewallsOperations operations
:vartype azure_firewalls: azure.mgmt.network.v2019_08_01.aio.operations.AzureFirewallsOperations
:ivar azure_firewall_fqdn_tags: AzureFirewallFqdnTagsOperations operations
:vartype azure_firewall_fqdn_tags: azure.mgmt.network.v2019_08_01.aio.operations.AzureFirewallFqdnTagsOperations
:ivar bastion_hosts: BastionHostsOperations operations
:vartype bastion_hosts: azure.mgmt.network.v2019_08_01.aio.operations.BastionHostsOperations
:ivar ddos_custom_policies: DdosCustomPoliciesOperations operations
:vartype ddos_custom_policies: azure.mgmt.network.v2019_08_01.aio.operations.DdosCustomPoliciesOperations
:ivar ddos_protection_plans: DdosProtectionPlansOperations operations
:vartype ddos_protection_plans: azure.mgmt.network.v2019_08_01.aio.operations.DdosProtectionPlansOperations
:ivar available_endpoint_services: AvailableEndpointServicesOperations operations
:vartype available_endpoint_services: azure.mgmt.network.v2019_08_01.aio.operations.AvailableEndpointServicesOperations
:ivar express_route_circuit_authorizations: ExpressRouteCircuitAuthorizationsOperations operations
:vartype express_route_circuit_authorizations: azure.mgmt.network.v2019_08_01.aio.operations.ExpressRouteCircuitAuthorizationsOperations
:ivar express_route_circuit_peerings: ExpressRouteCircuitPeeringsOperations operations
:vartype express_route_circuit_peerings: azure.mgmt.network.v2019_08_01.aio.operations.ExpressRouteCircuitPeeringsOperations
:ivar express_route_circuit_connections: ExpressRouteCircuitConnectionsOperations operations
:vartype express_route_circuit_connections: azure.mgmt.network.v2019_08_01.aio.operations.ExpressRouteCircuitConnectionsOperations
:ivar peer_express_route_circuit_connections: PeerExpressRouteCircuitConnectionsOperations operations
:vartype peer_express_route_circuit_connections: azure.mgmt.network.v2019_08_01.aio.operations.PeerExpressRouteCircuitConnectionsOperations
:ivar express_route_circuits: ExpressRouteCircuitsOperations operations
:vartype express_route_circuits: azure.mgmt.network.v2019_08_01.aio.operations.ExpressRouteCircuitsOperations
:ivar express_route_service_providers: ExpressRouteServiceProvidersOperations operations
:vartype express_route_service_providers: azure.mgmt.network.v2019_08_01.aio.operations.ExpressRouteServiceProvidersOperations
:ivar express_route_cross_connections: ExpressRouteCrossConnectionsOperations operations
:vartype express_route_cross_connections: azure.mgmt.network.v2019_08_01.aio.operations.ExpressRouteCrossConnectionsOperations
:ivar express_route_cross_connection_peerings: ExpressRouteCrossConnectionPeeringsOperations operations
:vartype express_route_cross_connection_peerings: azure.mgmt.network.v2019_08_01.aio.operations.ExpressRouteCrossConnectionPeeringsOperations
:ivar express_route_gateways: ExpressRouteGatewaysOperations operations
:vartype express_route_gateways: azure.mgmt.network.v2019_08_01.aio.operations.ExpressRouteGatewaysOperations
:ivar express_route_connections: ExpressRouteConnectionsOperations operations
:vartype express_route_connections: azure.mgmt.network.v2019_08_01.aio.operations.ExpressRouteConnectionsOperations
:ivar express_route_ports_locations: ExpressRoutePortsLocationsOperations operations
:vartype express_route_ports_locations: azure.mgmt.network.v2019_08_01.aio.operations.ExpressRoutePortsLocationsOperations
:ivar express_route_ports: ExpressRoutePortsOperations operations
:vartype express_route_ports: azure.mgmt.network.v2019_08_01.aio.operations.ExpressRoutePortsOperations
:ivar express_route_links: ExpressRouteLinksOperations operations
:vartype express_route_links: azure.mgmt.network.v2019_08_01.aio.operations.ExpressRouteLinksOperations
:ivar firewall_policies: FirewallPoliciesOperations operations
:vartype firewall_policies: azure.mgmt.network.v2019_08_01.aio.operations.FirewallPoliciesOperations
:ivar firewall_policy_rule_groups: FirewallPolicyRuleGroupsOperations operations
:vartype firewall_policy_rule_groups: azure.mgmt.network.v2019_08_01.aio.operations.FirewallPolicyRuleGroupsOperations
:ivar load_balancers: LoadBalancersOperations operations
:vartype load_balancers: azure.mgmt.network.v2019_08_01.aio.operations.LoadBalancersOperations
:ivar load_balancer_backend_address_pools: LoadBalancerBackendAddressPoolsOperations operations
:vartype load_balancer_backend_address_pools: azure.mgmt.network.v2019_08_01.aio.operations.LoadBalancerBackendAddressPoolsOperations
:ivar load_balancer_frontend_ip_configurations: LoadBalancerFrontendIPConfigurationsOperations operations
:vartype load_balancer_frontend_ip_configurations: azure.mgmt.network.v2019_08_01.aio.operations.LoadBalancerFrontendIPConfigurationsOperations
:ivar inbound_nat_rules: InboundNatRulesOperations operations
:vartype inbound_nat_rules: azure.mgmt.network.v2019_08_01.aio.operations.InboundNatRulesOperations
:ivar load_balancer_load_balancing_rules: LoadBalancerLoadBalancingRulesOperations operations
:vartype load_balancer_load_balancing_rules: azure.mgmt.network.v2019_08_01.aio.operations.LoadBalancerLoadBalancingRulesOperations
:ivar load_balancer_outbound_rules: LoadBalancerOutboundRulesOperations operations
:vartype load_balancer_outbound_rules: azure.mgmt.network.v2019_08_01.aio.operations.LoadBalancerOutboundRulesOperations
:ivar load_balancer_network_interfaces: LoadBalancerNetworkInterfacesOperations operations
:vartype load_balancer_network_interfaces: azure.mgmt.network.v2019_08_01.aio.operations.LoadBalancerNetworkInterfacesOperations
:ivar load_balancer_probes: LoadBalancerProbesOperations operations
:vartype load_balancer_probes: azure.mgmt.network.v2019_08_01.aio.operations.LoadBalancerProbesOperations
:ivar nat_gateways: NatGatewaysOperations operations
:vartype nat_gateways: azure.mgmt.network.v2019_08_01.aio.operations.NatGatewaysOperations
:ivar network_interfaces: NetworkInterfacesOperations operations
:vartype network_interfaces: azure.mgmt.network.v2019_08_01.aio.operations.NetworkInterfacesOperations
:ivar network_interface_ip_configurations: NetworkInterfaceIPConfigurationsOperations operations
:vartype network_interface_ip_configurations: azure.mgmt.network.v2019_08_01.aio.operations.NetworkInterfaceIPConfigurationsOperations
:ivar network_interface_load_balancers: NetworkInterfaceLoadBalancersOperations operations
:vartype network_interface_load_balancers: azure.mgmt.network.v2019_08_01.aio.operations.NetworkInterfaceLoadBalancersOperations
:ivar network_interface_tap_configurations: NetworkInterfaceTapConfigurationsOperations operations
:vartype network_interface_tap_configurations: azure.mgmt.network.v2019_08_01.aio.operations.NetworkInterfaceTapConfigurationsOperations
:ivar network_profiles: NetworkProfilesOperations operations
:vartype network_profiles: azure.mgmt.network.v2019_08_01.aio.operations.NetworkProfilesOperations
:ivar network_security_groups: NetworkSecurityGroupsOperations operations
:vartype network_security_groups: azure.mgmt.network.v2019_08_01.aio.operations.NetworkSecurityGroupsOperations
:ivar security_rules: SecurityRulesOperations operations
:vartype security_rules: azure.mgmt.network.v2019_08_01.aio.operations.SecurityRulesOperations
:ivar default_security_rules: DefaultSecurityRulesOperations operations
:vartype default_security_rules: azure.mgmt.network.v2019_08_01.aio.operations.DefaultSecurityRulesOperations
:ivar network_watchers: NetworkWatchersOperations operations
:vartype network_watchers: azure.mgmt.network.v2019_08_01.aio.operations.NetworkWatchersOperations
:ivar packet_captures: PacketCapturesOperations operations
:vartype packet_captures: azure.mgmt.network.v2019_08_01.aio.operations.PacketCapturesOperations
:ivar connection_monitors: ConnectionMonitorsOperations operations
:vartype connection_monitors: azure.mgmt.network.v2019_08_01.aio.operations.ConnectionMonitorsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.network.v2019_08_01.aio.operations.Operations
:ivar private_endpoints: PrivateEndpointsOperations operations
:vartype private_endpoints: azure.mgmt.network.v2019_08_01.aio.operations.PrivateEndpointsOperations
:ivar available_private_endpoint_types: AvailablePrivateEndpointTypesOperations operations
:vartype available_private_endpoint_types: azure.mgmt.network.v2019_08_01.aio.operations.AvailablePrivateEndpointTypesOperations
:ivar private_link_services: PrivateLinkServicesOperations operations
:vartype private_link_services: azure.mgmt.network.v2019_08_01.aio.operations.PrivateLinkServicesOperations
:ivar public_ip_addresses: PublicIPAddressesOperations operations
:vartype public_ip_addresses: azure.mgmt.network.v2019_08_01.aio.operations.PublicIPAddressesOperations
:ivar public_ip_prefixes: PublicIPPrefixesOperations operations
:vartype public_ip_prefixes: azure.mgmt.network.v2019_08_01.aio.operations.PublicIPPrefixesOperations
:ivar route_filters: RouteFiltersOperations operations
:vartype route_filters: azure.mgmt.network.v2019_08_01.aio.operations.RouteFiltersOperations
:ivar route_filter_rules: RouteFilterRulesOperations operations
:vartype route_filter_rules: azure.mgmt.network.v2019_08_01.aio.operations.RouteFilterRulesOperations
:ivar route_tables: RouteTablesOperations operations
:vartype route_tables: azure.mgmt.network.v2019_08_01.aio.operations.RouteTablesOperations
:ivar routes: RoutesOperations operations
:vartype routes: azure.mgmt.network.v2019_08_01.aio.operations.RoutesOperations
:ivar bgp_service_communities: BgpServiceCommunitiesOperations operations
:vartype bgp_service_communities: azure.mgmt.network.v2019_08_01.aio.operations.BgpServiceCommunitiesOperations
:ivar service_endpoint_policies: ServiceEndpointPoliciesOperations operations
:vartype service_endpoint_policies: azure.mgmt.network.v2019_08_01.aio.operations.ServiceEndpointPoliciesOperations
:ivar service_endpoint_policy_definitions: ServiceEndpointPolicyDefinitionsOperations operations
:vartype service_endpoint_policy_definitions: azure.mgmt.network.v2019_08_01.aio.operations.ServiceEndpointPolicyDefinitionsOperations
:ivar service_tags: ServiceTagsOperations operations
:vartype service_tags: azure.mgmt.network.v2019_08_01.aio.operations.ServiceTagsOperations
:ivar usages: UsagesOperations operations
:vartype usages: azure.mgmt.network.v2019_08_01.aio.operations.UsagesOperations
:ivar virtual_networks: VirtualNetworksOperations operations
:vartype virtual_networks: azure.mgmt.network.v2019_08_01.aio.operations.VirtualNetworksOperations
:ivar subnets: SubnetsOperations operations
:vartype subnets: azure.mgmt.network.v2019_08_01.aio.operations.SubnetsOperations
:ivar resource_navigation_links: ResourceNavigationLinksOperations operations
:vartype resource_navigation_links: azure.mgmt.network.v2019_08_01.aio.operations.ResourceNavigationLinksOperations
:ivar service_association_links: ServiceAssociationLinksOperations operations
:vartype service_association_links: azure.mgmt.network.v2019_08_01.aio.operations.ServiceAssociationLinksOperations
:ivar virtual_network_peerings: VirtualNetworkPeeringsOperations operations
:vartype virtual_network_peerings: azure.mgmt.network.v2019_08_01.aio.operations.VirtualNetworkPeeringsOperations
:ivar virtual_network_gateways: VirtualNetworkGatewaysOperations operations
:vartype virtual_network_gateways: azure.mgmt.network.v2019_08_01.aio.operations.VirtualNetworkGatewaysOperations
:ivar virtual_network_gateway_connections: VirtualNetworkGatewayConnectionsOperations operations
:vartype virtual_network_gateway_connections: azure.mgmt.network.v2019_08_01.aio.operations.VirtualNetworkGatewayConnectionsOperations
:ivar local_network_gateways: LocalNetworkGatewaysOperations operations
:vartype local_network_gateways: azure.mgmt.network.v2019_08_01.aio.operations.LocalNetworkGatewaysOperations
:ivar virtual_network_taps: VirtualNetworkTapsOperations operations
:vartype virtual_network_taps: azure.mgmt.network.v2019_08_01.aio.operations.VirtualNetworkTapsOperations
:ivar virtual_routers: VirtualRoutersOperations operations
:vartype virtual_routers: azure.mgmt.network.v2019_08_01.aio.operations.VirtualRoutersOperations
:ivar virtual_router_peerings: VirtualRouterPeeringsOperations operations
:vartype virtual_router_peerings: azure.mgmt.network.v2019_08_01.aio.operations.VirtualRouterPeeringsOperations
:ivar virtual_wans: VirtualWansOperations operations
:vartype virtual_wans: azure.mgmt.network.v2019_08_01.aio.operations.VirtualWansOperations
:ivar vpn_sites: VpnSitesOperations operations
:vartype vpn_sites: azure.mgmt.network.v2019_08_01.aio.operations.VpnSitesOperations
:ivar vpn_site_links: VpnSiteLinksOperations operations
:vartype vpn_site_links: azure.mgmt.network.v2019_08_01.aio.operations.VpnSiteLinksOperations
:ivar vpn_sites_configuration: VpnSitesConfigurationOperations operations
:vartype vpn_sites_configuration: azure.mgmt.network.v2019_08_01.aio.operations.VpnSitesConfigurationOperations
:ivar vpn_server_configurations: VpnServerConfigurationsOperations operations
:vartype vpn_server_configurations: azure.mgmt.network.v2019_08_01.aio.operations.VpnServerConfigurationsOperations
:ivar virtual_hubs: VirtualHubsOperations operations
:vartype virtual_hubs: azure.mgmt.network.v2019_08_01.aio.operations.VirtualHubsOperations
:ivar hub_virtual_network_connections: HubVirtualNetworkConnectionsOperations operations
:vartype hub_virtual_network_connections: azure.mgmt.network.v2019_08_01.aio.operations.HubVirtualNetworkConnectionsOperations
:ivar vpn_gateways: VpnGatewaysOperations operations
:vartype vpn_gateways: azure.mgmt.network.v2019_08_01.aio.operations.VpnGatewaysOperations
:ivar vpn_connections: VpnConnectionsOperations operations
:vartype vpn_connections: azure.mgmt.network.v2019_08_01.aio.operations.VpnConnectionsOperations
:ivar vpn_site_link_connections: VpnSiteLinkConnectionsOperations operations
:vartype vpn_site_link_connections: azure.mgmt.network.v2019_08_01.aio.operations.VpnSiteLinkConnectionsOperations
:ivar vpn_link_connections: VpnLinkConnectionsOperations operations
:vartype vpn_link_connections: azure.mgmt.network.v2019_08_01.aio.operations.VpnLinkConnectionsOperations
:ivar p2_svpn_gateways: P2SVpnGatewaysOperations operations
:vartype p2_svpn_gateways: azure.mgmt.network.v2019_08_01.aio.operations.P2SVpnGatewaysOperations
:ivar vpn_server_configurations_associated_with_virtual_wan: VpnServerConfigurationsAssociatedWithVirtualWanOperations operations
:vartype vpn_server_configurations_associated_with_virtual_wan: azure.mgmt.network.v2019_08_01.aio.operations.VpnServerConfigurationsAssociatedWithVirtualWanOperations
:ivar web_application_firewall_policies: WebApplicationFirewallPoliciesOperations operations
:vartype web_application_firewall_policies: azure.mgmt.network.v2019_08_01.aio.operations.WebApplicationFirewallPoliciesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The subscription credentials which uniquely identify the Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = NetworkManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.application_gateways = ApplicationGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.application_security_groups = ApplicationSecurityGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_delegations = AvailableDelegationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_resource_group_delegations = AvailableResourceGroupDelegationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_service_aliases = AvailableServiceAliasesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.azure_firewalls = AzureFirewallsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.azure_firewall_fqdn_tags = AzureFirewallFqdnTagsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.bastion_hosts = BastionHostsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.ddos_custom_policies = DdosCustomPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.ddos_protection_plans = DdosProtectionPlansOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_endpoint_services = AvailableEndpointServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_authorizations = ExpressRouteCircuitAuthorizationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_peerings = ExpressRouteCircuitPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_connections = ExpressRouteCircuitConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.peer_express_route_circuit_connections = PeerExpressRouteCircuitConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuits = ExpressRouteCircuitsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_service_providers = ExpressRouteServiceProvidersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_cross_connections = ExpressRouteCrossConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_cross_connection_peerings = ExpressRouteCrossConnectionPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_gateways = ExpressRouteGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_connections = ExpressRouteConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_ports_locations = ExpressRoutePortsLocationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_ports = ExpressRoutePortsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_links = ExpressRouteLinksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.firewall_policies = FirewallPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.firewall_policy_rule_groups = FirewallPolicyRuleGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancers = LoadBalancersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_backend_address_pools = LoadBalancerBackendAddressPoolsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_frontend_ip_configurations = LoadBalancerFrontendIPConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.inbound_nat_rules = InboundNatRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_load_balancing_rules = LoadBalancerLoadBalancingRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_outbound_rules = LoadBalancerOutboundRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_network_interfaces = LoadBalancerNetworkInterfacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_probes = LoadBalancerProbesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.nat_gateways = NatGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interfaces = NetworkInterfacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interface_ip_configurations = NetworkInterfaceIPConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interface_load_balancers = NetworkInterfaceLoadBalancersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interface_tap_configurations = NetworkInterfaceTapConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_profiles = NetworkProfilesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_security_groups = NetworkSecurityGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.security_rules = SecurityRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.default_security_rules = DefaultSecurityRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_watchers = NetworkWatchersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.packet_captures = PacketCapturesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.connection_monitors = ConnectionMonitorsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoints = PrivateEndpointsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_private_endpoint_types = AvailablePrivateEndpointTypesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_services = PrivateLinkServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.public_ip_addresses = PublicIPAddressesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.public_ip_prefixes = PublicIPPrefixesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_filters = RouteFiltersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_filter_rules = RouteFilterRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_tables = RouteTablesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.routes = RoutesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.bgp_service_communities = BgpServiceCommunitiesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_endpoint_policies = ServiceEndpointPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_endpoint_policy_definitions = ServiceEndpointPolicyDefinitionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_tags = ServiceTagsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.usages = UsagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_networks = VirtualNetworksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.subnets = SubnetsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.resource_navigation_links = ResourceNavigationLinksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_association_links = ServiceAssociationLinksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_peerings = VirtualNetworkPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_gateways = VirtualNetworkGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_gateway_connections = VirtualNetworkGatewayConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.local_network_gateways = LocalNetworkGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_taps = VirtualNetworkTapsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_routers = VirtualRoutersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_router_peerings = VirtualRouterPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_wans = VirtualWansOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_sites = VpnSitesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_site_links = VpnSiteLinksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_sites_configuration = VpnSitesConfigurationOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_server_configurations = VpnServerConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_hubs = VirtualHubsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.hub_virtual_network_connections = HubVirtualNetworkConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_gateways = VpnGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_connections = VpnConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_site_link_connections = VpnSiteLinkConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_link_connections = VpnLinkConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.p2_svpn_gateways = P2SVpnGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_server_configurations_associated_with_virtual_wan = VpnServerConfigurationsAssociatedWithVirtualWanOperations(
self._client, self._config, self._serialize, self._deserialize)
self.web_application_firewall_policies = WebApplicationFirewallPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "NetworkManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
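
# Illustrative usage (not part of the generated client): a minimal, hedged sketch
# of how this async client might be constructed and used. It assumes the
# azure-identity package's async DefaultAzureCredential and a subscription id
# supplied via the AZURE_SUBSCRIPTION_ID environment variable; the operation
# shown (virtual_networks.list_all) is one of the operation groups wired up in
# __init__ above.
if __name__ == "__main__":
    import asyncio
    import os

    from azure.identity.aio import DefaultAzureCredential

    async def _list_virtual_networks() -> None:
        credential = DefaultAzureCredential()
        # The client is an async context manager; __aexit__ closes the pipeline.
        async with NetworkManagementClient(credential, os.environ["AZURE_SUBSCRIPTION_ID"]) as client:
            async for vnet in client.virtual_networks.list_all():
                print(vnet.name)
        await credential.close()

    asyncio.run(_list_virtual_networks())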
from django.test import TestCase
from django.core.exceptions import PermissionDenied
from django.contrib.auth.models import Group
from hs_access_control.models import PrivilegeCodes
from hs_core import hydroshare
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.tests.utilities import global_reset, is_equal_to_as_set
class T12UserActive(MockIRODSTestCaseMixin, TestCase):
def setUp(self):
super(T12UserActive, self).setUp()
global_reset()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.admin = hydroshare.create_account(
'admin@gmail.com',
username='admin',
first_name='administrator',
last_name='couch',
superuser=True,
groups=[]
)
self.cat = hydroshare.create_account(
'cat@gmail.com',
username='cat',
first_name='not a dog',
last_name='last_name_cat',
superuser=False,
groups=[]
)
self.dog = hydroshare.create_account(
'dog@gmail.com',
username='dog',
first_name='a little arfer',
last_name='last_name_dog',
superuser=False,
groups=[]
)
self.scratching = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.cat,
            title='all about sofas as scratching posts',
metadata=[],
)
self.felines = self.cat.uaccess.create_group(
            title='felines', description="We are the felines")
def test_00_exceptions(self):
"All user routines raise PermissionDenied if user is inactive"
scratching = self.scratching
felines = self.felines
dog = self.dog
cat = self.cat
# turn off active
cat.is_active = False
cat.save()
# all user routines should raise exceptions
with self.assertRaises(PermissionDenied):
cat.uaccess.create_group(title='foo', description="We are the foo")
with self.assertRaises(PermissionDenied):
cat.uaccess.delete_group(felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.view_groups
with self.assertRaises(PermissionDenied):
cat.uaccess.owned_groups
with self.assertRaises(PermissionDenied):
cat.uaccess.owns_group(felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_change_group(felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_view_group(felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_view_group_metadata(felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_change_group_flags(felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_delete_group(felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_share_group(felines, PrivilegeCodes.VIEW)
with self.assertRaises(PermissionDenied):
cat.uaccess.share_group_with_user(
felines, dog, PrivilegeCodes.VIEW)
with self.assertRaises(PermissionDenied):
cat.uaccess.unshare_group_with_user(felines, dog)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_unshare_group_with_user(felines, dog)
with self.assertRaises(PermissionDenied):
cat.uaccess.get_group_unshare_users(felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.view_resources
with self.assertRaises(PermissionDenied):
cat.uaccess.owned_resources
with self.assertRaises(PermissionDenied):
cat.uaccess.edit_resources
with self.assertRaises(PermissionDenied):
cat.uaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
with self.assertRaises(PermissionDenied):
cat.uaccess.owns_resource(scratching)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_change_resource(scratching)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_change_resource_flags(scratching)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_view_resource(scratching)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_delete_resource(scratching)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_share_resource(scratching, PrivilegeCodes.VIEW)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_share_resource_with_group(
scratching, felines, PrivilegeCodes.VIEW)
with self.assertRaises(PermissionDenied):
cat.uaccess.share_resource_with_user(
scratching, dog, PrivilegeCodes.VIEW)
with self.assertRaises(PermissionDenied):
cat.uaccess.unshare_resource_with_user(scratching, dog)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_unshare_resource_with_user(scratching, dog)
with self.assertRaises(PermissionDenied):
cat.uaccess.share_resource_with_group(
scratching, felines, PrivilegeCodes.VIEW)
with self.assertRaises(PermissionDenied):
cat.uaccess.unshare_resource_with_group(scratching, felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_unshare_resource_with_group(scratching, felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.get_resource_unshare_users(scratching)
with self.assertRaises(PermissionDenied):
cat.uaccess.get_resource_unshare_groups(scratching)
def test_01_reporting(self):
"User records disappear when user is inactive"
scratching = self.scratching
felines = self.felines
dog = self.dog
cat = self.cat
cat.uaccess.share_resource_with_user(
scratching, dog, PrivilegeCodes.OWNER)
cat.uaccess.share_group_with_user(felines, dog, PrivilegeCodes.OWNER)
self.assertTrue(
is_equal_to_as_set(
cat.uaccess.get_group_unshare_users(felines), [
cat, dog]))
self.assertTrue(
# cat is the quota holder, so cannot be unshared
is_equal_to_as_set(
cat.uaccess.get_resource_unshare_users(scratching), [dog]))
self.assertTrue(
is_equal_to_as_set(
felines.gaccess.members, [
cat, dog]))
self.assertTrue(is_equal_to_as_set(felines.gaccess.owners, [cat, dog]))
self.assertTrue(
is_equal_to_as_set(
scratching.raccess.view_users, [
cat, dog]))
self.assertTrue(
is_equal_to_as_set(
scratching.raccess.edit_users, [
cat, dog]))
self.assertTrue(
is_equal_to_as_set(
scratching.raccess.owners, [
cat, dog]))
dog.is_active = False
dog.save()
self.assertTrue(
is_equal_to_as_set(
cat.uaccess.get_group_unshare_users(felines),
[]))
self.assertTrue(
is_equal_to_as_set(
cat.uaccess.get_resource_unshare_users(scratching),
[]))
self.assertTrue(is_equal_to_as_set(felines.gaccess.members, [cat]))
self.assertTrue(is_equal_to_as_set(felines.gaccess.owners, [cat]))
self.assertTrue(
is_equal_to_as_set(
scratching.raccess.view_users,
[cat]))
self.assertTrue(
is_equal_to_as_set(
scratching.raccess.edit_users,
[cat]))
self.assertTrue(is_equal_to_as_set(scratching.raccess.owners, [cat]))
"""This class stores all of the samples for training. It is able to
construct randomly selected batches of phi's from the stored history.
"""
import numpy as np
import time
import theano
floatX = theano.config.floatX
class DataSet(object):
"""A replay memory consisting of circular buffers for observed images,
actions, and rewards.
"""
def __init__(self, width, height, rng, max_steps=1000, phi_length=4):
"""Construct a DataSet.
Arguments:
width, height - image size
max_steps - the number of time steps to store
phi_length - number of images to concatenate into a state
rng - initialized numpy random number generator, used to
choose random minibatches
"""
# TODO: Specify capacity in number of state transitions, not
# number of saved time steps.
# Store arguments.
self.width = width
self.height = height
self.max_steps = max_steps
self.phi_length = phi_length
self.rng = rng
# Allocate the circular buffers and indices.
self.imgs = np.zeros((max_steps, height, width), dtype='uint8')
self.actions = np.zeros(max_steps, dtype='int32')
self.rewards = np.zeros(max_steps, dtype=floatX)
self.terminal = np.zeros(max_steps, dtype='bool')
self.bottom = 0
self.top = 0
self.size = 0
def add_sample(self, img, action, reward, terminal):
"""Add a time step record.
Arguments:
img -- observed image
action -- action chosen by the agent
reward -- reward received after taking the action
terminal -- boolean indicating whether the episode ended
after this time step
"""
self.imgs[self.top] = img
self.actions[self.top] = action
self.rewards[self.top] = reward
self.terminal[self.top] = terminal
if self.size == self.max_steps:
self.bottom = (self.bottom + 1) % self.max_steps
else:
self.size += 1
self.top = (self.top + 1) % self.max_steps
def __len__(self):
"""Return an approximate count of stored state transitions."""
# TODO: Properly account for indices which can't be used, as in
# random_batch's check.
return max(0, self.size - self.phi_length)
def last_phi(self):
"""Return the most recent phi (sequence of image frames)."""
indexes = np.arange(self.top - self.phi_length, self.top)
return self.imgs.take(indexes, axis=0, mode='wrap')
def phi(self, img):
"""Return a phi (sequence of image frames), using the last phi_length -
1, plus img.
"""
indexes = np.arange(self.top - self.phi_length + 1, self.top)
phi = np.empty((self.phi_length, self.height, self.width), dtype=floatX)
phi[0:self.phi_length - 1] = self.imgs.take(indexes,
axis=0,
mode='wrap')
phi[-1] = img
return phi
def random_batch(self, batch_size):
"""Return corresponding states, actions, rewards, terminal status, and
next_states for batch_size randomly chosen state transitions.
"""
# Allocate the response.
states = np.zeros((batch_size,
self.phi_length,
self.height,
self.width),
dtype='uint8')
actions = np.zeros((batch_size, 1), dtype='int32')
rewards = np.zeros((batch_size, 1), dtype=floatX)
terminal = np.zeros((batch_size, 1), dtype='bool')
next_states = np.zeros((batch_size,
self.phi_length,
self.height,
self.width),
dtype='uint8')
count = 0
while count < batch_size:
# Randomly choose a time step from the replay memory.
index = self.rng.randint(self.bottom,
self.bottom + self.size - self.phi_length)
initial_indices = np.arange(index, index + self.phi_length)
transition_indices = initial_indices + 1
end_index = index + self.phi_length - 1
# Check that the initial state corresponds entirely to a
# single episode, meaning none but the last frame may be
# terminal. If the last frame of the initial state is
# terminal, then the last frame of the transitioned state
# will actually be the first frame of a new episode, which
# the Q learner recognizes and handles correctly during
# training by zeroing the discounted future reward estimate.
if np.any(self.terminal.take(initial_indices[0:-1], mode='wrap')):
continue
# Add the state transition to the response.
states[count] = self.imgs.take(initial_indices, axis=0, mode='wrap')
actions[count] = self.actions.take(end_index, mode='wrap')
rewards[count] = self.rewards.take(end_index, mode='wrap')
terminal[count] = self.terminal.take(end_index, mode='wrap')
next_states[count] = self.imgs.take(transition_indices,
axis=0,
mode='wrap')
count += 1
return states, actions, rewards, next_states, terminal
# TESTING CODE BELOW THIS POINT...
def simple_tests():
np.random.seed(222)
dataset = DataSet(width=2, height=3,
rng=np.random.RandomState(42),
max_steps=6, phi_length=4)
for i in range(10):
img = np.random.randint(0, 256, size=(3, 2))
action = np.random.randint(16)
reward = np.random.random()
terminal = False
if np.random.random() < .05:
terminal = True
print 'img', img
dataset.add_sample(img, action, reward, terminal)
print "I", dataset.imgs
print "A", dataset.actions
print "R", dataset.rewards
print "T", dataset.terminal
print "SIZE", dataset.size
print
print "LAST PHI", dataset.last_phi()
print
print 'BATCH', dataset.random_batch(2)
def speed_tests():
dataset = DataSet(width=80, height=80,
rng=np.random.RandomState(42),
max_steps=20000, phi_length=4)
img = np.random.randint(0, 256, size=(80, 80))
action = np.random.randint(16)
reward = np.random.random()
start = time.time()
for i in range(100000):
terminal = False
if np.random.random() < .05:
terminal = True
dataset.add_sample(img, action, reward, terminal)
print "samples per second: ", 100000 / (time.time() - start)
start = time.time()
for i in range(200):
a = dataset.random_batch(32)
print "batches per second: ", 200 / (time.time() - start)
print dataset.last_phi()
def trivial_tests():
dataset = DataSet(width=2, height=1,
rng=np.random.RandomState(42),
max_steps=3, phi_length=2)
img1 = np.array([[1, 1]], dtype='uint8')
img2 = np.array([[2, 2]], dtype='uint8')
img3 = np.array([[3, 3]], dtype='uint8')
dataset.add_sample(img1, 1, 1, False)
dataset.add_sample(img2, 2, 2, False)
dataset.add_sample(img3, 2, 2, True)
print "last", dataset.last_phi()
print "random", dataset.random_batch(1)
def max_size_tests():
dataset1 = DataSet(width=3, height=4,
rng=np.random.RandomState(42),
max_steps=10, phi_length=4)
dataset2 = DataSet(width=3, height=4,
rng=np.random.RandomState(42),
max_steps=1000, phi_length=4)
for i in range(100):
img = np.random.randint(0, 256, size=(4, 3))
action = np.random.randint(16)
reward = np.random.random()
terminal = False
if np.random.random() < .05:
terminal = True
dataset1.add_sample(img, action, reward, terminal)
dataset2.add_sample(img, action, reward, terminal)
np.testing.assert_array_almost_equal(dataset1.last_phi(),
dataset2.last_phi())
print "passed"
def test_memory_usage_ok():
import memory_profiler
dataset = DataSet(width=80, height=80,
rng=np.random.RandomState(42),
max_steps=100000, phi_length=4)
last = time.time()
for i in xrange(1000000000):
if (i % 100000) == 0:
print i
dataset.add_sample(np.random.random((80, 80)), 1, 1, False)
if i > 200000:
states, actions, rewards, next_states, terminals = \
dataset.random_batch(32)
if (i % 10007) == 0:
print time.time() - last
mem_usage = memory_profiler.memory_usage(-1)
print len(dataset), mem_usage
last = time.time()
def main():
speed_tests()
test_memory_usage_ok()
max_size_tests()
simple_tests()
if __name__ == "__main__":
main()
#!/usr/bin/python
#
# portchecker.py
#
# Brian Bolander
#
#
import socket
import sys
import re
import pdb
import getopt
import os
"""
_______________________________________________________________________________
readportsfile
_______________________________________________________________________________
"""
def readportsfile(filename):
"""
Read the file that contains the information about the ports and return
a data structure with the following format.
ports = {
# hostname port name port
'codex': {
'telnet': 22,
'webserver': 80
},
'etmessrv01': {
'asadmin': 4848,
'webserver': 80
},
}
File format: One port per line...
<hostname> <port name (one word, non-whitespace characters)> <port number>
<hostname> <port name (one word, non-whitespace characters)> <port number>
.
.
.
filename: The name of the file to be read.
"""
debug = False
ports = dict()
hostname = ""
portnumber = ""
portname = ""
#pdb.set_trace()
try:
portsfile = open(portscpath + filename, 'r')
except IOError:
try:
portsfile = open(filename, 'r')
except IOError, e:
print "Error opening file: %s" % e
sys.exit(2)
for line in portsfile:
line = line.rstrip("\n")
#print line
#if re.match('^[^#]\w*\W*\w*\W*\d*$', line) and line:
if re.match('^[^#]\S*\W*\w*\W*\d*$', line) and line:
hostname, portname, portnumber = line.split()
if debug:
print
print "Host:\t%s" % hostname
print "Descr:\t%s" % portname
print "Port:\t%s" % portnumber
if ports.has_key(hostname):
ports[hostname][portname] = int(portnumber)
else:
ports[hostname] = {portname: int(portnumber)}
else:
if re.match('^[^#].*$', line) and line:
print "Format check failed for line:"
print line
return(ports)
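# Illustrative example, mirroring the hypothetical hosts in the docstring above:
# a ports file containing
#
#     codex       telnet     22
#     codex       webserver  80
#     etmessrv01  asadmin    4848
#     etmessrv01  webserver  80
#
# is parsed by readportsfile into
#
#     {'codex': {'telnet': 22, 'webserver': 80},
#      'etmessrv01': {'asadmin': 4848, 'webserver': 80}}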
"""
_______________________________________________________________________________
portping
_______________________________________________________________________________
"""
def portping(hostname, port):
"""
Ping a port and return the result in status string.
hostname: The hostname of the machine.
port: The port number.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
status = "success"
try:
s.connect((hostname, port))
    # socket.timeout is a subclass of socket.error, so it must be caught before
    # the generic handler; otherwise a timeout would be reported as "error".
    except socket.timeout, e:
        status = "timeout: %s" % e[0]
    except socket.gaierror, e:
        status = "gaierror: %s" % e[1]
    except socket.error, e:
        status = "error: %s" % e[0]
except:
status = "portping: Unknown error."
s.close()
return(status)
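# Example (hypothetical host): portping("codex", 22) returns "success" when the
# TCP connection opens, otherwise a short error string; a firewall drop surfaces
# as a status containing "timed out", which is what the -f mode below matches.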
"""
_______________________________________________________________________________
usage
_______________________________________________________________________________
"""
def usage():
print "portchecker -c <configuration file> [-f]"
print " %-24s" % "-c <config file>",
print "Name of the configuration file located in /usr/share/portchecker/conf"
print " %-24s" % "-f",
print "Firewall check; only throw an error if there is a timeout."
print " %-24s" % "-l",
print "List the configuration files located in /usr/share/portchecker/conf."
"""
_______________________________________________________________________________
printport
_______________________________________________________________________________
"""
def printport(hostname, portname):
print "%-24s" % hostname,
print "%-24s" % portname,
print "%-6d" % ports[hostname][portname],
print "\t",
print "%s" % status
"""
_______________________________________________________________________________
listconfigs
_______________________________________________________________________________
"""
def listconfigs():
print "Config files located in %s:" % portscpath
filenames = os.listdir(portscpath)
for filename in filenames:
print "\t%s" % filename
"""
_______________________________________________________________________________
Main
_______________________________________________________________________________
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "c:fl")
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
portscpath = "/usr/share/portchecker/"
portsfile = "ports.txt"
mode = "NORMAL"
for opt, arg in opts:
if opt == "-c":
portsfile = arg
elif opt == "-f":
mode = "FW_CHECK"
elif opt == "-l":
listconfigs()
sys.exit()
ports = readportsfile(portsfile)
for hostname in ports:
for portname in ports[hostname]:
status = portping(hostname, ports[hostname][portname])
if mode == "NORMAL":
printport(hostname, portname)
elif mode == "FW_CHECK" and re.match('^.*timed out.*$', status) :
printport(hostname, portname)
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
@Create: 2016MMDD
@LastUpdate: 2016MMDD HH:MM:SS
@Version: 0.0
"""
from os.path import abspath, sep, join, exists
from os import mkdir, statvfs
from sys import argv, stdout
from platform import release, linux_distribution
from subprocess import Popen, PIPE
from json import dump, dumps
from collections import OrderedDict
from platform import node
from time import localtime, strftime
from re import compile
"""Summary of script 'inspect.py'.
self._check_kernel()
self._check_cpu()
self._check_disk()
self._check_umask()
self._check_hostname()
self._check_ip()
# self._check_connectivity()
"""
__version__ = "0.0"
__all__ = []
__author__ = "yyg"
class ToolBox(object):
@classmethod
def _convert2kilobyte(cls, num):
return (num / 1024)
@classmethod
def _convert2megabyte(cls, num):
return cls._convert2kilobyte(num) / 1024
@classmethod
def _convert2gigabyte(cls, num):
return cls._convert2megabyte(num) / 1024
@classmethod
def _convert2terabyte(cls, num):
return cls._convert2gigabyte(num) / 1024
@classmethod
def convert(cls, num):
if cls._convert2terabyte(num) > 1:
return str(cls._convert2terabyte(num)) + "T"
elif cls._convert2gigabyte(num) > 1:
return str(cls._convert2gigabyte(num)) + "G"
elif cls._convert2megabyte(num) > 1:
return str(cls._convert2megabyte(num)) + "MB"
elif cls._convert2kilobyte(num) > 1:
return str(cls._convert2kilobyte(num)) + "KB"
else:
return str(num) + "B"
@classmethod
def isnotcomment(cls, line):
if not line.strip().startswith("#"):
return True
else:
return False
class Inspector(object):
def __init__(self, hostname):
self.__report = None
self.__hostname = hostname
@property
def report(self):
return self.__report
@property
def hostname(self):
return self.__hostname
@hostname.setter
def hostname(self, hostname):
self.__hostname = hostname
def setup(self):
self.__report = {
"id": self.hostname,
"linux_info": OrderedDict(),
"netwk_info": OrderedDict(),
"disk_info": OrderedDict(),
"cpu_info": OrderedDict(),
"memory_info": OrderedDict(),
"software_info": OrderedDict()
}
def _check_kernel(self):
"""
:return
self.__report["linux_info"]["kernel"] = "2.6.18-194.3.1.el5"
"""
self.__report["linux_info"]["kernel"] = release()
def _check_os(self):
"""
:return
self.__report["linux_info"]["os"] = "Centos-6.5"
"""
self.__report["linux_info"]["os"] = "-".join(linux_distribution())
def _check_memory(self):
"""
:return
self.__report["memory_info"]["memory"] = "128G"
self.__report["memory_info"]["swap_enable"] = "off"
"""
with open("/proc/meminfo") as meminfo:
for line in meminfo:
if ToolBox.isnotcomment(line) and "MemTotal" in line:
self.__report["memory_info"]["memory"] = ToolBox.convert(
int(line.split(":")[1].strip().split()[0]) * 1024
)
elif ToolBox.isnotcomment(line) and "SwapCached" in line:
if int(line.split(":")[1].strip().split()[0]) > 0:
self.__report["memory_info"]["swap_enable"] = "on"
else:
self.__report["memory_info"]["swap_enable"] = "off"
pass
def _check_cpu(self):
"""
: return
self.__report["cpu_info"]["Processor"] = 32 # logic processors
self.__report["cpu_info"]["cores"] = 16 # physical cores
self.__report["cpu_info"]["siblings"] = 16 # siblings
self.__report["cpu_info"]["HyperThreadingEnable"] = "on"
"""
self.__report["cpu_info"]["processors"] = 0
self.__report["cpu_info"]["cores"] = 0
self.__report["cpu_info"]["siblings"] = 0
self.__report["cpu_info"]["hyperthreading_enable"] = None
with open("/proc/cpuinfo") as cpu_info:
for line in cpu_info:
if ToolBox.isnotcomment(line):
if "processor" in line:
self.__report["cpu_info"]["processors"] += 1
if "cpu cores" in line:
self.__report["cpu_info"]["cores"] = line.split(":")[1].strip()
if "siblings" in line:
self.__report["cpu_info"]["siblings"] = line.split(":")[1].strip()
self.__report["cpu_info"]["processors"] = str(
self.__report["cpu_info"]["processors"])
if self.__report["cpu_info"]["siblings"] == self.__report["cpu_info"]["cores"]:
self.__report["cpu_info"]["hyperthreading_enable"] = "on"
else:
self.__report["cpu_info"]["hyperthreading_enable"] = "off"
def _check_disk(self):
"""
        : return
            self.__report["disk_info"]["disk_1"] = "/data1=100G-20G(20%)=80G"
            self.__report["disk_info"]["disk_num"] = 10
"""
_cmd = "df -h"
_index = 1
_disk = Popen(args=_cmd, shell=True, stdout=PIPE, stderr=PIPE)
_stdout = _disk.stdout.readlines()
_stderr = _disk.stderr.readlines()
for line in _stdout:
if ToolBox.isnotcomment(line) and "/dev/" in line:
line = line.split()
self.__report["disk_info"]["disk_%d" % _index] = (
"%s=%s-%s(%s)=%s" % (line[5], line[1], line[2], line[4], line[3])
)
_index += 1
self.__report["disk_info"]["disk_num"] = _index - 1
def _check_io(self):
"""
: return
self.__report["disk_1_io"] = "200MB/s"
"""
pass
# for each disk mounted, test its I/O
def _check_ip(self):
"""
: return
self.__report["netwk_*"] =
"""
_cmd = "ip a"
_index = 1
_ifconfig = Popen(args=_cmd, shell=True, stdout=PIPE, stderr=PIPE)
_stdout = _ifconfig.stdout.readlines()
_stderr = _ifconfig.stderr.readlines()
for line in _stdout:
if "inet" in line and "inet6" not in line:
line = line.split()
for col in line:
if "/" in col and "127" not in col:
self.__report["netwk_info"]["netwk_ip_%d" % _index] = col.split("/")[0]
_index += 1
def _check_connectivity(self):
"""
: return
self.__report["netwk_info"]["FailureConnectivity"] = "110.222.222.22,
111.22.223.22"
"""
pass
def _check_bandwidth(self):
"""
: return
self._report["netwk_info"]["netwk_ip_1_bandwidth"]
"""
pass
def _check_umask(self):
_cmd = "su - root -c 'umask'"
_umask = Popen(args=_cmd, stdout=PIPE, stderr=PIPE, shell=True)
_stdout = _umask.stdout.readlines()
_stderr = _umask.stderr.readlines()
self.__report["linux_info"]["umask"] = _stdout[0].strip()
def _check_hostname(self):
"""
: return
self.__report["hostname"] = "hadoop001"
"""
self.__report["linux_info"]["hostname"] = node()
def _check_iptables(self):
"""
Turn off iptable before check
: return
self.__report["IptableStatus"] = "off"
"""
_cmd_turnoff = ["service iptables stop", "chkconfig iptables off"]
# With shell=True each command must be passed as a single string, so run them one by one.
for _cmd_off in _cmd_turnoff:
Popen(_cmd_off, shell=True, stdout=PIPE, stderr=PIPE).communicate()
print("run '%s'" % _cmd_off)
_cmd = "service iptables status"
_iptables_status = Popen(_cmd, shell=True, stdout=PIPE)
for line in _iptables_status.stdout.readlines():
if "Firewall is not running" in line:
self.__report["linux_info"]["iptable_status"] = "off"
else:
self.__report["linux_info"]["iptable_status"] = "on"
def _check_selinux(self):
"""
: return
self.__report["SelinuxStatus"] = "disabled"
"""
_cmd_selinux = "getenforce"
_result = Popen(_cmd_selinux, shell=True, stdout=PIPE)
self.__report["linux_info"]["selinux_status"] = _result.stdout.readlines()[0].strip()
def _check_desktop(self):
"""
: return
self.__report["DesktopStatus"] = "off"
"""
with open("/etc/inittab") as init:
# any() is required here; a bare generator expression is always truthy.
if any(ToolBox.isnotcomment(line) and "3" in line for line in init):
self.__report["linux_info"]["desktop_status"] = "off"
else:
self.__report["linux_info"]["desktop_status"] = "on"
def _check_openssh(self):
"""
: return
self.__report[
"openssh"] = "openssh-clients-5.3p1-118.1.el6_8.x86_64"
openssh-clients-5.3p1-118.1.el6_8.x86_64
openssh-server-5.3p1-118.1.el6_8.x86_64
openssh-5.3p1-118.1.el6_8.x86_64
"""
cmd = "rpm -qa|grep openssh"
pattern = compile(r"openssh-\d.*")
_result = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
for line in _result.stdout.readlines():
if pattern.match(line):
self.__report["software_info"]["openssh"] = line.strip("\n")
def _check_openssl(self):
cmd = "rpm -qa|grep openssl"
pattern = compile(r"openssl-\d.*")
_result = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
for line in _result.stdout.readlines():
if pattern.match(line):
self.__report["software_info"]["openssl"] = line.strip("\n")
def _check_jdk(self):
"""
:return
self.__report["software_info"]["jdk"] = "1.7.0_67"
java version "1.7.0_67"
Java(TM) SE Runtime Environment (build 1.7.0_67-b01)
Java HotSpot(TM) 64-Bit Server VM (build 24.65-b04, mixed mode)
"""
_jdk = "java -version"
_result = Popen(_jdk, shell=True, stderr=PIPE, stdout=PIPE)
self.__report["software_info"]["jdk"] = "None"
for line in _result.stdout.readlines():
if "java version" in line:
self.__report["software_info"]["jdk"] = line.split()[2].strip("\"")
def _check_clock(self):
"""
: return
self.__report["Time"] = "2016/12/5 20:19:22"
"""
self.__report["linux_info"]["time"] = strftime("%Z-%Y-%m-%d %H:%M:%S", localtime())
def _check_nofile(self):
"""
: return
self.__report["openfile"] = 65536
"""
cmd = "ulimit -n"
_result = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
self.__report["linux_info"]["nofile"] = _result.stdout.readlines()[0].strip("\n")
def _check_nproc(self):
"""
:return
self.__report["linux_info"]["nproc"] = 1024
"""
cmd = "ulimit -u"
_result = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
self.__report["linux_info"]["nproc"] = _result.stdout.readlines()[0].strip("\n")
def _check_hugetable(self):
"""
: return
self.__report["linux_info"]["hugetable"] = "off"
"""
pass
def _collect(self):
self.setup()
self._check_kernel()
self._check_os()
self._check_memory()
self._check_cpu()
self._check_disk()
# self._check_io()
self._check_ip()
# self._check_connectivity()
# self._check_bandwidth()
self._check_umask()
self._check_hostname()
self._check_iptables()
self._check_selinux()
self._check_desktop()
self._check_openssh()
self._check_openssl()
self._check_jdk()
self._check_clock()
self._check_nofile()
self._check_nproc()
# self._check_hugetable()
def run(self, path):
self._collect()
path = abspath(path)
if not exists(path):
mkdir(path)
_report_path = join(path, (self.__hostname + ".json"))
with open(_report_path, "w") as _report_file:
dump(self.__report, _report_file, encoding='utf-8')
print(dumps(self.__report, indent=1))
if __name__ == "__main__":
inspector = Inspector(argv[1])
inspector.run(argv[2])
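# Hypothetical invocation (assumed from the argv usage above, not stated in the
# source): the first argument is the hostname used as the report id, the second
# is the output directory where "<hostname>.json" is written, e.g.
#   python inspector.py hadoop001 /tmp/reports
# (the file name "inspector.py" is an assumption for illustration only)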
|
|
#! /usr/bin/env python3
import imp
import os
import sys
import subprocess
import setuptools
NAME = 'Orange'
VERSION = '3.2'
ISRELEASED = False
DESCRIPTION = 'Orange, a component-based data mining framework.'
README_FILE = os.path.join(os.path.dirname(__file__), 'README.md')
LONG_DESCRIPTION = open(README_FILE).read()
AUTHOR = 'Bioinformatics Laboratory, FRI UL'
AUTHOR_EMAIL = 'contact@orange.biolab.si'
URL = 'http://orange.biolab.si/'
DOWNLOAD_URL = 'https://bitbucket.org/biolab/orange/downloads'
LICENSE = 'GPLv3'
KEYWORDS = (
'data mining',
'machine learning',
'artificial intelligence',
)
CLASSIFIERS = (
'Development Status :: 4 - Beta',
'Environment :: X11 Applications :: Qt',
'Environment :: Console',
'Environment :: Plugins',
'Programming Language :: Python',
'Framework :: Orange',
'License :: OSI Approved :: '
'GNU General Public License v3 or later (GPLv3+)',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
)
INSTALL_REQUIRES = (
'setuptools',
'numpy>=1.9.0',
'scipy',
'bottlechest',
'scikit-learn>=0.16',
'chardet>=2.3.0', # encoding detection
'xlrd>=0.9.2', # reading Excel files
'docutils', # parsing docs for addon installation
)
if sys.version_info < (3, 4):
INSTALL_REQUIRES = INSTALL_REQUIRES + ("singledispatch",)
ENTRY_POINTS = {
"orange.canvas.help": (
"html-index = Orange.widgets:WIDGET_HELP_PATH",)
}
# Return the git revision as a string
def git_version():
"""Return the git revision as a string.
Copied from numpy setup.py
"""
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
def write_version_py(filename='Orange/version.py'):
# Copied from numpy setup.py
cnt = """
# THIS FILE IS GENERATED FROM ORANGE SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
short_version += ".dev"
"""
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('Orange/version.py'):
# must be a source distribution, use existing version file
version = imp.load_source("Orange.version", "Orange/version.py")
GIT_REVISION = version.git_revision
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
from numpy.distutils.core import setup
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('Orange')
config.get_version('Orange/version.py') # sets config.version
return config
PACKAGES = [
"Orange",
"Orange.canvas",
"Orange.canvas.application",
"Orange.canvas.application.tutorials",
"Orange.canvas.canvas",
"Orange.canvas.canvas.items",
"Orange.canvas.document",
"Orange.canvas.gui",
"Orange.canvas.help",
"Orange.canvas.preview",
"Orange.canvas.registry",
"Orange.canvas.scheme",
"Orange.canvas.styles",
"Orange.canvas.utils",
"Orange.canvas.report",
"Orange.classification",
"Orange.clustering",
"Orange.data",
"Orange.data.sql",
"Orange.distance",
"Orange.evaluation",
"Orange.misc",
"Orange.preprocess",
"Orange.projection",
"Orange.regression",
"Orange.statistics",
"Orange.testing",
"Orange.widgets",
"Orange.widgets.data",
"Orange.widgets.visualize",
"Orange.widgets.classify",
"Orange.widgets.regression",
"Orange.widgets.evaluate",
"Orange.widgets.unsupervised",
"Orange.widgets.utils",
"Orange.widgets.utils.plot",
"Orange.widgets.utils.plot.primitives"
]
PACKAGE_DATA = {
"Orange": ["datasets/*.{}".format(ext)
for ext in ["tab", "csv", "basket", "info"]],
"Orange.canvas": ["icons/*.png", "icons/*.svg"],
"Orange.canvas.styles": ["*.qss", "orange/*.svg"],
"Orange.canvas.application.tutorials": ["*.ows"],
"Orange.canvas.report": ["icons/*.svg", "*.html"],
"Orange.widgets": ["icons/*.png", "icons/*.svg"],
"Orange.widgets.data": ["icons/*.svg", "icons/paintdata/*.png", "icons/paintdata/*.svg"],
"Orange.widgets.visualize": ["icons/*.svg"],
"Orange.widgets.classify": ["icons/*.svg"],
"Orange.widgets.regression": ["icons/*.svg"],
"Orange.widgets.evaluate": ["icons/*.svg"],
"Orange.widgets.unsupervised": ["icons/*.svg"],
"Orange.widgets.plot": ["*.fs", "*.gs", "*.vs"],
"Orange.widgets.plot.primitives": ["*.obj"],
}
def setup_package():
write_version_py()
setup(
configuration=configuration,
name=NAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
keywords=KEYWORDS,
classifiers=CLASSIFIERS,
packages=PACKAGES,
package_data=PACKAGE_DATA,
install_requires=INSTALL_REQUIRES,
entry_points=ENTRY_POINTS,
zip_safe=False,
include_package_data=True,
test_suite='Orange.tests.test_suite',
)
if __name__ == '__main__':
setup_package()
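# Hedged usage note (not part of the original setup.py): with numpy.distutils
# driving the build, the standard distutils commands apply, e.g.
#   python setup.py build
#   python setup.py install
# write_version_py() runs first, so Orange/version.py is regenerated on every build.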
|
|
# Copyright (c) 2016 by Kaminario Technologies, Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for kaminario driver."""
import mock
from oslo_utils import units
import time
from cinder import context
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder import utils
from cinder.volume import configuration
from cinder.volume.drivers.kaminario import kaminario_common
from cinder.volume.drivers.kaminario import kaminario_fc
from cinder.volume.drivers.kaminario import kaminario_iscsi
from cinder.volume import utils as vol_utils
CONNECTOR = {'initiator': 'iqn.1993-08.org.debian:01:12aa12aa12aa',
'ip': '192.168.2.5', 'platform': 'x86_64', 'host': 'test-k2',
'wwpns': ['12341a2a00001234', '12341a2a00001235'],
'wwnns': ['12351a2a00001234', '12361a2a00001234'],
'os_type': 'linux2', 'multipath': False}
class FakeK2Obj(object):
id = 548
lun = 548
class FakeSaveObject(FakeK2Obj):
def __init__(self, *args, **kwargs):
self.ntype = kwargs.get('ntype')
self.ip_address = '10.0.0.1'
self.iscsi_qualified_target_name = "xyztlnxyz"
self.snapshot = FakeK2Obj()
self.name = 'test'
self.pwwn = '50024f4053300300'
self.volume_group = self
self.is_dedup = True
self.size = units.Mi
self.replication_status = None
self.state = 'in_sync'
self.generation_number = 548
self.current_role = 'target'
self.current_snapshot_progress = 100
self.current_snapshot_id = None
def refresh(self):
return
def save(self):
return FakeSaveObject()
def delete(self):
return None
class FakeSaveObjectExp(FakeSaveObject):
def save(self):
raise exception.KaminarioCinderDriverException("test")
def delete(self):
raise exception.KaminarioCinderDriverException("test")
class FakeSearchObject(object):
hits = [FakeSaveObject()]
total = 1
def __init__(self, *args):
if args and "mappings" in args[0]:
self.total = 0
class FakeSearchObjectExp(object):
hits = [FakeSaveObjectExp()]
total = 1
class FakeKrest(object):
def search(self, *args, **argv):
return FakeSearchObject(*args)
def new(self, *args, **argv):
return FakeSaveObject()
class FakeKrestException(object):
def search(self, *args, **argv):
return FakeSearchObjectExp()
def new(self, *args, **argv):
return FakeSaveObjectExp()
class Replication(object):
backend_id = '10.0.0.1'
login = 'login'
password = 'password'
rpo = 500
class TestKaminarioISCSI(test.TestCase):
driver = None
conf = None
def setUp(self):
self._setup_config()
self._setup_driver()
super(TestKaminarioISCSI, self).setUp()
self.context = context.get_admin_context()
self.vol = fake_volume.fake_volume_obj(self.context)
self.vol.volume_type = fake_volume.fake_volume_type_obj(self.context)
self.vol.volume_type.extra_specs = {'foo': None}
self.snap = fake_snapshot.fake_snapshot_obj(self.context)
self.snap.volume = self.vol
def _setup_config(self):
self.conf = mock.Mock(spec=configuration.Configuration)
self.conf.kaminario_dedup_type_name = "dedup"
self.conf.volume_dd_blocksize = 2
def _setup_driver(self):
self.driver = (kaminario_iscsi.
KaminarioISCSIDriver(configuration=self.conf))
device = mock.Mock(return_value={'device': {'path': '/dev'}})
self.driver._connect_device = device
self.driver.client = FakeKrest()
def test_create_volume(self):
"""Test create_volume."""
result = self.driver.create_volume(self.vol)
self.assertIsNone(result)
def test_create_volume_with_exception(self):
"""Test create_volume_with_exception."""
self.driver.client = FakeKrestException()
self.assertRaises(exception.KaminarioCinderDriverException,
self.driver.create_volume, self.vol)
def test_delete_volume(self):
"""Test delete_volume."""
result = self.driver.delete_volume(self.vol)
self.assertIsNone(result)
def test_delete_volume_with_exception(self):
"""Test delete_volume_with_exception."""
self.driver.client = FakeKrestException()
self.assertRaises(exception.KaminarioCinderDriverException,
self.driver.delete_volume, self.vol)
def test_create_snapshot(self):
"""Test create_snapshot."""
self.snap.id = "253b2878-ec60-4793-ad19-e65496ec7aab"
self.driver.client.new = mock.Mock()
result = self.driver.create_snapshot(self.snap)
self.assertIsNone(result)
fake_object = self.driver.client.search().hits[0]
self.driver.client.new.assert_called_once_with(
"snapshots",
short_name='cs-253b2878-ec60-4793-ad19-e65496ec7aab',
source=fake_object, retention_policy=fake_object,
is_auto_deleteable=False)
def test_create_snapshot_with_exception(self):
"""Test create_snapshot_with_exception."""
self.driver.client = FakeKrestException()
self.assertRaises(exception.KaminarioCinderDriverException,
self.driver.create_snapshot, self.snap)
def test_delete_snapshot(self):
"""Test delete_snapshot."""
result = self.driver.delete_snapshot(self.snap)
self.assertIsNone(result)
def test_delete_snapshot_with_exception(self):
"""Test delete_snapshot_with_exception."""
self.driver.client = FakeKrestException()
self.assertRaises(exception.KaminarioCinderDriverException,
self.driver.delete_snapshot, self.snap)
@mock.patch.object(utils, 'brick_get_connector_properties')
@mock.patch.object(vol_utils, 'copy_volume')
def test_create_volume_from_snapshot(self, mock_copy_volume,
mock_brick_get):
"""Test create_volume_from_snapshot."""
mock_brick_get.return_value = CONNECTOR
mock_copy_volume.return_value = None
self.driver._kaminario_disconnect_volume = mock.Mock()
result = self.driver.create_volume_from_snapshot(self.vol, self.snap)
self.assertIsNone(result)
@mock.patch.object(utils, 'brick_get_connector_properties')
@mock.patch.object(vol_utils, 'copy_volume')
def test_create_volume_from_snapshot_with_exception(self, mock_copy_volume,
mock_brick_get):
"""Test create_volume_from_snapshot_with_exception."""
mock_brick_get.return_value = CONNECTOR
mock_copy_volume.return_value = None
self.driver.client = FakeKrestException()
self.assertRaises(exception.KaminarioCinderDriverException,
self.driver.create_volume_from_snapshot, self.vol,
self.snap)
@mock.patch.object(utils, 'brick_get_connector_properties')
@mock.patch.object(vol_utils, 'copy_volume')
def test_create_cloned_volume(self, mock_copy_volume, mock_brick_get):
"""Test create_cloned_volume."""
mock_brick_get.return_value = CONNECTOR
mock_copy_volume.return_value = None
self.driver._kaminario_disconnect_volume = mock.Mock()
result = self.driver.create_cloned_volume(self.vol, self.vol)
self.assertIsNone(result)
@mock.patch.object(utils, 'brick_get_connector_properties')
@mock.patch.object(vol_utils, 'copy_volume')
def test_create_cloned_volume_with_exception(self, mock_copy_volume,
mock_brick_get):
"""Test create_cloned_volume_with_exception."""
mock_brick_get.return_value = CONNECTOR
mock_copy_volume.return_value = None
self.driver.terminate_connection = mock.Mock()
self.driver.client = FakeKrestException()
self.assertRaises(exception.KaminarioCinderDriverException,
self.driver.create_cloned_volume, self.vol, self.vol)
def test_extend_volume(self):
"""Test extend_volume."""
new_size = 256
result = self.driver.extend_volume(self.vol, new_size)
self.assertIsNone(result)
def test_extend_volume_with_exception(self):
"""Test extend_volume_with_exception."""
self.driver.client = FakeKrestException()
new_size = 256
self.assertRaises(exception.KaminarioCinderDriverException,
self.driver.extend_volume, self.vol, new_size)
def test_initialize_connection(self):
"""Test initialize_connection."""
conn_info = self.driver.initialize_connection(self.vol, CONNECTOR)
self.assertIn('data', conn_info)
self.assertIn('target_iqn', conn_info['data'])
def test_initialize_connection_with_exception(self):
"""Test initialize_connection_with_exception."""
self.driver.client = FakeKrestException()
self.assertRaises(exception.KaminarioCinderDriverException,
self.driver.initialize_connection, self.vol,
CONNECTOR)
def test_terminate_connection(self):
"""Test terminate_connection."""
result = self.driver.terminate_connection(self.vol, CONNECTOR)
self.assertIsNone(result)
def test_get_lun_number(self):
"""Test _get_lun_number."""
host, host_rs, host_name = self.driver._get_host_object(CONNECTOR)
result = self.driver._get_lun_number(self.vol, host)
self.assertEqual(548, result)
def test_get_volume_object(self):
"""Test _get_volume_object."""
result = self.driver._get_volume_object(self.vol)
self.assertEqual(548, result.id)
def test_get_host_object(self):
"""Test _get_host_object."""
host, host_rs, host_name = self.driver._get_host_object(CONNECTOR)
self.assertEqual(548, host.id)
self.assertEqual(1, host_rs.total)
self.assertEqual('test-k2', host_name)
def test_get_target_info(self):
"""Test get_target_info."""
iscsi_portal, target_iqn = self.driver.get_target_info(self.vol)
self.assertEqual('10.0.0.1:3260', iscsi_portal)
self.assertEqual('xyztlnxyz', target_iqn)
def test_k2_initialize_connection(self):
"""Test k2_initialize_connection."""
result = self.driver.k2_initialize_connection(self.vol, CONNECTOR)
self.assertEqual(548, result)
def test_manage_existing(self):
"""Test manage_existing."""
self.driver._get_replica_status = mock.Mock(return_value=False)
result = self.driver.manage_existing(self.vol, {'source-name': 'test'})
self.assertIsNone(result)
def test_manage_existing_exp(self):
self.driver._get_replica_status = mock.Mock(return_value=True)
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing, self.vol,
{'source-name': 'test'})
def test_manage_existing_get_size(self):
"""Test manage_existing_get_size."""
self.driver.client.search().hits[0].size = units.Mi
result = self.driver.manage_existing_get_size(self.vol,
{'source-name': 'test'})
self.assertEqual(1, result)
def test_get_is_dedup(self):
"""Test _get_is_dedup."""
result = self.driver._get_is_dedup(self.vol.volume_type)
self.assertTrue(result)
def test_get_is_dedup_false(self):
"""Test _get_is_dedup_false."""
specs = {'kaminario:thin_prov_type': 'nodedup'}
self.vol.volume_type.extra_specs = specs
result = self.driver._get_is_dedup(self.vol.volume_type)
self.assertFalse(result)
def test_get_replica_status(self):
"""Test _get_replica_status."""
result = self.driver._get_replica_status(self.vol)
self.assertTrue(result)
def test_create_volume_replica(self):
"""Test _create_volume_replica."""
vg = FakeSaveObject()
rep = Replication()
self.driver.replica = rep
session_name = self.driver.get_session_name('1234567890987654321')
self.assertEqual('ssn-1234567890987654321', session_name)
rsession_name = self.driver.get_rep_name(session_name)
self.assertEqual('rssn-1234567890987654321', rsession_name)
src_ssn = self.driver.client.new("replication/sessions").save()
self.assertEqual('in_sync', src_ssn.state)
result = self.driver._create_volume_replica(self.vol, vg, vg, rep.rpo)
self.assertIsNone(result)
def test_create_volume_replica_exp(self):
"""Test _create_volume_replica_exp."""
vg = FakeSaveObject()
rep = Replication()
self.driver.replica = rep
self.driver.client = FakeKrestException()
self.assertRaises(exception.KaminarioCinderDriverException,
self.driver._create_volume_replica, self.vol,
vg, vg, rep.rpo)
def test_delete_by_ref(self):
"""Test _delete_by_ref."""
result = self.driver._delete_by_ref(self.driver.client, 'volume',
'name', 'message')
self.assertIsNone(result)
def test_failover_volume(self):
"""Test _failover_volume."""
self.driver.target = FakeKrest()
session_name = self.driver.get_session_name('1234567890987654321')
self.assertEqual('ssn-1234567890987654321', session_name)
rsession_name = self.driver.get_rep_name(session_name)
self.assertEqual('rssn-1234567890987654321', rsession_name)
result = self.driver._failover_volume(self.vol)
self.assertIsNone(result)
@mock.patch.object(kaminario_common.KaminarioCinderDriver,
'_check_for_status')
@mock.patch.object(objects.service.Service, 'get_by_args')
def test_failover_host(self, get_by_args, check_status):
"""Test failover_host."""
mock_args = mock.Mock()
mock_args.active_backend_id = '10.0.0.1'
self.vol.replication_status = 'failed-over'
self.driver.configuration.san_ip = '10.0.0.1'
get_by_args.side_effect = [mock_args, mock_args]
self.driver.host = 'host'
volumes = [self.vol, self.vol]
self.driver.replica = Replication()
self.driver.target = FakeKrest()
self.driver.target.search().total = 1
self.driver.client.search().total = 1
backend_ip, res_volumes = self.driver.failover_host(None, volumes)
self.assertEqual('10.0.0.1', backend_ip)
status = res_volumes[0]['updates']['replication_status']
self.assertEqual(fields.ReplicationStatus.FAILED_OVER, status)
# different backend ip
self.driver.configuration.san_ip = '10.0.0.2'
self.driver.client.search().hits[0].state = 'in_sync'
backend_ip, res_volumes = self.driver.failover_host(None, volumes)
self.assertEqual('10.0.0.2', backend_ip)
status = res_volumes[0]['updates']['replication_status']
self.assertEqual(fields.ReplicationStatus.DISABLED, status)
def test_delete_volume_replica(self):
"""Test _delete_volume_replica."""
self.driver.replica = Replication()
self.driver.target = FakeKrest()
session_name = self.driver.get_session_name('1234567890987654321')
self.assertEqual('ssn-1234567890987654321', session_name)
rsession_name = self.driver.get_rep_name(session_name)
self.assertEqual('rssn-1234567890987654321', rsession_name)
res = self.driver._delete_by_ref(self.driver.client, 'volumes',
'test', 'test')
self.assertIsNone(res)
result = self.driver._delete_volume_replica(self.vol, 'test', 'test')
self.assertIsNone(result)
src_ssn = self.driver.client.search("replication/sessions").hits[0]
self.assertEqual('idle', src_ssn.state)
def test_delete_volume_replica_exp(self):
"""Test _delete_volume_replica_exp."""
self.driver.replica = Replication()
self.driver.target = FakeKrestException()
self.driver._check_for_status = mock.Mock()
self.assertRaises(exception.KaminarioCinderDriverException,
self.driver._delete_volume_replica, self.vol,
'test', 'test')
def test_get_is_replica(self):
"""Test get_is_replica."""
result = self.driver._get_is_replica(self.vol.volume_type)
self.assertFalse(result)
def test_get_is_replica_true(self):
"""Test get_is_replica_true."""
self.driver.replica = Replication()
self.vol.volume_type.extra_specs = {'kaminario:replication': 'enabled'}
result = self.driver._get_is_replica(self.vol.volume_type)
self.assertTrue(result)
def test_after_volume_copy(self):
"""Test after_volume_copy."""
result = self.driver.after_volume_copy(None, self.vol,
self.vol.volume_type)
self.assertIsNone(result)
def test_retype(self):
"""Test retype."""
replica_status = self.driver._get_replica_status('test')
self.assertTrue(replica_status)
replica = self.driver._get_is_replica(self.vol.volume_type)
self.assertFalse(replica)
self.driver.replica = Replication()
result = self.driver._add_replication(self.vol)
self.assertIsNone(result)
self.driver.target = FakeKrest()
self.driver._check_for_status = mock.Mock()
result = self.driver._delete_replication(self.vol)
self.assertIsNone(result)
self.driver._delete_volume_replica = mock.Mock()
result = self.driver.retype(None, self.vol,
self.vol.volume_type, None, None)
self.assertTrue(result)
new_vol_type = fake_volume.fake_volume_type_obj(self.context)
new_vol_type.extra_specs = {'kaminario:thin_prov_type': 'nodedup'}
result2 = self.driver.retype(None, self.vol,
new_vol_type, None, None)
self.assertFalse(result2)
def test_add_replication(self):
"""Test _add_replication."""
self.driver.replica = Replication()
result = self.driver._add_replication(self.vol)
self.assertIsNone(result)
def test_delete_replication(self):
"""Test _delete_replication."""
self.driver.replica = Replication()
self.driver.target = FakeKrest()
self.driver._check_for_status = mock.Mock()
result = self.driver._delete_replication(self.vol)
self.assertIsNone(result)
def test_create_failover_volume_replica(self):
"""Test _create_failover_volume_replica."""
self.driver.replica = Replication()
self.driver.target = FakeKrest()
self.driver.configuration.san_ip = '10.0.0.1'
result = self.driver._create_failover_volume_replica(self.vol,
'test', 'test')
self.assertIsNone(result)
def test_create_volume_replica_user_snap(self):
"""Test create_volume_replica_user_snap."""
result = self.driver._create_volume_replica_user_snap(FakeKrest(),
'sess')
self.assertEqual(548, result)
def test_is_user_snap_sync_finished(self):
"""Test _is_user_snap_sync_finished."""
sess_mock = mock.Mock()
sess_mock.refresh = mock.Mock()
sess_mock.generation_number = 548
sess_mock.current_snapshot_id = None
sess_mock.current_snapshot_progress = 100
sess_mock.current_snapshot_id = None
self.driver.snap_updates = [{'tgt_ssn': sess_mock, 'gno': 548,
'stime': time.time()}]
result = self.driver._is_user_snap_sync_finished()
self.assertIsNone(result)
def test_delete_failover_volume_replica(self):
"""Test _delete_failover_volume_replica."""
self.driver.target = FakeKrest()
result = self.driver._delete_failover_volume_replica(self.vol, 'test',
'test')
self.assertIsNone(result)
class TestKaminarioFC(TestKaminarioISCSI):
def _setup_driver(self):
self.driver = (kaminario_fc.
KaminarioFCDriver(configuration=self.conf))
device = mock.Mock(return_value={'device': {'path': '/dev'}})
self.driver._connect_device = device
self.driver.client = FakeKrest()
self.driver._lookup_service = mock.Mock()
def test_initialize_connection(self):
"""Test initialize_connection."""
conn_info = self.driver.initialize_connection(self.vol, CONNECTOR)
self.assertIn('data', conn_info)
self.assertIn('target_wwn', conn_info['data'])
def test_get_target_info(self):
"""Test get_target_info."""
target_wwpn = self.driver.get_target_info(self.vol)
self.assertEqual(['50024f4053300300'], target_wwpn)
def test_terminate_connection(self):
"""Test terminate_connection."""
result = self.driver.terminate_connection(self.vol, CONNECTOR)
self.assertIn('data', result)
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import json
import time
from collections import defaultdict, OrderedDict
import requests
import demjson
LINUX_PRICING_URLS = [
# Deprecated instances (JSON format)
'https://aws.amazon.com/ec2/pricing/json/linux-od.json',
# Previous generation instances (JavaScript file)
'https://a0.awsstatic.com/pricing/1/ec2/previous-generation/linux-od.min.js',
# New generation instances (JavaScript file)
'https://a0.awsstatic.com/pricing/1/ec2/linux-od.min.js'
]
EC2_REGIONS = [
'us-east-1',
'us-west-1',
'us-west-2',
'eu-west-1',
'eu-central-1',
'ap-southeast-1',
'ap-southeast-2',
'ap-northeast-1',
'ap-northeast-2',
'ap-south-1',
'sa-east-1'
]
EC2_INSTANCE_TYPES = [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'cg1.4xlarge',
'g2.2xlarge',
'g2.8xlarge',
'cr1.8xlarge',
'hs1.4xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
't2.micro',
't2.small',
't2.medium',
't2.large',
'x1.32xlarge'
]
# Maps EC2 region name to region name used in the pricing file
REGION_NAME_MAP = {
'us-east': 'ec2_us_east',
'us-east-1': 'ec2_us_east',
'us-west': 'ec2_us_west',
'us-west-1': 'ec2_us_west',
'us-west-2': 'ec2_us_west_oregon',
'eu-west-1': 'ec2_eu_west',
'eu-ireland': 'ec2_eu_west',
'eu-central-1': 'ec2_eu_central',
'apac-sin': 'ec2_ap_southeast',
'ap-southeast-1': 'ec2_ap_southeast',
'apac-syd': 'ec2_ap_southeast_2',
'ap-southeast-2': 'ec2_ap_southeast_2',
'apac-tokyo': 'ec2_ap_northeast',
'ap-northeast-1': 'ec2_ap_northeast',
'ap-northeast-2': 'ec2_ap_northeast',
'ap-south-1': 'ec2_ap_south_1',
'sa-east-1': 'ec2_sa_east',
'us-gov-west-1': 'ec2_us_govwest'
}
INSTANCE_SIZES = [
'micro',
'small',
'medium',
'large',
'xlarge',
'x-large',
'extra-large'
]
RE_NUMERIC_OTHER = re.compile(r'(?:([0-9]+)|([-A-Z_a-z]+)|([^-0-9A-Z_a-z]+))')
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
PRICING_FILE_PATH = os.path.join(BASE_PATH, '../libcloud/data/pricing.json')
PRICING_FILE_PATH = os.path.abspath(PRICING_FILE_PATH)
def scrape_ec2_pricing():
result = defaultdict(OrderedDict)
for url in LINUX_PRICING_URLS:
response = requests.get(url)
if re.match(r'.*?\.json$', url):
data = response.json()
elif re.match(r'.*?\.js$', url):
data = response.content
match = re.match(r'^.*callback\((.*?)\);?$', data,
re.MULTILINE | re.DOTALL)
data = match.group(1)
# demjson supports non-strict mode and can parse unquoted objects
data = demjson.decode(data)
regions = data['config']['regions']
for region_data in regions:
region_name = region_data['region']
libcloud_region_name = REGION_NAME_MAP[region_name]
instance_types = region_data['instanceTypes']
for instance_type in instance_types:
sizes = instance_type['sizes']
for size in sizes:
price = size['valueColumns'][0]['prices']['USD']
if str(price).lower() == 'n/a':
# Price not available
continue
result[libcloud_region_name][size['size']] = float(price)
return result
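# Illustrative shape of the scraped result (the prices below are made up, not
# real data): the outer key is the libcloud region name from REGION_NAME_MAP,
# the inner key is the EC2 size string, and the value is the USD price as a float.
#   {
#       "ec2_us_east": {"m3.medium": 0.067, "m3.large": 0.133},
#       "ec2_eu_west": {"m3.medium": 0.073},
#   }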
def update_pricing_file(pricing_file_path, pricing_data):
with open(pricing_file_path, 'r') as fp:
content = fp.read()
data = json.loads(content)
data['updated'] = int(time.time())
data['compute'].update(pricing_data)
# Always sort the pricing info
data = sort_nested_dict(data)
content = json.dumps(data, indent=4)
lines = content.splitlines()
lines = [line.rstrip() for line in lines]
content = '\n'.join(lines)
with open(pricing_file_path, 'w') as fp:
fp.write(content)
def sort_nested_dict(value):
"""
Recursively sort a nested dict.
"""
result = OrderedDict()
for key, value in sorted(value.items(), key=sort_key_by_numeric_other):
if isinstance(value, (dict, OrderedDict)):
result[key] = sort_nested_dict(value)
else:
result[key] = value
return result
def sort_key_by_numeric_other(key_value):
"""
Split key into numeric, alpha and other part and sort accordingly.
"""
return tuple((
int(numeric) if numeric else None,
INSTANCE_SIZES.index(alpha) if alpha in INSTANCE_SIZES else alpha,
other
) for (numeric, alpha, other) in RE_NUMERIC_OTHER.findall(key_value[0]))
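# Worked example (illustrative): for a (key, value) pair whose key is
# 'm3.2xlarge', RE_NUMERIC_OTHER splits the key into 'm', '3', '.', '2',
# 'xlarge', so the sort key becomes
#   ((None, 'm', ''), (3, '', ''), (None, '', '.'), (2, '', ''), (None, 4, ''))
# where 4 is the index of 'xlarge' in INSTANCE_SIZES. This keeps sizes ordered
# micro < small < ... < xlarge instead of alphabetically.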
def main():
print('Scraping EC2 pricing data')
pricing_data = scrape_ec2_pricing()
update_pricing_file(pricing_file_path=PRICING_FILE_PATH,
pricing_data=pricing_data)
print('Pricing data updated')
if __name__ == '__main__':
main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Storage sensors."""
import os
from datetime import datetime
from typing import Callable, List, Optional, Sequence, Set, Union
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.sensors.base import BaseSensorOperator, poke_mode_only
from airflow.utils.decorators import apply_defaults
class GCSObjectExistenceSensor(BaseSensorOperator):
"""
Checks for the existence of a file in Google Cloud Storage.
:param bucket: The Google Cloud Storage bucket where the object is.
:type bucket: str
:param object: The name of the object to check in the Google cloud
storage bucket.
:type object: str
:param google_cloud_conn_id: The connection ID to use when
connecting to Google Cloud Storage.
:type google_cloud_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = (
'bucket',
'object',
'impersonation_chain',
)
ui_color = '#f0eee4'
@apply_defaults
def __init__(
self,
*,
bucket: str,
object: str, # pylint: disable=redefined-builtin
google_cloud_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket = bucket
self.object = object
self.google_cloud_conn_id = google_cloud_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def poke(self, context: dict) -> bool:
self.log.info('Sensor checks existence of : %s, %s', self.bucket, self.object)
hook = GCSHook(
google_cloud_storage_conn_id=self.google_cloud_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
return hook.exists(self.bucket, self.object)
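# Hedged usage sketch (assumed DAG context, not part of this module): the sensor
# is typically instantiated inside a DAG with the arguments documented above, e.g.
#   wait_for_file = GCSObjectExistenceSensor(
#       task_id='wait_for_file',              # task_id comes from BaseOperator
#       bucket='my-bucket',                   # hypothetical bucket name
#       object='data/2021-01-01/part.csv',    # hypothetical object path
#       google_cloud_conn_id='google_cloud_default',
#   )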
def ts_function(context):
"""
Default callback for the GoogleCloudStorageObjectUpdatedSensor. The default
behaviour is check for the object being updated after execution_date +
schedule_interval.
"""
return context['dag'].following_schedule(context['execution_date'])
class GCSObjectUpdateSensor(BaseSensorOperator):
"""
Checks if an object is updated in Google Cloud Storage.
:param bucket: The Google Cloud Storage bucket where the object is.
:type bucket: str
:param object: The name of the object to download in the Google cloud
storage bucket.
:type object: str
:param ts_func: Callback for defining the update condition. The default callback
returns execution_date + schedule_interval. The callback takes the context
as parameter.
:type ts_func: function
:param google_cloud_conn_id: The connection ID to use when
connecting to Google Cloud Storage.
:type google_cloud_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = (
'bucket',
'object',
'impersonation_chain',
)
ui_color = '#f0eee4'
@apply_defaults
def __init__(
self,
bucket: str,
object: str, # pylint: disable=redefined-builtin
ts_func: Callable = ts_function,
google_cloud_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket = bucket
self.object = object
self.ts_func = ts_func
self.google_cloud_conn_id = google_cloud_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def poke(self, context: dict) -> bool:
self.log.info('Sensor checks existence of : %s, %s', self.bucket, self.object)
hook = GCSHook(
google_cloud_storage_conn_id=self.google_cloud_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
return hook.is_updated_after(self.bucket, self.object, self.ts_func(context))
class GCSObjectsWtihPrefixExistenceSensor(BaseSensorOperator):
"""
Checks for the existence of GCS objects at a given prefix, passing matches via XCom.
When files matching the given prefix are found, the poke method's criteria will be
fulfilled and the matching objects will be returned from the operator and passed
through XCom for downstream tasks.
:param bucket: The Google Cloud Storage bucket where the object is.
:type bucket: str
:param prefix: The name of the prefix to check in the Google cloud
storage bucket.
:type prefix: str
:param google_cloud_conn_id: The connection ID to use when
connecting to Google Cloud Storage.
:type google_cloud_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = (
'bucket',
'prefix',
'impersonation_chain',
)
ui_color = '#f0eee4'
@apply_defaults
def __init__(
self,
bucket: str,
prefix: str,
google_cloud_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket = bucket
self.prefix = prefix
self.google_cloud_conn_id = google_cloud_conn_id
self.delegate_to = delegate_to
self._matches: List[str] = []
self.impersonation_chain = impersonation_chain
def poke(self, context: dict) -> bool:
self.log.info('Sensor checks existence of objects: %s, %s', self.bucket, self.prefix)
hook = GCSHook(
google_cloud_storage_conn_id=self.google_cloud_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
self._matches = hook.list(self.bucket, prefix=self.prefix)
return bool(self._matches)
def execute(self, context: dict) -> List[str]:
"""Overridden to allow matches to be passed"""
super().execute(context)
return self._matches
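# Hedged usage sketch (assumed, not part of this module): because execute()
# returns the matched object names, downstream tasks can read them from XCom, e.g.
#   wait_for_prefix = GCSObjectsWtihPrefixExistenceSensor(
#       task_id='wait_for_prefix',        # hypothetical task id
#       bucket='my-bucket',               # hypothetical bucket name
#       prefix='exports/2021-01-01/',     # hypothetical prefix
#   )
#   # downstream: ti.xcom_pull(task_ids='wait_for_prefix') -> list of object names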
def get_time():
"""
This is just a wrapper of datetime.datetime.now to simplify mocking in the
unittests.
"""
return datetime.now()
@poke_mode_only
class GCSUploadSessionCompleteSensor(BaseSensorOperator):
"""
Checks for changes in the number of objects at prefix in Google Cloud Storage
bucket and returns True if the inactivity period has passed with no
increase in the number of objects. Note, this sensor will not behave correctly
in reschedule mode, as the state of the listed objects in the GCS bucket will
be lost between rescheduled invocations.
:param bucket: The Google Cloud Storage bucket where the objects are expected.
:type bucket: str
:param prefix: The name of the prefix to check in the Google cloud
storage bucket.
:type prefix: str
:param inactivity_period: The total seconds of inactivity to designate
an upload session is over. Note, this mechanism is not real time and
this operator may not return until a poke_interval after this period
has passed with no additional objects sensed.
:type inactivity_period: float
:param min_objects: The minimum number of objects needed for upload session
to be considered valid.
:type min_objects: int
:param previous_objects: The set of object ids found during the last poke.
:type previous_objects: set[str]
:param allow_delete: Should this sensor consider objects being deleted
between pokes valid behavior. If true a warning message will be logged
when this happens. If false an error will be raised.
:type allow_delete: bool
:param google_cloud_conn_id: The connection ID to use when connecting
to Google Cloud Storage.
:type google_cloud_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = (
'bucket',
'prefix',
'impersonation_chain',
)
ui_color = '#f0eee4'
@apply_defaults
def __init__(
self,
bucket: str,
prefix: str,
inactivity_period: float = 60 * 60,
min_objects: int = 1,
previous_objects: Optional[Set[str]] = None,
allow_delete: bool = True,
google_cloud_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket = bucket
self.prefix = prefix
if inactivity_period < 0:
raise ValueError("inactivity_period must be non-negative")
self.inactivity_period = inactivity_period
self.min_objects = min_objects
self.previous_objects = previous_objects if previous_objects else set()
self.inactivity_seconds = 0
self.allow_delete = allow_delete
self.google_cloud_conn_id = google_cloud_conn_id
self.delegate_to = delegate_to
self.last_activity_time = None
self.impersonation_chain = impersonation_chain
self.hook: Optional[GCSHook] = None
def _get_gcs_hook(self) -> Optional[GCSHook]:
if not self.hook:
self.hook = GCSHook(
gcp_conn_id=self.google_cloud_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
return self.hook
def is_bucket_updated(self, current_objects: Set[str]) -> bool:
"""
Checks whether new objects have been uploaded and the inactivity_period
has passed and updates the state of the sensor accordingly.
:param current_objects: set of object ids in bucket during last poke.
:type current_objects: set[str]
"""
current_num_objects = len(current_objects)
if current_num_objects > len(self.previous_objects):
# When new objects arrived, reset the inactivity_seconds
# and update previous_objects for the next poke.
self.log.info(
"New objects found at %s resetting last_activity_time.",
os.path.join(self.bucket, self.prefix),
)
self.log.debug("New objects: %s", "\n".join(current_objects - self.previous_objects))
self.last_activity_time = get_time()
self.inactivity_seconds = 0
self.previous_objects = current_objects
return False
if self.previous_objects - current_objects:
# During the last poke interval objects were deleted.
if self.allow_delete:
self.previous_objects = current_objects
self.last_activity_time = get_time()
self.log.warning(
"""
Objects were deleted during the last
poke interval. Updating the file counter and
resetting last_activity_time.
%s
""",
self.previous_objects - current_objects,
)
return False
raise AirflowException(
"""
Illegal behavior: objects were deleted in {} between pokes.
""".format(
os.path.join(self.bucket, self.prefix)
)
)
if self.last_activity_time:
self.inactivity_seconds = (get_time() - self.last_activity_time).total_seconds()
else:
# Handles the first poke where last inactivity time is None.
self.last_activity_time = get_time()
self.inactivity_seconds = 0
if self.inactivity_seconds >= self.inactivity_period:
path = os.path.join(self.bucket, self.prefix)
if current_num_objects >= self.min_objects:
self.log.info(
"""SUCCESS:
Sensor found %s objects at %s.
Waited at least %s seconds, with no new objects dropped.
""",
current_num_objects,
path,
self.inactivity_period,
)
return True
self.log.error("FAILURE: Inactivity Period passed, not enough objects found in %s", path)
return False
return False
def poke(self, context: dict) -> bool:
return self.is_bucket_updated(
set(self._get_gcs_hook().list(self.bucket, prefix=self.prefix)) # type: ignore[union-attr]
)
|
|
# -*- coding: utf-8 -*-
"""
Base
====
This file defines the individual classes used to build a simulation, including
a ``Cantilever``, ``Transistor`` and ``Experiment``
NOTE: To use units in your own file, import the
unitregistry (``u``) from ``jittermodel``!
Class structure
---------------
These classes are meant to be conceptually simple: plain containers for
parameters, with any additional logic kept beneath the surface.
For example, the cantilever should *only* expose simple attributes.
"""
from __future__ import division
from numpy import pi
from jittermodel import (u, UnitAssigner, NoUnitAssigner,
get_defaults, E_0, k_B, q)
class SimpleCantilever(UnitAssigner):
"""Implement a simple unit cantilever, and require all inputs."""
k_B = k_B
def __init__(self, f_c, k_c, Q):
"""Initialize the cantilever."""
self.f_c = f_c
self.k_c = k_c
self.Q = Q
self._default_units = {'f_c': u.kHz, 'k_c': u.N/u.m,
'Q': u.dimensionless}
self._check_dimensionality_units()
self._check_number_inputs_positive()
# Properties of the cantilever
@property
def omega_c(self):
"""Return the angular resonance frequency of the cantilever."""
return self.f_c * 2 * pi
@property
def Gamma_i(self):
"""Return the cantilever's intrinsic dissipation."""
return (self.k_c / (self.omega_c * self.Q))
def F_min(self, T, bandwidth=1*u.Hz):
"""Return the thermally limited minimum detectable
force (pN).
The optional bandwidth parameter allows determining
a minimum force over a broader or narrower bandwidth
than 1 Hz."""
return ((4 * self.k_B * self.Gamma_i * T * bandwidth) ** 0.5)
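# Illustrative numbers (an assumption, not taken from the source): with the
# default values given to Cantilever below (f_c = 50 kHz, k_c = 3 N/m, Q = 1000),
# Gamma_i = k_c / (omega_c * Q) is about 9.5e-9 kg/s, so at T = 300 K the
# thermally limited F_min = sqrt(4 * k_B * Gamma_i * T) is of order 1e-14 N
# (tens of fN) in a 1 Hz bandwidth.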
# Representations of the cantilever
def __str__(self):
"""Write out the cantilever as its most important parameters:
resonance frequency, spring constant, quality factor and
intrinsic friction."""
return "f_c = {self.f_c}, k_c = {self.k_c}, Q = {self.Q}\
Gamma_i = {self.Gamma_i}".format(self=self)
def __repr__(self):
"""Return a representation of the cantilever. Rounds the cantilever
to 9 digits, so eval(repr(cantilever)) is not necessarily equal to
cantilever."""
return "Cantilever(f_c = {self.f_c}, k_c = {self.k_c}, \
Q = {self.Q})".format(self=self)
class Cantilever(SimpleCantilever):
"""Implement a Cantilever class with support for units."""
def __init__(self, f_c=50*u.kHz, k_c=3*u.N/u.m, Q=1000*u.dimensionless,
R_tip=40*u.nm, L_tip=15*u.um, theta_tip=16*u.degrees,
geometry_c='perpendicular'):
"""Initialize the cantilever."""
self.UnitlessClass = UnitlessCantilever
self.f_c = f_c
self.k_c = k_c
self.Q = Q
self.R_tip = R_tip
self.L_tip = L_tip
self.theta_tip = theta_tip
self.geometry_c = geometry_c
self._default_units = {'f_c': u.kHz, 'k_c': u.N/u.m,
'Q': u.dimensionless, 'R_tip': u.nm,
'L_tip': u.um, 'theta_tip': u.degrees}
self._check_dimensionality_units()
self._check_number_inputs_positive()
self._check_theta_less_than_90()
self._check_geometry()
# Functions to check the inputs to the cantilever.
def _check_geometry(self):
"""Raises an error if the geometry of the cantilever geometry_c
is not either 'perpendicular' or 'parallel'."""
if self.geometry_c not in ('perpendicular', 'parallel'):
raise ValueError("""geometry_c must be either 'perpendicular'\
or 'parallel'""")
def _check_theta_less_than_90(self):
"""Raise a ValueError if theta_tip >= 90 degrees
since this is unphysical."""
if self.theta_tip >= 90 * u.degrees:
raise ValueError("'theta_tip' must be less than 90 degrees.")
# Representations of the cantilever
def __repr__(self):
"""Return a representation of the cantilever. Rounds the cantilever
to 9 digits, so eval(repr(cantilever)) is not necessarily equal to
cantilever."""
return "Cantilever(f_c = {self.f_c}, k_c = {self.k_c}, Q = {self.Q},\
R_tip = {self.R_tip}, L_tip = {self.L_tip},\
theta_tip = {self.theta_tip},\
geometry_c = '{self.geometry_c}')".format(self=self)
class UnitlessCantilever(NoUnitAssigner, Cantilever):
pass
class Experiment(UnitAssigner):
"""Stores parameters set by the experimenter. Now with units!"""
def __init__(self, d=100 * u.nm, V_ts=5 * u.V,
jitter_f_i=0.2 * u.Hz, jitter_f_f=3 * u.Hz):
self.UnitlessClass = UnitlessExperiment
self.d = d
self.V_ts = V_ts
self.jitter_f_i = jitter_f_i
self.jitter_f_f = jitter_f_f
self._default_units = {'d': u.nm, 'V_ts': u.V, 'jitter_f_i': u.Hz,
'jitter_f_f': u.Hz}
self._check_dimensionality_units()
self._check_number_inputs_positive()
# Check for errors in the experimental parameters
if self.V_ts < 0 * u.V:
raise ValueError("The voltage 'V_ts' must be non-negative.")
if self.jitter_f_i > self.jitter_f_f:
raise ValueError("'jitter_f_i' must be less than 'jitter_f_f'.")
def __str__(self):
"""A nice string representation of the experiment."""
return """Tip-sample: {self.d}, {self.V_ts}""".format(self=self)
class UnitlessExperiment(NoUnitAssigner, Experiment):
pass
class Transistor(UnitAssigner):
"""A transistor sample, now with units."""
E_0 = E_0
k_B = k_B
q = q
def __init__(self, semiconductor='TPD',
h=70 * u.nm, h_trans=1 * u.nm, h_i=300 * u.nm,
E_s1=3.5, E_s2=-0.0005,
E_i1=4.65, E_i2=0,
mobility=3e-6 * u.cm ** 2 / u.V / u.s, T=298 * u.K,
V_g=10 * u.V, rho=None):
"""Initialize the sample with all of the experimentally
relevant sample parameters."""
self.UnitlessClass = UnitlessTransistor
self.semiconductor = semiconductor
self.h = h
self.h_trans = h_trans
self.h_i = h_i
self.E_s1 = E_s1
self.E_s2 = E_s2
self.E_i1 = E_i1
self.E_i2 = E_i2
self.mobility = mobility
self.T = T
self._check_V_g_rho_defined(V_g, rho)
self._default_units = {'h': u.nm, 'h_trans': u.nm, 'h_i': u.nm,
'mobility': u.cm ** 2 / u.V / u.s, 'T': u.K,
'V_g': u.V, 'rho': u.cm ** -3}
self._check_dimensionality_units()
self._check_number_inputs_positive()
def _check_V_g_rho_defined(self, V_g, rho):
"""Checks to determine whether one, both, or none of V_g and rho
were given when the sample was initialized, and properly assigns
V_g and rho or throws an error as appropriate."""
default_dict = get_defaults(self.__init__)
if rho is None:
self.V_g = V_g
elif V_g == default_dict['V_g']:
self.rho = rho
else:
raise ValueError("The provided values of 'V_g' and 'rho' are \
incompatible. Only specify one of 'V_g' or 'rho' when defining a Sample.")
@property
def diff(self):
"""Diffusion constant defined according to the Einstein relation."""
return self.mobility * self.k_B * self.T / self.q
@property
def C_i(self):
"""Capacitance per unit area between the
transistor gate and sample."""
return self.E_i1 * self.E_0 / self.h_i
@property
def h_diel(self):
"""Layer of the sample which is acting as a pure dielectric layer."""
return self.h - self.h_trans
@property
def E_s(self):
"""Total dielectric constant, adjusted for conductivity."""
return self.E_s1 + self.E_s2*1j
@property
def E_i(self):
"""Total dielectric constant of the insulator layer,
assuming the same lossy part of the spectrum as the
sample layer."""
return self.E_i1 + self.E_i2*1j
#---------------------------------------------------------
# The gate voltage, charge density pair are defined below.
@property
def V_g(self):
"""Get the gate voltage."""
return self._V_g
@V_g.setter
def V_g(self, value):
"""Set the gate voltage. Updates the semiconductor
carrier density hidden variable _rho to match the
new gate voltage."""
self._V_g = value
self._rho = self.C_i * self._V_g / (self.h_trans * self.q)
@property
def rho(self):
"""Get the semiconductor carrier density rho."""
return self._rho
@rho.setter
def rho(self, value):
"""Set the semiconductor carrier density rho.
Updates the gate voltage hidden variable _V_g to match the new
carrier density."""
self._rho = value
self._V_g = self.q * self._rho * self.h_trans / self.C_i
#---------------------------------------------------------
# Relevant properties derived from gate voltage / charge density.
@property
def sigma(self):
"""The conductivity sigma of the sample."""
return self.mobility * self.rho * self.q
@property
def kappa(self):
"""Define the inverse Debye length screening length kappa,
used in the Lekkala Loring theory. See Lekkala et al.,
p4, at http://dx.doi.org/10.1063/1.4754602."""
return (2 * self.rho * self.q ** 2 /
(self.E_0 * self.k_B * self.T)) ** 0.5
def E_eff(self, omega):
"""Defines the effective dielectric constant,
which corrects for the effect of Ohm's law (carrier drift),
at a particular angular frequency. See Eq. 14 in
Lekkala et al., 2013."""
return self.E_s - self.sigma / (self.E_0 * omega) * 1j
def __str__(self):
"""Write out the sample, using its semiconductor material,
height and mobility."""
return "{self.semiconductor:P} {self.h_nm:P}\
mobility {self.mobility_cm2:P}".format(self=self)
def __repr__(self):
return "Transistor(semiconductor = '{self.semiconductor}', \
h = {self.h}, h_trans = {self.h_trans}, \
h_i = {self.h_i}, E_s1 = {self.E_s1}, \
E_s2 = {self.E_s2}, E_i1 = {self.E_i1}, \
E_i2 = {self.E_i2}, mobility = {self.mobility}, \
T = {self.T}, V_g = {self.V_g})".format(self=self)
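# Usage sketch: V_g and rho are a coupled pair; setting either one updates
# the other through C_i, h_trans and q (the values below are illustrative).
#
#     sample = Transistor(V_g=10 * u.V)
#     sample.rho                      # density implied by the 10 V gate
#     sample.rho = 1e18 * u.cm ** -3  # recomputes the hidden _V_g
#     sample.V_g                      # gate voltage matching the new density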
class UnitlessTransistor(NoUnitAssigner, Transistor):
pass
class BareTransistor(UnitAssigner):
"""A transistor sample, now with units."""
E_0 = E_0
k_B = k_B
q = q
def __init__(self, semiconductor='Si',
h_d=300 * u.nm, h_s=500 * u.um,
E_d1=4.65, E_d2=-0.005,
E_s1=11.65, E_s2=-0.05,
mobility=1 * u.cm ** 2 / u.V / u.s,
T=298 * u.K,
rho=None):
"""Initialize the sample with all of the experimentally
relevant sample parameters."""
        self.UnitlessClass = UnitlessBareTransistor
self.semiconductor = semiconductor
self.h_d = h_d
self.h_s = h_s
self.E_s1 = E_s1
self.E_s2 = E_s2
self.E_d1 = E_d1
self.E_d2 = E_d2
self.mobility = mobility
self.T = T
        self._check_rho_defined(rho)
self._default_units = {'h_d': u.nm, 'h_s': u.nm,
'mobility': u.cm ** 2 / u.V / u.s, 'T': u.K,
'rho': u.cm ** -3}
self._check_dimensionality_units()
self._check_number_inputs_positive()
    def _check_rho_defined(self, rho):
        """BareTransistor has no gate, so the carrier density rho cannot be
        inferred from a gate voltage; it must be given explicitly when the
        sample is initialized."""
        if rho is None:
            raise ValueError("'rho' must be specified when defining a "
                             "BareTransistor.")
        self.rho = rho
@property
def diff(self):
"""Diffusion constant defined according to the Einstein relation."""
return self.mobility * self.k_B * self.T / self.q
@property
def E_s(self):
"""Total dielectric constant, adjusted for conductivity."""
return self.E_s1 + self.E_s2*1j
@property
def E_d(self):
"""Total dielectric constant of the insulator layer,
assuming the same lossy part of the spectrum as the
sample layer."""
return self.E_d1 + self.E_d2*1j
#---------------------------------------------------------
# Relevant properties derived from gate voltage / charge density.
@property
def sigma(self):
"""The conductivity sigma of the sample."""
return self.mobility * self.rho * self.q
@property
def kappa(self):
"""Define the inverse Debye length screening length kappa,
used in the Lekkala Loring theory. See Lekkala et al.,
p4, at http://dx.doi.org/10.1063/1.4754602."""
return (2 * self.rho * self.q ** 2 /
(self.E_0 * self.k_B * self.T)) ** 0.5
def E_eff(self, omega):
"""Defines the effective dielectric constant,
which corrects for the effect of Ohm's law (carrier drift),
at a particular angular frequency. See Eq. 14 in
Lekkala et al., 2013."""
return self.E_s - self.sigma / (self.E_0 * omega) * 1j
def __str__(self):
"""Write out the sample, using its semiconductor material,
height and mobility."""
return "{self.semiconductor:P} {self.h_nm:P}\
mobility {self.mobility_cm2:P}".format(self=self)
def __repr__(self):
return "Transistor(semiconductor = '{self.semiconductor}', \
h = {self.h}, h_trans = {self.h_trans}, \
h_i = {self.h_i}, E_s1 = {self.E_s1}, \
E_s2 = {self.E_s2}, E_i1 = {self.E_i1}, \
E_i2 = {self.E_i2}, mobility = {self.mobility}, \
T = {self.T}, V_g = {self.V_g})".format(self=self)
class UnitlessBareTransistor(NoUnitAssigner, BareTransistor):
pass
|
|
import math
import util
from pattern.en import wordnet
from pattern.en import NOUN, VERB, ADJECTIVE, ADVERB
from textblob import Word
from lexicons import SentiWords
__sentiwords = SentiWords()
ATTENUATORS_ADVERBS = open('groups_of_adverbs/medium_attenuator_adv.txt','r').readlines()
ATTENUATORS_ADVERBS = ATTENUATORS_ADVERBS + open('groups_of_adverbs/strong_attenuator_adv.txt','r').readlines()
ATTENUATORS_ADVERBS = ATTENUATORS_ADVERBS + open('groups_of_adverbs/weak_attenuator_adv.txt','r').readlines()
INTENSIFIERS_ADVERBS = open('groups_of_adverbs/medium_intensifier_adv.txt','r').readlines()
INTENSIFIERS_ADVERBS = INTENSIFIERS_ADVERBS + open('groups_of_adverbs/strong_intensifier_adv.txt','r').readlines()
INTENSIFIERS_ADVERBS = INTENSIFIERS_ADVERBS + open('groups_of_adverbs/weak_intensifier_adv.txt','r').readlines()
NON_GRADING_ADVERBS = open('groups_of_adverbs/non_grading_adv.txt','r').readlines()
def word_polarity(word, pos_tag=None, prior_polarity_score=False, linear_score=None):
"""returns a (polarity, subjectivity)-tuple for the given word from SENTIWORDNET.
    If there are no synsets for the given word, None will be returned
The word can be NOUN, VERB, ADJECTIVE, ADVERB"""
if prior_polarity_score:
return __word_prior_polarity(word, pos_tag, linear_score)
pos_tag = "NOUN" if pos_tag in util.PENN_NOUNS_TAGS else pos_tag
pos_tag = "VERB" if pos_tag in util.PENN_VERBS_TAGS else pos_tag
pos_tag = "ADVERB" if pos_tag in util.PENN_ADVERBS_TAGS else pos_tag
pos_tag = "ADJECTIVE" if pos_tag in util.PENN_ADJECTIVES_TAGS else None
TAGS = {"NOUN":NOUN, "VERB":VERB, "ADJECTIVE":ADJECTIVE, "ADVERB":ADVERB}
TAG = TAGS[pos_tag] if pos_tag else ADJECTIVE
synsets = wordnet.synsets(word['raw'], TAG)
if len(synsets) > 0:
polarity = synsets[0].weight
if linear_score:
polarity[0] = polarity[0] * (word['index'] / linear_score['doc_size']) * linear_score['linear_score_constant']
return polarity
else:
return None
def __word_prior_polarity(word, pos_tag=None, linear_score=None):
pos_tag = "n" if pos_tag in util.PENN_NOUNS_TAGS else pos_tag
pos_tag = "v" if pos_tag in util.PENN_VERBS_TAGS else pos_tag
pos_tag = "r" if pos_tag in util.PENN_ADVERBS_TAGS else pos_tag
pos_tag = "a" if pos_tag in util.PENN_ADJECTIVES_TAGS else None
if pos_tag is None:
pos_tag = 'a'
prior_polarity_score = __sentiwords.get_entry_by_name_and_pos(word['raw'],pos_tag)
if prior_polarity_score is None:
return None
if linear_score:
prior_polarity_score['prior_polarity_score'] = prior_polarity_score['prior_polarity_score'] * (float(word['index']) / float(linear_score['doc_size'])) * linear_score['linear_score_constant']
return (prior_polarity_score['prior_polarity_score'], 0)
def is_negation(bigram_first_word):
"""Gets the fist word of a bigram and checks if this words is a negation or contraction word"""
NEGATION_WORDS = ['no','not']
NEGATION_CONTRACTIONS = ["isn't","aren't","wasn't","weren't","haven't",
"hasn't","hadn't","won't","wouldn't","don't",
"doesn't","didn't","can't","couldn't","shouldn't"
"mightn't","mustn't","ain't","mayn't","oughtn't",
"shan't"]
return (bigram_first_word in NEGATION_WORDS) or (bigram_first_word in NEGATION_CONTRACTIONS)
def invert_polarity(polarity, type=None):
"""It inverts or do a complement of the polarity"""
if type == 'complement':
if polarity < 0:
return -(1.0 - abs(polarity))
else:
return 1.0 - polarity
return -1.0 * polarity
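# Examples (sketch):
#     invert_polarity(0.8)                 -> -0.8
#     invert_polarity(0.3, 'complement')   ->  0.7
#     invert_polarity(-0.3, 'complement')  -> -0.7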
def apply_adverb_factor(adverb, polarity, negation=None):
if is_negation(adverb):
return invert_polarity(polarity, negation)
    polarity = float(polarity)
    factor = 1.0  # assume the adverb is non-grading by default
    type = 1  # non-grading
    # Each line of the adverb-group files has the form "adverb;factor", so
    # match the adverb exactly against the first field.
    for att_adv in ATTENUATORS_ADVERBS:
        values = att_adv.split('\n')[0].split(';')
        if adverb == values[0]:
            factor = float(values[1])
            type = 2
            break
    if factor == 1.0:  # found nothing in the attenuators
        for int_adv in INTENSIFIERS_ADVERBS:
            values = int_adv.split('\n')[0].split(';')
            if adverb == values[0]:
                factor = float(values[1])
                type = 3
                break
if type == 3:
if polarity < 0:
#print 'adverb + polarity: ' + str(- math.pow(abs(polarity), 1.0 / factor))
return (- math.pow(abs(polarity), 1.0 / factor))
else:
#print 'adverb + polarity: ' + str(math.pow(polarity, 1.0 / factor))
return (math.pow(polarity, 1.0 / factor))
elif type == 2:
if polarity < 0:
#print 'adverb + polarity: ' + str(- math.pow(abs(polarity), factor))
return (- math.pow(abs(polarity), factor))
else:
#print 'adverb + polarity: ' + str(math.pow(polarity,factor))
return (math.pow(polarity,factor))
elif type == 1:
return polarity
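# Worked example (sketch; adverb-group lines are assumed to look like
# "very;2.0"): an intensifier with factor 2.0 maps polarity
# 0.64 -> 0.64 ** (1 / 2.0) = 0.8, while an attenuator with factor 2.0 maps
# 0.64 -> 0.64 ** 2.0 = 0.4096, so intensifiers push |polarity| toward 1 and
# attenuators push it toward 0.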
def default_adv_xxx_bigram_polarity(bigram, negation=None, prior_polarity_score=False, linear_score=None):
"""Calculates the bigram polarity based on a empirical factor from each adverb group
and SENTIWORDNET word polarity
"""
second_word_polarity = word_polarity(bigram['second_word'],
bigram['second_word']['tag'],
prior_polarity_score = prior_polarity_score,
linear_score = linear_score)
    # If it is a verb, try again with the lemmatized form
    if bigram['second_word']['tag'] in util.PENN_VERBS_TAGS and \
            (second_word_polarity is None or second_word_polarity[0] == 0):
        w = Word(bigram['second_word']['raw'])
        bigram['second_word']['lemma'] = w.lemmatize("v")
        # retry the lookup with the lemma standing in for the raw word
        lemmatized = dict(bigram['second_word'], raw=bigram['second_word']['lemma'])
        second_word_polarity = word_polarity(lemmatized,
                                             bigram['second_word']['tag'],
                                             prior_polarity_score=prior_polarity_score,
                                             linear_score=linear_score)
    # if the second word has no polarity, stop here
    if second_word_polarity is None:
        return None
return apply_adverb_factor(bigram['first_word']['raw'],second_word_polarity[0], negation)
def adjectives_polarities(list_of_adjectives, prior_polarity_score=False, linear_score=None):
"""This method calculates all adjectives polarities based on the following arguments
Keyword arguments:
list_of_adjectives -- list of adjectives from a document
"""
adjectives_polarities = []
for adjective in list_of_adjectives:
polarity = word_polarity(adjective,
prior_polarity_score = prior_polarity_score,
linear_score = linear_score)
if polarity and polarity[0] != 0.0:
adjectives_polarities.append(polarity[0])
return adjectives_polarities
def adv_adj_bigrams_polarities(list_of_adv_adj_bigrams, negation=None, prior_polarity_score=False, linear_score=None):
"""This method calculates all bigrams polarities based on the following arguments
Keyword arguments:
list_of_adv_adj_bigrams -- list of bigrams in the following format: ADVERB / ADJECTIVE
"""
bigrams_polarities = []
for bigram in list_of_adv_adj_bigrams:
bigram_polarity = default_adv_xxx_bigram_polarity(bigram,
negation,
prior_polarity_score=prior_polarity_score,
linear_score=linear_score)
if bigram_polarity:
bigrams_polarities.append(bigram_polarity)
return bigrams_polarities
def trigram_polarity(trigram, negation=None, prior_polarity_score=False):
    first_word = trigram[0]
    middle_word = trigram[1]
    third_word = trigram[2]
    # words
    first_word_word = first_word.split('/')[0]
    # word tags
    middle_word_tag = middle_word.split('/')[1]
    third_word_tag = third_word.split('/')[1]
    results = []
    # adv/adv/adj trigram
    if middle_word_tag in util.PENN_ADVERBS_TAGS and third_word_tag in util.PENN_ADJECTIVES_TAGS:
        # default_adv_xxx_bigram_polarity expects a bigram dict, so build one
        # from the raw words and tags of the trigram's last two positions
        bigram = {'first_word': {'raw': middle_word.split('/')[0]},
                  'second_word': {'raw': third_word.split('/')[0], 'tag': third_word_tag}}
        partial_result = default_adv_xxx_bigram_polarity(bigram, negation, prior_polarity_score=prior_polarity_score)
        if partial_result is None:
            return None
        partial_result = apply_adverb_factor(first_word_word, partial_result)
        if partial_result is not None and abs(partial_result) != 0:
            results.append(partial_result)
    return results
def ngrams_polarities(ngrams_list, negation=None, prior_polarity_score=False, linear_score=None):
"""
    Given a list of ngrams (such as "good", "bad", ("very", "good"), "awesome"), returns a list of the corresponding polarities
"""
polarities = []
for ngram in ngrams_list:
pol = 0
if len(ngram) == 2: #bigrams - adverbs and adjectives
pol = default_adv_xxx_bigram_polarity(ngram,
negation,
prior_polarity_score = prior_polarity_score,
linear_score = linear_score)
else: #unigrams - adjectives
pol = word_polarity(ngram,
prior_polarity_score=prior_polarity_score,
linear_score = linear_score)
        if pol is not None and isinstance(pol, tuple) and pol[0] != 0:
            polarities.append(pol[0])
        elif pol is not None and isinstance(pol, (int, float)) and pol != 0:
            polarities.append(pol)
return polarities
def ngrams_matrix_polarities(ngrams_matrix, negation=None, prior_polarity_score=False, linear_score=None):
"""Given a matrix of ngrams (or a list of ngrams list), return a matrix of its corresponding polarities"""
polarities_matrix = {}
for _id, ngrams_list in ngrams_matrix.iteritems():
polarities_matrix[_id] = ngrams_polarities(ngrams_list,
negation,
prior_polarity_score=prior_polarity_score,
linear_score=linear_score)
return polarities_matrix
|
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.template import Template, Context
from django.urls import reverse
from django.test.utils import override_settings
from model_mommy import mommy
from coop_cms.models import BaseArticle, ArticleCategory
from coop_cms.settings import get_article_class
from coop_cms.tests import BaseTestCase
@override_settings(COOP_CMS_ARTICLES_CATEGORY_PAGINATION=10)
class ArticlesByCategoryTest(BaseTestCase):
"""Articles category page"""
def test_view_articles(self):
"""view article by category"""
article_class = get_article_class()
cat = mommy.make(ArticleCategory)
art = mommy.make(article_class, category=cat, title="AZERTYUIOP", publication=BaseArticle.PUBLISHED)
url = reverse('coop_cms_articles_category', args=[cat.slug])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, art.title)
def test_view_articles_ordering(self):
"""view articles by category in order"""
article_class = get_article_class()
cat = mommy.make(ArticleCategory)
dt1 = datetime.now() + timedelta(1)
dt2 = datetime.now()
dt3 = datetime.now() - timedelta(2)
dt4 = datetime.now() - timedelta(1)
art1 = mommy.make(
article_class, category=cat, title="#ITEM1#", publication_date=dt1,
publication=BaseArticle.PUBLISHED
)
art2 = mommy.make(
article_class, category=cat, title="#ITEM2#", publication_date=dt2,
publication=BaseArticle.PUBLISHED
)
art3 = mommy.make(
article_class, category=cat, title="#ITEM3#", publication_date=dt3,
publication=BaseArticle.PUBLISHED
)
art4 = mommy.make(
article_class, category=cat, title="#ITEM4#", publication_date=dt4,
publication=BaseArticle.PUBLISHED
)
url = reverse('coop_cms_articles_category', args=[cat.slug])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, art1.title)
self.assertContains(response, art2.title)
self.assertContains(response, art3.title)
self.assertContains(response, art4.title)
content = response.content.decode('utf-8')
articles = sorted((art1, art2, art3, art4), key=lambda x: x.publication_date)
articles.reverse()
positions = [content.find(a.title) for a in articles]
self.assertEqual(positions, sorted(positions))
def test_view_no_articles(self):
"""No article in category: It should return 404"""
cat = mommy.make(ArticleCategory)
url = reverse('coop_cms_articles_category', args=[cat.slug])
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_view_no_published_articles(self):
"""no published article in category: returns 404"""
article_class = get_article_class()
cat = mommy.make(ArticleCategory)
mommy.make(article_class, category=cat, title="AZERTYUIOP", publication=BaseArticle.DRAFT)
url = reverse('coop_cms_articles_category', args=[cat.slug])
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_view_articles_publication(self):
"""view article by category: publication falg is taken into account"""
article_class = get_article_class()
cat = mommy.make(ArticleCategory)
art1 = mommy.make(article_class, category=cat, title="AZERTYUIOP", publication=BaseArticle.PUBLISHED)
art2 = mommy.make(article_class, category=cat, title="QSDFGHJKLM", publication=BaseArticle.DRAFT)
url = reverse('coop_cms_articles_category', args=[cat.slug])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, art1.title)
self.assertNotContains(response, art2.title)
def test_view_articles_different_categories(self):
"""view article by category : no articles from different category"""
article_class = get_article_class()
cat1 = mommy.make(ArticleCategory)
cat2 = mommy.make(ArticleCategory)
art1 = mommy.make(article_class, category=cat1, title="AZERTYUIOP", publication=BaseArticle.PUBLISHED)
art2 = mommy.make(article_class, category=cat2, title="QSDFGHJKLM", publication=BaseArticle.PUBLISHED)
url = reverse('coop_cms_articles_category', args=[cat1.slug])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, art1.title)
self.assertNotContains(response, art2.title)
    def test_view_articles_unknown_categories(self):
"""view article with unknown category: 404"""
article_class = get_article_class()
cat = mommy.make(ArticleCategory, name="abcd")
mommy.make(article_class, category=cat, title="AZERTYUIOP", publication=BaseArticle.PUBLISHED)
url = reverse('coop_cms_articles_category', args=["ghjk"])
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_view_articles_category_template(self):
"""view article with custom template"""
article_class = get_article_class()
cat = mommy.make(ArticleCategory, name="Only for unit testing")
art = mommy.make(article_class, category=cat, title="AZERTYUIOP", publication=BaseArticle.PUBLISHED)
self.assertEqual(cat.slug, "only-for-unit-testing")
url = reverse('coop_cms_articles_category', args=[cat.slug])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, art.title)
self.assertContains(response, "This comes from custom template")
def test_view_articles_category_many(self):
"""view articles by category: check pagination"""
article_class = get_article_class()
cat = mommy.make(ArticleCategory)
for i in range(30):
mommy.make(
article_class, category=cat, publication_date=datetime(2014, 3, i+1),
title="AZERTY-{0}-UIOP".format(i), publication=BaseArticle.PUBLISHED
)
url = reverse('coop_cms_articles_category', args=[cat.slug])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
ids = list(range(30))
ids.reverse()
for i in ids[:10]:
self.assertContains(response, "AZERTY-{0}-UIOP".format(i))
for i in ids[10:]:
self.assertNotContains(response, "AZERTY-{0}-UIOP".format(i))
response = self.client.get(url+"?page=2")
self.assertEqual(response.status_code, 200)
for i in ids[10:20]:
self.assertContains(response, "AZERTY-{0}-UIOP".format(i))
for i in ids[:10]:
self.assertNotContains(response, "AZERTY-{0}-UIOP".format(i))
@override_settings(COOP_CMS_ARTICLES_CATEGORY_PAGINATION=10)
def test_view_articles_category_pagination_size(self):
"""view articles by category: check pagination"""
article_class = get_article_class()
cat = mommy.make(ArticleCategory, pagination_size=5)
for i in range(30):
mommy.make(
article_class, category=cat, publication_date=datetime(2014, 3, i + 1),
title="AZERTY-{0}-UIOP".format(i), publication=BaseArticle.PUBLISHED
)
url = reverse('coop_cms_articles_category', args=[cat.slug])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
ids = list(range(30))
ids.reverse()
for i in ids[:5]:
self.assertContains(response, "AZERTY-{0}-UIOP".format(i))
for i in ids[5:]:
self.assertNotContains(response, "AZERTY-{0}-UIOP".format(i))
response = self.client.get(url + "?page=2")
self.assertEqual(response.status_code, 200)
for i in ids[5:10]:
self.assertContains(response, "AZERTY-{0}-UIOP".format(i))
for i in ids[:5]:
self.assertNotContains(response, "AZERTY-{0}-UIOP".format(i))
@override_settings(COOP_CMS_ARTICLES_CATEGORY_PAGINATION=5)
def test_view_articles_category_pagination_size_default(self):
"""view articles by category: check pagination"""
article_class = get_article_class()
cat = mommy.make(ArticleCategory)
for i in range(30):
mommy.make(
article_class, category=cat, publication_date=datetime(2014, 3, i + 1),
title="AZERTY-{0}-UIOP".format(i), publication=BaseArticle.PUBLISHED
)
url = reverse('coop_cms_articles_category', args=[cat.slug])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
ids = list(range(30))
ids.reverse()
for i in ids[:5]:
self.assertContains(response, "AZERTY-{0}-UIOP".format(i))
for i in ids[5:]:
self.assertNotContains(response, "AZERTY-{0}-UIOP".format(i))
response = self.client.get(url + "?page=2")
self.assertEqual(response.status_code, 200)
for i in ids[5:10]:
self.assertContains(response, "AZERTY-{0}-UIOP".format(i))
for i in ids[:5]:
self.assertNotContains(response, "AZERTY-{0}-UIOP".format(i))
class CoopCategoryTemplateTagTest(BaseTestCase):
def test_use_template(self):
tpl = Template('{% load coop_utils %}{% coop_category "abc" def %}!!{{def}}!!')
html = tpl.render(Context({}))
self.assertEqual(ArticleCategory.objects.count(), 1)
self.assertEqual(html, "!!abc!!")
def test_use_template_several_times(self):
tpl = Template(
'{% load coop_utils %}{% coop_category "joe" bar %}{% coop_category "abc" def %}!!{{def}}-{{bar}}!!'
)
html = tpl.render(Context({}))
self.assertEqual(ArticleCategory.objects.count(), 2)
self.assertEqual(html, "!!abc-joe!!")
def test_use_template_many_calls(self):
tpl = Template('{% load coop_utils %}{% coop_category "abc" def %}!!{{def}}!!')
for i in range(10):
html = tpl.render(Context({}))
self.assertEqual(ArticleCategory.objects.count(), 1)
self.assertEqual(html, "!!abc!!")
def test_use_template_many_calls_not_slug(self):
tpl = Template('{% load coop_utils %}{% coop_category "Ab CD" def %}!!{{def}}!!')
html = ""
for i in range(10):
html = tpl.render(Context({}))
self.assertEqual(ArticleCategory.objects.count(), 1)
self.assertEqual(html, "!!Ab CD!!")
def test_use_template_existing_category(self):
mommy.make(ArticleCategory, name="abc")
tpl = Template('{% load coop_utils %}{% coop_category "abc" def %}!!{{def}}!!')
html = tpl.render(Context({}))
self.assertEqual(ArticleCategory.objects.count(), 1)
self.assertEqual(html, "!!abc!!")
def test_use_template_as_variable(self):
mommy.make(ArticleCategory, name="abc")
tpl = Template('{% load coop_utils %}{% coop_category cat def %}!!{{def}}!!')
html = tpl.render(Context({'cat': "abc"}))
self.assertEqual(ArticleCategory.objects.count(), 1)
self.assertEqual(html, "!!abc!!")
def test_view_category_articles(self):
cat = mommy.make(ArticleCategory, name="abc")
art1 = mommy.make(
get_article_class(), slug="test1", category=cat, publication=True, publication_date=datetime.now()
)
art2 = mommy.make(
get_article_class(), slug="test2", category=cat, publication=True,
publication_date=datetime.now()-timedelta(1)
)
self.assertEqual(list(cat.get_articles_qs().all()), [art2, art1])
def test_view_category_articles_not_all_published(self):
cat = mommy.make(ArticleCategory, name="abc")
art1 = mommy.make(get_article_class(), slug="test1", category=cat, publication=False)
art2 = mommy.make(get_article_class(), slug="test2", category=cat, publication=True)
self.assertEqual(list(cat.get_articles_qs().all()), [art2])
|
|
import unittest
from agent import *
from completesimulation import HamadryasSim, HamaPopulation
from dispersal import HamadryasDispersal
from group import HamadryasGroup
from seedgroups import HamadryasSeed
class FullRunTests(unittest.TestCase):
def test_run(self):
hamadryas_sim = HamadryasSim()
hamadryas_sim.duration = 50
output = hamadryas_sim.run_simulation()
print output
self.assertTrue(output)
class ChallengeTests(unittest.TestCase):
def setup_rhp(self):
hama_rhp_pop = HamaPopulation()
hama_rhp_group = HamadryasGroup(1)
hama_rhp_pop.groupsdict[1] = hama_rhp_group
hamadryas_sim = HamadryasSim()
HamadryasSeed.addagenttoseed(1, hama_rhp_group, hama_rhp_pop, 'm', None, None, 10, hamadryas_sim)
HamadryasSeed.addagenttoseed(1, hama_rhp_group, hama_rhp_pop, 'm', None, None, 10, hamadryas_sim)
HamadryasSeed.addagenttoseed(1, hama_rhp_group, hama_rhp_pop, 'm', None, None, 13.5, hamadryas_sim)
HamadryasSeed.addagenttoseed(1, hama_rhp_group, hama_rhp_pop, 'm', None, None, 13.5, hamadryas_sim)
hama_rhp_pop.dict[1].rhp = '1'
hama_rhp_pop.dict[2].rhp = '2'
hama_rhp_pop.dict[3].rhp = '3'
hama_rhp_pop.dict[4].rhp = '4'
return hama_rhp_pop
def test_rhp(self):
hama_rhp_pop = self.setup_rhp()
self.assertTrue(hama_rhp_pop.dict[1].get_rhp() > hama_rhp_pop.dict[2].get_rhp())
self.assertTrue(hama_rhp_pop.dict[3].get_rhp() > hama_rhp_pop.dict[1].get_rhp())
self.assertTrue(hama_rhp_pop.dict[4].get_rhp() > hama_rhp_pop.dict[2].get_rhp())
def test_challenge_outcome(self):
dead2 = 0
dead4 = 0
for i in range(0, 1000):
challenge_pop = self.setup_rhp()
challenge_sim = HamadryasSim()
challenge_pop.groupsdict[1].leadermales = [4]
HamadryasSeed.addagenttoseed(1,
challenge_pop.groupsdict[1],
challenge_pop,
'f',
None,
None,
10,
challenge_sim)
HamadryasDispersal.challenge(challenge_pop.dict[2], challenge_pop.dict[4], challenge_pop, challenge_sim)
self.assertIn(5, challenge_pop.dict[4].females)
HamadryasDispersal.challenge(challenge_pop.dict[1], challenge_pop.dict[4], challenge_pop, challenge_sim)
if 2 not in challenge_pop.all:
dead2 += 1
if 4 not in challenge_pop.all:
dead4 += 1
self.assertIn(5, challenge_pop.dict[1].females)
self.assertAlmostEqual(dead2, 500, delta=50)
self.assertAlmostEqual(dead4, 500, delta=50)
def test_rhp_assignment(self):
agent = HamadryasAgent('m', None, None, None)
self.assertEqual(agent.taxon, "hamadryas")
score = MakeAgents.assignrhpcurve(HamadryasAgent('m', None, None, None))
self.assertTrue(score)
def test_sol_choices(self):
sol_sim = HamadryasSim()
became_leader = 0
followed = 0
died = 0
for i in range(0, 5000):
sol_pop = HamaPopulation()
sol_pop = HamadryasSeed.makeseed(1, sol_pop, sol_sim)
# add a solitary
HamadryasSeed.addagenttoseed(1, sol_pop.groupsdict[1], sol_pop, 'm', None, None, 20.5, sol_sim)
our_guy = sol_pop.all[-1]
our_guy = sol_pop.dict[our_guy]
our_guy.maleState = MaleState.sol
our_guy.clanID = 2
sol_sim.get_young_natal_females(sol_pop)
sol_sim.male_eligibility(sol_pop)
self.assertTrue(sol_pop.groupsdict[1].leadermales)
# give him choices
sol_sim.male_choices(our_guy, sol_pop)
if our_guy.index not in sol_pop.all:
died += 1
elif our_guy.maleState == MaleState.lea:
became_leader += 1
elif our_guy.maleState == MaleState.fol:
followed += 1
print became_leader, followed, died
self.assertAlmostEqual(followed, 2250, delta=450)
self.assertAlmostEqual(became_leader, 1250, delta=250)
self.assertAlmostEqual(75, died, delta=50)
def test_fol_choices(self):
fol_sim = HamadryasSim()
became_leader = 0
followed = 0
died = 0
for i in range(0, 1000):
fol_pop = HamaPopulation()
fol_pop = HamadryasSeed.makeseed(1, fol_pop, fol_sim)
fol_group = fol_pop.groupsdict[1]
# add a solitary
HamadryasSeed.addagenttoseed(1, fol_group, fol_pop, 'm', None, None, 20.5, fol_sim)
our_guy = fol_pop.all[-1]
our_guy = fol_pop.dict[our_guy]
our_guy.clanID = 2
leader = random.choice([x for x in fol_group.leadermales if fol_pop.dict[x].clanID == 2])
leader = fol_pop.dict[leader]
HamadryasDispersal.follow(our_guy, leader, fol_pop)
fol_sim.get_young_natal_females(fol_pop)
fol_sim.male_eligibility(fol_pop)
# give him choices
fol_sim.male_choices(our_guy, fol_pop)
if our_guy.index not in fol_pop.all:
died += 1
elif our_guy.maleState == MaleState.lea:
self.assertTrue(our_guy.females)
became_leader += 1
elif our_guy.maleState == MaleState.fol:
self.assertFalse(our_guy.females)
followed += 1
print became_leader, followed, died
self.assertAlmostEqual(followed, 400, delta=100)
self.assertAlmostEqual(became_leader, 600, delta=100)
self.assertEqual(0, died)
def test_disp_between_bands(self):
change_OMU = 0
change_clan = 0
change_band = 0
for i in range(0, 100):
band_disp_sim = HamadryasSim()
band_disp_pop = HamaPopulation()
for groupindex in range(0, 10):
band_disp_pop = HamadryasSeed.makeseed(groupindex, band_disp_pop, band_disp_sim)
female_to_disp = band_disp_pop.dict[14]
start_OMU = female_to_disp.OMUID
start_clan = female_to_disp.clanID
start_band = female_to_disp.bandID
band_disp_sim.killagent(band_disp_pop.dict[female_to_disp.OMUID],
band_disp_pop,
band_disp_pop.groupsdict[0],
50)
band_disp_sim.male_eligibility(band_disp_pop)
self.assertTrue(band_disp_pop.eligible_males)
HamadryasDispersal.opportun_takeover(female_to_disp,
band_disp_pop,
band_disp_sim)
if female_to_disp.OMUID != start_OMU:
change_OMU += 1
if female_to_disp.clanID != start_clan and female_to_disp.bandID == start_band:
change_clan += 1
if female_to_disp.bandID != start_band:
change_band += 1
print "Moved between OMUs: " + str(change_OMU)
print "Stayed in natal clan: " + str(1000 - change_band - change_clan)
print "Moved between clans within a band: " + str(change_clan)
print "Moved between bands: " + str(change_band)
self.assertEqual(100, change_OMU)
self.assertAlmostEqual(15, change_clan, delta=50)
self.assertAlmostEqual(60, change_band, delta=50)
def test_infanticide(self):
infant_died = 0
sires = []
for i in range(0, 1000):
inf_sim = HamadryasSim()
inf_pop = HamaPopulation()
inf_pop = HamadryasSeed.makeseed(0, inf_pop, inf_sim)
HamadryasSeed.addagenttoseed(0, inf_pop.groupsdict[0], inf_pop, 'f', None, None, 10, inf_sim)
mom = inf_pop.dict[66]
mom.sire_of_fetus = inf_pop.dict[1]
inf_sim.birthagent(mom, inf_pop, 50)
infant = mom.offspring[0]
sires.append(inf_pop.dict[infant].parents[1])
inf_sim.male_eligibility(inf_pop)
HamadryasDispersal.opportun_takeover(mom, inf_pop, inf_sim)
if infant not in inf_pop.all:
infant_died += 1
print str(infant_died) + " infants died."
self.assertAlmostEqual(500, infant_died, delta=50)
def test_inherit_works(self):
inherits = 0
no_females = 0
for i in range(0, 1000):
inher_sim = HamadryasSim()
inher_pop = HamaPopulation()
inher_pop = HamadryasSeed.makeseed(0, inher_pop, inher_sim)
leader_to_kill = inher_pop.all[0]
leader_to_kill = inher_pop.dict[leader_to_kill]
inheritor = inher_pop.all[1]
inheritor = inher_pop.dict[inheritor]
            leaders_females = []
            if leader_to_kill.females:
                leaders_females = list(leader_to_kill.females)
            else:
                no_females += 1
inher_sim.killagent(leader_to_kill, inher_pop, inher_pop.groupsdict[0], 50)
# self.assertTrue(inher_pop.avail_females)
if inheritor.maleState == MaleState.lea:
self.assertIn(inheritor.females[0], leaders_females)
inherits += 1
print "Inherits: " + str(inherits)
print "No females to inherit: " + str(no_females)
self.assertAlmostEqual(900, inherits, delta=900)
def test_initial_unit(self):
dispersed = 0
dispersed_across_bands = 0
for i in range(0, 100):
init_sim = HamadryasSim()
init_pop = HamaPopulation()
            for groupindex in range(0, 10):
                init_pop = HamadryasSeed.makeseed(groupindex, init_pop, init_sim)
HamadryasSeed.addagenttoseed(0, init_pop.groupsdict[0], init_pop,
'f', None, None, 2, init_sim)
our_girl = init_pop.all[-1]
our_girl = init_pop.dict[our_girl]
start_OMU = our_girl.OMUID
start_clan = our_girl.clanID
start_band = our_girl.bandID
init_sim.get_young_natal_females(init_pop)
self.assertTrue(init_pop.young_natal_females)
males = [male for male in init_pop.dict.values() if male.sex == 'm']
for male in males:
init_sim.male_choices(male, init_pop)
if our_girl.OMUID != start_OMU:
dispersed += 1
if our_girl.bandID != start_band:
dispersed_across_bands += 1
print dispersed, dispersed_across_bands
self.assertAlmostEqual(90, dispersed, delta=90)
self.assertAlmostEqual(30, dispersed_across_bands, delta=30)
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""Convenient library for data statistics generation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import csv
import gzip
import io
import multiprocessing
import os
import tempfile
from typing import Any, List, Optional, Text, cast
import apache_beam as beam
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.options.pipeline_options import PipelineOptions
from joblib import delayed
from joblib import Parallel
import numpy as np
from pandas import DataFrame
import tensorflow as tf
from tensorflow_data_validation import constants
from tensorflow_data_validation import types
from tensorflow_data_validation.api import stats_api
from tensorflow_data_validation.coders import csv_decoder
from tensorflow_data_validation.statistics import stats_impl
from tensorflow_data_validation.statistics import stats_options as options
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow_data_validation.utils import stats_util
from tfx_bsl.arrow import table_util
from tfx_bsl.tfxio import tf_example_record
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
def generate_statistics_from_tfrecord(
data_location: Text,
output_path: Optional[bytes] = None,
stats_options: options.StatsOptions = options.StatsOptions(),
pipeline_options: Optional[PipelineOptions] = None,
) -> statistics_pb2.DatasetFeatureStatisticsList:
"""Compute data statistics from TFRecord files containing TFExamples.
Runs a Beam pipeline to compute the data statistics and return the result
data statistics proto.
This is a convenience method for users with data in TFRecord format.
Users with data in unsupported file/data formats, or users who wish
to create their own Beam pipelines need to use the 'GenerateStatistics'
PTransform API directly instead.
Args:
data_location: The location of the input data files.
output_path: The file path to output data statistics result to. If None, we
use a temporary directory. It will be a TFRecord file containing a single
data statistics proto, and can be read with the 'load_statistics' API.
If you run this function on Google Cloud, you must specify an
output_path. Specifying None may cause an error.
stats_options: `tfdv.StatsOptions` for generating data statistics.
pipeline_options: Optional beam pipeline options. This allows users to
specify various beam pipeline execution parameters like pipeline runner
(DirectRunner or DataflowRunner), cloud dataflow service project id, etc.
See https://cloud.google.com/dataflow/pipelines/specifying-exec-params for
more details.
Returns:
A DatasetFeatureStatisticsList proto.
"""
if output_path is None:
output_path = os.path.join(tempfile.mkdtemp(), 'data_stats.tfrecord')
output_dir_path = os.path.dirname(output_path)
if not tf.io.gfile.exists(output_dir_path):
tf.io.gfile.makedirs(output_dir_path)
batch_size = stats_options.desired_batch_size
# PyLint doesn't understand Beam PTransforms.
# pylint: disable=no-value-for-parameter
with beam.Pipeline(options=pipeline_options) as p:
# Auto detect tfrecord file compression format based on input data
# path suffix.
_ = (
p
| 'ReadData' >> (tf_example_record.TFExampleRecord(
file_pattern=data_location,
schema=None,
telemetry_descriptors=['tfdv', 'generate_statistics_from_tfrecord'])
.BeamSource(batch_size))
| 'GenerateStatistics' >> stats_api.GenerateStatistics(stats_options)
| 'WriteStatsOutput' >>
(stats_api.WriteStatisticsToTFRecord(output_path)))
return stats_util.load_statistics(output_path)
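# Usage sketch (the path and glob pattern are hypothetical):
#
#     stats = generate_statistics_from_tfrecord(
#         data_location='/tmp/train*.tfrecord')
#     print(stats.datasets[0].num_examples)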
def generate_statistics_from_csv(
data_location: Text,
column_names: Optional[List[types.FeatureName]] = None,
delimiter: Text = ',',
output_path: Optional[bytes] = None,
stats_options: options.StatsOptions = options.StatsOptions(),
pipeline_options: Optional[PipelineOptions] = None,
compression_type: Text = CompressionTypes.AUTO,
) -> statistics_pb2.DatasetFeatureStatisticsList:
"""Compute data statistics from CSV files.
Runs a Beam pipeline to compute the data statistics and return the result
data statistics proto.
This is a convenience method for users with data in CSV format.
Users with data in unsupported file/data formats, or users who wish
to create their own Beam pipelines need to use the 'GenerateStatistics'
PTransform API directly instead.
Args:
data_location: The location of the input data files.
column_names: A list of column names to be treated as the CSV header. Order
must match the order in the input CSV files. If this argument is not
specified, we assume the first line in the input CSV files as the
header. Note that this option is valid only for 'csv' input file format.
delimiter: A one-character string used to separate fields in a CSV file.
output_path: The file path to output data statistics result to. If None, we
use a temporary directory. It will be a TFRecord file containing a single
data statistics proto, and can be read with the 'load_statistics' API.
If you run this function on Google Cloud, you must specify an
output_path. Specifying None may cause an error.
stats_options: `tfdv.StatsOptions` for generating data statistics.
pipeline_options: Optional beam pipeline options. This allows users to
specify various beam pipeline execution parameters like pipeline runner
(DirectRunner or DataflowRunner), cloud dataflow service project id, etc.
See https://cloud.google.com/dataflow/pipelines/specifying-exec-params for
more details.
compression_type: Used to handle compressed input files. Default value is
CompressionTypes.AUTO, in which case the file_path's extension will be
used to detect the compression.
Returns:
A DatasetFeatureStatisticsList proto.
"""
if output_path is None:
output_path = os.path.join(tempfile.mkdtemp(), 'data_stats.tfrecord')
output_dir_path = os.path.dirname(output_path)
if not tf.io.gfile.exists(output_dir_path):
tf.io.gfile.makedirs(output_dir_path)
batch_size = (
stats_options.desired_batch_size if stats_options.desired_batch_size
and stats_options.desired_batch_size > 0 else
constants.DEFAULT_DESIRED_INPUT_BATCH_SIZE)
# PyLint doesn't understand Beam PTransforms.
# pylint: disable=no-value-for-parameter
with beam.Pipeline(options=pipeline_options) as p:
# If a header is not provided, assume the first line in a file
# to be the header.
skip_header_lines = 1 if column_names is None else 0
if column_names is None:
column_names = get_csv_header(data_location, delimiter, compression_type)
_ = (
p
| 'ReadData' >> beam.io.textio.ReadFromText(
file_pattern=data_location,
skip_header_lines=skip_header_lines,
compression_type=compression_type)
| 'DecodeData' >> csv_decoder.DecodeCSV(
column_names=column_names,
delimiter=delimiter,
schema=stats_options.schema
if stats_options.infer_type_from_schema else None,
desired_batch_size=batch_size)
| 'GenerateStatistics' >> stats_api.GenerateStatistics(stats_options)
| 'WriteStatsOutput' >> stats_api.WriteStatisticsToTFRecord(
output_path))
return stats_util.load_statistics(output_path)
def generate_statistics_from_dataframe(
dataframe: DataFrame,
stats_options: options.StatsOptions = options.StatsOptions(),
n_jobs: int = 1
) -> statistics_pb2.DatasetFeatureStatisticsList:
"""Compute data statistics for the input pandas DataFrame.
This is a utility function for users with in-memory data represented
as a pandas DataFrame.
This function supports only DataFrames with columns of primitive types.
DataFrames with multivalent features are not supported.
Args:
dataframe: Input pandas DataFrame.
stats_options: `tfdv.StatsOptions` for generating data statistics.
n_jobs: Number of processes to run (defaults to 1). If -1 is provided,
uses the same number of processes as the number of CPU cores.
Returns:
A DatasetFeatureStatisticsList proto.
"""
if not isinstance(dataframe, DataFrame):
raise TypeError('dataframe argument is of type {}. Must be a '
'pandas DataFrame.'.format(type(dataframe).__name__))
stats_generators = cast(
List[stats_generator.CombinerStatsGenerator],
stats_impl.get_generators(stats_options, in_memory=True))
if n_jobs < -1 or n_jobs == 0:
raise ValueError('Invalid n_jobs parameter {}. Should be either '
' -1 or >= 1.'.format(n_jobs))
if n_jobs == -1:
n_jobs = multiprocessing.cpu_count()
n_jobs = max(min(n_jobs, multiprocessing.cpu_count()), 1)
if n_jobs == 1:
merged_partial_stats = _generate_partial_statistics_from_df(
dataframe, stats_options, stats_generators)
else:
# TODO(b/144580609): Consider using Beam for inmemory mode as well.
splits = np.array_split(dataframe, n_jobs)
partial_stats = Parallel(n_jobs=n_jobs)(
delayed(_generate_partial_statistics_from_df)(
splits[i], stats_options, stats_generators) for i in range(n_jobs))
merged_partial_stats = [
gen.merge_accumulators(stats)
for gen, stats in zip(stats_generators, zip(*partial_stats))
]
return stats_impl.extract_statistics_output(
merged_partial_stats, stats_generators)
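# Usage sketch (column names are illustrative): compute in-memory statistics
# for a small DataFrame, split across all available CPU cores.
#
#     import pandas as pd
#     df = pd.DataFrame({'age': [21, 35, 47], 'city': ['NYC', 'SF', 'NYC']})
#     stats = generate_statistics_from_dataframe(df, n_jobs=-1)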
def _generate_partial_statistics_from_df(
dataframe: DataFrame,
stats_options: options.StatsOptions,
stats_generators: List[stats_generator.CombinerStatsGenerator]
) -> List[Any]:
"""Generate accumulators containing partial stats."""
feature_allowlist = set()
if stats_options.feature_allowlist:
feature_allowlist.update(stats_options.feature_allowlist)
# Create a copy of the stats options so that we don't modify the input object.
stats_options_modified = copy.copy(stats_options)
# Remove feature_allowlist option as it is no longer needed.
stats_options_modified.feature_allowlist = None
schema = schema_pb2.Schema()
drop_columns = []
for col_name, col_type in zip(dataframe.columns, dataframe.dtypes):
if (not table_util.NumpyKindToArrowType(col_type.kind) or
(feature_allowlist and col_name not in feature_allowlist)):
drop_columns.append(col_name)
elif col_type.kind == 'b':
# Track bool type feature as categorical.
schema.feature.add(
name=col_name,
type=schema_pb2.INT,
bool_domain=schema_pb2.BoolDomain())
dataframe = dataframe.drop(columns=drop_columns)
if schema.feature:
stats_options_modified.schema = schema
record_batch_with_primitive_arrays = table_util.DataFrameToRecordBatch(
dataframe)
record_batch_with_list_arrays = table_util.CanonicalizeRecordBatch(
record_batch_with_primitive_arrays)
return stats_impl.generate_partial_statistics_in_memory(
record_batch_with_list_arrays, stats_options_modified, stats_generators)
def get_csv_header(
data_location: Text,
delimiter: Text,
compression_type: Text = CompressionTypes.AUTO) -> List[types.FeatureName]:
"""Gets the CSV header from the input files.
This function assumes that the header is present as the first line in all
the files in the input path.
Args:
data_location: Glob pattern(s) specifying the location of the input data
files.
delimiter: A one-character string used to separate fields in a CSV file.
compression_type: Used to handle compressed input files. Default value is
CompressionTypes.AUTO, in which case the file_path's extension will be
used to detect the compression.
Returns:
The list of column names.
Raises:
ValueError: If any of the input files is not found or empty, or if the files
have different headers.
"""
matched_files = tf.io.gfile.glob(data_location)
if not matched_files:
raise ValueError(
'No file found in the input data location: %s' % data_location)
  # Detect compression based on the file extension if it is `AUTO`.
if compression_type == CompressionTypes.AUTO:
compression_type = CompressionTypes.detect_compression_type(
matched_files[0])
if compression_type == CompressionTypes.UNCOMPRESSED:
read_csv_fn = _read_csv_uncompressed
elif compression_type == CompressionTypes.GZIP:
read_csv_fn = _read_csv_gzip
else:
raise ValueError('Compression Type: `%s` is not supported for csv files.' %
compression_type)
result = read_csv_fn(matched_files[0], delimiter)
# Make sure that all files have the same header.
for filename in matched_files[1:]:
if read_csv_fn(filename, delimiter) != result:
raise ValueError('Files have different headers.')
return result
def _read_csv_gzip(file, delimiter):
with tf.io.gfile.GFile(file, 'rb') as f:
with io.TextIOWrapper(gzip.GzipFile(fileobj=f), newline='') as t: # type: ignore
try:
return next(csv.reader(t, delimiter=delimiter))
except StopIteration as e:
raise ValueError('Found empty file when reading the header line: %s' %
file) from e
def _read_csv_uncompressed(file, delimiter):
with tf.io.gfile.GFile(file, 'r') as reader:
try:
return next(csv.reader(reader, delimiter=delimiter))
except StopIteration as e:
raise ValueError('Found empty file when reading the header line: %s' %
file) from e
|
|
import re
import operator
from collections import Counter
from zipfile import ZipFile
from numpy import array
from scipy import zeros
from scipy.stats import chisquare
kWORDS = re.compile("[a-z]{1,}")
kSTOPWORDS = set(['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'yo',
'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his',
'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', 'itself',
'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which',
'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are',
'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having',
'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if',
'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for',
'with', 'about', 'against', 'between', 'into', 'through', 'during',
'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in',
'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then',
'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no',
'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very',
's', 't', 'can', 'will', 'just', 'don', 'should', 'now', 've', 'm'])
def bigrams(sentence):
"""
Given a sentence, generate all bigrams in the sentence.
"""
for ii, ww in enumerate(sentence[:-1]):
yield ww, sentence[ii + 1]
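# Example (sketch):
#     list(bigrams(["the", "cat", "sat"])) -> [("the", "cat"), ("cat", "sat")]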
def tokenize(sentence):
"""
Given a sentence, return a list of all the words in the sentence.
"""
return kWORDS.findall(sentence.lower())
def sentences_from_zipfile(zip_file):
"""
    Given a zip file, yield the lines of each file in the zip file.
"""
with ZipFile(zip_file) as z:
for ii in z.namelist():
try:
pres = ii.replace(".txt", "").replace("state_union/", "").split("-")[1]
except IndexError:
continue
for jj in z.read(ii).decode(errors='replace').split("\n")[3:]:
yield jj.lower()
def chisquare_pvalue(obs, ex):
    """
    Given a 2x2 contingency table both observed and expected, returns the
    corresponding chisquared p-value.
    @param obs An array (list of lists or numpy array) of observed values
    @param ex An array (list of lists or numpy array) of expected values
    """
    # Stub filled in: flatten both tables and let scipy do the work.  ddof=2
    # drops the degrees of freedom from 3 (four cells) to 1, as appropriate
    # for a 2x2 table with fixed marginals.
    obs = array(obs).flatten()
    ex = array(ex).flatten()
    if ex.min() <= 0:
        return 1.0
    return chisquare(obs, f_exp=ex, ddof=2)[1]
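# Worked example (sketch): for obs = [[10, 20], [30, 940]] the marginals are
# rows (30, 970) and columns (40, 960) with N = 1000, giving
# ex = [[1.2, 28.8], [38.8, 931.2]]; the chi-square statistic is about 69.3
# on one degree of freedom, a p-value far below 0.001 (strong association).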
class BigramFinder:
"""
Finds bigrams in a stream of text.
"""
def __init__(self, min_unigram = 10, max_unigram = 150, min_ngram = 5,
exclude=[]):
"""
Instantiates the class.
@param min_ngram Ignore bigrams that appear fewer than this many times
@param max_unigram Ignore words that appear more than this many times
@param min_unigram Ignore words that appear fewer than this many times
@param exclude Don't add words from this set to bigrams
"""
self._exclude = set(exclude)
self._max_unigram = max_unigram
self._min_unigram = min_unigram
self._min_ngram = min_ngram
self._vocab = None
        # Bigram counts plus per-position unigram counts; these are filled in
        # by add_sentence and used to build the contingency tables.
        self._unigram = Counter()
        self._ngram = Counter()
        self._left = Counter()
        self._right = Counter()
    def observed_and_expected(self, bigram):
        """
        Compute the observed and expected counts for a bigram
        @bigram A tuple containing the words to score
        """
        # Stub filled in: 2x2 table from bigram/positional counts; expected from marginals.
        obs = zeros((2, 2))
        left, right = bigram
        total = float(sum(self._ngram.values()))
        obs[0, 0] = self._ngram[bigram]
        obs[0, 1] = self._left[left] - obs[0, 0]
        obs[1, 0] = self._right[right] - obs[0, 0]
        obs[1, 1] = total - obs[0, 0] - obs[0, 1] - obs[1, 0]
        row, col = obs.sum(axis=1).reshape(2, 1), obs.sum(axis=0).reshape(1, 2)
        ex = row * col / total if total else obs
        return obs, ex
def score(self, bigram):
"""
Compute the chi-square probability of a bigram being dependent.
If either word of a bigram is in the "exclude" list, return 1.0.
@bigram A tuple containing the words to score
"""
# you shouldn't need to edit this function
if any(x in self._exclude for x in bigram):
return 1.0
obs, ex = self.observed_and_expected(bigram)
return chisquare_pvalue(obs, ex)
def vocab_scan(self, sentence):
"""
Given a sentence, scan all of its words and add up their counts.
This will be used to finalize the vocabulary later.
"""
# Don't modify this function.
for ii in sentence:
self._unigram[ii] += 1
def vocab(self):
"""
Return the finder's vocab
"""
# Don't modify this function.
return self._vocab
def finalize(self):
"""
        Creates the vocabulary for later processing. Filters low frequency
and high frequency words.
"""
# Don't modify this function.
self._vocab = set(x for x in self._unigram if self._unigram
if self._unigram[x] >= self._min_unigram and
self._unigram[x] <= self._max_unigram and
x not in self._exclude)
    def add_sentence(self, sentence):
        """
        Add the counts for a sentence (assumed to be iterable) so that we can
        then score bigrams.
        """
        assert self._vocab is not None, "Adding counts before finalizing vocabulary"
        # Stub filled in: only count bigrams whose words survived the
        # vocabulary filter.
        for ll, rr in bigrams(sentence):
            if ll in self._vocab and rr in self._vocab:
                self._ngram[(ll, rr)] += 1
                self._left[ll] += 1
                self._right[rr] += 1
    def valid_bigrams(self):
        """
        Return an iterator over the bigrams that have been seen enough to get a
        score.
        """
        # Stub filled in using the minimum n-gram count threshold.
        return (x for x in self._ngram if self._ngram[x] >= self._min_ngram)
def sorted_bigrams(self):
"""
Return n-grams sorted by the probability of being an n-gram. Should
yield a tuple of words in bigram and the p-value of the bigram.
"""
# You should not need to modify this function
d = {}
for ngram in self.valid_bigrams():
d[ngram] = self.score(ngram)
for ngram, score in sorted(d.items(), key=operator.itemgetter(1), reverse=True):
yield ngram, score
if __name__ == "__main__":
bf = BigramFinder(exclude=kSTOPWORDS)
for sent in sentences_from_zipfile("../data/state_union.zip"):
bf.vocab_scan(tokenize(sent))
bf.finalize()
for sent in sentences_from_zipfile("../data/state_union.zip"):
bf.add_sentence(tokenize(sent))
for ngram, score in list(bf.sorted_bigrams())[:100]:
print("%f\t%s\t%s\t" % (score, ngram[0], ngram[1]))
|
|
"""
Helper for views.py
"""
from base_handler import base_handler
import traceback
import app.model
from flask import g, render_template
class single_access_handler(base_handler):
def __init__(self):
"""
        Manages all the operations involved in associating a single port with EPGs
        (for virtual port channel associations the vpc_access_handler is used)
:return:
"""
try:
self.cobra_apic_object = single_access_handler.init_connections()
self.exception = None
except Exception as e:
self.exception = e
            traceback.print_exc()
def get_create_single_access_networks(self, obj_response, form_values):
        # Check whether there have been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
# Load the sel_create_single_access_network select with the networks within the selected group
try:
network_ap = self.cobra_apic_object.get_nca_ap(form_values['sel_create_single_access_group'])
item_list = []
if network_ap is not None:
networks = self.cobra_apic_object.get_epg_by_ap(str(network_ap.dn))
for network in networks:
# Creates a dynamic object
network_do = type('network_do', (object,), {})
network_do.key = str(network.dn)
network_do.text = network.name
item_list.append(network_do)
html_response = render_template('select_partial.html', item_list=item_list)
obj_response.html("#sel_create_single_access_network", html_response)
except Exception as e:
            traceback.print_exc()
obj_response.script("create_notification('Can not retrieve networks', '" + str(e).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
finally:
g.db.close()
obj_response.html("#create_single_access_response", '')
def get_create_single_access_ports(self, obj_response, form_values):
        # Check whether there have been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
# Load the sel_create_single_access_port select with the available ports within the selected leaf
try:
ports = self.cobra_apic_object.get_available_ports(form_values['sel_create_single_access_leaf'])
item_list = []
for i in range(0, len(ports[0])):
# Creates a dynamic object
port_do = type('port_do', (object,), {})
port_do.key = ports[0][i]
port_do.text = ports[1][i]
item_list.append(port_do)
html_response = render_template('select_partial.html', item_list=item_list)
obj_response.html("#sel_create_single_access_port", html_response)
except Exception as e:
            traceback.print_exc()
obj_response.script("create_notification('Can not retrieve ports', '" + str(e).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
finally:
g.db.close()
obj_response.html("#create_single_access_response", '')
def create_single_access(self, obj_response, form_values):
        # Check whether there have been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
# Creates switch profiles, interface profiles, policy groups and static bindings to associate a port
        # with an EPG
try:
port_id = form_values['sel_create_single_access_port'].split('[')[-1][:-1].replace('/','-')
switch_id = form_values['sel_create_single_access_leaf'].split('/')[-1]
if form_values['create_port_access_type'] == 'single_vlan':
network_o = app.model.network.select().where(app.model.network.epg_dn ==
form_values['sel_create_single_access_network'])
if len(network_o) > 0:
self.cobra_apic_object.create_single_access(network_o[0].epg_dn,
form_values['sel_create_single_access_leaf'],
form_values['sel_create_single_access_port'],
network_o[0].encapsulation,
'migration-tool',
'if_policy_' + switch_id + '_' + port_id,
'single_access_' + switch_id + '_' + port_id)
obj_response.script("create_notification('Assigned', '', 'success', 5000)")
else:
obj_response.script(
"create_notification('Network not found in local database', '', 'danger', 0)")
elif form_values['create_port_access_type'] == 'vlan_profile':
network_profilexnetworks = app.model.network_profilexnetwork.select().where(
app.model.network_profilexnetwork.network_profile == int(form_values['sel_profile_create_port_access']))
for network_profile in network_profilexnetworks:
network_o = app.model.network.select().where(app.model.network.id == network_profile.network.id)
if len(network_o) > 0:
self.cobra_apic_object.create_single_access(network_o[0].epg_dn,
form_values['sel_create_single_access_leaf'],
form_values['sel_create_single_access_port'],
network_o[0].encapsulation,
'migration-tool',
'if_policy_' + switch_id + '_' + port_id,
'single_access_' + switch_id + '_' + port_id)
else:
ex = Exception()
                        ex.message = 'Some networks were not assigned because they are not in the local database'
raise ex
obj_response.script("create_notification('Assigned', '', 'success', 5000)")
except Exception as e:
            traceback.print_exc()
obj_response.script("create_notification('Can not create single access', '" + str(e).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
finally:
g.db.close()
obj_response.html("#create_single_access_response", '')
def get_delete_single_access_networks(self, obj_response, form_values):
        # Check whether there have been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
        # Load the sel_delete_single_access_network select with the networks within the selected group
try:
network_ap = self.cobra_apic_object.get_nca_ap(form_values['sel_delete_single_access_group'])
item_list = []
if network_ap is not None:
networks = self.cobra_apic_object.get_epg_by_ap(str(network_ap.dn))
for network in networks:
# Creates a dynamic object
network_do = type('network_do', (object,), {})
network_do.key = str(network.dn)
network_do.text = network.name
item_list.append(network_do)
html_response = render_template('select_partial.html', item_list=item_list)
obj_response.html("#sel_delete_single_access_network", html_response)
except Exception as e:
            traceback.print_exc()
obj_response.script("create_notification('Can not retrieve networks', '" + str(e).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
finally:
g.db.close()
obj_response.html("#delete_single_access_response", '')
def get_delete_single_access_ports(self, obj_response, form_values):
# Check if there has been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
# Load the sel_delete_single_access_port select with the available ports from the selected leaf
try:
ports = self.cobra_apic_object.get_available_ports(form_values['sel_delete_single_access_leaf'])
item_list = []
for i in range(0, len(ports[0])):
# Creates a dynamic object
port_do = type('port_do', (object,), {})
port_do.key = ports[0][i]
port_do.text = ports[1][i]
item_list.append(port_do)
html_response = render_template('select_partial.html', item_list=item_list)
obj_response.html("#sel_delete_single_access_port", html_response)
except Exception as e:
            traceback.print_exc()
obj_response.script("create_notification('Can not retrieve ports', '" + str(e).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
finally:
g.db.close()
obj_response.html("#delete_single_access_response", '')
def delete_single_access(self, obj_response, form_values):
# Check if there has been connection errors
if self.exception is not None:
obj_response.script("create_notification('Connection problem', '" + str(self.exception).replace("'", "").
replace('"', '').replace("\n", "")[0:100] + "', 'danger', 0)")
return
# Removes the static binding between a port and an EPG. If no other EPG is using this port the system
# removes also the switch profile, interface profile and policy group associated with the port
try:
port_id = form_values['sel_delete_single_access_port'].split('[')[-1][:-1].replace('/','-')
switch_id = form_values['sel_delete_single_access_leaf'].split('/')[-1]
if form_values['delete_port_access_type'] == 'single_vlan':
network_o = app.model.network.select().where(app.model.network.epg_dn ==
form_values['sel_delete_single_access_network'])
if len(network_o) > 0:
self.cobra_apic_object.delete_single_access(form_values['sel_delete_single_access_network'],
form_values['sel_delete_single_access_port'],
'if_policy_' + switch_id + '_' + port_id,
'single_access_' + switch_id + '_' + port_id)
obj_response.script("create_notification('Removed', '', 'success', 5000)")
else:
obj_response.script(
"create_notification('Network not found in local database', '', 'danger', 0)")
elif form_values['delete_port_access_type'] == 'vlan_profile':
network_profilexnetworks = app.model.network_profilexnetwork.select().where(
app.model.network_profilexnetwork.network_profile == int(form_values['sel_profile_delete_port_access']))
for network_profile in network_profilexnetworks:
network_o = app.model.network.select().where(app.model.network.id == network_profile.network.id)
if len(network_o) > 0:
self.cobra_apic_object.delete_single_access(network_o[0].epg_dn,
form_values['sel_delete_single_access_port'],
'if_policy_' + switch_id + '_' + port_id,
'single_access_' + switch_id + '_' + port_id)
obj_response.script("create_notification('Removed', '', 'success', 5000)")
except Exception as e:
            traceback.print_exc()
obj_response.script(
"create_notification('Can not delete single access', '" + str(e).replace("'", "").replace('"', '').
replace("\n", "")[0:100] + "', 'danger', 0)")
finally:
g.db.close()
obj_response.html("#delete_single_access_response", '')
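# Illustrative note (added sketch, not part of the original module): the
# interface-policy and static-binding names used above are derived from the
# submitted form values. For a hypothetical leaf dn 'topology/pod-1/node-101'
# and a port dn ending in '[eth1/5]', switch_id would be 'node-101', port_id
# 'eth1-5', and the generated names 'if_policy_node-101_eth1-5' and
# 'single_access_node-101_eth1-5'. The dn values here are made up for
# illustration only.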
|
|
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from cStringIO import StringIO
from hashlib import md5
from swift.common.swob import Request, HTTPAccepted
from swift.common.middleware.s3api.etree import fromstring, tostring, \
Element, SubElement, XMLNS_XSI
from swift.common.middleware.s3api.s3response import InvalidArgument
from swift.common.middleware.s3api.acl_utils import handle_acl_header
from test.unit.common.middleware.s3api import S3ApiTestCase
from test.unit.common.middleware.s3api.helpers import UnreadableInput
from test.unit.common.middleware.s3api.test_s3_acl import s3acl
class TestS3ApiAcl(S3ApiTestCase):
def setUp(self):
super(TestS3ApiAcl, self).setUp()
        # All ACL APIs should be called against an existing bucket.
self.swift.register('PUT', '/v1/AUTH_test/bucket',
HTTPAccepted, {}, None)
def _check_acl(self, owner, body):
elem = fromstring(body, 'AccessControlPolicy')
permission = elem.find('./AccessControlList/Grant/Permission').text
self.assertEqual(permission, 'FULL_CONTROL')
name = elem.find('./AccessControlList/Grant/Grantee/ID').text
self.assertEqual(name, owner)
def test_bucket_acl_GET(self):
req = Request.blank('/bucket?acl',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self._check_acl('test:tester', body)
def test_bucket_acl_PUT(self):
elem = Element('AccessControlPolicy')
owner = SubElement(elem, 'Owner')
SubElement(owner, 'ID').text = 'id'
acl = SubElement(elem, 'AccessControlList')
grant = SubElement(acl, 'Grant')
grantee = SubElement(grant, 'Grantee', nsmap={'xsi': XMLNS_XSI})
grantee.set('{%s}type' % XMLNS_XSI, 'Group')
SubElement(grantee, 'URI').text = \
'http://acs.amazonaws.com/groups/global/AllUsers'
SubElement(grant, 'Permission').text = 'READ'
xml = tostring(elem)
req = Request.blank('/bucket?acl',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()},
body=xml)
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '200')
req = Request.blank('/bucket?acl',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': StringIO(xml)},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header(),
'Transfer-Encoding': 'chunked'})
self.assertIsNone(req.content_length)
self.assertIsNone(req.message_length())
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '200')
def test_bucket_canned_acl_PUT(self):
req = Request.blank('/bucket?acl',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header(),
'X-AMZ-ACL': 'public-read'})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_bucket_canned_acl_PUT_with_s3acl(self):
req = Request.blank('/bucket?acl',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header(),
'X-AMZ-ACL': 'public-read'})
with mock.patch('swift.common.middleware.s3api.s3request.'
'handle_acl_header') as mock_handler:
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '200')
self.assertEqual(mock_handler.call_count, 0)
def test_bucket_fails_with_both_acl_header_and_xml_PUT(self):
elem = Element('AccessControlPolicy')
owner = SubElement(elem, 'Owner')
SubElement(owner, 'ID').text = 'id'
acl = SubElement(elem, 'AccessControlList')
grant = SubElement(acl, 'Grant')
grantee = SubElement(grant, 'Grantee', nsmap={'xsi': XMLNS_XSI})
grantee.set('{%s}type' % XMLNS_XSI, 'Group')
SubElement(grantee, 'URI').text = \
'http://acs.amazonaws.com/groups/global/AllUsers'
SubElement(grant, 'Permission').text = 'READ'
xml = tostring(elem)
req = Request.blank('/bucket?acl',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header(),
'X-AMZ-ACL': 'public-read'},
body=xml)
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body),
'UnexpectedContent')
def _test_put_no_body(self, use_content_length=False,
use_transfer_encoding=False, string_to_md5=''):
content_md5 = md5(string_to_md5).digest().encode('base64').strip()
with UnreadableInput(self) as fake_input:
req = Request.blank(
'/bucket?acl',
environ={
'REQUEST_METHOD': 'PUT',
'wsgi.input': fake_input},
headers={
'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header(),
'Content-MD5': content_md5},
body='')
if not use_content_length:
req.environ.pop('CONTENT_LENGTH')
if use_transfer_encoding:
req.environ['HTTP_TRANSFER_ENCODING'] = 'chunked'
status, headers, body = self.call_s3api(req)
self.assertEqual(status, '400 Bad Request')
self.assertEqual(self._get_error_code(body), 'MissingSecurityHeader')
self.assertEqual(self._get_error_message(body),
'Your request was missing a required header.')
self.assertIn('<MissingHeaderName>x-amz-acl</MissingHeaderName>', body)
@s3acl
def test_bucket_fails_with_neither_acl_header_nor_xml_PUT(self):
self._test_put_no_body()
self._test_put_no_body(string_to_md5='test')
self._test_put_no_body(use_content_length=True)
self._test_put_no_body(use_content_length=True, string_to_md5='test')
self._test_put_no_body(use_transfer_encoding=True)
self._test_put_no_body(use_transfer_encoding=True, string_to_md5='zz')
def test_object_acl_GET(self):
req = Request.blank('/bucket/object?acl',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self._check_acl('test:tester', body)
def test_invalid_xml(self):
req = Request.blank('/bucket?acl',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()},
body='invalid')
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'MalformedACLError')
def test_handle_acl_header(self):
def check_generated_acl_header(acl, targets):
req = Request.blank('/bucket',
headers={'X-Amz-Acl': acl})
handle_acl_header(req)
for target in targets:
self.assertTrue(target[0] in req.headers)
self.assertEqual(req.headers[target[0]], target[1])
check_generated_acl_header('public-read',
[('X-Container-Read', '.r:*,.rlistings')])
check_generated_acl_header('public-read-write',
[('X-Container-Read', '.r:*,.rlistings'),
('X-Container-Write', '.r:*')])
check_generated_acl_header('private',
[('X-Container-Read', '.'),
('X-Container-Write', '.')])
@s3acl(s3acl_only=True)
def test_handle_acl_header_with_s3acl(self):
def check_generated_acl_header(acl, targets):
req = Request.blank('/bucket',
headers={'X-Amz-Acl': acl})
for target in targets:
self.assertTrue(target not in req.headers)
self.assertTrue('HTTP_X_AMZ_ACL' in req.environ)
            # TODO: add translation and assertion for s3acl
check_generated_acl_header('public-read',
['X-Container-Read'])
check_generated_acl_header('public-read-write',
['X-Container-Read', 'X-Container-Write'])
check_generated_acl_header('private',
['X-Container-Read', 'X-Container-Write'])
def test_handle_acl_with_invalid_header_string(self):
req = Request.blank('/bucket', headers={'X-Amz-Acl': 'invalid'})
with self.assertRaises(InvalidArgument) as cm:
handle_acl_header(req)
self.assertTrue('argument_name' in cm.exception.info)
self.assertEqual(cm.exception.info['argument_name'], 'x-amz-acl')
self.assertTrue('argument_value' in cm.exception.info)
self.assertEqual(cm.exception.info['argument_value'], 'invalid')
if __name__ == '__main__':
unittest.main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import logging
import smtplib
import email.feedparser
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email import header
import tg
from paste.deploy.converters import asbool, asint, aslist
from formencode import validators as fev
from pylons import tmpl_context as c
from pylons import app_globals as g
from allura.lib.utils import ConfigProxy
from allura.lib import exceptions as exc
from allura.lib import helpers as h
log = logging.getLogger(__name__)
RE_MESSAGE_ID = re.compile(r'<(?:[^>]*/)?([^>]*)>')
config = ConfigProxy(
common_suffix='forgemail.domain',
common_suffix_alt='forgemail.domain.alternates',
return_path='forgemail.return_path',
)
EMAIL_VALIDATOR = fev.Email(not_empty=True)
def Header(text, *more_text):
'''Helper to make sure we encode headers properly'''
if isinstance(text, header.Header):
return text
# email.header.Header handles str vs unicode differently
# see
# http://docs.python.org/library/email.header.html#email.header.Header.append
if type(text) != unicode:
raise TypeError('This must be unicode: %r' % text)
head = header.Header(text)
for m in more_text:
if type(m) != unicode:
            raise TypeError('This must be unicode: %r' % m)
head.append(m)
return head
def AddrHeader(fromaddr):
'''Accepts any of:
Header() instance
foo@bar.com
"Foo Bar" <foo@bar.com>
'''
if isinstance(fromaddr, basestring) and ' <' in fromaddr:
name, addr = fromaddr.rsplit(' <', 1)
addr = '<' + addr # restore the char we just split off
addrheader = Header(name, addr)
if str(addrheader).startswith('=?'): # encoding escape chars
# then quoting the name is no longer necessary
name = name.strip('"')
addrheader = Header(name, addr)
else:
addrheader = Header(fromaddr)
return addrheader
def is_autoreply(msg):
    '''Returns True if the message is an autoreply.
Detection based on suggestions from
https://github.com/opennorth/multi_mail/wiki/Detecting-autoresponders
'''
h = msg['headers']
return (
h.get('Auto-Submitted') == 'auto-replied'
or h.get('X-POST-MessageClass') == '9; Autoresponder'
or h.get('Delivered-To') == 'Autoresponder'
or h.get('X-FC-MachineGenerated') == 'true'
or h.get('X-AutoReply-From') is not None
or h.get('X-Autogenerated') in ['Forward', 'Group', 'Letter', 'Mirror', 'Redirect', 'Reply']
or h.get('X-Precedence') == 'auto_reply'
or h.get('Return-Path') == '<>'
)
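# Illustrative sketch (added, not part of the original module): is_autoreply
# expects the parsed-message dict produced by parse_message (defined below),
# so a minimal hypothetical call would look like
#   is_autoreply({'headers': {'Auto-Submitted': 'auto-replied'}})  # -> True
#   is_autoreply({'headers': {}})                                  # -> False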
def parse_address(addr):
userpart, domain = addr.split('@')
# remove common domain suffix
for suffix in [config.common_suffix] + aslist(config.common_suffix_alt):
if domain.endswith(suffix):
domain = domain[:-len(suffix)]
break
else:
raise exc.AddressException, 'Unknown domain: ' + domain
path = '/'.join(reversed(domain.split('.')))
project, mount_point = h.find_project('/' + path)
if project is None:
raise exc.AddressException, 'Unknown project: ' + domain
if len(mount_point) != 1:
raise exc.AddressException, 'Unknown tool: ' + domain
with h.push_config(c, project=project):
app = project.app_instance(mount_point[0])
if not app:
raise exc.AddressException, 'Unknown tool: ' + domain
return userpart, project, app
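# Illustrative sketch (added, not part of the original module): assuming the
# configured common_suffix were '.example.com', a hypothetical address such as
# '42@tickets.myproject.example.com' would yield userpart '42', the project
# looked up at path '/myproject/tickets', and the app instance mounted at
# 'tickets'. The address and project names are invented for illustration.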
def parse_message(data):
# Parse the email to its constituent parts
parser = email.feedparser.FeedParser()
parser.feed(data)
msg = parser.close()
# Extract relevant data
result = {}
result['multipart'] = multipart = msg.is_multipart()
result['headers'] = dict(msg)
result['message_id'] = _parse_message_id(msg.get('Message-ID'))
result['in_reply_to'] = _parse_message_id(msg.get('In-Reply-To'))
result['references'] = _parse_message_id(msg.get('References'))
if result['message_id'] == []:
result['message_id'] = h.gen_message_id()
else:
result['message_id'] = result['message_id'][0]
if multipart:
result['parts'] = []
for part in msg.walk():
dpart = dict(
headers=dict(part),
message_id=result['message_id'],
in_reply_to=result['in_reply_to'],
references=result['references'],
content_type=part.get_content_type(),
filename=part.get_filename(None),
payload=part.get_payload(decode=True))
charset = part.get_content_charset()
if charset:
dpart['payload'] = dpart['payload'].decode(charset)
result['parts'].append(dpart)
else:
result['payload'] = msg.get_payload(decode=True)
charset = msg.get_content_charset()
if charset:
result['payload'] = result['payload'].decode(charset)
return result
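# Illustrative sketch (added, not part of the original module): for a simple
# non-multipart message, parse_message returns roughly
#   {'multipart': False,
#    'headers': {...},               # dict of the top-level email headers
#    'message_id': '<extracted or generated id>',
#    'in_reply_to': [...], 'references': [...],
#    'payload': u'decoded body text'}
# Multipart messages instead carry a 'parts' list with one dict per MIME part.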
def identify_sender(peer, email_address, headers, msg):
from allura import model as M
# Dumb ID -- just look for email address claimed by a particular user
addr = M.EmailAddress.get(email=email_address, confirmed=True)
if addr and addr.claimed_by_user_id:
return addr.claimed_by_user() or M.User.anonymous()
from_address = headers.get('From', '').strip()
if not from_address:
return M.User.anonymous()
addr = M.EmailAddress.get(email=from_address)
if addr and addr.claimed_by_user_id:
return addr.claimed_by_user() or M.User.anonymous()
return M.User.anonymous()
def encode_email_part(content, content_type):
try:
return MIMEText(content.encode('ascii'), content_type, 'ascii')
    except UnicodeError:
return MIMEText(content.encode('utf-8'), content_type, 'utf-8')
def make_multipart_message(*parts):
msg = MIMEMultipart('related')
msg.preamble = 'This is a multi-part message in MIME format.'
alt = MIMEMultipart('alternative')
msg.attach(alt)
for part in parts:
alt.attach(part)
return msg
def _parse_message_id(msgid):
if msgid is None:
return []
return [mo.group(1)
for mo in RE_MESSAGE_ID.finditer(msgid)]
def _parse_smtp_addr(addr):
addr = str(addr)
addrs = _parse_message_id(addr)
if addrs and addrs[0]:
return addrs[0]
if '@' in addr:
return addr
return g.noreply
def isvalid(addr):
'''return True if addr is a (possibly) valid email address, false
otherwise'''
try:
EMAIL_VALIDATOR.to_python(addr, None)
return True
except fev.Invalid:
return False
class SMTPClient(object):
def __init__(self):
self._client = None
def sendmail(
self, addrs, fromaddr, reply_to, subject, message_id, in_reply_to, message,
sender=None, references=None, cc=None, to=None):
if not addrs:
return
if to:
message['To'] = AddrHeader(h.really_unicode(to))
else:
message['To'] = AddrHeader(reply_to)
message['From'] = AddrHeader(fromaddr)
message['Reply-To'] = AddrHeader(reply_to)
message['Subject'] = Header(subject)
message['Message-ID'] = Header('<' + message_id + u'>')
message['Date'] = email.utils.formatdate()
if sender:
message['Sender'] = AddrHeader(sender)
if cc:
message['CC'] = AddrHeader(cc)
addrs.append(cc)
if in_reply_to:
if not isinstance(in_reply_to, basestring):
raise TypeError('Only strings are supported now, not lists')
message['In-Reply-To'] = Header(u'<%s>' % in_reply_to)
if not references:
message['References'] = message['In-Reply-To']
if references:
references = [u'<%s>' % r for r in aslist(references)]
message['References'] = Header(*references)
content = message.as_string()
smtp_addrs = map(_parse_smtp_addr, addrs)
smtp_addrs = [a for a in smtp_addrs if isvalid(a)]
if not smtp_addrs:
log.warning('No valid addrs in %s, so not sending mail',
map(unicode, addrs))
return
try:
self._client.sendmail(
config.return_path,
smtp_addrs,
content)
        except Exception:
self._connect()
self._client.sendmail(
config.return_path,
smtp_addrs,
content)
def _connect(self):
        # Conventional default ports: 465 for implicit TLS, 25 for plain SMTP.
        if asbool(tg.config.get('smtp_ssl', False)):
            smtp_client = smtplib.SMTP_SSL(
                tg.config.get('smtp_server', 'localhost'),
                asint(tg.config.get('smtp_port', 465)),
                timeout=float(tg.config.get('smtp_timeout', 10)),
            )
        else:
            smtp_client = smtplib.SMTP(
                tg.config.get('smtp_server', 'localhost'),
                asint(tg.config.get('smtp_port', 25)),
                timeout=float(tg.config.get('smtp_timeout', 10)),
            )
        # Start TLS before authenticating so credentials are not sent in the
        # clear (and servers that require TLS before AUTH accept the login).
        if asbool(tg.config.get('smtp_tls', False)):
            smtp_client.starttls()
        if tg.config.get('smtp_user', None):
            smtp_client.login(tg.config['smtp_user'],
                              tg.config['smtp_password'])
self._client = smtp_client
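# Illustrative usage sketch (added, not part of the original module); the
# addresses, subject and message id below are hypothetical:
#   client = SMTPClient()
#   client._connect()
#   msg = make_multipart_message(encode_email_part(u'Hello', 'plain'))
#   client.sendmail([u'dev@example.com'], u'noreply@example.com',
#                   u'noreply@example.com', u'Test subject',
#                   u'abc123@example.com', None, msg)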
|
|
"""
pygments.lexers.praat
~~~~~~~~~~~~~~~~~~~~~
Lexer for Praat
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, bygroups, include
from pygments.token import Name, Text, Comment, Keyword, String, Punctuation, Number, \
Operator
__all__ = ['PraatLexer']
class PraatLexer(RegexLexer):
"""
For `Praat <http://www.praat.org>`_ scripts.
.. versionadded:: 2.1
"""
name = 'Praat'
aliases = ['praat']
filenames = ['*.praat', '*.proc', '*.psc']
keywords = (
'if', 'then', 'else', 'elsif', 'elif', 'endif', 'fi', 'for', 'from', 'to',
'endfor', 'endproc', 'while', 'endwhile', 'repeat', 'until', 'select', 'plus',
'minus', 'demo', 'assert', 'stopwatch', 'nocheck', 'nowarn', 'noprogress',
'editor', 'endeditor', 'clearinfo',
)
functions_string = (
'backslashTrigraphsToUnicode', 'chooseDirectory', 'chooseReadFile',
'chooseWriteFile', 'date', 'demoKey', 'do', 'environment', 'extractLine',
'extractWord', 'fixed', 'info', 'left', 'mid', 'percent', 'readFile', 'replace',
'replace_regex', 'right', 'selected', 'string', 'unicodeToBackslashTrigraphs',
)
functions_numeric = (
'abs', 'appendFile', 'appendFileLine', 'appendInfo', 'appendInfoLine', 'arccos',
'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'barkToHertz',
'beginPause', 'beginSendPraat', 'besselI', 'besselK', 'beta', 'beta2',
'binomialP', 'binomialQ', 'boolean', 'ceiling', 'chiSquareP', 'chiSquareQ',
'choice', 'comment', 'cos', 'cosh', 'createDirectory', 'deleteFile',
'demoClicked', 'demoClickedIn', 'demoCommandKeyPressed',
'demoExtraControlKeyPressed', 'demoInput', 'demoKeyPressed',
'demoOptionKeyPressed', 'demoShiftKeyPressed', 'demoShow', 'demoWaitForInput',
'demoWindowTitle', 'demoX', 'demoY', 'differenceLimensToPhon', 'do', 'editor',
'endPause', 'endSendPraat', 'endsWith', 'erb', 'erbToHertz', 'erf', 'erfc',
'exitScript', 'exp', 'extractNumber', 'fileReadable', 'fisherP', 'fisherQ',
'floor', 'gaussP', 'gaussQ', 'hertzToBark', 'hertzToErb', 'hertzToMel',
'hertzToSemitones', 'imax', 'imin', 'incompleteBeta', 'incompleteGammaP', 'index',
'index_regex', 'integer', 'invBinomialP', 'invBinomialQ', 'invChiSquareQ', 'invFisherQ',
'invGaussQ', 'invSigmoid', 'invStudentQ', 'length', 'ln', 'lnBeta', 'lnGamma',
'log10', 'log2', 'max', 'melToHertz', 'min', 'minusObject', 'natural', 'number',
'numberOfColumns', 'numberOfRows', 'numberOfSelected', 'objectsAreIdentical',
'option', 'optionMenu', 'pauseScript', 'phonToDifferenceLimens', 'plusObject',
'positive', 'randomBinomial', 'randomGauss', 'randomInteger', 'randomPoisson',
'randomUniform', 'real', 'readFile', 'removeObject', 'rindex', 'rindex_regex',
'round', 'runScript', 'runSystem', 'runSystem_nocheck', 'selectObject',
'selected', 'semitonesToHertz', 'sentence', 'sentencetext', 'sigmoid', 'sin', 'sinc',
'sincpi', 'sinh', 'soundPressureToPhon', 'sqrt', 'startsWith', 'studentP',
'studentQ', 'tan', 'tanh', 'text', 'variableExists', 'word', 'writeFile', 'writeFileLine',
'writeInfo', 'writeInfoLine',
)
functions_array = (
'linear', 'randomGauss', 'randomInteger', 'randomUniform', 'zero',
)
objects = (
'Activation', 'AffineTransform', 'AmplitudeTier', 'Art', 'Artword',
'Autosegment', 'BarkFilter', 'BarkSpectrogram', 'CCA', 'Categories',
'Cepstrogram', 'Cepstrum', 'Cepstrumc', 'ChebyshevSeries', 'ClassificationTable',
'Cochleagram', 'Collection', 'ComplexSpectrogram', 'Configuration', 'Confusion',
'ContingencyTable', 'Corpus', 'Correlation', 'Covariance',
'CrossCorrelationTable', 'CrossCorrelationTables', 'DTW', 'DataModeler',
'Diagonalizer', 'Discriminant', 'Dissimilarity', 'Distance', 'Distributions',
'DurationTier', 'EEG', 'ERP', 'ERPTier', 'EditCostsTable', 'EditDistanceTable',
'Eigen', 'Excitation', 'Excitations', 'ExperimentMFC', 'FFNet', 'FeatureWeights',
'FileInMemory', 'FilesInMemory', 'Formant', 'FormantFilter', 'FormantGrid',
'FormantModeler', 'FormantPoint', 'FormantTier', 'GaussianMixture', 'HMM',
'HMM_Observation', 'HMM_ObservationSequence', 'HMM_State', 'HMM_StateSequence',
'Harmonicity', 'ISpline', 'Index', 'Intensity', 'IntensityTier', 'IntervalTier',
'KNN', 'KlattGrid', 'KlattTable', 'LFCC', 'LPC', 'Label', 'LegendreSeries',
'LinearRegression', 'LogisticRegression', 'LongSound', 'Ltas', 'MFCC', 'MSpline',
'ManPages', 'Manipulation', 'Matrix', 'MelFilter', 'MelSpectrogram',
'MixingMatrix', 'Movie', 'Network', 'Object', 'OTGrammar', 'OTHistory', 'OTMulti',
'PCA', 'PairDistribution', 'ParamCurve', 'Pattern', 'Permutation', 'Photo',
'Pitch', 'PitchModeler', 'PitchTier', 'PointProcess', 'Polygon', 'Polynomial',
'PowerCepstrogram', 'PowerCepstrum', 'Procrustes', 'RealPoint', 'RealTier',
'ResultsMFC', 'Roots', 'SPINET', 'SSCP', 'SVD', 'Salience', 'ScalarProduct',
'Similarity', 'SimpleString', 'SortedSetOfString', 'Sound', 'Speaker',
'Spectrogram', 'Spectrum', 'SpectrumTier', 'SpeechSynthesizer', 'SpellingChecker',
'Strings', 'StringsIndex', 'Table', 'TableOfReal', 'TextGrid', 'TextInterval',
'TextPoint', 'TextTier', 'Tier', 'Transition', 'VocalTract', 'VocalTractTier',
'Weight', 'WordList',
)
variables_numeric = (
'macintosh', 'windows', 'unix', 'praatVersion', 'pi', 'e', 'undefined',
)
variables_string = (
'praatVersion', 'tab', 'shellDirectory', 'homeDirectory',
'preferencesDirectory', 'newline', 'temporaryDirectory',
'defaultDirectory',
)
object_attributes = (
'ncol', 'nrow', 'xmin', 'ymin', 'xmax', 'ymax', 'nx', 'ny', 'dx', 'dy',
)
tokens = {
'root': [
(r'(\s+)(#.*?$)', bygroups(Text, Comment.Single)),
(r'^#.*?$', Comment.Single),
(r';[^\n]*', Comment.Single),
(r'\s+', Text),
(r'\bprocedure\b', Keyword, 'procedure_definition'),
(r'\bcall\b', Keyword, 'procedure_call'),
(r'@', Name.Function, 'procedure_call'),
include('function_call'),
(words(keywords, suffix=r'\b'), Keyword),
(r'(\bform\b)(\s+)([^\n]+)',
bygroups(Keyword, Text, String), 'old_form'),
(r'(print(?:line|tab)?|echo|exit|asserterror|pause|send(?:praat|socket)|'
r'include|execute|system(?:_nocheck)?)(\s+)',
bygroups(Keyword, Text), 'string_unquoted'),
(r'(goto|label)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)),
include('variable_name'),
include('number'),
(r'"', String, 'string'),
(words((objects), suffix=r'(?=\s+\S+\n)'), Name.Class, 'string_unquoted'),
(r'\b[A-Z]', Keyword, 'command'),
(r'(\.{3}|[)(,])', Punctuation),
],
'command': [
(r'( ?[\w()-]+ ?)', Keyword),
include('string_interpolated'),
(r'\.{3}', Keyword, ('#pop', 'old_arguments')),
(r':', Keyword, ('#pop', 'comma_list')),
(r'\s', Text, '#pop'),
],
'procedure_call': [
(r'\s+', Text),
(r'([\w.]+)(:|\s*\()',
bygroups(Name.Function, Text), '#pop'),
(r'([\w.]+)', Name.Function, ('#pop', 'old_arguments')),
],
'procedure_definition': [
(r'\s', Text),
(r'([\w.]+)(\s*?[(:])',
bygroups(Name.Function, Text), '#pop'),
(r'([\w.]+)([^\n]*)',
bygroups(Name.Function, Text), '#pop'),
],
'function_call': [
(words(functions_string, suffix=r'\$(?=\s*[:(])'), Name.Function, 'function'),
(words(functions_array, suffix=r'#(?=\s*[:(])'), Name.Function, 'function'),
(words(functions_numeric, suffix=r'(?=\s*[:(])'), Name.Function, 'function'),
],
'function': [
(r'\s+', Text),
(r':', Punctuation, ('#pop', 'comma_list')),
(r'\s*\(', Punctuation, ('#pop', 'comma_list')),
],
'comma_list': [
(r'(\s*\n\s*)(\.{3})', bygroups(Text, Punctuation)),
(r'(\s*[])\n])', Text, '#pop'),
(r'\s+', Text),
(r'"', String, 'string'),
(r'\b(if|then|else|fi|endif)\b', Keyword),
include('function_call'),
include('variable_name'),
include('operator'),
include('number'),
(r'[()]', Text),
(r',', Punctuation),
],
'old_arguments': [
(r'\n', Text, '#pop'),
include('variable_name'),
include('operator'),
include('number'),
(r'"', String, 'string'),
(r'[^\n]', Text),
],
'number': [
(r'\n', Text, '#pop'),
(r'\b\d+(\.\d*)?([eE][-+]?\d+)?%?', Number),
],
'object_reference': [
include('string_interpolated'),
(r'([a-z][a-zA-Z0-9_]*|\d+)', Name.Builtin),
(words(object_attributes, prefix=r'\.'), Name.Builtin, '#pop'),
(r'\$', Name.Builtin),
(r'\[', Text, '#pop'),
],
'variable_name': [
include('operator'),
include('number'),
(words(variables_string, suffix=r'\$'), Name.Variable.Global),
(words(variables_numeric,
suffix=r'(?=[^a-zA-Z0-9_."\'$#\[:(]|\s|^|$)'),
Name.Variable.Global),
(words(objects, prefix=r'\b', suffix=r"(_)"),
bygroups(Name.Builtin, Name.Builtin),
'object_reference'),
(r'\.?_?[a-z][\w.]*(\$|#)?', Text),
(r'[\[\]]', Punctuation, 'comma_list'),
include('string_interpolated'),
],
'operator': [
(r'([+\/*<>=!-]=?|[&*|][&*|]?|\^|<>)', Operator),
(r'(?<![\w.])(and|or|not|div|mod)(?![\w.])', Operator.Word),
],
'string_interpolated': [
(r'\'[_a-z][^\[\]\'":]*(\[([\d,]+|"[\w,]+")\])?(:[0-9]+)?\'',
String.Interpol),
],
'string_unquoted': [
(r'(\n\s*)(\.{3})', bygroups(Text, Punctuation)),
(r'\n', Text, '#pop'),
(r'\s', Text),
include('string_interpolated'),
(r"'", String),
(r"[^'\n]+", String),
],
'string': [
(r'(\n\s*)(\.{3})', bygroups(Text, Punctuation)),
(r'"', String, '#pop'),
include('string_interpolated'),
(r"'", String),
(r'[^\'"\n]+', String),
],
'old_form': [
(r'(\s+)(#.*?$)', bygroups(Text, Comment.Single)),
(r'\s+', Text),
(r'(optionmenu|choice)([ \t]+\S+:[ \t]+)',
bygroups(Keyword, Text), 'number'),
(r'(option|button)([ \t]+)',
bygroups(Keyword, Text), 'string_unquoted'),
(r'(sentence|text)([ \t]+\S+)',
bygroups(Keyword, Text), 'string_unquoted'),
(r'(word)([ \t]+\S+[ \t]*)(\S+)?([ \t]+.*)?',
bygroups(Keyword, Text, String, Text)),
(r'(boolean)(\s+\S+\s*)(0|1|"?(?:yes|no)"?)',
bygroups(Keyword, Text, Name.Variable)),
            # Ideally processing of the number would happen in the 'number'
            # state, but that doesn't seem to work
(r'(real|natural|positive|integer)([ \t]+\S+[ \t]*)([+-]?)(\d+(?:\.\d*)?'
r'(?:[eE][-+]?\d+)?%?)',
bygroups(Keyword, Text, Operator, Number)),
(r'(comment)(\s+)',
bygroups(Keyword, Text), 'string_unquoted'),
(r'\bendform\b', Keyword, '#pop'),
]
}
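# Illustrative usage sketch (added, not part of the original lexer module).
# The Praat snippet below is made up for demonstration; it simply runs the
# lexer through the standard Pygments highlight() API.
if __name__ == '__main__':  # pragma: no cover
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = u'form Test\n    real Duration 1.5\nendform\nwriteInfoLine: "done"\n'
    print(highlight(sample, PraatLexer(), TerminalFormatter()))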
|
|
#!/usr/bin/env python
"""
flow_direction_DN.py: calculates single-direction flow directions on a regular
or irregular grid.
GT Nov 2013
Modified Feb 2014
"""
import numpy as np
import inspect
from landlab import RasterModelGrid, BAD_INDEX_VALUE
from landlab.grid.raster_funcs import calculate_steepest_descent_across_cell_faces
UNDEFINED_INDEX = BAD_INDEX_VALUE
def grid_flow_directions(grid, elevations):
"""Flow directions on raster grid.
Calculate flow directions for node elevations on a raster grid.
Each node is assigned a single direction, toward one of its neighboring
nodes (or itself, if none of its neighbors are lower). There is only
flow from one node to another if there is a negative gradient. If a
node's steepest gradient is >= 0., then its slope is set to zero and
its receiver node is listed as itself.
Parameters
----------
grid : RasterModelGrid
a raster grid.
elevations: ndarray
Node elevations.
Returns
-------
receiver : (ncells, ) ndarray
For each cell, the node in the direction of steepest descent, or
itself if no downstream nodes.
steepest_slope : (ncells, ) ndarray
The slope value in the steepest direction of flow.
Notes
-----
    This function considers only nodes that have four neighbors. Thus, flow
    directions and slopes are only calculated for nodes that have associated
    cells.
Examples
--------
This example calculates flow routing on a (4,5) raster grid with the
following node elevations::
5 - 5 - 5 - 5 - 5
| | | | |
5 - 3 - 4 - 3 - 5
| | | | |
5 - 1 - 2 - 2 - 5
| | | | |
5 - 0 - 5 - 5 - 5
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.components.flow_routing.flow_direction_DN import grid_flow_directions
>>> mg = RasterModelGrid(4,5)
>>> z = np.array([5., 0., 5., 5., 5.,
... 5., 1., 2., 2., 5.,
... 5., 3., 4., 3., 5.,
... 5., 5., 5., 5., 5.])
>>> recv_nodes, slope = grid_flow_directions(mg, z)
Each node with a cell has a receiving node (although that node may be
itself).
>>> recv_nodes
array([1, 6, 8, 6, 7, 8])
All positive gradients are clipped to zero.
>>> slope
array([-1., -1., 0., -2., -2., -1.])
If a cell has no surrounding neighbors lower than itself, it is a sink.
Use :attr:`~landlab.grid.base.ModelGrid.node_index_at_cells` to get the
nodes associated with the cells.
>>> sink_cells = np.where(slope >= 0)[0]
>>> list(sink_cells)
[2]
>>> mg.node_index_at_cells[sink_cells] # Sink nodes
array([8])
The source/destination node pairs for the flow.
>>> list(zip(mg.node_index_at_cells, recv_nodes))
[(6, 1), (7, 6), (8, 8), (11, 6), (12, 7), (13, 8)]
"""
slope, receiver = calculate_steepest_descent_across_cell_faces(
grid, elevations, return_node=True)
(sink_cell, ) = np.where(slope >= 0.)
receiver[sink_cell] = grid.node_index_at_cells[sink_cell]
slope[sink_cell] = 0.
return receiver, slope
def flow_directions(elev, active_links, fromnode, tonode, link_slope,
grid=None, baselevel_nodes=None):
"""Find flow directions on a grid.
Finds and returns flow directions for a given elevation grid. Each node is
assigned a single direction, toward one of its N neighboring nodes (or
itself, if none of its neighbors are lower).
Parameters
----------
elev : array_like
Elevations at nodes.
active_links : array_like
IDs of active links.
fromnode : array_like
IDs of the "from" node for each link.
tonode : array_like
IDs of the "to" node for each link.
link_slope : array_like
slope of each link, defined POSITIVE DOWNHILL (i.e., a negative value
means the link runs uphill from the fromnode to the tonode).
baselevel_nodes : array_like, optional
IDs of open boundary (baselevel) nodes.
Returns
-------
receiver : ndarray
For each node, the ID of the node that receives its flow. Defaults to
the node itself if no other receiver is assigned.
steepest_slope : ndarray
The slope value (positive downhill) in the direction of flow
sink : ndarray
IDs of nodes that are flow sinks (they are their own receivers)
receiver_link : ndarray
ID of link that leads from each node to its receiver, or
UNDEFINED_INDEX if none.
Examples
--------
The example below assigns elevations to the 10-node example network in
Braun and Willett (2012), so that their original flow pattern should be
re-created.
>>> import numpy as np
>>> from landlab.components.flow_routing.flow_direction_DN import flow_directions
>>> z = np.array([2.4, 1.0, 2.2, 3.0, 0.0, 1.1, 2.0, 2.3, 3.1, 3.2])
>>> fn = np.array([1,4,4,0,1,2,5,1,5,6,7,7,8,6,3,3,2,0])
>>> tn = np.array([4,5,7,1,2,5,6,5,7,7,8,9,9,8,8,6,3,3])
>>> s = z[fn] - z[tn] # slope with unit link length, positive downhill
>>> active_links = np.arange(len(fn))
>>> r, ss, snk, rl = flow_directions(z, active_links, fn, tn, s)
>>> r
array([1, 4, 1, 6, 4, 4, 5, 4, 6, 7])
>>> ss
array([ 1.4, 1. , 1.2, 1. , 0. , 1.1, 0.9, 2.3, 1.1, 0.9])
>>> snk
array([4])
>>> rl[3:8]
array([ 15, 2147483647, 1, 6, 2])
OK, the following are rough notes on design: we want to work with just the
active links. Ways to do this:
- Pass active_links in as argument
- In calling code, only refer to receiver_links for active nodes
"""
# Setup
num_nodes = len(elev)
steepest_slope = np.zeros(num_nodes)
receiver = np.arange(num_nodes)
receiver_link = UNDEFINED_INDEX + np.zeros(num_nodes, dtype=np.int)
# For each link, find the higher of the two nodes. The higher is the
# potential donor, and the lower is the potential receiver. If the slope
# from donor to receiver is steeper than the steepest one found so far for
# the donor, then assign the receiver to the donor and record the new slope.
# (Note the minus sign when looking at slope from "t" to "f").
#
# NOTE: MAKE SURE WE ARE ONLY LOOKING AT ACTIVE LINKS
#THIS REMAINS A PROBLEM AS OF DEJH'S EFFORTS, MID MARCH 14.
#overridden as part of fastscape_stream_power
#DEJH attempting to replace the node-by-node loop, 5/28/14:
#This is actually about the same speed on a 100*100 grid!
    #as of Dec 2014, we prioritise the compiled (cython) implementation when it
    #is available, and only fall back to the numpy methods if it's not (~10%
    #speed gain on 100x100 grid; presumably better if the grid is bigger)
method = 'cython'
if method == 'cython':
from .cfuncs import adjust_flow_receivers
adjust_flow_receivers(fromnode, tonode, elev, link_slope,
active_links, receiver, receiver_link,
steepest_slope)
else:
        if grid is None or RasterModelGrid not in inspect.getmro(grid.__class__):
for i in xrange(len(fromnode)):
f = fromnode[i]
t = tonode[i]
if elev[f]>elev[t] and link_slope[i]>steepest_slope[f]:
receiver[f] = t
steepest_slope[f] = link_slope[i]
receiver_link[f] = active_links[i]
elif elev[t]>elev[f] and -link_slope[i]>steepest_slope[t]:
receiver[t] = f
steepest_slope[t] = -link_slope[i]
receiver_link[t] = active_links[i]
else:
#alternative, assuming grid structure doesn't change between steps
#global neighbor_nodes
#global links_list #this is ugly. We need another way of saving that doesn't make these permanent (can't change grid size...)
try:
elevs_array = np.where(neighbor_nodes!=-1, elev[neighbor_nodes], np.finfo(float).max)
except NameError:
neighbor_nodes = np.empty((grid.active_nodes.size, 8), dtype=int)
                #the target shape is (nnodes,8) & S,W,N,E,SW,NW,NE,SE
neighbor_nodes[:,:4] = grid.get_neighbor_list(bad_index=-1)[grid.active_nodes,:][:,::-1] # comes as (nnodes, 4), and E,N,W,S
neighbor_nodes[:,4:] = grid.get_diagonal_list(bad_index=-1)[grid.active_nodes,:][:,[2,1,0,3]] #NE,NW,SW,SE
links_list = np.empty_like(neighbor_nodes)
links_list[:,:4] = grid.node_links().T[grid.active_nodes,:] #(n_active_nodes, SWNE)
                links_list[:,4:] = grid.node_diagonal_links().T[grid.active_nodes,:] #SW,NW,NE,SE
elevs_array = np.where(neighbor_nodes!=-1, elev[neighbor_nodes], np.finfo(float).max/1000.)
slope_array = (elev[grid.active_nodes].reshape((grid.active_nodes.size,1)) - elevs_array)/grid.link_length[links_list]
axis_indices = np.argmax(slope_array, axis=1)
steepest_slope[grid.active_nodes] = slope_array[np.indices(axis_indices.shape),axis_indices]
downslope = np.greater(steepest_slope, 0.)
downslope_active = downslope[grid.active_nodes]
receiver[downslope] = neighbor_nodes[np.indices(axis_indices.shape),axis_indices][0,downslope_active]
receiver_link[downslope] = links_list[np.indices(axis_indices.shape),axis_indices][0,downslope_active]
node_id = np.arange(num_nodes)
# Optionally, handle baselevel nodes: they are their own receivers
if baselevel_nodes is not None:
receiver[baselevel_nodes] = node_id[baselevel_nodes]
receiver_link[baselevel_nodes] = UNDEFINED_INDEX
steepest_slope[baselevel_nodes] = 0.
# The sink nodes are those that are their own receivers (this will normally
# include boundary nodes as well as interior ones; "pits" would be sink
# nodes that are also interior nodes).
(sink, ) = np.where(node_id==receiver)
sink = sink.astype(np.int, copy=False)
return receiver, steepest_slope, sink, receiver_link
if __name__ == '__main__':
import doctest
doctest.testmod()
|
|
from SBaaS_base.postgresql_orm_base import *
class data_stage02_isotopomer_fittedFluxes(Base):
__tablename__ = 'data_stage02_isotopomer_fittedFluxes'
id = Column(Integer, Sequence('data_stage02_isotopomer_fittedFluxes_id_seq'), primary_key=True)
simulation_id = Column(String(500))
simulation_dateAndTime = Column(DateTime);
#experiment_id = Column(String(50))
#model_id = Column(String(50))
#mapping_id = Column(String(100))
#sample_name_abbreviation = Column(String(100))
#time_point = Column(String(10))
rxn_id = Column(String(100))
flux = Column(Float);
flux_stdev = Column(Float);
flux_lb = Column(Float); # based on 95% CI
flux_ub = Column(Float);
flux_units = Column(String(50));
fit_alf = Column(Float);
fit_chi2s = Column(postgresql.ARRAY(Float));
fit_cor = Column(postgresql.ARRAY(Float));
fit_cov = Column(postgresql.ARRAY(Float));
free = Column(Boolean);
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (
#ForeignKeyConstraint(['simulation_id'], ['data_stage02_isotopomer_simulation.simulation_id']),
UniqueConstraint('simulation_id','rxn_id','simulation_dateAndTime'),
)
def __init__(self,
row_dict_I,
):
self.fit_cov=row_dict_I['fit_cov'];
self.comment_=row_dict_I['comment_'];
self.used_=row_dict_I['used_'];
self.free=row_dict_I['free'];
self.simulation_id=row_dict_I['simulation_id'];
self.simulation_dateAndTime=row_dict_I['simulation_dateAndTime'];
self.rxn_id=row_dict_I['rxn_id'];
self.flux=row_dict_I['flux'];
self.flux_stdev=row_dict_I['flux_stdev'];
self.flux_lb=row_dict_I['flux_lb'];
self.flux_ub=row_dict_I['flux_ub'];
self.flux_units=row_dict_I['flux_units'];
self.fit_alf=row_dict_I['fit_alf'];
self.fit_chi2s=row_dict_I['fit_chi2s'];
self.fit_cor=row_dict_I['fit_cor'];
def __set__row__(self,simulation_id_I,
simulation_dateAndTime_I,
#experiment_id_I,
#model_id_I,
#mapping_id_I,
#sample_name_abbreviation_I,
#time_point_I,
rxn_id_I,
flux_I,
flux_stdev_I,
flux_lb_I,
flux_ub_I,
flux_units_I,
fit_alf_I,
fit_chi2s_I,
fit_cor_I,
fit_cov_I,
free_I,
used__I,
comment__I):
self.simulation_id=simulation_id_I
self.simulation_dateAndTime=simulation_dateAndTime_I
#self.experiment_id=experiment_id_I
#self.model_id=model_id_I
#self.mapping_id=mapping_id_I
#self.sample_name_abbreviation=sample_name_abbreviation_I
#self.time_point=time_point_I
self.rxn_id=rxn_id_I
self.flux=flux_I
self.flux_stdev=flux_stdev_I
self.flux_lb=flux_lb_I
self.flux_ub=flux_ub_I
self.flux_units=flux_units_I
self.fit_alf=fit_alf_I
self.fit_chi2s=fit_chi2s_I
self.fit_cor=fit_cor_I
self.fit_cov=fit_cov_I
self.free=free_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'simulation_id':self.simulation_id,
'simulation_dateAndTime':self.simulation_dateAndTime,
#'experiment_id':self.experiment_id,
#'model_id':self.model_id,
#'mapping_id':self.mapping_id,
#'sample_name_abbreviation':self.sample_name_abbreviation,
#'time_point':self.time_point,
'rxn_id':self.rxn_id,
'flux':self.flux,
'flux_stdev':self.flux_stdev,
'flux_lb':self.flux_lb,
'flux_ub':self.flux_ub,
'flux_units':self.flux_units,
'fit_alf':self.fit_alf,
'fit_chi2s':self.fit_chi2s,
'fit_cor':self.fit_cor,
'fit_cov':self.fit_cov,
'free':self.free,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
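# Illustrative sketch (added, not part of the original module): rows are
# constructed from a dict whose keys match the columns assigned in __init__,
# e.g. (all values below are hypothetical)
#   row = data_stage02_isotopomer_fittedFluxes({
#       'simulation_id': 'sim01', 'simulation_dateAndTime': datetime.now(),
#       'rxn_id': 'PGI', 'flux': 1.0, 'flux_stdev': 0.1,
#       'flux_lb': 0.8, 'flux_ub': 1.2, 'flux_units': 'mmol*gDW-1*hr-1',
#       'fit_alf': 0.05, 'fit_chi2s': [], 'fit_cor': [], 'fit_cov': [],
#       'free': True, 'used_': True, 'comment_': None})
#   session.add(row)  # with a SQLAlchemy session bound to the SBaaS engine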
class data_stage02_isotopomer_fittedFragments(Base):
__tablename__ = 'data_stage02_isotopomer_fittedFragments'
id = Column(Integer, Sequence('data_stage02_isotopomer_fittedFragments_id_seq'), primary_key=True)
simulation_id = Column(String(500))
simulation_dateAndTime = Column(DateTime);
experiment_id = Column(String(50));
#model_id = Column(String(50));
#mapping_id = Column(String(100));
sample_name_abbreviation = Column(String(100));
time_point = Column(String(10));
fragment_id = Column(String(100));
#fragment_formula = Column(String(500));
fragment_mass = Column(Integer);
fit_val = Column(Float);
fit_stdev = Column(Float);
fit_units = Column(String(50));
fit_alf = Column(Float);
fit_cor = Column(postgresql.ARRAY(Float));
fit_cov = Column(postgresql.ARRAY(Float));
free = Column(Boolean);
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (
#ForeignKeyConstraint(['simulation_id'], ['data_stage02_isotopomer_simulation.simulation_id']),
UniqueConstraint('simulation_id','time_point','fragment_id','fragment_mass','simulation_dateAndTime'),
)
def __init__(self,
row_dict_I,
):
self.simulation_id=row_dict_I['simulation_id'];
self.comment_=row_dict_I['comment_'];
self.used_=row_dict_I['used_'];
self.free=row_dict_I['free'];
self.simulation_dateAndTime=row_dict_I['simulation_dateAndTime'];
self.fit_cov=row_dict_I['fit_cov'];
self.fit_cor=row_dict_I['fit_cor'];
self.fit_alf=row_dict_I['fit_alf'];
self.fit_units=row_dict_I['fit_units'];
self.fit_stdev=row_dict_I['fit_stdev'];
self.fit_val=row_dict_I['fit_val'];
self.fragment_mass=row_dict_I['fragment_mass'];
self.fragment_id=row_dict_I['fragment_id'];
self.time_point=row_dict_I['time_point'];
self.sample_name_abbreviation=row_dict_I['sample_name_abbreviation'];
self.experiment_id=row_dict_I['experiment_id'];
def __set__row__(self,simulation_id_I,
simulation_dateAndTime_I,
experiment_id_I,
#model_id_I,
#mapping_id_I,
sample_name_abbreviation_I,
time_point_I,
fragment_id_I,
#fragment_formula_I,
fragment_mass_I,
fit_val_I,
fit_stdev_I,
fit_units_I,
fit_alf_I,
fit_cor_I,
fit_cov_I,
free_I,
used__I,
comment__I):
self.simulation_id=simulation_id_I
self.simulation_dateAndTime=simulation_dateAndTime_I
self.experiment_id=experiment_id_I
#self.model_id=model_id_I
#self.mapping_id=mapping_id_I
self.sample_name_abbreviation=sample_name_abbreviation_I
self.time_point=time_point_I
self.fragment_id=fragment_id_I
#self.fragment_formula=fragment_formula_I
self.fragment_mass=fragment_mass_I
self.fit_val=fit_val_I
self.fit_stdev=fit_stdev_I
self.fit_units=fit_units_I
self.fit_alf=fit_alf_I
self.fit_cor=fit_cor_I
self.fit_cov=fit_cov_I
self.free=free_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'simulation_id':self.simulation_id,
'simulation_dateAndTime':self.simulation_dateAndTime,
'experiment_id':self.experiment_id,
#'model_id':self.model_id,
#'mapping_id':self.mapping_id,
'sample_name_abbreviation':self.sample_name_abbreviation,
'time_point':self.time_point,
'fragment_id':self.fragment_id,
#'fragment_formula':self.fragment_formula,
'fragment_mass':self.fragment_mass,
'fit_val':self.fit_val,
'fit_stdev':self.fit_stdev,
'fit_units':self.fit_units,
'fit_alf':self.fit_alf,
'fit_cor':self.fit_cor,
'fit_cov':self.fit_cov,
'free':self.free,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage02_isotopomer_fittedData(Base):
__tablename__ = 'data_stage02_isotopomer_fittedData'
id = Column(Integer, Sequence('data_stage02_isotopomer_fittedData_id_seq'), primary_key=True)
simulation_id = Column(String(500))
simulation_dateAndTime = Column(DateTime);
#experiment_id = Column(String(50))
#model_id = Column(String(50))
#mapping_id = Column(String(100))
#sample_name_abbreviation = Column(String(100))
#time_point = Column(String(10))
fitted_echi2 = Column(postgresql.ARRAY(Float));
fitted_alf = Column(Float);
fitted_chi2 = Column(Float);
fitted_dof = Column(Integer);
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (
#ForeignKeyConstraint(['simulation_id'], ['data_stage02_isotopomer_simulation.simulation_id']),
UniqueConstraint('simulation_id','simulation_dateAndTime'),
)
def __init__(self,
row_dict_I,
):
self.fitted_alf=row_dict_I['fitted_alf'];
self.fitted_chi2=row_dict_I['fitted_chi2'];
self.fitted_dof=row_dict_I['fitted_dof'];
self.used_=row_dict_I['used_'];
self.comment_=row_dict_I['comment_'];
self.simulation_dateAndTime=row_dict_I['simulation_dateAndTime'];
self.simulation_id=row_dict_I['simulation_id'];
self.fitted_echi2=row_dict_I['fitted_echi2'];
def __set__row__(self,
simulation_id_I,
simulation_dateAndTime_I,
#experiment_id_I,
#model_id_I,
#mapping_id_I,
#sample_name_abbreviation_I,
#time_point_I,
fitted_echi2_I,
fitted_alf_I,
fitted_chi2_I,
fitted_dof_I,
used__I,
comment__I):
self.simulation_id=simulation_id_I
self.simulation_dateAndTime=simulation_dateAndTime_I
#self.experiment_id=experiment_id_I
#self.model_id=model_id_I
#self.mapping_id=mapping_id_I
#self.sample_name_abbreviation=sample_name_abbreviation_I
#self.time_point=time_point_I
self.fitted_echi2=fitted_echi2_I
self.fitted_alf=fitted_alf_I
self.fitted_chi2=fitted_chi2_I
self.fitted_dof=fitted_dof_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'simulation_id':self.simulation_id,
'simulation_dateAndTime':self.simulation_dateAndTime,
#'experiment_id':self.experiment_id,
#'model_id':self.model_id,
#'mapping_id':self.mapping_id,
#'sample_name_abbreviation':self.sample_name_abbreviation,
#'time_point':self.time_point,
'fitted_echi2':self.fitted_echi2,
'fitted_alf':self.fitted_alf,
'fitted_chi2':self.fitted_chi2,
'fitted_dof':self.fitted_dof,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage02_isotopomer_fittedMeasuredFluxes(Base):
__tablename__ = 'data_stage02_isotopomer_fittedMeasuredFluxes'
id = Column(Integer, Sequence('data_stage02_isotopomer_fittedMeasuredFluxes_id_seq'), primary_key=True)
simulation_id = Column(String(500))
simulation_dateAndTime = Column(DateTime);
experiment_id = Column(String(50))
#model_id = Column(String(50))
#mapping_id = Column(String(100))
sample_name_abbreviation = Column(String(100))
#time_point = Column(String(10))
rxn_id = Column(String(100))
fitted_sres = Column(Float);
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (
#ForeignKeyConstraint(['simulation_id'], ['data_stage02_isotopomer_simulation.simulation_id']),
UniqueConstraint('simulation_id','rxn_id','simulation_dateAndTime'),
)
def __init__(self,
row_dict_I,
):
self.experiment_id=row_dict_I['experiment_id'];
self.comment_=row_dict_I['comment_'];
self.used_=row_dict_I['used_'];
self.fitted_sres=row_dict_I['fitted_sres'];
self.rxn_id=row_dict_I['rxn_id'];
self.sample_name_abbreviation=row_dict_I['sample_name_abbreviation'];
self.simulation_dateAndTime=row_dict_I['simulation_dateAndTime'];
self.simulation_id=row_dict_I['simulation_id'];
def __set__row__(self,
simulation_id_I,
simulation_dateAndTime_I,
experiment_id_I,
#model_id_I,
#mapping_id_I,
sample_name_abbreviation_I,
#time_point_I,
rxn_id_I,
fitted_sres_I,
used__I,
comment__I):
self.simulation_id=simulation_id_I
self.simulation_dateAndTime=simulation_dateAndTime_I
self.experiment_id=experiment_id_I
#self.model_id=model_id_I
#self.mapping_id=mapping_id_I
self.sample_name_abbreviation=sample_name_abbreviation_I
#self.time_point=time_point_I
self.rxn_id=rxn_id_I
self.fitted_sres=fitted_sres_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'simulation_id':self.simulation_id,
'simulation_dateAndTime':self.simulation_dateAndTime,
'experiment_id':self.experiment_id,
#'model_id':self.model_id,
#'mapping_id':self.mapping_id,
'sample_name_abbreviation':self.sample_name_abbreviation,
#'time_point':self.time_point,
'rxn_id':self.rxn_id,
'fitted_sres':self.fitted_sres,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage02_isotopomer_fittedMeasuredFragments(Base):
__tablename__ = 'data_stage02_isotopomer_fittedMeasuredFragments'
id = Column(Integer, Sequence('data_stage02_isotopomer_fittedMeasuredFragments_id_seq'), primary_key=True)
simulation_id = Column(String(500))
simulation_dateAndTime = Column(DateTime);
experiment_id = Column(String(50))
#model_id = Column(String(50))
#mapping_id = Column(String(100))
sample_name_abbreviation = Column(String(100))
#time_point = Column(String(10))
#met_id = Column(String(100))
fragment_id = Column(String(100))
#fragment_formula = Column(String(500))
fitted_sres = Column(Float);
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (
#ForeignKeyConstraint(['simulation_id'], ['data_stage02_isotopomer_simulation.simulation_id']),
UniqueConstraint('simulation_id','fragment_id','simulation_dateAndTime'),
)
def __init__(self,
row_dict_I,
):
self.comment_=row_dict_I['comment_'];
self.used_=row_dict_I['used_'];
self.fitted_sres=row_dict_I['fitted_sres'];
self.fragment_id=row_dict_I['fragment_id'];
self.sample_name_abbreviation=row_dict_I['sample_name_abbreviation'];
self.experiment_id=row_dict_I['experiment_id'];
self.simulation_dateAndTime=row_dict_I['simulation_dateAndTime'];
self.simulation_id=row_dict_I['simulation_id'];
def __set__row__(self,simulation_id_I,
simulation_dateAndTime_I,
experiment_id_I,
#model_id_I,
#mapping_id_I,
sample_name_abbreviation_I,
#time_point_I,
#met_id_I,
fragment_id_I,
#fragment_formula_I,
fitted_sres_I,
used__I,
comment__I):
self.simulation_id=simulation_id_I
self.simulation_dateAndTime=simulation_dateAndTime_I
self.experiment_id=experiment_id_I
#self.model_id=model_id_I
#self.mapping_id=mapping_id_I
self.sample_name_abbreviation=sample_name_abbreviation_I
#self.time_point=time_point_I
#self.met_id=met_id_I
self.fragment_id=fragment_id_I
#self.fragment_formula=fragment_formula_I
self.fitted_sres=fitted_sres_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'simulation_id':self.simulation_id,
'simulation_dateAndTime':self.simulation_dateAndTime,
'experiment_id':self.experiment_id,
#'model_id':self.model_id,
#'mapping_id':self.mapping_id,
'sample_name_abbreviation':self.sample_name_abbreviation,
#'time_point':self.time_point,
#'met_id':self.met_id,
'fragment_id':self.fragment_id,
#'fragment_formula':self.fragment_formula,
'fitted_sres':self.fitted_sres,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage02_isotopomer_fittedMeasuredFluxResiduals(Base):
__tablename__ = 'data_stage02_isotopomer_fittedMeasuredFluxResiduals'
id = Column(Integer, Sequence('data_stage02_isotopomer_fittedMeasuredFluxResiduals_id_seq'), primary_key=True)
simulation_id = Column(String(500))
simulation_dateAndTime = Column(DateTime);
experiment_id = Column(String(50))
#model_id = Column(String(50))
#mapping_id = Column(String(100))
sample_name_abbreviation = Column(String(100))
time_point = Column(String(10))
rxn_id = Column(String(100))
res_data = Column(Float);
res_esens = Column(Float);
res_fit = Column(Float);
res_msens = Column(Float);
res_peak = Column(String(100));
res_stdev = Column(Float);
res_val = Column(Float);
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (
#ForeignKeyConstraint(['simulation_id'], ['data_stage02_isotopomer_simulation.simulation_id']),
UniqueConstraint('simulation_id','rxn_id','time_point','simulation_dateAndTime'),
)
def __init__(self,
row_dict_I,
):
self.res_stdev=row_dict_I['res_stdev'];
self.res_peak=row_dict_I['res_peak'];
self.res_msens=row_dict_I['res_msens'];
self.res_fit=row_dict_I['res_fit'];
self.res_esens=row_dict_I['res_esens'];
self.used_=row_dict_I['used_'];
self.simulation_id=row_dict_I['simulation_id'];
self.simulation_dateAndTime=row_dict_I['simulation_dateAndTime'];
self.experiment_id=row_dict_I['experiment_id'];
self.sample_name_abbreviation=row_dict_I['sample_name_abbreviation'];
self.time_point=row_dict_I['time_point'];
self.rxn_id=row_dict_I['rxn_id'];
self.res_data=row_dict_I['res_data'];
self.res_val=row_dict_I['res_val'];
self.comment_=row_dict_I['comment_'];
def __set__row__(self,simulation_id_I,
simulation_dateAndTime_I,
experiment_id_I,
#model_id_I,
#mapping_id_I,
sample_name_abbreviation_I,
time_point_I,
rxn_id_I,
res_data_I,
res_esens_I,
res_fit_I,
res_msens_I,
res_peak_I,
res_stdev_I,
res_val_I,
used__I,
comment__I):
self.simulation_id=simulation_id_I
self.simulation_dateAndTime=simulation_dateAndTime_I
self.experiment_id=experiment_id_I
#self.model_id=model_id_I
#self.mapping_id=mapping_id_I
self.sample_name_abbreviation=sample_name_abbreviation_I
self.time_point=time_point_I
self.rxn_id=rxn_id_I
self.res_data=res_data_I
self.res_esens=res_esens_I
self.res_fit=res_fit_I
self.res_msens=res_msens_I
self.res_peak=res_peak_I
self.res_stdev=res_stdev_I
self.res_val=res_val_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'simulation_id':self.simulation_id,
'simulation_dateAndTime':self.simulation_dateAndTime,
'experiment_id':self.experiment_id,
#'model_id':self.model_id,
#'mapping_id':self.mapping_id,
'sample_name_abbreviation':self.sample_name_abbreviation,
'time_point':self.time_point,
'rxn_id':self.rxn_id,
'res_data':self.res_data,
'res_esens':self.res_esens,
'res_fit':self.res_fit,
'res_msens':self.res_msens,
'res_peak':self.res_peak,
'res_stdev':self.res_stdev,
'res_val':self.res_val,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage02_isotopomer_fittedMeasuredFragmentResiduals(Base):
__tablename__ = 'data_stage02_isotopomer_fittedMeasuredFragmentResiduals'
id = Column(Integer, Sequence('data_stage02_isotopomer_fittedMeasuredFragmentResiduals_id_seq'), primary_key=True)
simulation_id = Column(String(500))
simulation_dateAndTime = Column(DateTime);
experiment_id = Column(String(50));
#model_id = Column(String(50));
#mapping_id = Column(String(100));
sample_name_abbreviation = Column(String(100));
time_point = Column(String(10));
fragment_id = Column(String(100));
#fragment_formula = Column(String(500));
fragment_mass = Column(Integer);
res_data = Column(Float);
res_esens = Column(Float);
res_fit = Column(Float);
res_msens = Column(Float);
res_peak = Column(String(100));
res_stdev = Column(Float);
res_val = Column(Float);
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (
#ForeignKeyConstraint(['simulation_id'], ['data_stage02_isotopomer_simulation.simulation_id']),
UniqueConstraint('simulation_id','time_point','fragment_id','fragment_mass','simulation_dateAndTime'),
)
def __init__(self,
row_dict_I,
):
self.res_val=row_dict_I['res_val'];
self.used_=row_dict_I['used_'];
self.comment_=row_dict_I['comment_'];
self.simulation_id=row_dict_I['simulation_id'];
self.simulation_dateAndTime=row_dict_I['simulation_dateAndTime'];
self.experiment_id=row_dict_I['experiment_id'];
self.sample_name_abbreviation=row_dict_I['sample_name_abbreviation'];
self.time_point=row_dict_I['time_point'];
self.fragment_id=row_dict_I['fragment_id'];
self.fragment_mass=row_dict_I['fragment_mass'];
self.res_data=row_dict_I['res_data'];
self.res_esens=row_dict_I['res_esens'];
self.res_fit=row_dict_I['res_fit'];
self.res_msens=row_dict_I['res_msens'];
self.res_peak=row_dict_I['res_peak'];
self.res_stdev=row_dict_I['res_stdev'];
def __set__row__(self,simulation_id_I,
simulation_dateAndTime_I,
experiment_id_I,
#model_id_I,
#mapping_id_I,
sample_name_abbreviation_I,
time_point_I,
fragment_id_I,
#fragment_formula_I,
fragment_mass_I,
res_data_I,
res_esens_I,
res_fit_I,
res_msens_I,
res_peak_I,
res_stdev_I,
res_val_I,
used__I,
comment__I):
self.simulation_id=simulation_id_I
self.simulation_dateAndTime=simulation_dateAndTime_I
self.experiment_id=experiment_id_I
#self.model_id=model_id_I
#self.mapping_id=mapping_id_I
self.sample_name_abbreviation=sample_name_abbreviation_I
self.time_point=time_point_I
self.fragment_id=fragment_id_I
#self.fragment_formula=fragment_formula_I
self.fragment_mass=fragment_mass_I
self.res_data=res_data_I
self.res_esens=res_esens_I
self.res_fit=res_fit_I
self.res_msens=res_msens_I
self.res_peak=res_peak_I
self.res_stdev=res_stdev_I
self.res_val=res_val_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'simulation_id':self.simulation_id,
'simulation_dateAndTime':self.simulation_dateAndTime,
'experiment_id':self.experiment_id,
#'model_id':self.model_id,
#'mapping_id':self.mapping_id,
'sample_name_abbreviation':self.sample_name_abbreviation,
'time_point':self.time_point,
'fragment_id':self.fragment_id,
#'fragment_formula':self.fragment_formula,
'fragment_mass':self.fragment_mass,
'res_data':self.res_data,
'res_esens':self.res_esens,
'res_fit':self.res_fit,
'res_msens':self.res_msens,
'res_peak':self.res_peak,
'res_stdev':self.res_stdev,
'res_val':self.res_val,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage02_isotopomer_fittedFluxStatistics(Base):
__tablename__ = 'data_stage02_isotopomer_fittedFluxStatistics'
id = Column(Integer, Sequence('data_stage02_isotopomer_fittedFluxStatistics_id_seq'), primary_key=True)
simulation_id = Column(String(500))
simulation_dateAndTime = Column(DateTime);
n_fluxes = Column(Integer)
n_observableFluxes = Column(Integer)
total_precision = Column(Float)
total_observablePrecision = Column(Float)
relative_nObservableFluxes = Column(Float)
average_observableFluxPrecision = Column(Float);
average_fluxPrecision = Column(Float);
flux_units = Column(String(50));
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (
#ForeignKeyConstraint(['simulation_id'], ['data_stage02_isotopomer_simulation.simulation_id']),
UniqueConstraint('simulation_id','simulation_dateAndTime','flux_units'),
)
def __init__(self,
row_dict_I,
):
self.total_precision=row_dict_I['total_precision'];
self.relative_nObservableFluxes=row_dict_I['relative_nObservableFluxes'];
self.average_observableFluxPrecision=row_dict_I['average_observableFluxPrecision'];
self.average_fluxPrecision=row_dict_I['average_fluxPrecision'];
self.flux_units=row_dict_I['flux_units'];
self.used_=row_dict_I['used_'];
self.comment_=row_dict_I['comment_'];
self.simulation_id=row_dict_I['simulation_id'];
self.simulation_dateAndTime=row_dict_I['simulation_dateAndTime'];
self.n_fluxes=row_dict_I['n_fluxes'];
self.n_observableFluxes=row_dict_I['n_observableFluxes'];
self.total_observablePrecision=row_dict_I['total_observablePrecision'];
def __set__row__(self,simulation_id_I,
simulation_dateAndTime_I,
n_fluxes_I,
n_observableFluxes_I,
total_precision_I,
total_observablePrecision_I,
relative_nObservableFluxes_I,
average_observableFluxPrecision_I,
average_fluxPrecision_I,
flux_units_I,
used__I,
comment__I):
self.simulation_id=simulation_id_I
self.simulation_dateAndTime=simulation_dateAndTime_I
self.n_fluxes=n_fluxes_I
self.n_observableFluxes=n_observableFluxes_I
self.total_precision=total_precision_I
self.total_observablePrecision=total_observablePrecision_I
self.relative_nObservableFluxes=relative_nObservableFluxes_I
self.average_observableFluxPrecision=average_observableFluxPrecision_I
self.average_fluxPrecision=average_fluxPrecision_I
self.flux_units=flux_units_I
self.used_=used__I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'simulation_id':self.simulation_id,
'simulation_dateAndTime':self.simulation_dateAndTime,
'n_fluxes':self.n_fluxes,
'n_observableFluxes':self.n_observableFluxes,
'total_precision':self.total_precision,
'total_observablePrecision':self.total_observablePrecision,
'relative_nObservableFluxes':self.relative_nObservableFluxes,
'average_observableFluxPrecision':self.average_observableFluxPrecision,
'average_fluxPrecision':self.average_fluxPrecision,
'flux_units':self.flux_units,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage02_isotopomer_simulationParameters(Base):
__tablename__ = 'data_stage02_isotopomer_simulationParameters'
id = Column(Integer, Sequence('data_stage02_isotopomer_simulationParameters_id_seq'), primary_key=True)
simulation_id = Column(String(500))
simulation_dateAndTime = Column(DateTime);
original_filename=Column(Text); #TODO add in new column
cont_alpha = Column(Float);
cont_reltol = Column(Float);
cont_steps = Column(Float);
fit_nudge = Column(Float);
fit_reinit = Column(Boolean);
fit_reltol = Column(Float);
fit_starts = Column(Float);
fit_tau = Column(Float);
hpc_mcr = Column(String(50));
hpc_on = Column(Boolean);
hpc_serve = Column(String(50));
int_maxstep = Column(Float);
int_reltol = Column(Float);
int_senstol = Column(Float);
int_timeout = Column(Float);
int_tspan = Column(Float);
ms_correct = Column(Boolean);
oed_crit = Column(String(50))
oed_reinit = Column(Boolean);
oed_tolf = Column(Float);
oed_tolx = Column(Float);
sim_more = Column(Boolean);
sim_na = Column(Boolean);
sim_sens = Column(Boolean);
sim_ss = Column(Boolean);
sim_tunit = Column(String(50));
__table_args__ = (
#ForeignKeyConstraint(['simulation_id'], ['data_stage02_isotopomer_simulation.simulation_id']),
UniqueConstraint('simulation_id','simulation_dateAndTime'),
)
def __init__(self,
row_dict_I,
):
self.simulation_dateAndTime=row_dict_I['simulation_dateAndTime'];
self.sim_tunit=row_dict_I['sim_tunit'];
self.original_filename=row_dict_I['original_filename']
self.sim_ss=row_dict_I['sim_ss'];
self.sim_sens=row_dict_I['sim_sens'];
self.sim_na=row_dict_I['sim_na'];
self.sim_more=row_dict_I['sim_more'];
self.oed_tolx=row_dict_I['oed_tolx'];
self.oed_tolf=row_dict_I['oed_tolf'];
self.oed_reinit=row_dict_I['oed_reinit'];
self.oed_crit=row_dict_I['oed_crit'];
self.ms_correct=row_dict_I['ms_correct'];
self.int_tspan=row_dict_I['int_tspan'];
self.int_timeout=row_dict_I['int_timeout'];
self.int_senstol=row_dict_I['int_senstol'];
self.int_reltol=row_dict_I['int_reltol'];
self.int_maxstep=row_dict_I['int_maxstep'];
self.hpc_serve=row_dict_I['hpc_serve'];
self.hpc_on=row_dict_I['hpc_on'];
self.hpc_mcr=row_dict_I['hpc_mcr'];
self.fit_tau=row_dict_I['fit_tau'];
self.fit_starts=row_dict_I['fit_starts'];
self.fit_reltol=row_dict_I['fit_reltol'];
self.fit_reinit=row_dict_I['fit_reinit'];
self.fit_nudge=row_dict_I['fit_nudge'];
self.cont_steps=row_dict_I['cont_steps'];
self.cont_reltol=row_dict_I['cont_reltol'];
self.cont_alpha=row_dict_I['cont_alpha'];
self.simulation_id=row_dict_I['simulation_id'];
def __set__row__(self,simulation_id_I,
simulation_dateAndTime_I,
original_filename_I,
cont_alpha_I,
cont_reltol_I,
cont_steps_I,
fit_nudge_I,
fit_reinit_I,
fit_reltol_I,
fit_starts_I,
fit_tau_I,
hpc_mcr_I,
hpc_on_I,
hpc_serve_I,
int_maxstep_I,
int_reltol_I,
int_senstol_I,
int_timeout_I,
int_tspan_I,
ms_correct_I,
oed_crit_I,
oed_reinit_I,
oed_tolf_I,
oed_tolx_I,
sim_more_I,
sim_na_I,
sim_sens_I,
sim_ss_I,
sim_tunit_I):
self.simulation_id=simulation_id_I
self.simulation_dateAndTime=simulation_dateAndTime_I
self.original_filename=original_filename_I;
self.cont_alpha=cont_alpha_I
self.cont_reltol=cont_reltol_I
self.cont_steps=cont_steps_I
self.fit_nudge=fit_nudge_I
self.fit_reinit=fit_reinit_I
self.fit_reltol=fit_reltol_I
self.fit_starts=fit_starts_I
self.fit_tau=fit_tau_I
self.hpc_mcr=hpc_mcr_I
self.hpc_on=hpc_on_I
self.hpc_serve=hpc_serve_I
self.int_maxstep=int_maxstep_I
self.int_reltol=int_reltol_I
self.int_senstol=int_senstol_I
self.int_timeout=int_timeout_I
self.int_tspan=int_tspan_I
self.ms_correct=ms_correct_I
self.oed_crit=oed_crit_I
self.oed_reinit=oed_reinit_I
self.oed_tolf=oed_tolf_I
self.oed_tolx=oed_tolx_I
self.sim_more=sim_more_I
self.sim_na=sim_na_I
self.sim_sens=sim_sens_I
self.sim_ss=sim_ss_I
self.sim_tunit=sim_tunit_I
def __repr__dict__(self):
return {'id':self.id,
'simulation_id':self.simulation_id,
'simulation_dateAndTime':self.simulation_dateAndTime,
'original_filename':self.original_filename,
'cont_alpha':self.cont_alpha,
'cont_reltol':self.cont_reltol,
'cont_steps':self.cont_steps,
'fit_nudge':self.fit_nudge,
'fit_reinit':self.fit_reinit,
'fit_reltol':self.fit_reltol,
'fit_starts':self.fit_starts,
'fit_tau':self.fit_tau,
'hpc_mcr':self.hpc_mcr,
'hpc_on':self.hpc_on,
'hpc_serve':self.hpc_serve,
'int_maxstep':self.int_maxstep,
'int_reltol':self.int_reltol,
'int_senstol':self.int_senstol,
'int_timeout':self.int_timeout,
'int_tspan':self.int_tspan,
'ms_correct':self.ms_correct,
'oed_crit':self.oed_crit,
'oed_reinit':self.oed_reinit,
'oed_tolf':self.oed_tolf,
'oed_tolx':self.oed_tolx,
'sim_more':self.sim_more,
'sim_na':self.sim_na,
'sim_sens':self.sim_sens,
'sim_ss':self.sim_ss,
'sim_tunit':self.sim_tunit}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
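# Minimal usage sketch (illustrative values only): each model class takes a dict
# keyed by its column names, and __repr__json__() serializes the row via
# json.dumps. Persisting the object would additionally require a SQLAlchemy
# session bound to the project's engine (not shown here).
if __name__ == '__main__':
    example_row = {
        'simulation_id': 'sim_01',
        'simulation_dateAndTime': None,
        'n_fluxes': 100,
        'n_observableFluxes': 80,
        'total_precision': 1.25,
        'total_observablePrecision': 0.95,
        'relative_nObservableFluxes': 0.8,
        'average_observableFluxPrecision': 0.012,
        'average_fluxPrecision': 0.015,
        'flux_units': 'mmol*gDW-1*hr-1',
        'used_': True,
        'comment_': None}
    row = data_stage02_isotopomer_fittedFluxStatistics(example_row)
    print(row.__repr__json__())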
|
|
# -*- coding: utf-8 -*-
"""
ENERPI - python API for accessing handy objects:
- Remote replication or data extraction
- Local configuration variables
- Local ENERPI data catalog
- Constant & config derived definitions
- Receiver value generator
...
"""
import datetime as dt
import os
import pandas as pd
import requests
import tempfile
# noinspection PyUnresolvedReferences
from enerpi.base import (ENCODING, CONFIG, SENSORS, DATA_PATH, INDEX_DATA_CATALOG, check_resource_files,
FILE_LOGGING, LOGGING_LEVEL, log, timeit)
# noinspection PyUnresolvedReferences
from enerpi.database import init_catalog, get_ts_last_save, extract_log_file, delete_log_file, HDF_STORE
# noinspection PyUnresolvedReferences
from enerpi.enerpimeter import receiver_msg_generator, enerpi_raw_data, msg_to_dict
# noinspection PyUnresolvedReferences
from enerpi.iobroadcast import get_encryption_key, get_codec
from enerpi.hdftscat import get_catalog_paths
def enerpi_receiver_generator(verbose=False, n_msgs=None):
"""
Generator of values broadcast by the ENERPI Logger.
It can be used by any machine on the same network as the ENERPI Logger. It decrypts the encrypted broadcast and
returns a dict of variable names and values.
Used by the webserver to read & stream real-time values.
:param verbose: :bool: Log to stdout
:param n_msgs: :int: # of msgs to receive (unlimited by default).
:return: :dict:
"""
gen = receiver_msg_generator(verbose=verbose, n_msgs=n_msgs)
count = 0
while True:
try:
msg, _t1, _t2 = next(gen)
yield msg_to_dict(msg)
count += 1
except StopIteration:
log('EXIT from enerpi_receiver_generator. StopIteration in msg #{}'.format(count), 'error', verbose)
break
return None
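# Usage sketch (illustrative; assumes an ENERPI logger is broadcasting on the
# local network). n_msgs bounds the underlying receiver so the loop ends on its own:
#
#   for values in enerpi_receiver_generator(verbose=True, n_msgs=5):
#       print(values)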
def enerpi_default_config():
"""
Default configuration for ENERPI Data Catalog, read from INI file.
:return: :dict: parameters
"""
conf = {'store': HDF_STORE,
'DATA_PATH': DATA_PATH,
'delta': SENSORS.delta_sec_data,
'window': SENSORS.rms_roll_window_sec,
'ts': SENSORS.ts_data_ms,
'LOGGING_LEVEL': LOGGING_LEVEL,
'FILE_LOGGING': FILE_LOGGING}
return conf
def enerpi_data_catalog(check_integrity=False, **kwargs):
"""
Get ENERPI data catalog for access & operation.
:param check_integrity: :bool: False by default. If true, checks integrity and generates / updates data index.
:param kwargs: :dict:
:return: :EnerpiCatalog:
"""
return init_catalog(check_integrity=check_integrity, **kwargs)
def _request_enerpi_remote_file(url_api_download, dest_path,
file_mimetype='application/octet-stream', verbose=False, timeout=10):
"""
Download remote file from ENERPI
:param str url_api_download: download URL
:param str dest_path: local path for remote file
:param str file_mimetype: mimetype check
:param bool verbose: verbose mode
:return: download ok
:rtype: bool
"""
try:
r = requests.get(url_api_download, timeout=timeout)
except requests.exceptions.ConnectTimeout:
log('TIMEOUT REQUEST AT "{}"'.format(url_api_download), 'error', verbose, False)
return False
if r.ok and (r.headers['Content-Type'] == file_mimetype):
if verbose:
size_kb = int(r.headers['Content-Length']) / 1024
date_mod = pd.Timestamp(r.headers['Last-Modified']).tz_convert('Europe/Madrid')
log('ENERPI FILE Downloaded from "{}" --> {:.2f} KB, mtime={:%d/%m/%Y %H:%M:%S}'
.format(url_api_download, size_kb, date_mod), 'ok', verbose, False)
check_resource_files(dest_path, verbose=False)
with open(dest_path, 'wb') as f:
f.write(r.content)
if verbose:
local_size = os.path.getsize(dest_path) / 1024
local_date_mod = dt.datetime.fromtimestamp(os.path.getmtime(dest_path))
log('DOWNLOADED FILE NOW IN LOCAL DISK AT "{}", {:.2f} KB, mtime={:%d/%m/%Y %H:%M:%S}'
.format(dest_path, local_size, local_date_mod), 'magenta', verbose, False)
return True
log('REQUEST NOT OK TRYING TO DOWNLOAD FILE AT "{}". STATUS_CODE={}, HEADERS={}'
.format(url_api_download, r.status_code, r.headers), 'error', verbose, False)
return False
@timeit('remote_data_get', verbose=True)
def remote_data_get(t0, tf=None,
enerpi_ip='192.168.1.52', port=80,
prefix_remote_enerpi='/enerpi', verbose=True):
"""
Query an ENERPI catalog on a remote machine running enerpiweb.
:param t0: start of slice
:param tf: end of slice (or None for end = now)
:param enerpi_ip: IP of the remote machine
:param port: PORT of the remote enerpiweb server
:param prefix_remote_enerpi: URL prefix of the remote enerpiweb server
:param verbose: :bool: verbose mode
:return: (data_key, pd.DataFrame of sliced values) pairs
:rtype: dict
"""
def _request_extract_enerpi_data_store(url_api_download_st):
dest_path = os.path.join(tempfile.gettempdir(), 'temp_store.h5')
data = _request_enerpi_remote_file(url_api_download_st, dest_path, file_mimetype='application/octet-stream')
if data:
# Return content & remove hdf temporal file store:
with pd.HDFStore(dest_path, 'r') as st:
data = {k: st[k] for k in st.keys()}
log('HDF Store downloaded:\n{}'.format(st), 'ok', verbose, False)
os.remove(dest_path)
return data
return None
paths = get_catalog_paths(t0, tf)
url_mask = 'http://{}:{}{}/api/hdfstores/'.format(enerpi_ip, port, prefix_remote_enerpi) + '{}'
data_stores = []
for p in paths:
url = url_mask.format(os.path.split(p)[-1])
log('REQUEST HDF STORE AT: {}'.format(url), 'info', verbose, False)
data_i = _request_extract_enerpi_data_store(url)
if data_i is not None:
data_stores.append(data_i)
keys = set([k for d in data_stores for k in d.keys()])
data_out = {k: pd.DataFrame(pd.concat([data[k].loc[t0:tf] for data in data_stores
if k in data.keys()])).sort_index()
for k in keys}
return data_out
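# Usage sketch (illustrative; the IP, port and time bounds are placeholders):
#
#   data = remote_data_get('2016-08-01', tf=None, enerpi_ip='192.168.1.52', port=80)
#   for key, df in data.items():
#       print(key, df.shape)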
@timeit('replicate_remote_enerpi_data_catalog', verbose=True)
def replicate_remote_enerpi_data_catalog(local_path=DATA_PATH, enerpi_ip='192.168.1.52', port=80,
prefix_remote_enerpi='/enerpi', verbose=True):
"""
Replicate the ENERPI data catalog from a remote machine with enerpiweb running.
:param str local_path: Local path where to replicate remote data catalog
:param str enerpi_ip: IP of the remote machine
:param int port: PORT of the remote enerpiweb server (Default 80)
:param prefix_remote_enerpi: URL prefix of the remote enerpiweb server (Default '/enerpi')
:param bool verbose: verbose mode
"""
url_mask = 'http://{}:{}{}/api/'.format(enerpi_ip, port, prefix_remote_enerpi) + '{}'
# Get remote index:
csv_url = url_mask.format('filedownload/catalog')
path_csv = os.path.join(local_path, INDEX_DATA_CATALOG)
mimetype_csv = 'text/csv; charset={}'.format(ENCODING.lower())
ok_catalog = _request_enerpi_remote_file(csv_url, path_csv, file_mimetype=mimetype_csv, verbose=verbose, timeout=60)
if not ok_catalog:
log('ERROR RETRIEVING REMOTE CATALOG FILE IN: {}, ok={}'.format(csv_url, ok_catalog), 'error', verbose, False)
return False
# Load retrieved catalog:
remote_cat = enerpi_data_catalog(check_integrity=False, base_path=local_path)
if (remote_cat.tree is None) or remote_cat.tree.empty:
log('EMPTY REMOTE CATALOG! NOTHING TO REPLICATE. EXITING...', 'error', verbose, False)
return False
else:
log('REMOTE CATALOG TO REPLICATE:\n{}'.format(remote_cat.tree), 'debug', verbose, False)
df_stores = remote_cat.tree[remote_cat.tree.is_cat & remote_cat.tree.is_raw]
result = {}
ts_init = df_stores.ts_ini.min()
for _, row in df_stores.iterrows():
rel_path_remote_st = row.st
log('* Replicating store "{}", with {} raw samples from {:%-d/%m/%Y} to {:%-d/%m/%Y}'
.format(rel_path_remote_st, row.n_rows, row.ts_ini, row.ts_fin), 'debug', verbose, False)
abs_path_new_st = os.path.join(local_path, rel_path_remote_st)
file_id = os.path.split(rel_path_remote_st)[-1]
url_remote_st = url_mask.format('hdfstores/' + file_id)
ok_store_i = _request_enerpi_remote_file(url_remote_st, abs_path_new_st,
file_mimetype='application/octet-stream', verbose=verbose)
if ok_store_i:
result[file_id] = (int(os.path.getsize(abs_path_new_st) / 1024), row.n_rows)
else:
result[file_id] = (None, row.n_rows)
# Download raw_data:
url_remote_raw_st = url_mask.format('filedownload/raw_store')
abs_path_new_raw_data = os.path.join(local_path, HDF_STORE)
ok_store_raw = _request_enerpi_remote_file(url_remote_raw_st, abs_path_new_raw_data,
file_mimetype='application/octet-stream', verbose=verbose)
# Operation report
msg = '\nREPLICATION FROM {} DONE. DATA SINCE: {:%c}. RESULTS:\n'.format(enerpi_ip, ts_init)
kbytes = 0
if ok_store_raw:
kbytes_i = int(os.path.getsize(abs_path_new_raw_data) / 1024)
kbytes += kbytes_i
msg += ' -> RAW DATA: {} KB\n'.format(kbytes_i)
for k, v in result.items():
kbytes += v[0] or 0  # a failed store download is recorded as (None, n_rows)
msg += ' -> STORE "{}": {} KB; {} ROWS\n'.format(k, v[0], v[1])
msg += 'TOTAL SIZE OF DOWNLOADED DATA: {:.2f} MB'.format(kbytes / 1024)
log(msg, 'info', verbose, False)
return True
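# Usage sketch (illustrative; IP, port and local path are placeholders for a
# reachable enerpiweb instance and a writable destination directory):
if __name__ == '__main__':
    ok = replicate_remote_enerpi_data_catalog(local_path=DATA_PATH,
                                              enerpi_ip='192.168.1.52', port=80)
    print('Replication finished OK' if ok else 'Replication failed')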
|
|
# Generic CNN classifier that uses a geojson file and gbdx imagery to classify chips
import numpy as np
import os, random
import json, geojson
from mltools import geojson_tools as gt
from mltools.data_extractors import get_data_from_polygon_list as get_chips
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.models import Sequential, model_from_json
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.callbacks import ModelCheckpoint
from keras.optimizers import SGD
class PoolNet(object):
'''
Convolutional Neural Network model to classify chips as pool/no pool
INPUT classes (list [str]): Classes to train model on, exactly as they appear in
the properties of any geojsons used for training. Defaults to pool
classes: ['No swimming pool', 'Swimming pool'].
batch_size (int): Amount of images to use for each batch during training.
Defaults to 32.
input_shape (tuple[int]): Shape of input chips with theano dimensional
ordering (n_channels, height, width). Height and width must be equal. If
            an old model is loaded (model_name is not None), input shape will be
automatically set from the architecture and does not need to be specified.
Defaults to (3,125,125).
        model_name (str): Name of a previous model to load (not including file
            extension). There should be a json architecture file and an HDF5 ('.h5')
            weights file in the working directory under this name. If None, a new
            model will be compiled for training. Defaults to None.
        learning_rate (float): Learning rate for the first round of training. Defaults
            to 0.001.
small_model (bool): Use a model with nine layers instead of 16. Will train
faster but may be less accurate and cannot be used with large chips.
Defaults to False.
kernel_size (int): Size (in pixels) of the kernels to use at each
convolutional layer of the network. Defaults to 3 (standard for VGGNet).
'''
def __init__(self, classes=['No swimming pool', 'Swimming pool'], batch_size=32,
input_shape=(3, 125, 125), small_model=False, model_name=None,
learning_rate = 0.001, kernel_size=3):
self.nb_classes = len(classes)
self.classes = classes
self.batch_size = batch_size
self.small_model = small_model
self.input_shape = input_shape
self.lr = learning_rate
self.kernel_size = kernel_size
self.cls_dict = {classes[i]: i for i in xrange(len(self.classes))}
if model_name:
self.model_name = model_name
self.model = self._load_model_architecture(model_name)
self.model.load_weights(model_name + '.h5')
self.input_shape = self.model.input_shape
elif self.small_model:
self.model = self._small_model()
else:
self.model = self._VGG_16()
self.model_layer_names = [self.model.layers[i].get_config()['name']
for i in range(len(self.model.layers))]
def _VGG_16(self):
'''
Implementation of VGG 16-layer net.
'''
print 'Compiling VGG Net...'
model = Sequential()
model.add(ZeroPadding2D((1,1), input_shape=self.input_shape))
model.add(Convolution2D(64, self.kernel_size, self.kernel_size,activation='relu',
input_shape=self.input_shape))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(self.nb_classes, activation='softmax'))
sgd = SGD(lr=self.lr, decay=0.01, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy')
return model
def _small_model(self):
'''
Alternative model architecture with fewer layers for computationally expensive
training datasets
'''
print 'Compiling Small Net...'
model = Sequential()
model.add(ZeroPadding2D((1,1), input_shape=self.input_shape))
model.add(Convolution2D(64, self.kernel_size, self.kernel_size,activation='relu',
input_shape=self.input_shape))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, self.kernel_size, self.kernel_size,
activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(self.nb_classes, activation='softmax'))
sgd = SGD(lr=self.lr, decay=0.01, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy')
return model
def _load_model_architecture(self, model_name):
'''
Load a model architecture from a json file
INPUT model_name (str): Name of model to load
OUTPUT Loaded model architecture
'''
print 'Loading model {}'.format(model_name)
#load model
with open(model_name + '.json') as f:
mod = model_from_json(json.load(f))
return mod
def save_model(self, model_name):
'''
Saves the model architecture as a json file and the current weights as an HDF5 file
INPUT model_name (str): Name under which to save the architecture and weights.
This should not include the file extension.
'''
# Save architecture
arch, arch_json = '{}.json'.format(model_name), self.model.to_json()
with open(arch, 'w') as f:
json.dump(arch_json, f)
# Save weights
weights = '{}.h5'.format(model_name)
self.model.save_weights(weights)
def fit_from_geojson(self, train_geojson, max_side_dim=None, min_side_dim=0,
chips_per_batch=5000, train_size=10000, validation_split=0.1,
bit_depth=8, save_model=None, nb_epoch=10,
shuffle_btwn_epochs=True, return_history=False,
save_all_weights=True, retrain=False, learning_rate_2=0.01):
'''
Fit a model from a geojson file with training data. This method iteratively
yields large batches of chips to train on for each epoch. Please ensure that
your current working directory contains all imagery referenced in the
image_id property in train_geojson, and are named as follows: <image_id>.tif,
where image_id is the catalog id of the image.
INPUT train_geojson (string): Filename for the training data (must be a
geojson). The geojson must be filtered such that all polygons are of
valid size (as defined by max_side_dim and min_side_dim)
max_side_dim (int): Maximum acceptable side dimension (in pixels) for a
chip. If None, defaults to input_shape[-1]. If larger than the
input shape the chips extracted will be downsampled to match the
input shape. Defaults to None.
min_side_dim (int): Minimum acceptable side dimension (in pixels) for a
chip. Defaults to 0.
chips_per_batch (int): Number of chips to yield per batch. Must be small
enough to fit into memory. Defaults to 5000 (decrease for larger
input sizes).
train_size (int): Number of chips to use for training data.
validation_split (float): Proportion of training chips to use as validation
data. Defaults to 0.1.
bit_depth (int): Bit depth of the image strips from which training chips
are extracted. Defaults to 8 (standard for DRA'ed imagery).
save_model (string): Name of model for saving. if None, does not save
model to disk. Defaults to None
nb_epoch (int): Number of epochs to train for. Each epoch will be trained
on batches * batches_per_epoch chips. Defaults to 10.
shuffle_btwn_epochs (bool): Shuffle the features in train_geojson
between each epoch. Defaults to True.
return_history (bool): Return a list containing metrics from past epochs.
Defaults to False.
save_all_weights (bool): Save model weights after each epoch. A directory
called models will be created in the working directory. Defaults to
True.
retrain (bool): Freeze all layers except final softmax to retrain only
the final weights of the model. Defaults to False
learning_rate_2 (float): Learning rate for the second round of training.
Only relevant if retrain is True. Defaults to 0.01.
OUTPUT trained model, history
'''
resize_dim, validation_data, full_hist = None, None, []
# load geojson training polygons
with open(train_geojson) as f:
polygons = geojson.load(f)['features'][:train_size]
if len(polygons) < train_size:
raise Exception('Not enough polygons to train on. Please add more training ' \
                'data or decrease the value of train_size.')
# Determine size of chips to extract and resize dimension
if not max_side_dim:
max_side_dim = self.input_shape[-1]
elif max_side_dim != self.input_shape[-1]:
resize_dim = self.input_shape # resize chips to match input shape
# Recompile model with retrain params
if retrain:
for i in xrange(len(self.model.layers[:-1])):
self.model.layers[i].trainable = False
sgd = SGD(lr=learning_rate_2, momentum=0.9, nesterov=True)
self.model.compile(loss='categorical_crossentropy', optimizer=sgd)
# Set aside validation data
if validation_split > 0:
val_size = int(validation_split * train_size)
val_data, polygons = polygons[: val_size], polygons[val_size: ]
train_size = len(polygons)
# extract validation chips
print 'Getting validation data...\n'
valX, valY = get_chips(val_data, min_side_dim=min_side_dim,
max_side_dim=max_side_dim, classes=self.classes,
normalize=True, return_labels=True, mask=True,
bit_depth=bit_depth, show_percentage=True,
assert_all_valid=True, resize_dim=resize_dim)
validation_data = (valX, valY)
# Train model
for e in range(nb_epoch):
print 'Epoch {}/{}'.format(e + 1, nb_epoch)
# Make callback and directory for saved weights
if save_all_weights:
chk = ModelCheckpoint(filepath="./models/epoch" + str(e) + \
"_{val_loss:.2f}.h5", verbose=1,
save_weights_only=True)
if 'models' not in os.listdir('.'):
os.makedirs('models')
if shuffle_btwn_epochs:
np.random.shuffle(polygons)
# Cycle through batches of chips and train
for batch_start in range(0, train_size, chips_per_batch):
callbacks = []
this_batch = polygons[batch_start: batch_start + chips_per_batch]
# Get chips from batch
X, Y = get_chips(this_batch, min_side_dim=min_side_dim,
max_side_dim=max_side_dim, classes=self.classes,
normalize=True, return_labels=True, mask=True,
bit_depth=bit_depth, show_percentage=False,
assert_all_valid=True, resize_dim=resize_dim)
# Save weights on the final batch of the epoch (chk is only defined when
# save_all_weights is True)
if save_all_weights and batch_start == range(0, train_size, chips_per_batch)[-1]:
callbacks = [chk]
# Fit the model on this batch
hist = self.model.fit(X, Y, batch_size=self.batch_size, nb_epoch=1,
validation_data=validation_data,
callbacks=callbacks)
# Dict recording loss and val_loss after each epoch
full_hist.append(hist.history)
if save_model:
self.save_model(save_model)
if return_history:
return full_hist
def fit_xy(self, X_train, Y_train, validation_split=0.1, save_model=None,
nb_epoch=10, shuffle_btwn_epochs=True, return_history=False,
save_all_weights=True, retrain=False, learning_rate_2=0.01):
'''
Fit model on training chips already loaded into memory
INPUT X_train (array): Training chips with the following dimensions:
(train_size, num_channels, rows, cols). Dimensions of each chip
should match the input_size to the model.
Y_train (list): One-hot encoded labels to X_train with dimensions as
follows: (train_size, n_classes)
validation_split (float): Proportion of X_train to validate on while
training.
save_model (string): Name under which to save model. if None, does not
                save model. Defaults to None.
nb_epoch (int): Number of training epochs to complete
shuffle_btwn_epochs (bool): Shuffle the features in train_geojson
between each epoch. Defaults to True.
return_history (bool): Return a list containing metrics from past epochs.
Defaults to False.
save_all_weights (bool): Save model weights after each epoch. A directory
called models will be created in the working directory. Defaults to
True.
retrain (bool): Freeze all layers except final softmax to retrain only
the final weights of the model. Defaults to False
learning_rate_2 (float): Learning rate for the second round of training.
Only relevant if retrain is True. Defaults to 0.01.
OUTPUT trained Keras model.
'''
callbacks = []
# Recompile model with retrain params
if retrain:
for i in xrange(len(self.model.layers[:-1])):
self.model.layers[i].trainable = False
sgd = SGD(lr=learning_rate_2, momentum=0.9, nesterov=True)
self.model.compile(loss='categorical_crossentropy', optimizer=sgd)
# Define callback to save weights after each epoch
if save_all_weights:
chk = ModelCheckpoint(filepath="./models/ch_{epoch:02d}-{val_loss:.2f}.h5",
verbose=1, save_weights_only=True)
callbacks = [chk]
# Fit model
hist = self.model.fit(X_train, Y_train, validation_split=validation_split,
callbacks=callbacks, nb_epoch=nb_epoch,
shuffle=shuffle_btwn_epochs)
if save_model:
self.save_model(save_model)
if return_history:
return hist
def classify_geojson(self, target_geojson, output_name, max_side_dim=None,
min_side_dim=0, numerical_classes=True, chips_in_mem=5000,
bit_depth=8):
'''
Use the current model and weights to classify all polygons in target_geojson. The
output file will have a 'CNN_class' property with the net's classification
result, and a 'certainty' property with the net's certainty in the assigned
classification.
Please ensure that your current working directory contains all imagery referenced
in the image_id property in target_geojson, and are named as follows:
<image_id>.tif, where image_id is the catalog id of the image.
INPUT target_geojson (string): Name of the geojson to classify. This file
should only contain chips with side dimensions between min_side_dim
and max_side_dim (see below).
output_name (string): Name under which to save the classified geojson.
max_side_dim (int): Maximum acceptable side dimension (in pixels) for a
chip. If None, defaults to input_shape[-1]. If larger than the
input shape the chips extracted will be downsampled to match the
input shape. Defaults to None.
min_side_dim (int): Minimum acceptable side dimension (in pixels) for a
chip. Defaults to 0.
numerical_classes (bool): Make output classifications correspond to the
                indices (base 0) of the 'classes' attribute. If False, 'CNN_class'
is a string with the class name. Defaults to True.
chips_in_mem (int): Number of chips to load in memory at once. Decrease
this parameter for larger chip sizes. Defaults to 5000.
bit_depth (int): Bit depth of the image strips from which training chips
are extracted. Defaults to 8 (standard for DRA'ed imagery).
'''
resize_dim, yprob, ytrue = None, [], []
# Determine size of chips to extract and resize dimension
if not max_side_dim:
max_side_dim = self.input_shape[-1]
elif max_side_dim != self.input_shape[-1]:
resize_dim = self.input_shape # resize chips to match input shape
# Format output filename
if not output_name.endswith('.geojson'):
output_name = '{}.geojson'.format(output_name)
# Get polygon list from geojson
with open(target_geojson) as f:
features = geojson.load(f)['features']
# Classify in batches of chips_in_mem polygons
for ix in xrange(0, len(features), chips_in_mem):
this_batch = features[ix: (ix + chips_in_mem)]
try:
X = get_chips(this_batch, min_side_dim=min_side_dim,
max_side_dim=max_side_dim, classes=self.classes,
normalize=True, return_labels=False,
bit_depth=bit_depth, mask=True, show_percentage=False,
assert_all_valid=True, resize_dim=resize_dim)
except (AssertionError):
raise ValueError('Please filter the input geojson file using ' \
                'geojson_tools.filter_geojson() and ensure all ' \
'polygons are valid before using this method.')
# Predict classes of test data
yprob += list(self.model.predict_proba(X))
# Get predicted classes and certainty
yhat = [np.argmax(i) for i in yprob]
ycert = [str(np.max(j)) for j in yprob]
if not numerical_classes:
yhat = [self.classes[i] for i in yhat]
# Update geojson, save as output_name
data = zip(yhat, ycert)
property_names = ['CNN_class', 'certainty']
gt.write_properties_to(data, property_names=property_names,
input_file=target_geojson, output_file=output_name)
# Tools for analyzing network performance
def x_to_rgb(X):
'''
Transform a normalized (3,h,w) image (theano ordering) to a (h,w,3) rgb image
(tensorflow ordering).
Use this to view or save rgb polygons as images.
INPUT (1) 3d array 'X': original chip with theano dimensional ordering (3, h, w)
OUTPUT (1) 3d array: rgb image in tensorflow dim-ordering (h,w,3)
'''
rgb_array = np.zeros((X.shape[1], X.shape[2], 3), 'uint8')
rgb_array[...,0] = X[0] * 255
rgb_array[...,1] = X[1] * 255
rgb_array[...,2] = X[2] * 255
return rgb_array
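# Usage sketch (illustrative only): train the smaller network on a pre-filtered
# geojson of labeled polygons, save it, then classify a target geojson.
# 'train.geojson' and 'target.geojson' are placeholder filenames; the imagery
# referenced by their image_id properties must be present as <image_id>.tif.
if __name__ == '__main__':
    net = PoolNet(batch_size=32, input_shape=(3, 125, 125), small_model=True)
    net.fit_from_geojson('train.geojson', train_size=1000, nb_epoch=2,
                         save_model='pool_model')
    net.classify_geojson('target.geojson', output_name='classified_pools')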
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import jsonschema
from mock import Mock
from testtools.matchers import Is, Equals
from testtools.testcase import skip
from trove.common import apischema
from trove.instance.service import InstanceController
from trove.tests.unittests import trove_testtools
class TestInstanceController(trove_testtools.TestCase):
def setUp(self):
super(TestInstanceController, self).setUp()
self.controller = InstanceController()
self.instance = {
"instance": {
"volume": {"size": "1"},
"users": [
{"name": "user1",
"password": "litepass",
"databases": [{"name": "firstdb"}]}
],
"flavorRef": "https://localhost:8779/v1.0/2500/1",
"name": "TEST-XYS2d2fe2kl;zx;jkl2l;sjdcma239(E)@(D",
"databases": [
{
"name": "firstdb",
"collate": "latin2_general_ci",
"character_set": "latin2"
},
{
"name": "db2"
}
]
}
}
self.context = trove_testtools.TroveTestContext(self)
self.req = Mock(remote_addr='ip:port', host='myhost')
def verify_errors(self, errors, msg=None, properties=None, path=None):
msg = msg or []
properties = properties or []
self.assertThat(len(errors), Is(len(msg)))
i = 0
while i < len(msg):
self.assertIn(errors[i].message, msg)
if path:
self.assertThat(path, Equals(properties[i]))
else:
self.assertThat(errors[i].path.pop(), Equals(properties[i]))
i += 1
def test_get_schema_create(self):
schema = self.controller.get_schema('create', {'instance': {}})
self.assertIsNotNone(schema)
self.assertTrue('instance' in schema['properties'])
def test_get_schema_action_restart(self):
schema = self.controller.get_schema('action', {'restart': {}})
self.assertIsNotNone(schema)
self.assertTrue('restart' in schema['properties'])
def test_get_schema_action_resize_volume(self):
schema = self.controller.get_schema(
'action', {'resize': {'volume': {}}})
self.assertIsNotNone(schema)
self.assertTrue('resize' in schema['properties'])
self.assertTrue(
'volume' in schema['properties']['resize']['properties'])
def test_get_schema_action_resize_flavorRef(self):
schema = self.controller.get_schema(
'action', {'resize': {'flavorRef': {}}})
self.assertIsNotNone(schema)
self.assertTrue('resize' in schema['properties'])
self.assertTrue(
'flavorRef' in schema['properties']['resize']['properties'])
def test_get_schema_action_other(self):
schema = self.controller.get_schema(
'action', {'supersized': {'flavorRef': {}}})
self.assertIsNotNone(schema)
self.assertThat(len(schema.keys()), Is(0))
def test_validate_create_complete(self):
body = self.instance
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_create_complete_with_restore(self):
body = self.instance
body['instance']['restorePoint'] = {
"backupRef": "d761edd8-0771-46ff-9743-688b9e297a3b"
}
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_create_complete_with_restore_error(self):
body = self.instance
backup_id_ref = "invalid-backup-id-ref"
body['instance']['restorePoint'] = {
"backupRef": backup_id_ref
}
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
self.assertThat(len(errors), Is(1))
self.assertThat(errors[0].message,
Equals("'%s' does not match '%s'" %
(backup_id_ref, apischema.uuid['pattern'])))
def test_validate_create_blankname(self):
body = self.instance
body['instance']['name'] = " "
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
self.assertThat(len(errors), Is(1))
self.assertThat(errors[0].message,
Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'"))
def test_validate_create_invalid_name(self):
body = self.instance
body['instance']['name'] = "$#$%^^"
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
self.assertEqual(1, len(errors))
self.assertIn("'$#$%^^' does not match '^.*[0-9a-zA-Z]+.*$'",
errors[0].message)
def test_validate_restart(self):
body = {"restart": {}}
schema = self.controller.get_schema('action', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_invalid_action(self):
# TODO(juice) perhaps we should validate the schema not recognized
body = {"restarted": {}}
schema = self.controller.get_schema('action', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_resize_volume(self):
body = {"resize": {"volume": {"size": 4}}}
schema = self.controller.get_schema('action', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_resize_volume_string(self):
body = {"resize": {"volume": {"size": "4"}}}
schema = self.controller.get_schema('action', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_resize_volume_string_invalid_number(self):
body = {"resize": {"volume": {"size": '-44.0'}}}
schema = self.controller.get_schema('action', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
self.assertThat(errors[0].context[1].message,
Equals("'-44.0' does not match '^[0-9]+$'"))
self.assertThat(errors[0].path.pop(), Equals('size'))
def test_validate_resize_volume_invalid_characters(self):
body = {"resize": {"volume": {"size": 'x'}}}
schema = self.controller.get_schema('action', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
self.assertThat(errors[0].context[0].message,
Equals("'x' is not of type 'integer'"))
self.assertThat(errors[0].context[1].message,
Equals("'x' does not match '^[0-9]+$'"))
self.assertThat(errors[0].path.pop(), Equals('size'))
def test_validate_resize_instance(self):
body = {"resize": {"flavorRef": "https://endpoint/v1.0/123/flavors/2"}}
schema = self.controller.get_schema('action', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_resize_instance_int(self):
body = {"resize": {"flavorRef": 2}}
schema = self.controller.get_schema('action', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_resize_instance_string(self):
body = {"resize": {"flavorRef": 'foo'}}
schema = self.controller.get_schema('action', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_resize_instance_empty_url(self):
body = {"resize": {"flavorRef": ""}}
schema = self.controller.get_schema('action', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
self.verify_errors(errors[0].context,
["'' is too short",
"'' does not match '^.*[0-9a-zA-Z]+.*$'",
"'' is not of type 'integer'"],
["flavorRef", "flavorRef", "flavorRef",
"flavorRef"],
errors[0].path.pop())
@skip("This URI validator allows just about anything you give it")
def test_validate_resize_instance_invalid_url(self):
body = {"resize": {"flavorRef": "xyz-re1f2-daze329d-f23901"}}
schema = self.controller.get_schema('action', body)
self.assertIsNotNone(schema)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
self.verify_errors(errors, ["'' is too short"], ["flavorRef"])
def _setup_modify_instance_mocks(self):
instance = Mock()
instance.detach_replica = Mock()
instance.assign_configuration = Mock()
instance.unassign_configuration = Mock()
instance.update_db = Mock()
return instance
def test_modify_instance_with_empty_args(self):
instance = self._setup_modify_instance_mocks()
args = {}
self.controller._modify_instance(self.context, self.req,
instance, **args)
self.assertEqual(0, instance.detach_replica.call_count)
self.assertEqual(0, instance.unassign_configuration.call_count)
self.assertEqual(0, instance.assign_configuration.call_count)
self.assertEqual(0, instance.update_db.call_count)
def test_modify_instance_with_nonempty_args_calls_update_db(self):
instance = self._setup_modify_instance_mocks()
args = {}
args['any'] = 'anything'
self.controller._modify_instance(self.context, self.req,
instance, **args)
instance.update_db.assert_called_once_with(**args)
def test_modify_instance_with_False_detach_replica_arg(self):
instance = self._setup_modify_instance_mocks()
args = {}
args['detach_replica'] = False
self.controller._modify_instance(self.context, self.req,
instance, **args)
self.assertEqual(0, instance.detach_replica.call_count)
def test_modify_instance_with_True_detach_replica_arg(self):
instance = self._setup_modify_instance_mocks()
args = {}
args['detach_replica'] = True
self.controller._modify_instance(self.context, self.req,
instance, **args)
self.assertEqual(1, instance.detach_replica.call_count)
def test_modify_instance_with_configuration_id_arg(self):
instance = self._setup_modify_instance_mocks()
args = {}
args['configuration_id'] = 'some_id'
self.controller._modify_instance(self.context, self.req,
instance, **args)
self.assertEqual(1, instance.assign_configuration.call_count)
def test_modify_instance_with_None_configuration_id_arg(self):
instance = self._setup_modify_instance_mocks()
args = {}
args['configuration_id'] = None
self.controller._modify_instance(self.context, self.req,
instance, **args)
self.assertEqual(1, instance.unassign_configuration.call_count)
def test_modify_instance_with_all_args(self):
instance = self._setup_modify_instance_mocks()
args = {}
args['detach_replica'] = True
args['configuration_id'] = 'some_id'
self.controller._modify_instance(self.context, self.req,
instance, **args)
self.assertEqual(1, instance.detach_replica.call_count)
self.assertEqual(1, instance.assign_configuration.call_count)
instance.update_db.assert_called_once_with(**args)
|
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function, absolute_import
import codecs
from collections import OrderedDict
from collections import defaultdict
from itertools import chain
import logging
import os
from os.path import exists
from os.path import join
import sys
from licensedcode import saneyaml
from commoncode import fileutils
from licensedcode import licenses_data_dir, rules_data_dir
from licensedcode import index
from textcode import analysis
from textcode.analysis import Token
from os.path import dirname
from licensedcode import src_dir
"""
Model objects for license and rule persisted as YAML and text files.
"""
logger = logging.getLogger(__name__)
# import sys
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
# logger.setLevel(logging.DEBUG)
# special magic key for rules pointing to non-license text
not_a_license_key = 'not-a-license'
class License(object):
"""
A license consists of these files, where <key> is the license key:
- <key>.yml : the license data in YAML
- <key>.LICENSE: the license text
- <key>.SPDX: the SPDX license text
"""
def __init__(self, key=None, src_dir=licenses_data_dir):
# unique key: lower case ASCII characters, digits, underscore and dots.
self.key = key or ''
self.src_dir = src_dir
# commonly used short name, often abbreviated.
self.short_name = ''
# full name.
self.name = ''
# Attribution, Copyleft, etc
self.category = ''
self.owner = ''
self.homepage_url = ''
self.notes = ''
self.spdx_license_key = ''
self.spdx_full_name = ''
self.spdx_url = ''
self.spdx_notes = ''
self.text_urls = []
self.osi_url = ''
self.faq_url = ''
self.other_urls = []
self.data_file = join(self.src_dir, self.key + '.yml')
self.text_file = join(self.src_dir, self.key + '.LICENSE')
self.spdx_file = join(self.src_dir, self.key + '.SPDX')
if src_dir:
self.load(src_dir)
@property
def text(self):
"""
License text, re-loaded on demand.
"""
return self._read_text(self.text_file)
@property
def spdx_license_text(self):
"""
SPDX license text, re-loaded on demand.
"""
if self.spdx_license_key:
return self._read_text(self.spdx_file)
else:
return u''
def asdict(self):
"""
Return an OrderedDict of license data (excluding texts).
Empty values are not included.
"""
data = OrderedDict()
data['key'] = self.key
if self.short_name:
data['short_name'] = self.short_name
if self.name:
data['name'] = self.name
data['category'] = self.category
if self.owner:
data['owner'] = self.owner
if self.homepage_url:
data['homepage_url'] = self.homepage_url
if self.notes:
data['notes'] = self.notes
if self.spdx_license_key:
data['spdx_license_key'] = self.spdx_license_key
data['spdx_full_name'] = self.spdx_full_name
data['spdx_url'] = self.spdx_url
if self.spdx_notes:
data['spdx_notes'] = self.spdx_notes
if self.text_urls:
data['text_urls'] = self.text_urls
if self.osi_url:
data['osi_url'] = self.osi_url
if self.faq_url:
data['faq_url'] = self.faq_url
if self.other_urls:
data['other_urls'] = self.other_urls
return data
def dump(self):
"""
Dump a representation of self as multiple files named
this way:
- <key>.yml : the license data in YAML
- <key>.LICENSE: the license text
- <key>.SPDX: the SPDX license text
"""
as_yaml = saneyaml.dump(self.asdict())
self._write(self.data_file, as_yaml)
if self.text:
self._write(self.text_file, self.text)
if self.spdx_license_text:
self._write(self.spdx_file, self.spdx_license_text)
def _write(self, f, d):
with codecs.open(f, 'wb', encoding='utf-8') as of:
of.write(d)
def load(self, src_dir):
"""
Populate license data from a YAML file stored in src_dir.
Does not load text files.
"""
data_file = join(src_dir, self.data_file)
try:
with codecs.open(data_file, encoding='utf-8') as f:
data = saneyaml.load(f.read())
except Exception, e:
print()
print('#############################')
print('INVALID LICENSE FILE:', data_file)
print('#############################')
print(e)
print('#############################')
# this is a rare case, but yes we abruptly exit globally
sys.exit(1)
for k, v in data.items():
setattr(self, k, v)
def _read_text(self, location):
if not exists(location):
text = u''
else:
with codecs.open(location, encoding='utf-8') as f:
text = f.read()
return text
# cache license objects in a map by license key
_LICENSES_BY_KEY = {}
def get_licenses_by_key():
"""
Return a mapping of license key -> license object.
"""
global _LICENSES_BY_KEY
if not _LICENSES_BY_KEY :
_LICENSES_BY_KEY = load_licenses()
return _LICENSES_BY_KEY
def get_license(key):
"""
Return a license object for this key.
Raise a KeyError if the license does not exist.
"""
return get_licenses_by_key()[key]
def load_licenses(license_dir=licenses_data_dir):
"""
Return a mapping of key -> license objects, loaded from license files.
"""
licenses = {}
# TODO: add check for unknown files
for top, _, files in os.walk(license_dir):
for yfile in files:
if not yfile.endswith('.yml'):
continue
key = yfile.replace('.yml', '')
yfile = join(top, yfile)
src_dir = os.path.dirname(yfile)
licenses[key] = License(key, src_dir)
return licenses
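# Example (illustrative): load every bundled license definition and look one up
# by key; get_license() raises a KeyError for unknown keys.
#
#   licenses = load_licenses()
#   apache = get_license('apache-2.0')
#   print(apache.name, apache.spdx_license_key)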
def get_rules_from_license_texts(licenses_list=None):
"""
Return an iterable of rules built from the license texts and SPDX texts of
the license objects in the `licenses_list` mapping.
Load the reference license list from disk if `licenses_list` is not provided.
"""
if not licenses_list:
licenses_list = get_licenses_by_key()
for license_key, license_obj in licenses_list.items():
text = license_obj.text
spdx_text = license_obj.spdx_license_text
if text:
yield Rule(
text_file=join(license_obj.src_dir, license_obj.text_file),
licenses=[license_key],
)
if spdx_text:
yield Rule(
text_file=join(license_obj.src_dir, license_obj.spdx_file),
licenses=[license_key],
)
text_tknzr, template_tknzr, _ = index.tokenizers()
# token caching
cache_dir = join(dirname(dirname(src_dir)), '.cache', 'license_tokens')
if not os.path.exists(cache_dir):
fileutils.create_dir(cache_dir)
def get_tokens(location, template, use_cache=False):
"""
Return a list of tokens from a file at location, using the template or text
tokenizer function as appropriate.
"""
location = os.path.abspath(location)
if not exists(location):
return []
file_name = fileutils.file_name(location)
cached_tokens = os.path.join(cache_dir,file_name)
if use_cache and os.path.exists(cached_tokens):
# TODO: improve cache check
tokens = list(load_tokens(cached_tokens))
else:
tokenizr = template and template_tknzr or text_tknzr
lines = analysis.unicode_text_lines(location)
tokens = list(tokenizr(lines))
if use_cache:
dump_tokens(cached_tokens, tokens)
return tokens
def dump_tokens(location, tokens):
"""
Dump a list of tokens to a file location
"""
location = os.path.abspath(location)
with codecs.open(location, 'wb', encoding='utf-8') as loc:
loc.writelines([tok.dumps() for tok in tokens])
def load_tokens(location):
"""
Return a list of tokens loaded from a file location
"""
location = os.path.abspath(location)
with codecs.open(location, 'rb', encoding='utf-8') as loc:
loaded = loc.read().splitlines(False)
for l in loaded:
yield Token.loads(l)
class Rule(object):
"""
Base class for detection rules.
"""
def __init__(self, data_file=None, text_file=None,
licenses=None, license_choice=False,
template=False, notes=None):
self.licenses = licenses or []
self.license_choice = license_choice
self.notes = notes
self.template = template
self.data_file = data_file
if data_file:
self.load()
self.text_file = text_file
self.tokens = None # a list
self.tokens_count = 0
def get_tokens(self):
if self.tokens is None:
self.tokens = get_tokens(self.text_file, self.template)
self.tokens_count = len(self.tokens)
return self.tokens
@property
def text(self):
if not exists(self.text_file):
text = u''
else:
with codecs.open(self.text_file, encoding='utf-8') as f:
text = f.read()
return text
@property
def identifier(self):
return fileutils.file_name(self.text_file)
def __repr__(self):
rt = self.template
idf = self.identifier
text = self.text[:10] + '...'
return 'Rule(%(idf)r, template=%(rt)r, text=%(text)r)' % locals()
def asdict(self):
"""
Return an OrderedDict of self, excluding texts.
Empty values are not included.
"""
data = OrderedDict()
if self.licenses:
data['licenses'] = self.licenses
if self.license_choice:
data['license_choice'] = self.license_choice
if self.template:
data['template'] = self.template
if self.notes:
data['notes'] = self.notes
return data
def dump(self):
"""
Dump a representation of self to tgt_dir using two files:
- a .yml for the rule data in YAML block format
- a .RULE: the rule text as a UTF-8 file
"""
if self.data_file:
as_yaml = saneyaml.dump(self.asdict())
with codecs.open(self.data_file, 'wb', encoding='utf-8') as df:
df.write(as_yaml)
with codecs.open(self.text_file, 'wb', encoding='utf-8') as tf:
tf.write(self.text)
def load(self, load_notes=False):
"""
Load self from the YAML data file stored in self.data_file.
Does not load the rule text file.
"""
try:
with codecs.open(self.data_file, encoding='utf-8') as f:
data = saneyaml.load(f.read())
except Exception as e:
print()
print('#############################')
print('INVALID LICENSE RULE FILE:', self.data_file)
print('#############################')
print(e)
print('#############################')
# this is a rare case, but yes we abruptly exit globally
sys.exit(1)
self.licenses = data.get('licenses', [])
self.license_choice = data.get('license_choice', False)
self.template = data.get('template', False)
# these are purely informational and not used at run time
if load_notes:
self.notes = data.get('notes')
return self
def load_rules(rule_dir=rules_data_dir):
"""
Return a list of rules, loaded from rules files.
FIXME: return an iterable instead
"""
rules = []
seen_files = set()
processed_files = set()
for top, _, files in os.walk(rule_dir):
for yfile in files:
if yfile.endswith('.yml'):
data_file = join(top, yfile)
base_name = fileutils.file_base_name(yfile)
text_file = join(top, base_name + '.RULE')
rule = Rule(data_file=data_file, text_file=text_file)
rules.append(rule)
processed_files.add(data_file)
processed_files.add(text_file)
seen_file = join(top, yfile)
seen_files.add(seen_file)
unknown_files = seen_files - processed_files
if unknown_files:
print(unknown_files)
files = '\n'.join(sorted(unknown_files))
msg = 'Unknown files in rule directory: %(rule_dir)r\n%(files)s'
raise Exception(msg % locals())
return rules
def get_all_rules(_use_cache=False):
"""
Return an iterable of all unique rules loaded from licenses and rules files.
"""
rules = chain(get_rules_from_license_texts(), load_rules())
unique = unique_rules(rules)
verify_rules_license(unique)
return unique
class MissingLicense(Exception):
pass
def verify_rules_license(rules):
"""
Ensure that every rule's license is a valid license. Raise a MissingLicense
exception with a message containing the list of rule files that do not have
a corresponding existing license.
"""
invalid_rules = defaultdict(list)
for rule in rules:
for key in rule.licenses:
try:
get_license(key)
except KeyError:
invalid_rules[rule.data_file].append(key)
if invalid_rules:
invalid_rules = (data_file + ': ' + ' '.join(keys)
for data_file, keys in invalid_rules.iteritems())
msg = 'Rules data file with missing licenses:\n' + '\n'.join(invalid_rules)
raise MissingLicense(msg)
def unique_rules(rules):
"""
Return a list of unique rules.
FIXME: return an iterable instead
"""
seen = set()
uniques = []
for rule in rules:
ridt = rule_identifier(rule)
if ridt in seen:
continue
else:
seen.add(ridt)
uniques.append(rule)
return uniques
def rule_identifier(rule):
"""
Return a string used to compare similar rules.
"""
comparable = rule.text.strip().lower().split()
comparable.append(repr(rule.license_choice))
comparable.append(repr(rule.template))
comparable.extend(sorted(rule.licenses))
return u''.join([t for t in comparable if t])
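# Illustrative sketch (not part of the original module): two rules that carry
# the same license key and the same text (here both text files are assumed to
# be missing, so both texts are empty) collapse to a single entry, because
# rule_identifier() builds the same comparison string for each of them.
def _example_dedupe_rules():
    r1 = Rule(text_file='mit.RULE', licenses=['mit'])
    r2 = Rule(text_file='copy_of_mit.RULE', licenses=['mit'])
    return unique_rules([r1, r2])  # only one of the two rules is kept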
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import subprocess
import json
import threading
import warnings
import re
from functools import partial
from collections import defaultdict
from uuid import uuid1
try:
from Queue import Queue
except ImportError:
from queue import Queue
import sublime
from .console_logging import getLogger
from .settings import get_settings_param
logger = getLogger(__name__)
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PY3 = sys.version_info[0] == 3
DAEMONS = defaultdict(dict) # per window
def run_in_active_view(window_id, callback, response):
for window in sublime.windows():
if window.id() == window_id:
callback(window.active_view(), response)
break
class BaseThread(threading.Thread):
def __init__(self, fd, window_id, waiting, lock):
self.fd = fd
self.done = False
self.waiting = waiting
self.wait_lock = lock
self.window_id = window_id
super(BaseThread, self).__init__()
self.daemon = True
self.start()
class ThreadReader(BaseThread):
def run(self):
while not self.done:
line = self.fd.readline()
if line:
data = None
try:
data = json.loads(line.strip())
except ValueError:
if not isinstance(data, dict):
logger.exception(
"Non JSON data from daemon: {0}".format(line)
)
else:
self.call_callback(data)
def call_callback(self, data):
"""
Call callback for response data
:type data: dict
"""
if 'logging' in data:
getattr(logger, data['logging'])(data['content'])
return
with self.wait_lock:
callback = self.waiting.pop(data['uuid'], None)
if callback is not None:
delayed_callback = partial(
run_in_active_view,
self.window_id,
callback,
data[data['type']]
)
sublime.set_timeout(delayed_callback, 0)
class ThreadWriter(BaseThread, Queue):
def __init__(self, *args, **kwargs):
Queue.__init__(self)
super(ThreadWriter, self).__init__(*args, **kwargs)
def run(self):
while not self.done:
request_data = self.get()
if not request_data:
continue
callback, data = request_data
with self.wait_lock:
self.waiting[data['uuid']] = callback
if not isinstance(data, str):
data = json.dumps(data)
self.fd.write(data)
if not data.endswith('\n'):
self.fd.write('\n')
self.fd.flush()
class Daemon(object):
def __init__(self, view):
window_id = view.window().id()
self.waiting = dict()
self.wlock = threading.RLock()
self.process = self._start_process(get_settings(view))
self.stdin = ThreadWriter(self.process.stdin, window_id,
self.waiting, self.wlock)
self.stdout = ThreadReader(self.process.stdout, window_id,
self.waiting, self.wlock)
self.stderr = ThreadReader(self.process.stderr, window_id,
self.waiting, self.wlock)
def _start_process(self, settings):
options = {
'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'universal_newlines': True,
'cwd': CUR_DIR,
'bufsize': -1,
}
# hide "cmd" window in Windows
if sys.platform == "win32":
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
options['startupinfo'] = startupinfo
command = [
settings['python_interpreter'],
'-B', 'daemon.py',
'-p', settings['project_name']
]
for folder in settings['extra_packages']:
command.extend(['-e', folder])
command.extend(['-f', settings['complete_funcargs']])
logger.debug(
'Daemon process starting with parameters: {0} {1}'
.format(command, options)
)
try:
return subprocess.Popen(command, **options)
except OSError:
logger.error(
'Daemon process failed to start with parameters: {0} {1}'
.format(command, options)
)
raise
def request(self, view, request_type, callback, location=None):
"""
Send request to daemon process
:type view: sublime.View
:type request_type: str
:type callback: callable
:type location: type of (int, int) or None
"""
logger.info('Sending request to daemon for "{0}"'.format(request_type))
if location is None:
location = view.sel()[0].begin()
current_line, current_column = view.rowcol(location)
source = view.substr(sublime.Region(0, view.size()))
if PY3:
uuid = uuid1().hex
else:
uuid = uuid1().get_hex()
data = {
'source': source,
'line': current_line + 1,
'offset': current_column,
'filename': view.file_name() or '',
'type': request_type,
'uuid': uuid,
}
self.stdin.put_nowait((callback, data))
def ask_daemon(view, callback, ask_type, location=None):
"""
Daemon request shortcut
:type view: sublime.View
:type callback: callable
:type ask_type: str
:type location: type of (int, int) or None
"""
window_id = view.window().id()
if window_id not in DAEMONS:
DAEMONS[window_id] = Daemon(view)
DAEMONS[window_id].request(view, ask_type, callback, location)
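# Illustrative sketch (not part of the original plugin): a callback of the kind
# passed to ask_daemon(). The daemon replies under the request's uuid and the
# callback receives the active view plus the decoded payload. The request type
# 'autocomplete' below is only a placeholder for whatever types the daemon supports.
def _example_completion_callback(view, completions):
    logger.debug('Got {0} completions for {1}'.format(len(completions), view.file_name()))
# usage, e.g. inside an EventListener:
#     ask_daemon(view, _example_completion_callback, 'autocomplete')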
def get_settings(view):
"""
get settings for daemon
:type view: sublime.View
:rtype: dict
"""
python_interpreter = get_settings_param(view, 'python_interpreter_path')
if not python_interpreter:
python_interpreter = get_settings_param(view, 'python_interpreter',
'python')
else:
warnings.warn('`python_interpreter_path` parameter is deprecated. '
'Please use `python_interpreter` instead.',
DeprecationWarning)
python_interpreter = expand_path(view, python_interpreter)
extra_packages = get_settings_param(view, 'python_package_paths', [])
extra_packages = [expand_path(view, p) for p in extra_packages]
complete_funcargs = get_settings_param(view,
'auto_complete_function_params',
'all')
first_folder = ''
if view.window().folders():
first_folder = os.path.split(view.window().folders()[0])[-1]
project_name = get_settings_param(view, 'project_name', first_folder)
return {
'python_interpreter': python_interpreter,
'extra_packages': extra_packages,
'project_name': project_name,
'complete_funcargs': complete_funcargs
}
def is_python_scope(view, location):
""" (View, Point) -> bool
Return True if the location is in Python source scope (not inside a string or a comment).
"""
return view.match_selector(location, "source.python - string - comment")
def is_repl(view):
"""
Return True if the view is a SublimeREPL view.
"""
return view.settings().get("repl", False)
def to_relative_path(path):
"""
Trim the project root folder from **path** passed as argument.
If no folders are open, the path is returned unchanged.
"""
folders = sublime.active_window().folders()
for folder in folders:
# close path with separator
if folder[-1] != os.path.sep:
folder += os.path.sep
if path.startswith(folder):
return path.replace(folder, '')
return path
def split_path(d, keys):
assert isinstance(d, dict) and isinstance(keys, list)
for k in [x for x in keys if d.get(x) and os.path.exists(d[x])]:
d['%s_path' % k], d['%s_name' % k] = os.path.split(d[k])
d['%s_base_name' % k], d['%s_extension' % k] = \
os.path.splitext(d['%s_name' % k])
d['%s_extension' % k] = d['%s_extension' % k].lstrip('.')
return d
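# Illustrative sketch (not part of the original plugin): what split_path()
# derives for an existing file. Assuming '/tmp/project/app.py' exists, the call
# below adds '$file_path' -> '/tmp/project', '$file_name' -> 'app.py',
# '$file_base_name' -> 'app' and '$file_extension' -> 'py' to the dict.
def _example_split_path():
    return split_path({'$file': '/tmp/project/app.py'}, ['$file'])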
def expand_path(view, path):
"""
Expand Sublime Text build system and OS environment variables in the path
and normalize it (collapsing up-level references), so settings can combine
variables and separators, e.g.:
"python_interpreter": "$project_path/../../virtual/bin/python",
"python_package_paths": ["$home/.buildout/eggs"]
:type view: sublime.View
:type path: str
:rtype: str
"""
subl_vars = {}
try:
subl_vars['$file'] = view.file_name()
subl_vars['$packages'] = sublime.packages_path()
try:
subl_vars['$project'] = view.window().project_file_name()
except AttributeError:
subl_vars['$project'] = get_project_file_name(view.window())
subl_vars = split_path(subl_vars, ['$file', '$project'])
if '$' in path or '%' in path:
exp_path = path
for k in sorted(subl_vars, key=len, reverse=True):
if subl_vars[k]:
exp_path = exp_path.replace(k, subl_vars[k])
exp_path = os.path.normpath(os.path.expandvars(exp_path))
if os.path.exists(exp_path):
path = exp_path
except Exception:
logger.exception('Exception while expanding "{0}"'.format(path))
return path
def get_project_file_name(window):
"""
Get the project file name for Sublime Text 2.
"""
if not window.folders():
return None
projects = _get_projects_from_session()
for project_file in projects:
project_file = re.sub(r'^/([^/])/', '\\1:/', project_file)
project_json = json.loads(file(project_file, 'r').read(), strict=False)
if 'folders' in project_json:
folders = project_json['folders']
found_all = True
for directory in window.folders():
found = False
for folder in folders:
folder_path = re.sub(r'^/([^/])/', '\\1:/', folder['path'])
if folder_path == directory.replace('\\', '/'):
found = True
break
if not found:
found_all = False
break
if found_all:
return project_file
return None
def _get_projects_from_session():
session_file_path = os.path.join(sublime.packages_path(), '..', 'Settings', 'Session.sublime_session')
auto_session_file_path = os.path.join(sublime.packages_path(), '..', 'Settings', 'Auto Save Session.sublime_session')
projects = []
for file_path in [session_file_path, auto_session_file_path]:
try:
with file(os.path.normpath(file_path), 'r') as fd:
data = fd.read().replace('\t', ' ')
data = json.loads(data, strict=False)
projects += data.get('workspaces', {}).get('recent_workspaces', [])
except:
logger.info("File {0} missed".format(file_path))
continue
projects = list(set(projects))
return projects
|
|
patterns = {
'bump_on_decreasing_sequence' : {
'a' : '1',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 'r'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 's'}
},
'r': {
'>' : { 'semantic' : 'out', 'next_state' : 't'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 's'}
},
't': {
'>' : { 'semantic' : 'out', 'next_state' : 't'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'maybe_b', 'next_state' : 'u'}
},
'u': {
'>' : { 'semantic' : 'maybe_b', 'next_state' : 'v'},
'=' : { 'semantic' : 'out_r', 'next_state' : 's'},
'<' : { 'semantic' : 'out_r', 'next_state' : 's'}
},
'v': {
'>' : { 'semantic' : 'found_e', 'next_state' : 't'},
'=' : { 'semantic' : 'out_r', 'next_state' : 's'},
'<' : { 'semantic' : 'out_r', 'next_state' : 's'}
}
}
},
'decreasing' : {
'a' : '0',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'found_e', 'next_state' : 's'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 's'}
}
}
},
'decreasing_sequence' : {
'a' : '0',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'found', 'next_state' : 't'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 's'}
},
't': {
'>' : { 'semantic' : 'in', 'next_state' : 't'},
'=' : { 'semantic' : 'maybe_a', 'next_state' : 't'},
'<' : { 'semantic' : 'out_a', 'next_state' : 's'}
}
}
},
'decreasing_terrace' : {
'a' : '1',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 'r'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 's'}
},
'r': {
'>' : { 'semantic' : 'out', 'next_state' : 'r'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 't'},
'<' : { 'semantic' : 'out', 'next_state' : 's'}
},
't': {
'>' : { 'semantic' : 'found_e', 'next_state' : 'r'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 't'},
'<' : { 'semantic' : 'out_r', 'next_state' : 's'}
}
}
},
'dip_on_increasing_sequence' : {
'a' : '1',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 's'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 'r'}
},
'r': {
'>' : { 'semantic' : 'out', 'next_state' : 's'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 't'}
},
't': {
'>' : { 'semantic' : 'maybe_b', 'next_state' : 'u'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 't'}
},
'u': {
'>' : { 'semantic' : 'out_r', 'next_state' : 's'},
'=' : { 'semantic' : 'out_r', 'next_state' : 's'},
'<' : { 'semantic' : 'maybe_b', 'next_state' : 'v'}
},
'v': {
'>' : { 'semantic' : 'out_r', 'next_state' : 's'},
'=' : { 'semantic' : 'out_r', 'next_state' : 's'},
'<' : { 'semantic' : 'found_e', 'next_state' : 't'}
}
}
},
'gorge' : {
'a' : '1',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 'r'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 's'}
},
'r': {
'>' : { 'semantic' : 'maybe_b', 'next_state' : 'r'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 'u'},
'<' : { 'semantic' : 'found', 'next_state' : 't'}
},
't': {
'>' : { 'semantic' : 'out_a', 'next_state' : 'r'},
'=' : { 'semantic' : 'maybe_a', 'next_state' : 't'},
'<' : { 'semantic' : 'in', 'next_state' : 't'}
},
'u': {
'>' : { 'semantic' : 'maybe_b', 'next_state' : 'r'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 'u'},
'<' : { 'semantic' : 'out_r', 'next_state' : 's'}
}
}
},
'increasing' : {
'a' : '0',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 's'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'found_e', 'next_state' : 's'}
}
}
},
'increasing_sequence' : {
'a' : '0',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 's'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'found', 'next_state' : 't'}
},
't': {
'>' : { 'semantic' : 'out_a', 'next_state' : 's'},
'=' : { 'semantic' : 'maybe_a', 'next_state' : 't'},
'<' : { 'semantic' : 'in', 'next_state' : 't'}
}
}
},
'increasing_terrace' : {
'a' : '1',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 's'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 'r'}
},
'r': {
'>' : { 'semantic' : 'out', 'next_state' : 's'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 't'},
'<' : { 'semantic' : 'out', 'next_state' : 'r'}
},
't': {
'>' : { 'semantic' : 'out_r', 'next_state' : 's'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 't'},
'<' : { 'semantic' : 'found_e', 'next_state' : 'r'}
}
}
},
'inflexion' : {
'a' : '1',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 't'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 'r'}
},
'r': {
'>' : { 'semantic' : 'found_e', 'next_state' : 't'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 'r'},
'<' : { 'semantic' : 'maybe_b', 'next_state' : 'r'}
},
't': {
'>' : { 'semantic' : 'maybe_b', 'next_state' : 't'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 't'},
'<' : { 'semantic' : 'found_e', 'next_state' : 'r'}
}
}
},
'peak' : {
'a' : '1',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 's'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 'r'}
},
'r': {
'>' : { 'semantic' : 'found', 'next_state' : 't'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 'r'},
'<' : { 'semantic' : 'maybe_b', 'next_state' : 'r'},
},
't': {
'>' : { 'semantic' : 'in', 'next_state' : 't'},
'=' : { 'semantic' : 'maybe_a', 'next_state' : 't'},
'<' : { 'semantic' : 'out_a', 'next_state' : 'r'}
}
}
},
'plain' : {
'a' : '1',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 'r'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 's'}
},
'r': {
'>' : { 'semantic' : 'out', 'next_state' : 'r'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 't'},
'<' : { 'semantic' : 'found_e', 'next_state' : 's'},
},
't': {
'>' : { 'semantic' : 'out_r', 'next_state' : 'r'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 't'},
'<' : { 'semantic' : 'found_e', 'next_state' : 's'}
}
}
},
'plateau' : {
'a' : '1',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 's'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 'r'}
},
'r': {
'>' : { 'semantic' : 'found_e', 'next_state' : 's'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 't'},
'<' : { 'semantic' : 'out', 'next_state' : 'r'},
},
't': {
'>' : { 'semantic' : 'found_e', 'next_state' : 's'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 't'},
'<' : { 'semantic' : 'out_r', 'next_state' : 'r'}
}
}
},
'proper_plain' : {
'a' : '1',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 'r'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 's'}
},
'r': {
'>' : { 'semantic' : 'out', 'next_state' : 'r'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 't'},
'<' : { 'semantic' : 'out', 'next_state' : 's'},
},
't': {
'>' : { 'semantic' : 'out_r', 'next_state' : 'r'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 't'},
'<' : { 'semantic' : 'found_e', 'next_state' : 's'}
}
}
},
'proper_plateau' : {
'a' : '1',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 's'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 'r'}
},
'r': {
'>' : { 'semantic' : 'out', 'next_state' : 's'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 't'},
'<' : { 'semantic' : 'out', 'next_state' : 'r'},
},
't': {
'>' : { 'semantic' : 'found_e', 'next_state' : 's'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 't'},
'<' : { 'semantic' : 'out_r', 'next_state' : 'r'}
}
}
},
'steady' : {
'a' : '0',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 's'},
'=' : { 'semantic' : 'found_e', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 's'}
}
}
},
'steady_sequence' : {
'a' : '0',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 's'},
'=' : { 'semantic' : 'found', 'next_state' : 'r'},
'<' : { 'semantic' : 'out', 'next_state' : 's'}
},
'r': {
'>' : { 'semantic' : 'out_a', 'next_state' : 's'},
'=' : { 'semantic' : 'in', 'next_state' : 'r'},
'<' : { 'semantic' : 'out_a', 'next_state' : 's'}
}
}
},
'strictly_decreasing_sequence' : {
'a' : '0',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'found', 'next_state' : 'r'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 's'}
},
'r': {
'>' : { 'semantic' : 'in', 'next_state' : 'r'},
'=' : { 'semantic' : 'out_a', 'next_state' : 's'},
'<' : { 'semantic' : 'out_a', 'next_state' : 's'}
}
}
},
'strictly_increasing_sequence' : {
'a' : '0',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 's'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'found', 'next_state' : 'r'}
},
'r': {
'>' : { 'semantic' : 'out_a', 'next_state' : 's'},
'=' : { 'semantic' : 'out_a', 'next_state' : 's'},
'<' : { 'semantic' : 'in', 'next_state' : 'r'}
}
}
},
'summit' : {
'a' : '1',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 's'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'found', 'next_state' : 'r'}
},
'r': {
'>' : { 'semantic' : 'found', 'next_state' : 't'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 'u'},
'<' : { 'semantic' : 'maybe_b', 'next_state' : 'r'}
},
't': {
'>' : { 'semantic' : 'in', 'next_state' : 't'},
'=' : { 'semantic' : 'maybe_a', 'next_state' : 't'},
'<' : { 'semantic' : 'out_a', 'next_state' : 'r'}
},
'u': {
'>' : { 'semantic' : 'out_r', 'next_state' : 's'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 'u'},
'<' : { 'semantic' : 'maybe_b', 'next_state' : 'r'}
}
}
},
'valley' : {
'a' : '1',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 'r'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 's'}
},
'r': {
'>' : { 'semantic' : 'maybe_b', 'next_state' : 'r'},
'=' : { 'semantic' : 'maybe_b', 'next_state' : 'r'},
'<' : { 'semantic' : 'found', 'next_state' : 't'}
},
't': {
'>' : { 'semantic' : 'out_a', 'next_state' : 'r'},
'=' : { 'semantic' : 'maybe_a', 'next_state' : 't'},
'<' : { 'semantic' : 'in', 'next_state' : 't'}
}
}
},
'zigzag' : {
'a' : '1',
'entry' : 's',
'states' : {
's': {
'>' : { 'semantic' : 'out', 'next_state' : 'd'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 'a'}
},
'a': {
'>' : { 'semantic' : 'maybe_b', 'next_state' : 'b'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'out', 'next_state' : 'a'}
},
'b': {
'>' : { 'semantic' : 'out', 'next_state' : 'd'},
'=' : { 'semantic' : 'out_r', 'next_state' : 's'},
'<' : { 'semantic' : 'found', 'next_state' : 'c'}
},
'c': {
'>' : { 'semantic' : 'in', 'next_state' : 'f'},
'=' : { 'semantic' : 'out_a', 'next_state' : 's'},
'<' : { 'semantic' : 'out_a', 'next_state' : 'a'}
},
'd': {
'>' : { 'semantic' : 'out', 'next_state' : 'd'},
'=' : { 'semantic' : 'out', 'next_state' : 's'},
'<' : { 'semantic' : 'maybe_b', 'next_state' : 'e'}
},
'e': {
'>' : { 'semantic' : 'found', 'next_state' : 'f'},
'=' : { 'semantic' : 'out_r', 'next_state' : 's'},
'<' : { 'semantic' : 'out_r', 'next_state' : 'a'}
},
'f': {
'>' : { 'semantic' : 'out_a', 'next_state' : 'd'},
'=' : { 'semantic' : 'out_a', 'next_state' : 's'},
'<' : { 'semantic' : 'in', 'next_state' : 'c'}
}
}
}
}
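# Illustrative sketch (not part of the original data file): a minimal driver
# that walks one of the automata above over the signature of a series. Each
# pair of consecutive values is mapped to '<' (increase), '=' (equal) or '>'
# (decrease), and the driver returns the sequence of semantic actions emitted.
def run_pattern(name, series):
    automaton = patterns[name]
    state = automaton['entry']
    signature = ['<' if a < b else ('=' if a == b else '>')
                 for a, b in zip(series, series[1:])]
    semantics = []
    for symbol in signature:
        transition = automaton['states'][state][symbol]
        semantics.append(transition['semantic'])
        state = transition['next_state']
    return semantics
# e.g. run_pattern('peak', [1, 3, 3, 2]) -> ['out', 'maybe_b', 'found']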
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Developed by niphlod@gmail.com
License MIT/BSD/GPL
Redis-backed sessions
"""
import logging
import thread
from gluon import current
from gluon.storage import Storage
from gluon.contrib.redis_utils import acquire_lock, release_lock
from gluon.contrib.redis_utils import register_release_lock
logger = logging.getLogger("web2py.session.redis")
locker = thread.allocate_lock()
def RedisSession(redis_conn, session_expiry=False, with_lock=False, db=None):
"""
Usage example: put in models::
from gluon.contrib.redis_utils import RConn
rconn = RConn()
from gluon.contrib.redis_session import RedisSession
sessiondb = RedisSession(redis_conn=rconn, with_lock=True, session_expiry=False)
session.connect(request, response, db = sessiondb)
Args:
redis_conn: a redis-like connection object
with_lock: prevent concurrent modifications to the same session
session_expiry: automatically delete sessions after n seconds
(still need to run sessions2trash.py every 1M sessions
or so)
Simple slip-in storage for session
"""
locker.acquire()
try:
instance_name = 'redis_instance_' + current.request.application
if not hasattr(RedisSession, instance_name):
setattr(RedisSession, instance_name,
RedisClient(redis_conn, session_expiry=session_expiry, with_lock=with_lock))
return getattr(RedisSession, instance_name)
finally:
locker.release()
class RedisClient(object):
def __init__(self, redis_conn, session_expiry=False, with_lock=False):
self.r_server = redis_conn
self._release_script = register_release_lock(self.r_server)
self.tablename = None
self.session_expiry = session_expiry
self.with_lock = with_lock
def get(self, what, default):
return self.tablename
def Field(self, fieldname, type='string', length=None, default=None,
required=False, requires=None):
return None
def define_table(self, tablename, *fields, **args):
if not self.tablename:
self.tablename = MockTable(
self, self.r_server, tablename, self.session_expiry,
self.with_lock)
return self.tablename
def __getitem__(self, key):
return self.tablename
def __call__(self, where=''):
q = self.tablename.query
return q
def commit(self):
# this is only called by session2trash.py
pass
class MockTable(object):
def __init__(self, db, r_server, tablename, session_expiry, with_lock=False):
# here self.db is the RedisClient instance
self.db = db
self.tablename = tablename
# set the namespace for sessions of this app
self.keyprefix = 'w2p:sess:%s' % tablename.replace('web2py_session_', '')
# fast auto-increment id (needed for session handling)
self.serial = "%s:serial" % self.keyprefix
# index of all the session keys of this app
self.id_idx = "%s:id_idx" % self.keyprefix
# remember the session_expiry setting
self.session_expiry = session_expiry
self.with_lock = with_lock
def __call__(self, record_id, unique_key=None):
# Support DAL shortcut query: table(record_id)
# This will call the __getattr__ below
# returning a MockQuery
q = self.id
# Instructs MockQuery, to behave as db(table.id == record_id)
q.op = 'eq'
q.value = record_id
q.unique_key = unique_key
row = q.select()
return row[0] if row else Storage()
def __getattr__(self, key):
if key == 'id':
# return a fake query. We need to query it just by id for normal operations
self.query = MockQuery(
field='id', db=self.db,
prefix=self.keyprefix, session_expiry=self.session_expiry,
with_lock=self.with_lock, unique_key=self.unique_key
)
return self.query
elif key == '_db':
# needed because of the calls in sessions2trash.py and globals.py
return self.db
def insert(self, **kwargs):
# usually kwargs would be a Storage with several keys:
# 'locked', 'client_ip','created_datetime','modified_datetime'
# 'unique_key', 'session_data'
# retrieve a new key
newid = str(self.db.r_server.incr(self.serial))
key = self.keyprefix + ':' + newid
if self.with_lock:
key_lock = key + ':lock'
acquire_lock(self.db.r_server, key_lock, newid)
with self.db.r_server.pipeline() as pipe:
# add it to the index
pipe.sadd(self.id_idx, key)
# set a hash key with the Storage
pipe.hmset(key, kwargs)
if self.session_expiry:
pipe.expire(key, self.session_expiry)
pipe.execute()
if self.with_lock:
release_lock(self.db, key_lock, newid)
return newid
class MockQuery(object):
"""a fake Query object that supports querying by id
and listing all keys. No other operation is supported
"""
def __init__(self, field=None, db=None, prefix=None, session_expiry=False,
with_lock=False, unique_key=None):
self.field = field
self.value = None
self.db = db
self.keyprefix = prefix
self.op = None
self.session_expiry = session_expiry
self.with_lock = with_lock
self.unique_key = unique_key
def __eq__(self, value, op='eq'):
self.value = value
self.op = op
def __gt__(self, value, op='ge'):
self.value = value
self.op = op
def select(self):
if self.op == 'eq' and self.field == 'id' and self.value:
# means that someone wants to retrieve the key self.value
key = self.keyprefix + ':' + str(self.value)
if self.with_lock:
acquire_lock(self.db.r_server, key + ':lock', self.value, 2)
rtn = self.db.r_server.hgetall(key)
if rtn:
if self.unique_key:
# make sure the id and unique_key are correct
if rtn['unique_key'] == self.unique_key:
rtn['update_record'] = self.update # update record support
else:
rtn = None
return [Storage(rtn)] if rtn else []
elif self.op == 'ge' and self.field == 'id' and self.value == 0:
# means that someone wants the complete list
rtn = []
id_idx = "%s:id_idx" % self.keyprefix
# find all session keys of this app
allkeys = self.db.r_server.smembers(id_idx)
for sess in allkeys:
val = self.db.r_server.hgetall(sess)
if not val:
if self.session_expiry:
# clean up the idx, because the key expired
self.db.r_server.srem(id_idx, sess)
continue
val = Storage(val)
# add a delete_record method (necessary for sessions2trash.py)
val.delete_record = RecordDeleter(
self.db, sess, self.keyprefix)
rtn.append(val)
return rtn
else:
raise Exception("Operation not supported")
def update(self, **kwargs):
# means that the session has been found and needs an update
if self.op == 'eq' and self.field == 'id' and self.value:
key = self.keyprefix + ':' + str(self.value)
if not self.db.r_server.exists(key):
return None
with self.db.r_server.pipeline() as pipe:
pipe.hmset(key, kwargs)
if self.session_expiry:
pipe.expire(key, self.session_expiry)
rtn = pipe.execute()[0]
if self.with_lock:
release_lock(self.db, key + ':lock', self.value)
return rtn
def delete(self, **kwargs):
# means that we want this session to be deleted
if self.op == 'eq' and self.field == 'id' and self.value:
id_idx = "%s:id_idx" % self.keyprefix
key = self.keyprefix + ':' + str(self.value)
with self.db.r_server.pipeline() as pipe:
pipe.delete(key)
pipe.srem(id_idx, key)
rtn = pipe.execute()
return rtn[1]
class RecordDeleter(object):
"""Dumb record deleter to support sessions2trash.py"""
def __init__(self, db, key, keyprefix):
self.db, self.key, self.keyprefix = db, key, keyprefix
def __call__(self):
id_idx = "%s:id_idx" % self.keyprefix
# remove from the index
self.db.r_server.srem(id_idx, self.key)
# remove the key itself
self.db.r_server.delete(self.key)
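# Illustrative sketch (not part of the original module): how sessions2trash.py
# style code can enumerate every stored session through this mock DAL layer.
# `db` is assumed to be the RedisClient returned by RedisSession(...), and the
# table name is whatever session.connect() created for the application.
def _example_list_all_sessions(db):
    table = db.define_table('web2py_session_welcome')
    query = db(table.id > 0)  # __gt__ stores op 'ge' and value 0 on the MockQuery
    return query.select()     # the 'ge' branch walks the id index and returns Storage rows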
|
|
from datetime import datetime
from active_alchemy import ActiveAlchemy
from sqlalchemy import distinct, func
from elasticsearch import Elasticsearch
from decimal import Decimal
import pytz
import calendar
import json
import os
db = ActiveAlchemy(os.environ['DATABASE_URL'])
es_service = os.environ.get("ES_SERVICE", "localhost")
es = Elasticsearch(['http://'+es_service+':9200/'])
pricing = json.load(open("region_instance_prices.json"))
EXTRA_MONEY = 1.2 # if you want to tune billings, this is the dial. 1.2 means add 20% on top of what is calculated
# that we pay to AWS or whichever host
SECONDS_IN_HR = 3600
BYTES_IN_GB = 1000000000
STORAGE_PRICE_GB_MONTH = 0.03
class Billing(db.Model):
id = db.Column(db.Integer, primary_key=True)
storage_cost = db.Column(db.Numeric, nullable=False, default=0)
compute_cost = db.Column(db.Numeric, nullable=False, default=0)
project = db.Column(db.Text)
start_date = db.Column(db.DateTime)
end_date = db.Column(db.DateTime)
created_date = db.Column(db.DateTime, default=datetime.utcnow())
closed_out = db.Column(db.Boolean, nullable=False, default=False)
cost_by_analysis = db.Column(db.JSON)
__table_args__ = (db.UniqueConstraint('is_deleted', 'project', 'start_date', name='unique_prj_start'),)
def __init__(self, compute_cost, storage_cost, project, cost_by_analysis, start_date, end_date, **kwargs):
db.Model.__init__(self, compute_cost=compute_cost, storage_cost=storage_cost, project=project,
start_date=start_date.replace(tzinfo=pytz.UTC),
end_date=end_date.replace(tzinfo=pytz.UTC),
cost_by_analysis=cost_by_analysis,
**kwargs)
def __repr__(self):
return "<Billing, Project: {} , Cost: {}, Time Range: {}-{}, Time created: {}".format(
self.project, self.cost, str(self.start_date),
str(self.end_date), str(self.created_date))
def to_json(self):
dict_representation = {}
dict_representation["cost"] = str(round(self.cost,2))
dict_representation["compute_cost"] = str(round(self.compute_cost, 2))
dict_representation["storage_cost"] = str(round(self.storage_cost,2))
dict_representation["project"] = self.project
dict_representation["start_date"] = datetime.strftime(self.start_date, format="%a %b %d %H:%M:%S %Z %Y")
dict_representation["end_date"] = datetime.strftime(self.end_date, format="%a %b %d %H:%M:%S %Z %Y")
dict_representation["by_analysis"] = self.cost_by_analysis
dict_representation["month_of"] = datetime.strftime(self.start_date, format="%B-%Y")
return dict_representation
def __close_out__(self):
self.end_date = datetime.utcnow().replace(tzinfo=pytz.UTC)
self.closed_out = True
@property
def cost(self):
return self.compute_cost+self.storage_cost
def get_projects_list():
es_resp = es.search(index='billing_idx', body={"query": {"match_all": {}}, "aggs": {
"projects":{
"terms":{
"field": "project.keyword",
"size": 9999
}
}
}}, size=0)
projects = []
for project in es_resp['aggregations']['projects']['buckets']:
projects.append(project['key'])
return projects
def get_previous_file_sizes(timeend, project):
timeendstring = timeend.replace(tzinfo=pytz.UTC).strftime('%Y-%m-%dT%H:%M:%S')
es_resp = es.search(index='billing_idx', body={
"query": {
"bool": {
"must": [
{
"term": {
"project.keyword": project
}
},
{
"range": {
"timestamp": {
"lt": timeendstring,
}
}
}
]
}
},
"aggs": {
"filtered_nested_timestamps": {
"nested": {
"path": "specimen.samples.analysis"
},
"aggs": {
"sum_sizes": {
"sum": {
"field": "specimen.samples.analysis.workflow_outputs.file_size"
}
}
}
}
}
}, size=9999)
return es_resp
def get_months_uploads(project, timefrom, timetil):
timestartstring = timefrom.replace(tzinfo=pytz.UTC).strftime('%Y-%m-%dT%H:%M:%S')
timeendstring = timetil.replace(tzinfo=pytz.UTC).strftime('%Y-%m-%dT%H:%M:%S')
es_resp = es.search(index='billing_idx', body =
{
"query": {
"bool": {
"must": [
{
"range": {
"timestamp": {
"gte": timestartstring,
"lt": timeendstring
}
}
},
{
"term": {
"project.keyword": project
}
}
]
}
},
"aggs": {
"filtered_nested_timestamps": {
"nested": {
"path": "specimen.samples.analysis"
},
"aggs": {
"times": {
"terms": {
"field": "specimen.samples.analysis.timestamp"
},
"aggs": {
"sum_sizes": {
"sum": {
"field": "specimen.samples.analysis.workflow_outputs.file_size"
}
}
}
}
}
}
}
}, size=9999)
return es_resp
def make_search_filter_query(timefrom, timetil, project):
"""
:param timefrom: datetime object, inclusive lower bound on the analysis stop time
:param timetil: datetime object, exclusive upper bound on the analysis stop time
:param project: string, the name of the project the bill is being generated for
:return:
"""
timestartstring = timefrom.replace(tzinfo=pytz.UTC).strftime('%Y-%m-%dT%H:%M:%S')
timeendstring = timetil.replace(tzinfo=pytz.UTC).strftime('%Y-%m-%dT%H:%M:%S')
es_resp = es.search(index='billing_idx', body={
"query": {
"bool": {
"must": [
{
"term": {
"project.keyword": project
}
},
{
"nested": {
"path": "specimen.samples.analysis",
"score_mode": "max",
"query": {
"range": {
"specimen.samples.analysis.timing_metrics.overall_stop_time_utc": {
"gte": timestartstring,
"lt": timeendstring,
"format": "yyy-MM-dd'T'HH:mm:ss"
}
}
}
}
}
]
}
},
"aggs": {
"filtered_nested_timestamps": {
"nested": {
"path": "specimen.samples.analysis"
},
"aggs": {
"filtered_range": {
"filter": {
"range": {
"specimen.samples.analysis.timing_metrics.overall_stop_time_utc": {
"gte": timestartstring,
"lt": timeendstring,
"format": "yyy-MM-dd'T'HH:mm:ss"
}}
},
"aggs": {
"vmtype": {
"terms": {
"field": "specimen.samples.analysis.host_metrics.vm_instance_type.raw",
"size": 9999
},
"aggs": {
"regions": {
"terms": {
"field": "specimen.samples.analysis.host_metrics.vm_region.raw",
"size": 9999
},
"aggs": {
"totaltime": {
"sum": {
"field": "specimen.samples.analysis.timing_metrics.overall_walltime_seconds"
}
}
}
}
}
}
}
}
}
}
}
}, size=9999)
return es_resp
def get_datetime_from_es(timestr):
return datetime.strptime(timestr, "%Y-%m-%dT%H:%M:%S.%f").replace(tzinfo=pytz.UTC)
def calculate_compute_cost(total_seconds, vm_cost_hr):
return Decimal(Decimal(total_seconds)/Decimal(SECONDS_IN_HR)*Decimal(vm_cost_hr)*Decimal(EXTRA_MONEY))
def calculate_storage_cost(portion_month_stored, file_size_gb):
return Decimal(portion_month_stored)*Decimal(file_size_gb)*Decimal(STORAGE_PRICE_GB_MONTH)*Decimal(EXTRA_MONEY)
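# Illustrative sketch (not part of the original module): the two cost helpers
# above in isolation. One hour on a $0.10/hr instance and 10 GB stored for half
# a month, both inflated by EXTRA_MONEY; expect roughly 0.12 and 0.18 (with a
# little precision noise, since EXTRA_MONEY and STORAGE_PRICE_GB_MONTH are floats).
def _example_costs():
    compute = calculate_compute_cost(total_seconds=3600, vm_cost_hr='0.10')
    storage = calculate_storage_cost(portion_month_stored='0.5', file_size_gb=10)
    return compute, storage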
def get_vm_string(host_metrics):
return str(host_metrics.get("vm_region")) + str(host_metrics.get("vm_instance_type"))
def make_bills(comp_aggregations, previous_month_bytes, portion_of_month, this_month_timestamps_sizes, curr_time,
seconds_in_month):
x=comp_aggregations
print(x)
instances = x["aggregations"]["filtered_nested_timestamps"]["filtered_range"]["vmtype"]["buckets"]
total_pricing = Decimal()
for instance in instances:
instanceType = instance["key"]
regions = instance["regions"]["buckets"]
for region in regions:
regionName = region["key"]
totalTime = region["totaltime"]["value"]
print(regionName, instanceType, totalTime, pricing[regionName+instanceType])
total_pricing += calculate_compute_cost(totalTime, pricing[regionName + instanceType])
# need to get the storage size for files completed before start of this month
storage_size_bytes = previous_month_bytes['aggregations']['filtered_nested_timestamps']['sum_sizes']['value']
storage_size_gb = Decimal(storage_size_bytes)/Decimal(BYTES_IN_GB)
total_pricing += Decimal(STORAGE_PRICE_GB_MONTH)*storage_size_gb*portion_of_month*Decimal(EXTRA_MONEY)
# calculate the money spent on storing workflow outputs which were uploaded during this month
this_month_timestamps = this_month_timestamps_sizes['aggregations']['filtered_nested_timestamps']['times'][
'buckets']
for ts_sum in this_month_timestamps:
time_string = ts_sum['key_as_string']
time = datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=pytz.UTC)
timediff = (curr_time - time).total_seconds()
month_portion = Decimal(timediff)/Decimal(seconds_in_month)
storage_size_bytes = ts_sum['sum_sizes']['value']
storage_size_gb = Decimal(storage_size_bytes)/Decimal(BYTES_IN_GB)
cost_here = storage_size_gb * month_portion
total_pricing += cost_here
return total_pricing
def get_compute_costs(comp_aggregations):
#create total compute cost for an entire project for a month
instances = comp_aggregations["aggregations"]["filtered_nested_timestamps"]["filtered_range"]["vmtype"]["buckets"]
compute_costs = Decimal(0)
for instance in instances:
instanceType = instance["key"]
regions = instance["regions"]["buckets"]
for region in regions:
regionName = region["key"]
totalTime = region["totaltime"]["value"]
print(regionName, instanceType, totalTime, pricing[regionName+instanceType])
compute_costs += calculate_compute_cost(totalTime, pricing[regionName + instanceType])
return compute_costs
def create_analysis_costs_json(this_month_comp_hits, bill_time_start, bill_time_end):
analysis_costs = []
analysis_cost_actual = 0
for donor_doc in this_month_comp_hits:
donor = donor_doc.get("_source")
for specimen in donor.get("specimen"):
for sample in specimen.get("samples"):
for analysis in sample.get("analysis"):
timing_stats = analysis.get("timing_metrics")
if timing_stats:
time = timing_stats["overall_walltime_seconds"]
analysis_end_time = get_datetime_from_es(timing_stats["overall_stop_time_utc"])
analysis_start_time = get_datetime_from_es(timing_stats["overall_start_time_utc"])
if analysis_end_time < bill_time_end and analysis_start_time >= bill_time_start:
host_metrics = analysis.get("host_metrics")
if host_metrics:
cost = calculate_compute_cost(time, pricing.get(get_vm_string(host_metrics)))
analysis_costs.append(
{
"donor": donor.get("submitter_donor_id"),
"specimen": specimen.get("submitter_specimen_id"),
"sample": sample.get("submitter_sample_id"),
"workflow": analysis.get("analysis_type"),
"version": analysis.get("workflow_version"),
"cost": str(cost)
}
)
analysis_cost_actual += cost
return analysis_costs
def workflow_output_total_size(workflow_outputs_array):
size = 0
if workflow_outputs_array:
for output in workflow_outputs_array:
this_size = output.get("file_size")
if this_size:
size+=this_size
return size
def get_gb_size(byte_size):
return Decimal(byte_size)/Decimal(BYTES_IN_GB)
def create_storage_costs_json(project_files_hits, bill_time_start, bill_time_end, month_total_seconds):
storage_costs = []
storage_cost_actual = 0
for donor_doc in project_files_hits:
donor = donor_doc.get("_source")
for specimen in donor.get("specimen"):
for sample in specimen.get("samples"):
for analysis in sample.get("analysis"):
timing_stats = analysis.get("timing_metrics")
if timing_stats:
analysis_end_time = get_datetime_from_es(timing_stats["overall_stop_time_utc"])
if analysis_end_time < bill_time_end:
this_size = get_gb_size(workflow_output_total_size(analysis.get("workflow_outputs")))
if analysis_end_time >= bill_time_start: #means it's from this month
seconds = (bill_time_end - analysis_end_time).total_seconds()
else:#it's from previous month, charge it portion of month
seconds = (bill_time_end - bill_time_start).total_seconds()
cost = calculate_storage_cost(Decimal(seconds)/Decimal(month_total_seconds), this_size)
storage_costs.append(
{
"donor": donor.get("submitter_donor_id"),
"specimen": specimen.get("submitter_specimen_id"),
"sample": sample.get("submitter_sample_id"),
"workflow": analysis.get("analysis_type"),
"version": analysis.get("workflow_version"),
"cost": str(cost)
}
)
storage_cost_actual += cost
return storage_costs
def get_storage_costs(previous_month_bytes, portion_of_month, this_month_timestamps_sizes, curr_time, seconds_in_month):
storage_costs = Decimal(0)
storage_size_bytes = previous_month_bytes['aggregations']['filtered_nested_timestamps']['sum_sizes']['value']
storage_size_gb = Decimal(storage_size_bytes)/Decimal(BYTES_IN_GB)
storage_costs += calculate_storage_cost(portion_of_month, storage_size_gb)
# calculate the money spent on storing workflow outputs which were uploaded during this month
this_month_timestamps = this_month_timestamps_sizes['aggregations']['filtered_nested_timestamps']['times'][
'buckets']
for ts_sum in this_month_timestamps:
time_string = ts_sum['key_as_string']
time = datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=pytz.UTC)
timediff = (curr_time - time).total_seconds()
month_portion = Decimal(timediff)/Decimal(seconds_in_month)
storage_size_bytes = ts_sum['sum_sizes']['value']
storage_size_gb = Decimal(storage_size_bytes)/Decimal(BYTES_IN_GB)
storage_costs += calculate_storage_cost(month_portion, storage_size_gb)
return storage_costs
def generate_daily_reports(date):
# Need to pass app context around because of how flask works
# can take a single argument date as follows
# flask generate_daily_reports --date 2017/01/31 will compute the billings for jan 2017, up to the 31st day of
# January
try:
timeend = datetime.strptime(date, '%Y/%m/%d').replace(tzinfo=pytz.UTC)
except:
timeend = datetime.utcnow().replace(tzinfo=pytz.UTC).replace(minute=0, second=0, hour=0, microsecond=0)
# HANDLE CLOSING OUT BILLINGS at end of month
if timeend.day == 1:
projects = get_projects_list()
for project in projects:
# previous calendar month (handles the December -> January wrap)
previous_month = (timeend.month - 2) % 12 + 1
bill = Billing.query().filter(func.extract('month', Billing.end_date) == previous_month) \
.filter(Billing.closed_out == False).filter(Billing.project == project).first()  # '== False' is required for SQLAlchemy filters
if bill:
bill.update(end_date=timeend, closed_out=True)
monthstart = timeend.replace(day=1)
projects = get_projects_list()
seconds_into_month = (timeend-monthstart).total_seconds()
daysinmonth = calendar.monthrange(timeend.year, timeend.month)[1]
portion_of_month = Decimal(seconds_into_month)/Decimal(daysinmonth*3600*24)
for project in projects:
print(project)
file_size = get_previous_file_sizes(monthstart, project=project)
this_months_files = get_months_uploads(project, monthstart, timeend)
compute_cost_search = make_search_filter_query(monthstart,timeend,project)
compute_costs = get_compute_costs(compute_cost_search)
analysis_compute_json = create_analysis_costs_json(compute_cost_search['hits']['hits'], monthstart, timeend)
all_proj_files = get_previous_file_sizes(timeend, project)['hits']['hits']
analysis_storage_json = create_storage_costs_json(all_proj_files, monthstart, timeend, daysinmonth*3600*24)
storage_costs = get_storage_costs( file_size, portion_of_month,
this_months_files, timeend, daysinmonth*3600*24)
bill = Billing.query().filter(Billing.project == project).filter(func.extract('month', Billing.start_date) == monthstart.month).first()
itemized_costs = {
"itemized_compute_costs": analysis_compute_json,
"itemized_storage_costs": analysis_storage_json
}
try:
if bill:
bill.update(compute_cost=compute_costs, storage_cost=storage_costs, end_date=timeend,
cost_by_analysis=itemized_costs)
else:
Billing.create(compute_cost=compute_costs, storage_cost=storage_costs, start_date=monthstart, \
end_date=timeend, project=project, closed_out=False,
cost_by_analysis=itemized_costs)
except:
print("IT'S GONE FAR SOUTH")
if __name__ == '__main__':
generate_daily_reports("")
|
|
#!/usr/bin/env python
import os
import sys
import time, json
from selfdrive.test.plant import plant
from selfdrive.config import Conversions as CV, CruiseButtons as CB
from maneuver import *
maneuvers = [
Maneuver(
'while cruising at 40 mph, change cruise speed to 50mph',
duration=30.,
initial_speed = 40. * CV.MPH_TO_MS,
cruise_button_presses = [(CB.DECEL_SET, 2.), (0, 2.3),
(CB.RES_ACCEL, 10.), (0, 10.1),
(CB.RES_ACCEL, 10.2), (0, 10.3)]
),
Maneuver(
'while cruising at 60 mph, change cruise speed to 50mph',
duration=30.,
initial_speed=60. * CV.MPH_TO_MS,
cruise_button_presses = [(CB.DECEL_SET, 2.), (0, 2.3),
(CB.DECEL_SET, 10.), (0, 10.1),
(CB.DECEL_SET, 10.2), (0, 10.3)]
),
Maneuver(
'while cruising at 20mph, grade change +10%',
duration=25.,
initial_speed=20. * CV.MPH_TO_MS,
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)],
grade_values = [0., 0., 1.0],
grade_breakpoints = [0., 10., 11.]
),
Maneuver(
'while cruising at 20mph, grade change -10%',
duration=25.,
initial_speed=20. * CV.MPH_TO_MS,
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)],
grade_values = [0., 0., -1.0],
grade_breakpoints = [0., 10., 11.]
),
Maneuver(
'approaching a 40mph car while cruising at 60mph from 100m away',
duration=30.,
initial_speed = 60. * CV.MPH_TO_MS,
lead_relevancy=True,
initial_distance_lead=100.,
speed_lead_values = [40.*CV.MPH_TO_MS, 40.*CV.MPH_TO_MS],
speed_lead_breakpoints = [0., 100.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)]
),
Maneuver(
'approaching a 0mph car while cruising at 40mph from 150m away',
duration=30.,
initial_speed = 40. * CV.MPH_TO_MS,
lead_relevancy=True,
initial_distance_lead=150.,
speed_lead_values = [0.*CV.MPH_TO_MS, 0.*CV.MPH_TO_MS],
speed_lead_breakpoints = [0., 100.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)]
),
Maneuver(
'steady state following a car at 20m/s, then lead decel to 0mph at 1m/s^2',
duration=50.,
initial_speed = 20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values = [20.*CV.MPH_TO_MS, 20.*CV.MPH_TO_MS, 0.*CV.MPH_TO_MS],
speed_lead_breakpoints = [0., 15., 35.0],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)]
),
Maneuver(
'steady state following a car at 20m/s, then lead decel to 0mph at 2m/s^2',
duration=50.,
initial_speed = 20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values = [20.*CV.MPH_TO_MS, 20.*CV.MPH_TO_MS, 0.*CV.MPH_TO_MS],
speed_lead_breakpoints = [0., 15., 25.0],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)]
),
Maneuver(
'starting at 0mph, approaching a stopped car 100m away',
duration=30.,
initial_speed = 0.,
lead_relevancy=True,
initial_distance_lead=100.,
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7),
(CB.RES_ACCEL, 1.8), (0.0, 1.9)]
),
Maneuver(
"following a car at 60mph, lead accel and decel at 0.5m/s^2 every 2s",
duration=25.,
initial_speed=30.,
lead_relevancy=True,
initial_distance_lead=49.,
speed_lead_values=[30.,30.,29.,31.,29.,31.,29.],
speed_lead_breakpoints=[0., 6., 8., 12.,16.,20.,24.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7)]
),
Maneuver(
"following a car at 10mph, stop and go at 1m/s2 lead dece1 and accel",
duration=70.,
initial_speed=10.,
lead_relevancy=True,
initial_distance_lead=20.,
speed_lead_values=[10., 0., 0., 10., 0.,10.],
speed_lead_breakpoints=[10., 20., 30., 40., 50., 60.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7)]
),
Maneuver(
"green light: stopped behind lead car, lead car accelerates at 1.5 m/s",
duration=30.,
initial_speed=0.,
lead_relevancy=True,
initial_distance_lead=4.,
speed_lead_values=[0, 0 , 45],
speed_lead_breakpoints=[0, 10., 40.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7),
(CB.RES_ACCEL, 1.8), (0.0, 1.9),
(CB.RES_ACCEL, 2.0), (0.0, 2.1),
(CB.RES_ACCEL, 2.2), (0.0, 2.3)]
),
Maneuver(
"stop and go with 1m/s2 lead decel and accel, with full stops",
duration=70.,
initial_speed=0.,
lead_relevancy=True,
initial_distance_lead=20.,
speed_lead_values=[10., 0., 0., 10., 0., 0.] ,
speed_lead_breakpoints=[10., 20., 30., 40., 50., 60.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7)]
),
Maneuver(
"accelerate from 20 while lead vehicle decelerates from 40 to 20 at 1m/s2",
duration=30.,
initial_speed=10.,
lead_relevancy=True,
initial_distance_lead=10.,
speed_lead_values=[20., 10.],
speed_lead_breakpoints=[1., 11.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7),
(CB.RES_ACCEL, 1.8), (0.0, 1.9),
(CB.RES_ACCEL, 2.0), (0.0, 2.1),
(CB.RES_ACCEL, 2.2), (0.0, 2.3)]
),
Maneuver(
"accelerate from 20 while lead vehicle decelerates from 40 to 0 at 2m/s2",
duration=30.,
initial_speed=10.,
lead_relevancy=True,
initial_distance_lead=10.,
speed_lead_values=[20., 0.],
speed_lead_breakpoints=[1., 11.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7),
(CB.RES_ACCEL, 1.8), (0.0, 1.9),
(CB.RES_ACCEL, 2.0), (0.0, 2.1),
(CB.RES_ACCEL, 2.2), (0.0, 2.3)]
)
]
css_style = """
.maneuver_title {
font-size: 24px;
text-align: center;
}
.maneuver_graph {
width: 100%;
}
"""
def main(output_dir):
view_html = "<html><head><style>%s</style></head><body><table>" % (css_style,)
for i, man in enumerate(maneuvers):
view_html += "<tr><td class='maneuver_title' colspan=5><div>%s</div></td></tr><tr>" % (man.title,)
for c in ['distance.svg', 'speeds.svg', 'acceleration.svg', 'pedals.svg', 'pid.svg']:
view_html += "<td><img class='maneuver_graph' src='%s'/></td>" % (os.path.join("maneuver" + str(i+1).zfill(2), c), )
view_html += "</tr>"
with open(os.path.join(output_dir, "index.html"), "w") as f:
f.write(view_html)
for i, man in enumerate(maneuvers):
score, plot = man.evaluate()
plot.write_plot(output_dir, "maneuver" + str(i+1).zfill(2))
if __name__ == "__main__":
if len(sys.argv) <= 1:
print "Usage:", sys.argv[0], "<output_dir>"
exit(1)
main(sys.argv[1])
|
|
#!/usr/bin/env python
"""
Copyright (c) 2015-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import wb
def bench():
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
port0_adr_i = Signal(intbv(0)[32:])
port0_dat_i = Signal(intbv(0)[32:])
port0_we_i = Signal(bool(0))
port0_sel_i = Signal(intbv(0)[2:])
port0_stb_i = Signal(bool(0))
port0_cyc_i = Signal(bool(0))
# Outputs
port0_dat_o = Signal(intbv(0)[32:])
port0_ack_o = Signal(bool(0))
# WB master
wb_master_inst = wb.WBMaster()
wb_master_logic = wb_master_inst.create_logic(
clk,
adr_o=port0_adr_i,
dat_i=port0_dat_o,
dat_o=port0_dat_i,
we_o=port0_we_i,
sel_o=port0_sel_i,
stb_o=port0_stb_i,
ack_i=port0_ack_o,
cyc_o=port0_cyc_i,
name='master'
)
# WB RAM model
wb_ram_inst = wb.WBRam(2**16)
wb_ram_port0 = wb_ram_inst.create_port(
clk,
adr_i=port0_adr_i,
dat_i=port0_dat_i,
dat_o=port0_dat_o,
we_i=port0_we_i,
sel_i=port0_sel_i,
stb_i=port0_stb_i,
ack_o=port0_ack_o,
cyc_i=port0_cyc_i,
latency=1,
asynchronous=False,
name='port0'
)
@always(delay(4))
def clkgen():
clk.next = not clk
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
yield clk.posedge
print("test 1: baseline")
current_test.next = 1
data = wb_ram_inst.read_mem(0, 32)
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
yield delay(100)
yield clk.posedge
print("test 2: direct write")
current_test.next = 2
wb_ram_inst.write_mem(0, b'test')
data = wb_ram_inst.read_mem(0, 32)
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
assert wb_ram_inst.read_mem(0,4) == b'test'
yield clk.posedge
print("test 3: write via port0")
current_test.next = 3
wb_master_inst.init_write(4, b'\x11\x22\x33\x44')
yield wb_master_inst.wait()
yield clk.posedge
data = wb_ram_inst.read_mem(0, 32)
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
assert wb_ram_inst.read_mem(4,4) == b'\x11\x22\x33\x44'
yield delay(100)
yield clk.posedge
print("test 4: read via port0")
current_test.next = 4
wb_master_inst.init_read(4, 4)
yield wb_master_inst.wait()
yield clk.posedge
data = wb_master_inst.get_read_data()
assert data[0] == 4
assert data[1] == b'\x11\x22\x33\x44'
yield delay(100)
yield clk.posedge
print("test 5: various writes")
current_test.next = 5
for length in range(1,8):
for offset in range(4,8):
wb_ram_inst.write_mem(256*(16*offset+length), b'\xAA'*32)
wb_master_inst.init_write(256*(16*offset+length)+offset, b'\x11\x22\x33\x44\x55\x66\x77\x88'[0:length])
yield wb_master_inst.wait()
yield clk.posedge
data = wb_ram_inst.read_mem(256*(16*offset+length), 32)
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
assert wb_ram_inst.read_mem(256*(16*offset+length)+offset, length) == b'\x11\x22\x33\x44\x55\x66\x77\x88'[0:length]
assert wb_ram_inst.read_mem(256*(16*offset+length)+offset-2, 1) == b'\xAA'
assert wb_ram_inst.read_mem(256*(16*offset+length)+offset+length+1, 1) == b'\xAA'
yield delay(100)
yield clk.posedge
print("test 6: various reads")
current_test.next = 6
for length in range(1,8):
for offset in range(4,8):
wb_master_inst.init_read(256*(16*offset+length)+offset, length)
yield wb_master_inst.wait()
yield clk.posedge
data = wb_master_inst.get_read_data()
assert data[0] == 256*(16*offset+length)+offset
assert data[1] == b'\x11\x22\x33\x44\x55\x66\x77\x88'[0:length]
yield delay(100)
yield clk.posedge
print("test 7: write words")
current_test.next = 7
for offset in range(4):
wb_master_inst.init_write_words((0x4000+offset*64+0)//2+offset, [0x1234])
wb_master_inst.init_write_dwords((0x4000+offset*64+16)//4+offset, [0x12345678])
wb_master_inst.init_write_qwords((0x4000+offset*64+32)//8+offset, [0x1234567887654321])
yield wb_master_inst.wait()
yield clk.posedge
data = wb_ram_inst.read_mem(0x4000+offset*64, 64)
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
assert wb_ram_inst.read_mem((0x4000+offset*64+0)+offset*2, 2) == b'\x34\x12'
assert wb_ram_inst.read_mem((0x4000+offset*64+16)+offset*4, 4) == b'\x78\x56\x34\x12'
assert wb_ram_inst.read_mem((0x4000+offset*64+32)+offset*8, 8) == b'\x21\x43\x65\x87\x78\x56\x34\x12'
assert wb_ram_inst.read_words((0x4000+offset*64+0)//2+offset, 1)[0] == 0x1234
assert wb_ram_inst.read_dwords((0x4000+offset*64+16)//4+offset, 1)[0] == 0x12345678
assert wb_ram_inst.read_qwords((0x4000+offset*64+32)//8+offset, 1)[0] == 0x1234567887654321
yield delay(100)
yield clk.posedge
print("test 8: read words")
current_test.next = 8
for offset in range(4):
wb_master_inst.init_read_words((0x4000+offset*64+0)//2+offset, 1)
wb_master_inst.init_read_dwords((0x4000+offset*64+16)//4+offset, 1)
wb_master_inst.init_read_qwords((0x4000+offset*64+32)//8+offset, 1)
yield wb_master_inst.wait()
yield clk.posedge
data = wb_master_inst.get_read_data_words()
assert data[0] == (0x4000+offset*64+0)//2+offset
assert data[1][0] == 0x1234
data = wb_master_inst.get_read_data_dwords()
assert data[0] == (0x4000+offset*64+16)//4+offset
assert data[1][0] == 0x12345678
data = wb_master_inst.get_read_data_qwords()
assert data[0] == (0x4000+offset*64+32)//8+offset
assert data[1][0] == 0x1234567887654321
yield delay(100)
raise StopSimulation
return instances()
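# Worked example of the word/dword/qword addressing exercised in tests 7 and 8
# above (a sketch inferred from the byte-level asserts; it assumes the WBRam
# word helpers take word-sized addresses, i.e. byte_address = word_address * word_size):
#   offset = 1 -> init_write_words((0x4000 + 64 + 0)//2 + 1, [0x1234])
#     word address 0x2021 -> byte address 0x4042 = 0x4000 + 64 + 0 + 1*2
#     read_mem(0x4000 + 64 + 0 + 1*2, 2) then returns b'\x34\x12' (little endian)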
def test_bench():
os.chdir(os.path.dirname(os.path.abspath(__file__)))
#sim = Simulation(bench())
traceSignals.name = os.path.basename(__file__).rsplit('.',1)[0]
sim = Simulation(traceSignals(bench))
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
|
|
import io
import json
import time
from unittest import mock
from http import HTTPStatus
import pytest
import aiohttpretty
from waterbutler.core import exceptions
from waterbutler.core.streams import FileStreamReader, ResponseStreamReader
from waterbutler.providers.googlecloud.metadata import GoogleCloudFileMetadata
from waterbutler.providers.googlecloud import utils, settings, GoogleCloudProvider
from tests.providers.googlecloud.fixtures.providers import (mock_auth,
mock_auth_2,
mock_creds,
mock_creds_2,
mock_settings,
mock_settings_2)
from tests.providers.googlecloud.fixtures.files import (file_raw,
file_name,
file_wb_path,
file_obj_name,
meta_file_raw,
meta_file_parsed,
meta_file_upload_raw,
meta_file_copy_raw,
file_2_wb_path,
file_2_obj_name,
file_2_copy_obj_name)
from tests.providers.googlecloud.fixtures.folders import folder_wb_path, folder_obj_name
@pytest.fixture()
def mock_provider(mock_auth, mock_creds, mock_settings):
return GoogleCloudProvider(mock_auth, mock_creds, mock_settings)
@pytest.fixture()
def mock_provider_2(mock_auth_2, mock_creds_2, mock_settings_2):
return GoogleCloudProvider(mock_auth_2, mock_creds_2, mock_settings_2)
@pytest.fixture
def mock_time(monkeypatch):
mock_time = mock.Mock(return_value=1234567890.0)
monkeypatch.setattr(time, 'time', mock_time)
@pytest.fixture
def file_stream_file(file_raw):
return FileStreamReader(io.BytesIO(file_raw))
class TestProviderInit:
@pytest.mark.asyncio
async def test_provider_init(self, mock_provider, mock_creds, mock_settings):
assert mock_provider is not None
assert mock_provider.NAME == 'googlecloud'
assert mock_provider.BASE_URL == settings.BASE_URL
assert mock_provider.bucket == mock_settings.get('bucket')
json_creds = mock_creds.get('json_creds')
assert mock_provider.creds is not None
assert mock_provider.creds.project_id == json_creds.get('project_id')
assert mock_provider.creds.service_account_email == json_creds.get('client_email')
class TestValidatePath:
@pytest.mark.asyncio
async def test_validate_v1_path_file(self, mock_provider, file_wb_path):
file_path = '/{}'.format(file_wb_path.path)
assert file_path.startswith('/') and not file_path.endswith('/')
wb_path = await mock_provider.validate_path(file_path)
assert wb_path == file_wb_path
@pytest.mark.asyncio
async def test_validate_v1_path_folder(self, mock_provider, folder_wb_path):
folder_path = '/{}'.format(folder_wb_path.path)
assert folder_path.startswith('/') and folder_path.endswith('/')
wb_path = await mock_provider.validate_path(folder_path)
assert wb_path == folder_wb_path
@pytest.mark.asyncio
async def test_validate_path_file(self, mock_provider, file_wb_path):
file_path = '/{}'.format(file_wb_path.path)
assert file_path.startswith('/') and not file_path.endswith('/')
wb_path = await mock_provider.validate_path(file_path)
assert wb_path == file_wb_path
@pytest.mark.asyncio
async def test_validate_path_folder(self, mock_provider, folder_wb_path):
folder_path = '/{}'.format(folder_wb_path.path)
assert folder_path.startswith('/') and folder_path.endswith('/')
wb_path = await mock_provider.validate_path(folder_path)
assert wb_path == folder_wb_path
class TestMetadata:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_file(self, mock_time, mock_provider, file_wb_path, meta_file_raw,
meta_file_parsed):
file_obj_name = utils.get_obj_name(file_wb_path, is_folder=False)
signed_url = mock_provider._build_and_sign_url('HEAD', file_obj_name, **{})
resp_headers = utils.get_multi_dict_from_python_dict(dict(json.loads(meta_file_raw)))
aiohttpretty.register_uri(
'HEAD',
signed_url,
headers=resp_headers,
status=HTTPStatus.OK
)
metadata_json = json.loads(meta_file_parsed)
metadata_expected = GoogleCloudFileMetadata(metadata_json)
metadata = await mock_provider._metadata_object(file_wb_path, is_folder=False)
assert isinstance(metadata, GoogleCloudFileMetadata)
assert metadata == metadata_expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_object_401_unauthorized(self, mock_time, mock_provider, file_wb_path):
file_obj_name = utils.get_obj_name(file_wb_path, is_folder=False)
signed_url = mock_provider._build_and_sign_url('HEAD', file_obj_name, **{})
aiohttpretty.register_uri(
'HEAD',
signed_url,
status=HTTPStatus.UNAUTHORIZED
)
with pytest.raises(exceptions.MetadataError) as exc:
await mock_provider._metadata_object(file_wb_path, is_folder=False)
assert exc.value.code == HTTPStatus.UNAUTHORIZED
assert aiohttpretty.has_call(method='HEAD', uri=signed_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_object_404_not_found(self, mock_time, mock_provider, file_wb_path):
file_obj_name = utils.get_obj_name(file_wb_path, is_folder=False)
signed_url = mock_provider._build_and_sign_url('HEAD', file_obj_name, **{})
aiohttpretty.register_uri(
'HEAD',
signed_url,
status=HTTPStatus.NOT_FOUND
)
with pytest.raises(exceptions.MetadataError) as exc:
await mock_provider._metadata_object(file_wb_path, is_folder=False)
assert exc.value.code == HTTPStatus.NOT_FOUND
assert aiohttpretty.has_call(method='HEAD', uri=signed_url)
class TestOperations:
def test_provider_equality(self, mock_provider, mock_provider_2):
assert mock_provider != mock_provider_2
assert type(mock_provider) == type(mock_provider_2)
def test_can_intra_move(self, mock_provider, mock_provider_2, file_wb_path, folder_wb_path):
assert mock_provider.can_intra_move(mock_provider, file_wb_path)
assert not mock_provider.can_intra_move(mock_provider_2, file_wb_path)
assert not mock_provider.can_intra_move(mock_provider, folder_wb_path)
assert not mock_provider.can_intra_move(mock_provider_2, folder_wb_path)
def test_can_intra_copy(self, mock_provider, mock_provider_2, file_wb_path, folder_wb_path):
assert mock_provider.can_intra_copy(mock_provider, file_wb_path)
assert not mock_provider.can_intra_copy(mock_provider_2, file_wb_path)
assert not mock_provider.can_intra_copy(mock_provider, folder_wb_path)
assert not mock_provider.can_intra_copy(mock_provider_2, folder_wb_path)
def test_can_duplicate_names(self, mock_provider):
assert mock_provider.can_duplicate_names()
class TestCRUD:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download_file(self, mock_time, mock_provider, file_wb_path, file_raw):
file_obj_name = utils.get_obj_name(file_wb_path, is_folder=False)
signed_url = mock_provider._build_and_sign_url('GET', file_obj_name, **{})
aiohttpretty.register_uri(
'GET',
signed_url,
body=file_raw,
status=HTTPStatus.OK
)
resp_stream_reader = await mock_provider.download(file_wb_path)
file_content = await resp_stream_reader.read()
assert aiohttpretty.has_call(method='GET', uri=signed_url)
assert isinstance(resp_stream_reader, ResponseStreamReader)
assert file_content == file_raw
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download_file_with_accept_url(self, mock_time, mock_provider, file_wb_path,
file_name):
file_obj_name = utils.get_obj_name(file_wb_path, is_folder=False)
query = {'response-content-disposition': 'attachment; filename={}'.format(file_name)}
signed_url = mock_provider._build_and_sign_url('GET', file_obj_name, **query)
return_url = await mock_provider.download(file_wb_path, accept_url=True, display_name=file_name)
assert not aiohttpretty.has_call(method='GET', uri=signed_url)
assert isinstance(return_url, str)
assert signed_url == return_url
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download_file_not_found(self, mock_time, mock_provider, file_wb_path):
file_obj_name = utils.get_obj_name(file_wb_path, is_folder=False)
signed_url = mock_provider._build_and_sign_url('GET', file_obj_name, **{})
aiohttpretty.register_uri(
'GET',
signed_url,
status=HTTPStatus.NOT_FOUND
)
with pytest.raises(exceptions.DownloadError) as exc:
await mock_provider.download(file_wb_path, is_folder=False)
assert exc.value.code == HTTPStatus.NOT_FOUND
assert aiohttpretty.has_call(method='GET', uri=signed_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_upload_file(self, mock_time, mock_provider, file_wb_path, meta_file_raw,
meta_file_parsed, meta_file_upload_raw, file_stream_file):
file_obj_name = utils.get_obj_name(file_wb_path, is_folder=False)
signed_url_upload = mock_provider._build_and_sign_url('PUT', file_obj_name, **{})
resp_headers = utils.get_multi_dict_from_python_dict(dict(json.loads(meta_file_upload_raw)))
aiohttpretty.register_uri(
'PUT',
signed_url_upload,
headers=resp_headers,
status=HTTPStatus.OK
)
signed_url_metadata = mock_provider._build_and_sign_url('HEAD', file_obj_name, **{})
resp_headers = utils.get_multi_dict_from_python_dict(dict(json.loads(meta_file_raw)))
aiohttpretty.register_uri(
'HEAD',
signed_url_metadata,
headers=resp_headers,
status=HTTPStatus.OK
)
metadata_json = json.loads(meta_file_parsed)
metadata_expected = GoogleCloudFileMetadata(metadata_json)
metadata, _ = await mock_provider.upload(file_stream_file, file_wb_path)
assert metadata == metadata_expected
assert aiohttpretty.has_call(method='PUT', uri=signed_url_upload)
assert aiohttpretty.has_call(method='HEAD', uri=signed_url_metadata)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_upload_file_checksum_mismatch(self, mock_time, mock_provider, file_wb_path,
meta_file_raw, meta_file_upload_raw,
file_stream_file):
file_obj_name = utils.get_obj_name(file_wb_path, is_folder=False)
signed_url_upload = mock_provider._build_and_sign_url('PUT', file_obj_name, **{})
# There is no need to use `MultiDict` since the hashes are not used
resp_headers_dict = dict(json.loads(meta_file_upload_raw))
resp_headers_dict.update({'etag': '"9e780e1c4ee28c44642160b349b3aab0"'})
resp_headers = utils.get_multi_dict_from_python_dict(resp_headers_dict)
aiohttpretty.register_uri(
'PUT',
signed_url_upload,
headers=resp_headers,
status=HTTPStatus.OK
)
signed_url_metadata = mock_provider._build_and_sign_url('HEAD', file_obj_name, **{})
# There is no need to use `MultiDict` since the hashes are not used
resp_headers = utils.get_multi_dict_from_python_dict(dict(json.loads(meta_file_raw)))
aiohttpretty.register_uri(
'HEAD',
signed_url_metadata,
headers=resp_headers,
status=HTTPStatus.OK
)
with pytest.raises(exceptions.UploadChecksumMismatchError) as exc:
await mock_provider.upload(file_stream_file, file_wb_path)
assert exc.value.code == HTTPStatus.INTERNAL_SERVER_ERROR
assert aiohttpretty.has_call(method='HEAD', uri=signed_url_metadata)
assert aiohttpretty.has_call(method='PUT', uri=signed_url_upload)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_delete_file(self, mock_time, mock_provider, file_wb_path):
file_obj_name = utils.get_obj_name(file_wb_path, is_folder=False)
signed_url = mock_provider._build_and_sign_url('DELETE', file_obj_name, **{})
aiohttpretty.register_uri(
'DELETE',
signed_url,
status=HTTPStatus.NO_CONTENT
)
await mock_provider.delete(file_wb_path)
assert aiohttpretty.has_call(method='DELETE', uri=signed_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_delete_file_not_found(self, mock_time, mock_provider, file_wb_path):
file_obj_name = utils.get_obj_name(file_wb_path, is_folder=False)
signed_url = mock_provider._build_and_sign_url('DELETE', file_obj_name, **{})
aiohttpretty.register_uri(
'DELETE',
signed_url,
status=HTTPStatus.NOT_FOUND
)
with pytest.raises(exceptions.DeleteError) as exc:
await mock_provider.delete(file_wb_path)
assert exc.value.code == HTTPStatus.NOT_FOUND
assert aiohttpretty.has_call(method='DELETE', uri=signed_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_intra_copy_file(self, mock_time, mock_provider, file_wb_path, file_2_wb_path,
meta_file_raw, meta_file_parsed, meta_file_copy_raw):
src_file_path = file_2_wb_path
dest_file_path = file_wb_path
src_file_obj_name = utils.get_obj_name(src_file_path, is_folder=False)
dest_file_obj_name = utils.get_obj_name(dest_file_path, is_folder=False)
object_name_with_bucket = '{}/{}'.format(mock_provider.bucket, src_file_obj_name)
canonical_ext_headers = {'x-goog-copy-source': object_name_with_bucket}
signed_url_intra_copy = mock_provider._build_and_sign_url(
'PUT',
dest_file_obj_name,
canonical_ext_headers=canonical_ext_headers,
**{}
)
resp_headers = utils.get_multi_dict_from_python_dict(dict(json.loads(meta_file_copy_raw)))
aiohttpretty.register_uri(
'PUT',
signed_url_intra_copy,
headers=resp_headers,
status=HTTPStatus.OK
)
signed_url_metadata = mock_provider._build_and_sign_url('HEAD', dest_file_obj_name, **{})
resp_headers = utils.get_multi_dict_from_python_dict(dict(json.loads(meta_file_raw)))
aiohttpretty.register_uri(
'HEAD',
signed_url_metadata,
headers=resp_headers,
status=HTTPStatus.OK
)
metadata_json = json.loads(meta_file_parsed)
metadata_expected = GoogleCloudFileMetadata(metadata_json)
metadata, _ = await mock_provider.intra_copy(mock_provider, src_file_path, dest_file_path)
assert metadata == metadata_expected
assert aiohttpretty.has_call(method='PUT', uri=signed_url_intra_copy)
assert aiohttpretty.has_call(method='HEAD', uri=signed_url_metadata)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_intra_copy_file_not_found(self, mock_time, mock_provider, file_wb_path,
file_2_wb_path, meta_file_raw, meta_file_copy_raw):
src_file_path = file_2_wb_path
dest_file_path = file_wb_path
src_file_obj_name = utils.get_obj_name(src_file_path, is_folder=False)
dest_file_obj_name = utils.get_obj_name(dest_file_path, is_folder=False)
object_name_with_bucket = '{}/{}'.format(mock_provider.bucket, src_file_obj_name)
canonical_ext_headers = {'x-goog-copy-source': object_name_with_bucket}
signed_url_intra_copy = mock_provider._build_and_sign_url(
'PUT',
dest_file_obj_name,
canonical_ext_headers=canonical_ext_headers,
**{}
)
resp_headers = utils.get_multi_dict_from_python_dict(dict(json.loads(meta_file_copy_raw)))
aiohttpretty.register_uri(
'PUT',
signed_url_intra_copy,
headers=resp_headers,
status=HTTPStatus.NOT_FOUND
)
signed_url_metadata = mock_provider._build_and_sign_url('HEAD', dest_file_obj_name, **{})
resp_headers = utils.get_multi_dict_from_python_dict(dict(json.loads(meta_file_raw)))
aiohttpretty.register_uri(
'HEAD',
signed_url_metadata,
headers=resp_headers,
status=HTTPStatus.OK
)
with pytest.raises(exceptions.CopyError) as exc:
await mock_provider.intra_copy(mock_provider, src_file_path, dest_file_path)
assert exc.value.code == HTTPStatus.NOT_FOUND
assert aiohttpretty.has_call(method='PUT', uri=signed_url_intra_copy)
assert aiohttpretty.has_call(method='HEAD', uri=signed_url_metadata)
|
|
# Authors : Alexandre Gramfort, alexandre.gramfort@inria.fr (2011)
# Denis A. Engemann <denis.engemann@gmail.com>
# License : BSD-3-Clause
from functools import partial
import numpy as np
from ..parallel import parallel_func, check_n_jobs
from ..io.pick import _picks_to_idx
from ..utils import logger, verbose, _time_mask, _check_option
from .multitaper import psd_array_multitaper
def _decomp_aggregate_mask(epoch, func, average, freq_sl):
_, _, spect = func(epoch)
spect = spect[..., freq_sl, :]
# Do the averaging here (per epoch) to save memory
if average == 'mean':
spect = np.nanmean(spect, axis=-1)
elif average == 'median':
spect = np.nanmedian(spect, axis=-1)
return spect
def _spect_func(epoch, func, freq_sl, average):
"""Aux function."""
# Decide if we should split this to save memory or not, since doing
# multiple calls will incur some performance overhead. Eventually we might
# want to write (really, go back to) our own spectrogram implementation
# that, if possible, averages after each transform, but this will incur
# a lot of overhead because of the many Python calls required.
kwargs = dict(func=func, average=average, freq_sl=freq_sl)
if epoch.nbytes > 10e6:
spect = np.apply_along_axis(
_decomp_aggregate_mask, -1, epoch, **kwargs)
else:
spect = _decomp_aggregate_mask(epoch, **kwargs)
return spect
def _check_nfft(n, n_fft, n_per_seg, n_overlap):
"""Ensure n_fft, n_per_seg and n_overlap make sense."""
if n_per_seg is None and n_fft > n:
raise ValueError(('If n_per_seg is None n_fft is not allowed to be > '
'n_times. If you want zero-padding, you have to set '
'n_per_seg to relevant length. Got n_fft of %d while'
' signal length is %d.') % (n_fft, n))
n_per_seg = n_fft if n_per_seg is None or n_per_seg > n_fft else n_per_seg
n_per_seg = n if n_per_seg > n else n_per_seg
if n_overlap >= n_per_seg:
raise ValueError(('n_overlap cannot be greater than n_per_seg (or '
'n_fft). Got n_overlap of %d while n_per_seg is '
'%d.') % (n_overlap, n_per_seg))
return n_fft, n_per_seg, n_overlap
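# Worked examples of how _check_nfft above resolves the segment parameters
# (illustrative values only):
#   _check_nfft(n=1000, n_fft=256, n_per_seg=None, n_overlap=0)   -> (256, 256, 0)
#   _check_nfft(n=200,  n_fft=256, n_per_seg=256,  n_overlap=0)   -> (256, 200, 0)
#   _check_nfft(n=1000, n_fft=256, n_per_seg=128,  n_overlap=200) -> ValueError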
def _check_psd_data(inst, tmin, tmax, picks, proj, reject_by_annotation=False):
"""Check PSD data / pull arrays from inst."""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (BaseEpochs, BaseRaw, Evoked)):
raise ValueError('epochs must be an instance of Epochs, Raw, or '
'Evoked. Got type {}'.format(type(inst)))
time_mask = _time_mask(inst.times, tmin, tmax, sfreq=inst.info['sfreq'])
picks = _picks_to_idx(inst.info, picks, 'data', with_ref_meg=False)
if proj:
# Copy first so it's not modified
inst = inst.copy().apply_proj()
sfreq = inst.info['sfreq']
if isinstance(inst, BaseRaw):
start, stop = np.where(time_mask)[0][[0, -1]]
rba = 'NaN' if reject_by_annotation else None
data = inst.get_data(picks, start, stop + 1, reject_by_annotation=rba)
elif isinstance(inst, BaseEpochs):
data = inst.get_data(picks=picks)[:, :, time_mask]
else: # Evoked
data = inst.data[picks][:, time_mask]
return data, sfreq
@verbose
def psd_array_welch(x, sfreq, fmin=0, fmax=np.inf, n_fft=256, n_overlap=0,
n_per_seg=None, n_jobs=1, average='mean', window='hamming',
verbose=None):
"""Compute power spectral density (PSD) using Welch's method.
Parameters
----------
x : array, shape=(..., n_times)
The data to compute PSD from.
sfreq : float
The sampling frequency.
fmin : float
The lower frequency of interest.
fmax : float
The upper frequency of interest.
n_fft : int
The length of FFT used, must be ``>= n_per_seg`` (default: 256).
The segments will be zero-padded if ``n_fft > n_per_seg``.
n_overlap : int
The number of points of overlap between segments. Will be adjusted
to be <= n_per_seg. The default value is 0.
n_per_seg : int | None
Length of each Welch segment (windowed with a Hamming window). Defaults
to None, which sets n_per_seg equal to n_fft.
%(n_jobs)s
%(average-psd)s
.. versionadded:: 0.19.0
%(window-psd)s
.. versionadded:: 0.22.0
%(verbose)s
Returns
-------
psds : ndarray, shape (..., n_freqs) or (..., n_freqs, n_segments)
The power spectral densities. If ``average='mean'`` or
``average='median'``, the returned array will have the same shape
as the input data plus an additional frequency dimension.
If ``average=None``, the returned array will have the same shape as
the input data plus two additional dimensions corresponding to
frequencies and the unaggregated segments, respectively.
freqs : ndarray, shape (n_freqs,)
The frequencies.
Notes
-----
.. versionadded:: 0.14.0
"""
_check_option('average', average, (None, 'mean', 'median'))
dshape = x.shape[:-1]
n_times = x.shape[-1]
x = x.reshape(-1, n_times)
# Prep the PSD
n_fft, n_per_seg, n_overlap = _check_nfft(n_times, n_fft, n_per_seg,
n_overlap)
win_size = n_fft / float(sfreq)
logger.info("Effective window size : %0.3f (s)" % win_size)
freqs = np.arange(n_fft // 2 + 1, dtype=float) * (sfreq / n_fft)
freq_mask = (freqs >= fmin) & (freqs <= fmax)
if not freq_mask.any():
raise ValueError(
f'No frequencies found between fmin={fmin} and fmax={fmax}')
freq_sl = slice(*(np.where(freq_mask)[0][[0, -1]] + [0, 1]))
del freq_mask
freqs = freqs[freq_sl]
# Parallelize across first N-1 dimensions
n_jobs = check_n_jobs(n_jobs)
x_splits = np.array_split(x, n_jobs)
logger.debug(
f'Spectrogram using {n_fft}-point FFT on {n_per_seg} samples with '
f'{n_overlap} overlap and {window} window')
from scipy.signal import spectrogram
parallel, my_spect_func, n_jobs = parallel_func(_spect_func, n_jobs=n_jobs)
func = partial(spectrogram, noverlap=n_overlap, nperseg=n_per_seg,
nfft=n_fft, fs=sfreq, window=window)
f_spect = parallel(my_spect_func(d, func=func, freq_sl=freq_sl,
average=average)
for d in x_splits)
psds = np.concatenate(f_spect, axis=0)
shape = dshape + (len(freqs),)
if average is None:
shape = shape + (-1,)
psds.shape = shape
return psds, freqs
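# Usage sketch for psd_array_welch (illustrative values; the shapes follow the
# docstring above):
#   rng = np.random.RandomState(0)
#   x = rng.randn(2, 3, 1000)               # e.g. (n_epochs, n_channels, n_times)
#   psds, freqs = psd_array_welch(x, sfreq=250., fmin=1., fmax=40., n_fft=256)
#   psds.shape == (2, 3, freqs.size)        # with the default average='mean'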
@verbose
def psd_welch(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None, n_fft=256,
n_overlap=0, n_per_seg=None, picks=None, proj=False, n_jobs=1,
reject_by_annotation=True, average='mean', window='hamming',
verbose=None):
"""Compute the power spectral density (PSD) using Welch's method.
Calculates periodograms for a sliding window over the time dimension, then
averages them together for each channel/epoch.
Parameters
----------
inst : instance of Epochs or Raw or Evoked
The data for PSD calculation.
fmin : float
Min frequency of interest.
fmax : float
Max frequency of interest.
tmin : float | None
Min time of interest.
tmax : float | None
Max time of interest.
n_fft : int
The length of FFT used, must be ``>= n_per_seg`` (default: 256).
The segments will be zero-padded if ``n_fft > n_per_seg``.
If n_per_seg is None, n_fft must be <= number of time points
in the data.
n_overlap : int
The number of points of overlap between segments. Will be adjusted
to be <= n_per_seg. The default value is 0.
n_per_seg : int | None
Length of each Welch segment (windowed with a Hamming window). Defaults
to None, which sets n_per_seg equal to n_fft.
%(picks_good_data_noref)s
proj : bool
Apply SSP projection vectors. If inst is ndarray this is not used.
%(n_jobs)s
%(reject_by_annotation_raw)s
.. versionadded:: 0.15.0
%(average-psd)s
.. versionadded:: 0.19.0
%(window-psd)s
.. versionadded:: 0.22.0
%(verbose)s
Returns
-------
psds : ndarray, shape (..., n_freqs) or (..., n_freqs, n_segments)
The power spectral densities. If ``average='mean'`` or
``average='median'`` and input is of type Raw or Evoked, then psds will
be of shape (n_channels, n_freqs); if input is of type Epochs, then
psds will be of shape (n_epochs, n_channels, n_freqs).
If ``average=None``, the returned array will have an additional
dimension corresponding to the unaggregated segments.
freqs : ndarray, shape (n_freqs,)
The frequencies.
See Also
--------
mne.io.Raw.plot_psd
mne.Epochs.plot_psd
psd_multitaper
psd_array_welch
Notes
-----
.. versionadded:: 0.12.0
"""
# Prep data
data, sfreq = _check_psd_data(inst, tmin, tmax, picks, proj,
reject_by_annotation=reject_by_annotation)
return psd_array_welch(data, sfreq, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_overlap=n_overlap, n_per_seg=n_per_seg,
average=average, n_jobs=n_jobs, window=window,
verbose=verbose)
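# Usage sketch for psd_welch (illustrative; ``raw`` stands for an existing
# mne.io.Raw instance, which is not constructed here):
#   psds, freqs = psd_welch(raw, fmin=1., fmax=40., tmin=0., tmax=60.,
#                           n_fft=2048, picks='eeg')
#   # Raw/Evoked input -> psds.shape == (n_channels, n_freqs)
#   # Epochs input     -> psds.shape == (n_epochs, n_channels, n_freqs)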
@verbose
def psd_multitaper(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None,
bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, proj=False,
n_jobs=1, reject_by_annotation=False, verbose=None):
"""Compute the power spectral density (PSD) using multitapers.
Calculates spectral density for orthogonal tapers, then averages them
together for each channel/epoch. See :footcite:`Slepian1978` for a
description of the tapers and :footcite:`PercivalWalden1993` for the
general method.
Parameters
----------
inst : instance of Epochs or Raw or Evoked
The data for PSD calculation.
fmin : float
Min frequency of interest.
fmax : float
Max frequency of interest.
tmin : float | None
Min time of interest.
tmax : float | None
Max time of interest.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90%% spectral concentration within
bandwidth.
%(normalization)s
%(picks_good_data_noref)s
proj : bool
Apply SSP projection vectors. If inst is ndarray this is not used.
%(n_jobs)s
%(reject_by_annotation_raw)s
%(verbose)s
Returns
-------
psds : ndarray, shape (..., n_freqs)
The power spectral densities. If input is of type Raw,
then psds will be shape (n_channels, n_freqs), if input is type Epochs
then psds will be shape (n_epochs, n_channels, n_freqs).
freqs : ndarray, shape (n_freqs,)
The frequencies.
See Also
--------
mne.io.Raw.plot_psd
mne.Epochs.plot_psd
psd_array_multitaper
psd_welch
csd_multitaper
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. footbibliography::
"""
# Prep data
data, sfreq = _check_psd_data(inst, tmin, tmax, picks, proj,
reject_by_annotation=reject_by_annotation)
return psd_array_multitaper(data, sfreq, fmin=fmin, fmax=fmax,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias, normalization=normalization,
n_jobs=n_jobs, verbose=verbose)
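# Usage sketch for psd_multitaper (illustrative; ``epochs`` stands for an
# existing mne.Epochs instance, which is not constructed here):
#   psds, freqs = psd_multitaper(epochs, fmin=2., fmax=40., bandwidth=4.,
#                                adaptive=True, n_jobs=2)
#   # Epochs input -> psds.shape == (n_epochs, n_channels, n_freqs)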
|
|
from sympy.integrals.transforms import (mellin_transform,
inverse_mellin_transform, laplace_transform, inverse_laplace_transform,
fourier_transform, inverse_fourier_transform,
sine_transform, inverse_sine_transform,
cosine_transform, inverse_cosine_transform,
hankel_transform, inverse_hankel_transform,
LaplaceTransform, FourierTransform, SineTransform, CosineTransform,
InverseLaplaceTransform, InverseFourierTransform,
InverseSineTransform, InverseCosineTransform, IntegralTransformError)
from sympy import (
gamma, exp, oo, Heaviside, symbols, Symbol, re, factorial, pi, arg,
cos, S, Abs, And, sin, sqrt, I, log, tan, hyperexpand, meijerg,
EulerGamma, erf, erfc, besselj, bessely, besseli, besselk,
exp_polar, unpolarify, Function, expint, expand_mul, Rational,
gammasimp, trigsimp, atan, sinh, cosh, Ne, periodic_argument, atan2)
from sympy.utilities.pytest import XFAIL, slow, skip, raises
from sympy.matrices import Matrix, eye
from sympy.abc import x, s, a, b, c, d
nu, beta, rho = symbols('nu beta rho')
def test_undefined_function():
from sympy import Function, MellinTransform
f = Function('f')
assert mellin_transform(f(x), x, s) == MellinTransform(f(x), x, s)
assert mellin_transform(f(x) + exp(-x), x, s) == \
(MellinTransform(f(x), x, s) + gamma(s), (0, oo), True)
assert laplace_transform(2*f(x), x, s) == 2*LaplaceTransform(f(x), x, s)
# TODO test derivative and other rules when implemented
def test_free_symbols():
from sympy import Function
f = Function('f')
assert mellin_transform(f(x), x, s).free_symbols == {s}
assert mellin_transform(f(x)*a, x, s).free_symbols == {s, a}
def test_as_integral():
from sympy import Function, Integral
f = Function('f')
assert mellin_transform(f(x), x, s).rewrite('Integral') == \
Integral(x**(s - 1)*f(x), (x, 0, oo))
assert fourier_transform(f(x), x, s).rewrite('Integral') == \
Integral(f(x)*exp(-2*I*pi*s*x), (x, -oo, oo))
assert laplace_transform(f(x), x, s).rewrite('Integral') == \
Integral(f(x)*exp(-s*x), (x, 0, oo))
assert str(2*pi*I*inverse_mellin_transform(f(s), s, x, (a, b)).rewrite('Integral')) \
== "Integral(x**(-s)*f(s), (s, _c - oo*I, _c + oo*I))"
assert str(2*pi*I*inverse_laplace_transform(f(s), s, x).rewrite('Integral')) == \
"Integral(f(s)*exp(s*x), (s, _c - oo*I, _c + oo*I))"
assert inverse_fourier_transform(f(s), s, x).rewrite('Integral') == \
Integral(f(s)*exp(2*I*pi*s*x), (s, -oo, oo))
# NOTE this is stuck in risch because meijerint cannot handle it
@slow
@XFAIL
def test_mellin_transform_fail():
skip("Risch takes forever.")
MT = mellin_transform
bpos = symbols('b', positive=True)
bneg = symbols('b', negative=True)
expr = (sqrt(x + b**2) + b)**a/sqrt(x + b**2)
# TODO does not work with bneg, argument wrong. Needs changes to matching.
assert MT(expr.subs(b, -bpos), x, s) == \
((-1)**(a + 1)*2**(a + 2*s)*bpos**(a + 2*s - 1)*gamma(a + s)
*gamma(1 - a - 2*s)/gamma(1 - s),
(-re(a), -re(a)/2 + S.Half), True)
expr = (sqrt(x + b**2) + b)**a
assert MT(expr.subs(b, -bpos), x, s) == \
(
2**(a + 2*s)*a*bpos**(a + 2*s)*gamma(-a - 2*
s)*gamma(a + s)/gamma(-s + 1),
(-re(a), -re(a)/2), True)
# Test exponent 1:
assert MT(expr.subs({b: -bpos, a: 1}), x, s) == \
(-bpos**(2*s + 1)*gamma(s)*gamma(-s - S.Half)/(2*sqrt(pi)),
(-1, Rational(-1, 2)), True)
def test_mellin_transform():
from sympy import Max, Min
MT = mellin_transform
bpos = symbols('b', positive=True)
# 8.4.2
assert MT(x**nu*Heaviside(x - 1), x, s) == \
(-1/(nu + s), (-oo, -re(nu)), True)
assert MT(x**nu*Heaviside(1 - x), x, s) == \
(1/(nu + s), (-re(nu), oo), True)
assert MT((1 - x)**(beta - 1)*Heaviside(1 - x), x, s) == \
(gamma(beta)*gamma(s)/gamma(beta + s), (0, oo), re(beta) > 0)
assert MT((x - 1)**(beta - 1)*Heaviside(x - 1), x, s) == \
(gamma(beta)*gamma(1 - beta - s)/gamma(1 - s),
(-oo, -re(beta) + 1), re(beta) > 0)
assert MT((1 + x)**(-rho), x, s) == \
(gamma(s)*gamma(rho - s)/gamma(rho), (0, re(rho)), True)
# TODO also the conditions should be simplified, e.g.
# And(re(rho) - 1 < 0, re(rho) < 1) should just be
# re(rho) < 1
assert MT(abs(1 - x)**(-rho), x, s) == (
2*sin(pi*rho/2)*gamma(1 - rho)*
cos(pi*(rho/2 - s))*gamma(s)*gamma(rho-s)/pi,
(0, re(rho)), And(re(rho) - 1 < 0, re(rho) < 1))
mt = MT((1 - x)**(beta - 1)*Heaviside(1 - x)
+ a*(x - 1)**(beta - 1)*Heaviside(x - 1), x, s)
assert mt[1:] == ((0, -re(beta) + 1), re(beta) > 0)
assert MT((x**a - b**a)/(x - b), x, s)[0] == \
pi*b**(a + s - 1)*sin(pi*a)/(sin(pi*s)*sin(pi*(a + s)))
assert MT((x**a - bpos**a)/(x - bpos), x, s) == \
(pi*bpos**(a + s - 1)*sin(pi*a)/(sin(pi*s)*sin(pi*(a + s))),
(Max(-re(a), 0), Min(1 - re(a), 1)), True)
expr = (sqrt(x + b**2) + b)**a
assert MT(expr.subs(b, bpos), x, s) == \
(-a*(2*bpos)**(a + 2*s)*gamma(s)*gamma(-a - 2*s)/gamma(-a - s + 1),
(0, -re(a)/2), True)
expr = (sqrt(x + b**2) + b)**a/sqrt(x + b**2)
assert MT(expr.subs(b, bpos), x, s) == \
(2**(a + 2*s)*bpos**(a + 2*s - 1)*gamma(s)
*gamma(1 - a - 2*s)/gamma(1 - a - s),
(0, -re(a)/2 + S.Half), True)
# 8.4.2
assert MT(exp(-x), x, s) == (gamma(s), (0, oo), True)
assert MT(exp(-1/x), x, s) == (gamma(-s), (-oo, 0), True)
# 8.4.5
assert MT(log(x)**4*Heaviside(1 - x), x, s) == (24/s**5, (0, oo), True)
assert MT(log(x)**3*Heaviside(x - 1), x, s) == (6/s**4, (-oo, 0), True)
assert MT(log(x + 1), x, s) == (pi/(s*sin(pi*s)), (-1, 0), True)
assert MT(log(1/x + 1), x, s) == (pi/(s*sin(pi*s)), (0, 1), True)
assert MT(log(abs(1 - x)), x, s) == (pi/(s*tan(pi*s)), (-1, 0), True)
assert MT(log(abs(1 - 1/x)), x, s) == (pi/(s*tan(pi*s)), (0, 1), True)
# 8.4.14
assert MT(erf(sqrt(x)), x, s) == \
(-gamma(s + S.Half)/(sqrt(pi)*s), (Rational(-1, 2), 0), True)
@slow
def test_mellin_transform2():
MT = mellin_transform
# TODO we cannot currently do these (needs summation of 3F2(-1))
# this also implies that they cannot be written as a single g-function
# (although this is possible)
mt = MT(log(x)/(x + 1), x, s)
assert mt[1:] == ((0, 1), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
mt = MT(log(x)**2/(x + 1), x, s)
assert mt[1:] == ((0, 1), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
mt = MT(log(x)/(x + 1)**2, x, s)
assert mt[1:] == ((0, 2), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
@slow
def test_mellin_transform_bessel():
from sympy import Max
MT = mellin_transform
# 8.4.19
assert MT(besselj(a, 2*sqrt(x)), x, s) == \
(gamma(a/2 + s)/gamma(a/2 - s + 1), (-re(a)/2, Rational(3, 4)), True)
assert MT(sin(sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(2**a*gamma(-2*s + S.Half)*gamma(a/2 + s + S.Half)/(
gamma(-a/2 - s + 1)*gamma(a - 2*s + 1)), (
-re(a)/2 - S.Half, Rational(1, 4)), True)
assert MT(cos(sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(2**a*gamma(a/2 + s)*gamma(-2*s + S.Half)/(
gamma(-a/2 - s + S.Half)*gamma(a - 2*s + 1)), (
-re(a)/2, Rational(1, 4)), True)
assert MT(besselj(a, sqrt(x))**2, x, s) == \
(gamma(a + s)*gamma(S.Half - s)
/ (sqrt(pi)*gamma(1 - s)*gamma(1 + a - s)),
(-re(a), S.Half), True)
assert MT(besselj(a, sqrt(x))*besselj(-a, sqrt(x)), x, s) == \
(gamma(s)*gamma(S.Half - s)
/ (sqrt(pi)*gamma(1 - a - s)*gamma(1 + a - s)),
(0, S.Half), True)
# NOTE: prudnikov gives the strip below as (1/2 - re(a), 1). As far as
# I can see this is wrong (since besselj(z) ~ 1/sqrt(z) for z large)
assert MT(besselj(a - 1, sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(gamma(1 - s)*gamma(a + s - S.Half)
/ (sqrt(pi)*gamma(Rational(3, 2) - s)*gamma(a - s + S.Half)),
(S.Half - re(a), S.Half), True)
assert MT(besselj(a, sqrt(x))*besselj(b, sqrt(x)), x, s) == \
(4**s*gamma(1 - 2*s)*gamma((a + b)/2 + s)
/ (gamma(1 - s + (b - a)/2)*gamma(1 - s + (a - b)/2)
*gamma( 1 - s + (a + b)/2)),
(-(re(a) + re(b))/2, S.Half), True)
assert MT(besselj(a, sqrt(x))**2 + besselj(-a, sqrt(x))**2, x, s)[1:] == \
((Max(re(a), -re(a)), S.Half), True)
# Section 8.4.20
assert MT(bessely(a, 2*sqrt(x)), x, s) == \
(-cos(pi*(a/2 - s))*gamma(s - a/2)*gamma(s + a/2)/pi,
(Max(-re(a)/2, re(a)/2), Rational(3, 4)), True)
assert MT(sin(sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-4**s*sin(pi*(a/2 - s))*gamma(S.Half - 2*s)
* gamma((1 - a)/2 + s)*gamma((1 + a)/2 + s)
/ (sqrt(pi)*gamma(1 - s - a/2)*gamma(1 - s + a/2)),
(Max(-(re(a) + 1)/2, (re(a) - 1)/2), Rational(1, 4)), True)
assert MT(cos(sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-4**s*cos(pi*(a/2 - s))*gamma(s - a/2)*gamma(s + a/2)*gamma(S.Half - 2*s)
/ (sqrt(pi)*gamma(S.Half - s - a/2)*gamma(S.Half - s + a/2)),
(Max(-re(a)/2, re(a)/2), Rational(1, 4)), True)
assert MT(besselj(a, sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-cos(pi*s)*gamma(s)*gamma(a + s)*gamma(S.Half - s)
/ (pi**S('3/2')*gamma(1 + a - s)),
(Max(-re(a), 0), S.Half), True)
assert MT(besselj(a, sqrt(x))*bessely(b, sqrt(x)), x, s) == \
(-4**s*cos(pi*(a/2 - b/2 + s))*gamma(1 - 2*s)
* gamma(a/2 - b/2 + s)*gamma(a/2 + b/2 + s)
/ (pi*gamma(a/2 - b/2 - s + 1)*gamma(a/2 + b/2 - s + 1)),
(Max((-re(a) + re(b))/2, (-re(a) - re(b))/2), S.Half), True)
# NOTE bessely(a, sqrt(x))**2 and bessely(a, sqrt(x))*bessely(b, sqrt(x))
# are a mess (no matter what way you look at it ...)
assert MT(bessely(a, sqrt(x))**2, x, s)[1:] == \
((Max(-re(a), 0, re(a)), S.Half), True)
# Section 8.4.22
# TODO we can't do any of these (delicate cancellation)
# Section 8.4.23
assert MT(besselk(a, 2*sqrt(x)), x, s) == \
(gamma(
s - a/2)*gamma(s + a/2)/2, (Max(-re(a)/2, re(a)/2), oo), True)
assert MT(besselj(a, 2*sqrt(2*sqrt(x)))*besselk(
a, 2*sqrt(2*sqrt(x))), x, s) == (4**(-s)*gamma(2*s)*
gamma(a/2 + s)/(2*gamma(a/2 - s + 1)), (Max(0, -re(a)/2), oo), True)
# TODO bessely(a, x)*besselk(a, x) is a mess
assert MT(besseli(a, sqrt(x))*besselk(a, sqrt(x)), x, s) == \
(gamma(s)*gamma(
a + s)*gamma(-s + S.Half)/(2*sqrt(pi)*gamma(a - s + 1)),
(Max(-re(a), 0), S.Half), True)
assert MT(besseli(b, sqrt(x))*besselk(a, sqrt(x)), x, s) == \
(2**(2*s - 1)*gamma(-2*s + 1)*gamma(-a/2 + b/2 + s)* \
gamma(a/2 + b/2 + s)/(gamma(-a/2 + b/2 - s + 1)* \
gamma(a/2 + b/2 - s + 1)), (Max(-re(a)/2 - re(b)/2, \
re(a)/2 - re(b)/2), S.Half), True)
# TODO products of besselk are a mess
mt = MT(exp(-x/2)*besselk(a, x/2), x, s)
mt0 = gammasimp((trigsimp(gammasimp(mt[0].expand(func=True)))))
assert mt0 == 2*pi**Rational(3, 2)*cos(pi*s)*gamma(-s + S.Half)/(
(cos(2*pi*a) - cos(2*pi*s))*gamma(-a - s + 1)*gamma(a - s + 1))
assert mt[1:] == ((Max(-re(a), re(a)), oo), True)
# TODO exp(x/2)*besselk(a, x/2) [etc] cannot currently be done
# TODO various strange products of special orders
@slow
def test_expint():
from sympy import E1, expint, Max, re, lerchphi, Symbol, simplify, Si, Ci, Ei
aneg = Symbol('a', negative=True)
u = Symbol('u', polar=True)
assert mellin_transform(E1(x), x, s) == (gamma(s)/s, (0, oo), True)
assert inverse_mellin_transform(gamma(s)/s, s, x,
(0, oo)).rewrite(expint).expand() == E1(x)
assert mellin_transform(expint(a, x), x, s) == \
(gamma(s)/(a + s - 1), (Max(1 - re(a), 0), oo), True)
# XXX IMT has hickups with complicated strips ...
assert simplify(unpolarify(
inverse_mellin_transform(gamma(s)/(aneg + s - 1), s, x,
(1 - aneg, oo)).rewrite(expint).expand(func=True))) == \
expint(aneg, x)
assert mellin_transform(Si(x), x, s) == \
(-2**s*sqrt(pi)*gamma(s/2 + S.Half)/(
2*s*gamma(-s/2 + 1)), (-1, 0), True)
assert inverse_mellin_transform(-2**s*sqrt(pi)*gamma((s + 1)/2)
/(2*s*gamma(-s/2 + 1)), s, x, (-1, 0)) \
== Si(x)
assert mellin_transform(Ci(sqrt(x)), x, s) == \
(-2**(2*s - 1)*sqrt(pi)*gamma(s)/(s*gamma(-s + S.Half)), (0, 1), True)
assert inverse_mellin_transform(
-4**s*sqrt(pi)*gamma(s)/(2*s*gamma(-s + S.Half)),
s, u, (0, 1)).expand() == Ci(sqrt(u))
# TODO LT of Si, Shi, Chi is a mess ...
assert laplace_transform(Ci(x), x, s) == (-log(1 + s**2)/2/s, 0, True)
assert laplace_transform(expint(a, x), x, s) == \
(lerchphi(s*exp_polar(I*pi), 1, a), 0, re(a) > S.Zero)
assert laplace_transform(expint(1, x), x, s) == (log(s + 1)/s, 0, True)
assert laplace_transform(expint(2, x), x, s) == \
((s - log(s + 1))/s**2, 0, True)
assert inverse_laplace_transform(-log(1 + s**2)/2/s, s, u).expand() == \
Heaviside(u)*Ci(u)
assert inverse_laplace_transform(log(s + 1)/s, s, x).rewrite(expint) == \
Heaviside(x)*E1(x)
assert inverse_laplace_transform((s - log(s + 1))/s**2, s,
x).rewrite(expint).expand() == \
(expint(2, x)*Heaviside(x)).rewrite(Ei).rewrite(expint).expand()
@slow
def test_inverse_mellin_transform():
from sympy import (sin, simplify, Max, Min, expand,
powsimp, exp_polar, cos, cot)
IMT = inverse_mellin_transform
assert IMT(gamma(s), s, x, (0, oo)) == exp(-x)
assert IMT(gamma(-s), s, x, (-oo, 0)) == exp(-1/x)
assert simplify(IMT(s/(2*s**2 - 2), s, x, (2, oo))) == \
(x**2 + 1)*Heaviside(1 - x)/(4*x)
# test passing "None"
assert IMT(1/(s**2 - 1), s, x, (-1, None)) == \
-x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x)
assert IMT(1/(s**2 - 1), s, x, (None, 1)) == \
-x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x)
# test expansion of sums
assert IMT(gamma(s) + gamma(s - 1), s, x, (1, oo)) == (x + 1)*exp(-x)/x
# test factorisation of polys
r = symbols('r', real=True)
assert IMT(1/(s**2 + 1), s, exp(-x), (None, oo)
).subs(x, r).rewrite(sin).simplify() \
== sin(r)*Heaviside(1 - exp(-r))
# test multiplicative substitution
_a, _b = symbols('a b', positive=True)
assert IMT(_b**(-s/_a)*factorial(s/_a)/s, s, x, (0, oo)) == exp(-_b*x**_a)
assert IMT(factorial(_a/_b + s/_b)/(_a + s), s, x, (-_a, oo)) == x**_a*exp(-x**_b)
def simp_pows(expr):
return simplify(powsimp(expand_mul(expr, deep=False), force=True)).replace(exp_polar, exp)
# Now test the inverses of all direct transforms tested above
# Section 8.4.2
nu = symbols('nu', real=True)
assert IMT(-1/(nu + s), s, x, (-oo, None)) == x**nu*Heaviside(x - 1)
assert IMT(1/(nu + s), s, x, (None, oo)) == x**nu*Heaviside(1 - x)
assert simp_pows(IMT(gamma(beta)*gamma(s)/gamma(s + beta), s, x, (0, oo))) \
== (1 - x)**(beta - 1)*Heaviside(1 - x)
assert simp_pows(IMT(gamma(beta)*gamma(1 - beta - s)/gamma(1 - s),
s, x, (-oo, None))) \
== (x - 1)**(beta - 1)*Heaviside(x - 1)
assert simp_pows(IMT(gamma(s)*gamma(rho - s)/gamma(rho), s, x, (0, None))) \
== (1/(x + 1))**rho
assert simp_pows(IMT(d**c*d**(s - 1)*sin(pi*c)
*gamma(s)*gamma(s + c)*gamma(1 - s)*gamma(1 - s - c)/pi,
s, x, (Max(-re(c), 0), Min(1 - re(c), 1)))) \
== (x**c - d**c)/(x - d)
assert simplify(IMT(1/sqrt(pi)*(-c/2)*gamma(s)*gamma((1 - c)/2 - s)
*gamma(-c/2 - s)/gamma(1 - c - s),
s, x, (0, -re(c)/2))) == \
(1 + sqrt(x + 1))**c
assert simplify(IMT(2**(a + 2*s)*b**(a + 2*s - 1)*gamma(s)*gamma(1 - a - 2*s)
/gamma(1 - a - s), s, x, (0, (-re(a) + 1)/2))) == \
b**(a - 1)*(sqrt(1 + x/b**2) + 1)**(a - 1)*(b**2*sqrt(1 + x/b**2) +
b**2 + x)/(b**2 + x)
assert simplify(IMT(-2**(c + 2*s)*c*b**(c + 2*s)*gamma(s)*gamma(-c - 2*s)
/ gamma(-c - s + 1), s, x, (0, -re(c)/2))) == \
b**c*(sqrt(1 + x/b**2) + 1)**c
# Section 8.4.5
assert IMT(24/s**5, s, x, (0, oo)) == log(x)**4*Heaviside(1 - x)
assert expand(IMT(6/s**4, s, x, (-oo, 0)), force=True) == \
log(x)**3*Heaviside(x - 1)
assert IMT(pi/(s*sin(pi*s)), s, x, (-1, 0)) == log(x + 1)
assert IMT(pi/(s*sin(pi*s/2)), s, x, (-2, 0)) == log(x**2 + 1)
assert IMT(pi/(s*sin(2*pi*s)), s, x, (Rational(-1, 2), 0)) == log(sqrt(x) + 1)
assert IMT(pi/(s*sin(pi*s)), s, x, (0, 1)) == log(1 + 1/x)
# TODO
def mysimp(expr):
from sympy import expand, logcombine, powsimp
return expand(
powsimp(logcombine(expr, force=True), force=True, deep=True),
force=True).replace(exp_polar, exp)
assert mysimp(mysimp(IMT(pi/(s*tan(pi*s)), s, x, (-1, 0)))) in [
log(1 - x)*Heaviside(1 - x) + log(x - 1)*Heaviside(x - 1),
log(x)*Heaviside(x - 1) + log(1 - 1/x)*Heaviside(x - 1) + log(-x +
1)*Heaviside(-x + 1)]
# test passing cot
assert mysimp(IMT(pi*cot(pi*s)/s, s, x, (0, 1))) in [
log(1/x - 1)*Heaviside(1 - x) + log(1 - 1/x)*Heaviside(x - 1),
-log(x)*Heaviside(-x + 1) + log(1 - 1/x)*Heaviside(x - 1) + log(-x +
1)*Heaviside(-x + 1), ]
# 8.4.14
assert IMT(-gamma(s + S.Half)/(sqrt(pi)*s), s, x, (Rational(-1, 2), 0)) == \
erf(sqrt(x))
# 8.4.19
assert simplify(IMT(gamma(a/2 + s)/gamma(a/2 - s + 1), s, x, (-re(a)/2, Rational(3, 4)))) \
== besselj(a, 2*sqrt(x))
assert simplify(IMT(2**a*gamma(S.Half - 2*s)*gamma(s + (a + 1)/2)
/ (gamma(1 - s - a/2)*gamma(1 - 2*s + a)),
s, x, (-(re(a) + 1)/2, Rational(1, 4)))) == \
sin(sqrt(x))*besselj(a, sqrt(x))
assert simplify(IMT(2**a*gamma(a/2 + s)*gamma(S.Half - 2*s)
/ (gamma(S.Half - s - a/2)*gamma(1 - 2*s + a)),
s, x, (-re(a)/2, Rational(1, 4)))) == \
cos(sqrt(x))*besselj(a, sqrt(x))
# TODO this comes out as an amazing mess, but simplifies nicely
assert simplify(IMT(gamma(a + s)*gamma(S.Half - s)
/ (sqrt(pi)*gamma(1 - s)*gamma(1 + a - s)),
s, x, (-re(a), S.Half))) == \
besselj(a, sqrt(x))**2
assert simplify(IMT(gamma(s)*gamma(S.Half - s)
/ (sqrt(pi)*gamma(1 - s - a)*gamma(1 + a - s)),
s, x, (0, S.Half))) == \
besselj(-a, sqrt(x))*besselj(a, sqrt(x))
assert simplify(IMT(4**s*gamma(-2*s + 1)*gamma(a/2 + b/2 + s)
/ (gamma(-a/2 + b/2 - s + 1)*gamma(a/2 - b/2 - s + 1)
*gamma(a/2 + b/2 - s + 1)),
s, x, (-(re(a) + re(b))/2, S.Half))) == \
besselj(a, sqrt(x))*besselj(b, sqrt(x))
# Section 8.4.20
# TODO this can be further simplified!
assert simplify(IMT(-2**(2*s)*cos(pi*a/2 - pi*b/2 + pi*s)*gamma(-2*s + 1) *
gamma(a/2 - b/2 + s)*gamma(a/2 + b/2 + s) /
(pi*gamma(a/2 - b/2 - s + 1)*gamma(a/2 + b/2 - s + 1)),
s, x,
(Max(-re(a)/2 - re(b)/2, -re(a)/2 + re(b)/2), S.Half))) == \
besselj(a, sqrt(x))*-(besselj(-b, sqrt(x)) -
besselj(b, sqrt(x))*cos(pi*b))/sin(pi*b)
# TODO more
# for coverage
assert IMT(pi/cos(pi*s), s, x, (0, S.Half)) == sqrt(x)/(x + 1)
@slow
def test_laplace_transform():
from sympy import fresnels, fresnelc
LT = laplace_transform
a, b, c, = symbols('a b c', positive=True)
t = symbols('t')
w = Symbol("w")
f = Function("f")
# Test unevaluated form
assert laplace_transform(f(t), t, w) == LaplaceTransform(f(t), t, w)
assert inverse_laplace_transform(
f(w), w, t, plane=0) == InverseLaplaceTransform(f(w), w, t, 0)
# test a bug
spos = symbols('s', positive=True)
assert LT(exp(t), t, spos)[:2] == (1/(spos - 1), 1)
# basic tests from wikipedia
assert LT((t - a)**b*exp(-c*(t - a))*Heaviside(t - a), t, s) == \
((s + c)**(-b - 1)*exp(-a*s)*gamma(b + 1), -c, True)
assert LT(t**a, t, s) == (s**(-a - 1)*gamma(a + 1), 0, True)
assert LT(Heaviside(t), t, s) == (1/s, 0, True)
assert LT(Heaviside(t - a), t, s) == (exp(-a*s)/s, 0, True)
assert LT(1 - exp(-a*t), t, s) == (a/(s*(a + s)), 0, True)
assert LT((exp(2*t) - 1)*exp(-b - t)*Heaviside(t)/2, t, s, noconds=True) \
== exp(-b)/(s**2 - 1)
assert LT(exp(t), t, s)[:2] == (1/(s - 1), 1)
assert LT(exp(2*t), t, s)[:2] == (1/(s - 2), 2)
assert LT(exp(a*t), t, s)[:2] == (1/(s - a), a)
assert LT(log(t/a), t, s) == (-(log(a*s) + EulerGamma)/s, 0, True)
assert LT(erf(t), t, s) == (erfc(s/2)*exp(s**2/4)/s, 0, True)
assert LT(sin(a*t), t, s) == (a/(a**2 + s**2), 0, True)
assert LT(cos(a*t), t, s) == (s/(a**2 + s**2), 0, True)
# TODO would be nice to have these come out better
assert LT(exp(-a*t)*sin(b*t), t, s) == (b/(b**2 + (a + s)**2), -a, True)
assert LT(exp(-a*t)*cos(b*t), t, s) == \
((a + s)/(b**2 + (a + s)**2), -a, True)
assert LT(besselj(0, t), t, s) == (1/sqrt(1 + s**2), 0, True)
assert LT(besselj(1, t), t, s) == (1 - 1/sqrt(1 + 1/s**2), 0, True)
# TODO general order works, but is a *mess*
# TODO besseli also works, but is an even greater mess
# test a bug in conditions processing
# TODO the auxiliary condition should be recognised/simplified
assert LT(exp(t)*cos(t), t, s)[:-1] in [
((s - 1)/(s**2 - 2*s + 2), -oo),
((s - 1)/((s - 1)**2 + 1), -oo),
]
# Fresnel functions
assert laplace_transform(fresnels(t), t, s) == \
((-sin(s**2/(2*pi))*fresnels(s/pi) + sin(s**2/(2*pi))/2 -
cos(s**2/(2*pi))*fresnelc(s/pi) + cos(s**2/(2*pi))/2)/s, 0, True)
assert laplace_transform(fresnelc(t), t, s) == (
((2*sin(s**2/(2*pi))*fresnelc(s/pi) - 2*cos(s**2/(2*pi))*fresnels(s/pi)
+ sqrt(2)*cos(s**2/(2*pi) + pi/4))/(2*s), 0, True))
cond = Ne(1/s, 1) & (
0 < cos(Abs(periodic_argument(s, oo)))*Abs(s) - 1)
assert LT(Matrix([[exp(t), t*exp(-t)], [t*exp(-t), exp(t)]]), t, s) ==\
Matrix([
[(1/(s - 1), 1, True), ((s + 1)**(-2), 0, True)],
[((s + 1)**(-2), 0, True), (1/(s - 1), 1, True)]
])
def test_issue_8368_7173():
LT = laplace_transform
# hyperbolic
assert LT(sinh(x), x, s) == (1/(s**2 - 1), 1, True)
assert LT(cosh(x), x, s) == (s/(s**2 - 1), 1, True)
assert LT(sinh(x + 3), x, s) == (
(-s + (s + 1)*exp(6) + 1)*exp(-3)/(s - 1)/(s + 1)/2, 1, True)
assert LT(sinh(x)*cosh(x), x, s) == (
1/(s**2 - 4), 2, Ne(s/2, 1))
# trig (make sure they are not being rewritten in terms of exp)
assert LT(cos(x + 3), x, s) == ((s*cos(3) - sin(3))/(s**2 + 1), 0, True)
def test_inverse_laplace_transform():
from sympy import sinh, cosh, besselj, besseli, simplify, factor_terms
ILT = inverse_laplace_transform
a, b, c, = symbols('a b c', positive=True)
t = symbols('t')
def simp_hyp(expr):
return factor_terms(expand_mul(expr)).rewrite(sin)
# just test inverses of all of the above
assert ILT(1/s, s, t) == Heaviside(t)
assert ILT(1/s**2, s, t) == t*Heaviside(t)
assert ILT(1/s**5, s, t) == t**4*Heaviside(t)/24
assert ILT(exp(-a*s)/s, s, t) == Heaviside(t - a)
assert ILT(exp(-a*s)/(s + b), s, t) == exp(b*(a - t))*Heaviside(-a + t)
assert ILT(a/(s**2 + a**2), s, t) == sin(a*t)*Heaviside(t)
assert ILT(s/(s**2 + a**2), s, t) == cos(a*t)*Heaviside(t)
# TODO is there a way around simp_hyp?
assert simp_hyp(ILT(a/(s**2 - a**2), s, t)) == sinh(a*t)*Heaviside(t)
assert simp_hyp(ILT(s/(s**2 - a**2), s, t)) == cosh(a*t)*Heaviside(t)
assert ILT(a/((s + b)**2 + a**2), s, t) == exp(-b*t)*sin(a*t)*Heaviside(t)
assert ILT(
(s + b)/((s + b)**2 + a**2), s, t) == exp(-b*t)*cos(a*t)*Heaviside(t)
# TODO sinh/cosh shifted come out a mess. also delayed trig is a mess
# TODO should this simplify further?
assert ILT(exp(-a*s)/s**b, s, t) == \
(t - a)**(b - 1)*Heaviside(t - a)/gamma(b)
assert ILT(exp(-a*s)/sqrt(1 + s**2), s, t) == \
Heaviside(t - a)*besselj(0, a - t) # note: besselj(0, x) is even
# XXX ILT turns these branch factor into trig functions ...
assert simplify(ILT(a**b*(s + sqrt(s**2 - a**2))**(-b)/sqrt(s**2 - a**2),
s, t).rewrite(exp)) == \
Heaviside(t)*besseli(b, a*t)
assert ILT(a**b*(s + sqrt(s**2 + a**2))**(-b)/sqrt(s**2 + a**2),
s, t).rewrite(exp) == \
Heaviside(t)*besselj(b, a*t)
assert ILT(1/(s*sqrt(s + 1)), s, t) == Heaviside(t)*erf(sqrt(t))
# TODO can we make erf(t) work?
assert ILT(1/(s**2*(s**2 + 1)),s,t) == (t - sin(t))*Heaviside(t)
assert ILT( (s * eye(2) - Matrix([[1, 0], [0, 2]])).inv(), s, t) ==\
Matrix([[exp(t)*Heaviside(t), 0], [0, exp(2*t)*Heaviside(t)]])
def test_inverse_laplace_transform_delta():
from sympy import DiracDelta
ILT = inverse_laplace_transform
t = symbols('t')
assert ILT(2, s, t) == 2*DiracDelta(t)
assert ILT(2*exp(3*s) - 5*exp(-7*s), s, t) == \
2*DiracDelta(t + 3) - 5*DiracDelta(t - 7)
a = cos(sin(7)/2)
assert ILT(a*exp(-3*s), s, t) == a*DiracDelta(t - 3)
assert ILT(exp(2*s), s, t) == DiracDelta(t + 2)
r = Symbol('r', real=True)
assert ILT(exp(r*s), s, t) == DiracDelta(t + r)
def test_inverse_laplace_transform_delta_cond():
from sympy import DiracDelta, Eq, im, Heaviside
ILT = inverse_laplace_transform
t = symbols('t')
r = Symbol('r', real=True)
assert ILT(exp(r*s), s, t, noconds=False) == (DiracDelta(t + r), True)
z = Symbol('z')
assert ILT(exp(z*s), s, t, noconds=False) == \
(DiracDelta(t + z), Eq(im(z), 0))
# inversion does not exist: verify it doesn't evaluate to DiracDelta
for z in (Symbol('z', extended_real=False),
Symbol('z', imaginary=True, zero=False)):
f = ILT(exp(z*s), s, t, noconds=False)
f = f[0] if isinstance(f, tuple) else f
assert f.func != DiracDelta
# issue 15043
assert ILT(1/s + exp(r*s)/s, s, t, noconds=False) == (
Heaviside(t) + Heaviside(r + t), True)
def test_fourier_transform():
from sympy import simplify, expand, expand_complex, factor, expand_trig
FT = fourier_transform
IFT = inverse_fourier_transform
def simp(x):
return simplify(expand_trig(expand_complex(expand(x))))
def sinc(x):
return sin(pi*x)/(pi*x)
k = symbols('k', real=True)
f = Function("f")
# TODO for this to work with real a, need to expand abs(a*x) to abs(a)*abs(x)
a = symbols('a', positive=True)
b = symbols('b', positive=True)
posk = symbols('posk', positive=True)
# Test unevaluated form
assert fourier_transform(f(x), x, k) == FourierTransform(f(x), x, k)
assert inverse_fourier_transform(
f(k), k, x) == InverseFourierTransform(f(k), k, x)
# basic examples from wikipedia
assert simp(FT(Heaviside(1 - abs(2*a*x)), x, k)) == sinc(k/a)/a
# TODO IFT is a *mess*
assert simp(FT(Heaviside(1 - abs(a*x))*(1 - abs(a*x)), x, k)) == sinc(k/a)**2/a
# TODO IFT
assert factor(FT(exp(-a*x)*Heaviside(x), x, k), extension=I) == \
1/(a + 2*pi*I*k)
# NOTE: the ift comes out in pieces
assert IFT(1/(a + 2*pi*I*x), x, posk,
noconds=False) == (exp(-a*posk), True)
assert IFT(1/(a + 2*pi*I*x), x, -posk,
noconds=False) == (0, True)
assert IFT(1/(a + 2*pi*I*x), x, symbols('k', negative=True),
noconds=False) == (0, True)
# TODO IFT without factoring comes out as meijer g
assert factor(FT(x*exp(-a*x)*Heaviside(x), x, k), extension=I) == \
1/(a + 2*pi*I*k)**2
assert FT(exp(-a*x)*sin(b*x)*Heaviside(x), x, k) == \
b/(b**2 + (a + 2*I*pi*k)**2)
assert FT(exp(-a*x**2), x, k) == sqrt(pi)*exp(-pi**2*k**2/a)/sqrt(a)
assert IFT(sqrt(pi/a)*exp(-(pi*k)**2/a), k, x) == exp(-a*x**2)
assert FT(exp(-a*abs(x)), x, k) == 2*a/(a**2 + 4*pi**2*k**2)
# TODO IFT (comes out as meijer G)
# TODO besselj(n, x), n an integer > 0 actually can be done...
# TODO are there other common transforms (no distributions!)?
def test_sine_transform():
from sympy import EulerGamma
t = symbols("t")
w = symbols("w")
a = symbols("a")
f = Function("f")
# Test unevaluated form
assert sine_transform(f(t), t, w) == SineTransform(f(t), t, w)
assert inverse_sine_transform(
f(w), w, t) == InverseSineTransform(f(w), w, t)
assert sine_transform(1/sqrt(t), t, w) == 1/sqrt(w)
assert inverse_sine_transform(1/sqrt(w), w, t) == 1/sqrt(t)
assert sine_transform((1/sqrt(t))**3, t, w) == 2*sqrt(w)
assert sine_transform(t**(-a), t, w) == 2**(
-a + S.Half)*w**(a - 1)*gamma(-a/2 + 1)/gamma((a + 1)/2)
assert inverse_sine_transform(2**(-a + S(
1)/2)*w**(a - 1)*gamma(-a/2 + 1)/gamma(a/2 + S.Half), w, t) == t**(-a)
assert sine_transform(
exp(-a*t), t, w) == sqrt(2)*w/(sqrt(pi)*(a**2 + w**2))
assert inverse_sine_transform(
sqrt(2)*w/(sqrt(pi)*(a**2 + w**2)), w, t) == exp(-a*t)
assert sine_transform(
log(t)/t, t, w) == -sqrt(2)*sqrt(pi)*(log(w**2) + 2*EulerGamma)/4
assert sine_transform(
t*exp(-a*t**2), t, w) == sqrt(2)*w*exp(-w**2/(4*a))/(4*a**Rational(3, 2))
assert inverse_sine_transform(
sqrt(2)*w*exp(-w**2/(4*a))/(4*a**Rational(3, 2)), w, t) == t*exp(-a*t**2)
def test_cosine_transform():
from sympy import Si, Ci
t = symbols("t")
w = symbols("w")
a = symbols("a")
f = Function("f")
# Test unevaluated form
assert cosine_transform(f(t), t, w) == CosineTransform(f(t), t, w)
assert inverse_cosine_transform(
f(w), w, t) == InverseCosineTransform(f(w), w, t)
assert cosine_transform(1/sqrt(t), t, w) == 1/sqrt(w)
assert inverse_cosine_transform(1/sqrt(w), w, t) == 1/sqrt(t)
assert cosine_transform(1/(
a**2 + t**2), t, w) == sqrt(2)*sqrt(pi)*exp(-a*w)/(2*a)
assert cosine_transform(t**(
-a), t, w) == 2**(-a + S.Half)*w**(a - 1)*gamma((-a + 1)/2)/gamma(a/2)
assert inverse_cosine_transform(2**(-a + S(
1)/2)*w**(a - 1)*gamma(-a/2 + S.Half)/gamma(a/2), w, t) == t**(-a)
assert cosine_transform(
exp(-a*t), t, w) == sqrt(2)*a/(sqrt(pi)*(a**2 + w**2))
assert inverse_cosine_transform(
sqrt(2)*a/(sqrt(pi)*(a**2 + w**2)), w, t) == exp(-a*t)
assert cosine_transform(exp(-a*sqrt(t))*cos(a*sqrt(
t)), t, w) == a*exp(-a**2/(2*w))/(2*w**Rational(3, 2))
assert cosine_transform(1/(a + t), t, w) == sqrt(2)*(
(-2*Si(a*w) + pi)*sin(a*w)/2 - cos(a*w)*Ci(a*w))/sqrt(pi)
assert inverse_cosine_transform(sqrt(2)*meijerg(((S.Half, 0), ()), (
(S.Half, 0, 0), (S.Half,)), a**2*w**2/4)/(2*pi), w, t) == 1/(a + t)
assert cosine_transform(1/sqrt(a**2 + t**2), t, w) == sqrt(2)*meijerg(
((S.Half,), ()), ((0, 0), (S.Half,)), a**2*w**2/4)/(2*sqrt(pi))
assert inverse_cosine_transform(sqrt(2)*meijerg(((S.Half,), ()), ((0, 0), (S.Half,)), a**2*w**2/4)/(2*sqrt(pi)), w, t) == 1/(t*sqrt(a**2/t**2 + 1))
def test_hankel_transform():
from sympy import gamma, sqrt, exp
r = Symbol("r")
k = Symbol("k")
nu = Symbol("nu")
m = Symbol("m")
a = symbols("a")
assert hankel_transform(1/r, r, k, 0) == 1/k
assert inverse_hankel_transform(1/k, k, r, 0) == 1/r
assert hankel_transform(
1/r**m, r, k, 0) == 2**(-m + 1)*k**(m - 2)*gamma(-m/2 + 1)/gamma(m/2)
assert inverse_hankel_transform(
2**(-m + 1)*k**(m - 2)*gamma(-m/2 + 1)/gamma(m/2), k, r, 0) == r**(-m)
assert hankel_transform(1/r**m, r, k, nu) == (
2*2**(-m)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2))
assert inverse_hankel_transform(2**(-m + 1)*k**(
m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2), k, r, nu) == r**(-m)
assert hankel_transform(r**nu*exp(-a*r), r, k, nu) == \
2**(nu + 1)*a*k**(-nu - 3)*(a**2/k**2 + 1)**(-nu - S(
3)/2)*gamma(nu + Rational(3, 2))/sqrt(pi)
assert inverse_hankel_transform(
2**(nu + 1)*a*k**(-nu - 3)*(a**2/k**2 + 1)**(-nu - Rational(3, 2))*gamma(
nu + Rational(3, 2))/sqrt(pi), k, r, nu) == r**nu*exp(-a*r)
def test_issue_7181():
assert mellin_transform(1/(1 - x), x, s) is not None
def test_issue_8882():
# This is the original test.
# from sympy import diff, Integral, integrate
# r = Symbol('r')
# psi = 1/r*sin(r)*exp(-(a0*r))
# h = -1/2*diff(psi, r, r) - 1/r*psi
# f = 4*pi*psi*h*r**2
# assert integrate(f, (r, -oo, 3), meijerg=True).has(Integral) == True
# To save time, only the critical part is included.
F = -a**(-s + 1)*(4 + 1/a**2)**(-s/2)*sqrt(1/a**2)*exp(-s*I*pi)* \
sin(s*atan(sqrt(1/a**2)/2))*gamma(s)
raises(IntegralTransformError, lambda:
inverse_mellin_transform(F, s, x, (-1, oo),
**{'as_meijerg': True, 'needeval': True}))
def test_issue_7173():
from sympy import cse
x0, x1, x2, x3 = symbols('x:4')
ans = laplace_transform(sinh(a*x)*cosh(a*x), x, s)
r, e = cse(ans)
assert r == [
(x0, arg(a)),
(x1, Abs(x0)),
(x2, pi/2),
(x3, Abs(x0 + pi))]
assert e == [
a/(-4*a**2 + s**2),
0,
((x1 <= x2) | (x1 < x2)) & ((x3 <= x2) | (x3 < x2))]
def test_issue_8514():
from sympy import simplify
a, b, c, = symbols('a b c', positive=True)
t = symbols('t', positive=True)
ft = simplify(inverse_laplace_transform(1/(a*s**2+b*s+c),s, t))
assert ft == (I*exp(t*cos(atan2(0, -4*a*c + b**2)/2)*sqrt(Abs(4*a*c -
b**2))/a)*sin(t*sin(atan2(0, -4*a*c + b**2)/2)*sqrt(Abs(
4*a*c - b**2))/(2*a)) + exp(t*cos(atan2(0, -4*a*c + b**2)
/2)*sqrt(Abs(4*a*c - b**2))/a)*cos(t*sin(atan2(0, -4*a*c
+ b**2)/2)*sqrt(Abs(4*a*c - b**2))/(2*a)) + I*sin(t*sin(
atan2(0, -4*a*c + b**2)/2)*sqrt(Abs(4*a*c - b**2))/(2*a))
- cos(t*sin(atan2(0, -4*a*c + b**2)/2)*sqrt(Abs(4*a*c -
b**2))/(2*a)))*exp(-t*(b + cos(atan2(0, -4*a*c + b**2)/2)
*sqrt(Abs(4*a*c - b**2)))/(2*a))/sqrt(-4*a*c + b**2)
def test_issue_12591():
x, y = symbols("x y", real=True)
assert fourier_transform(exp(x), x, y) == FourierTransform(exp(x), x, y)
def test_issue_14692():
b = Symbol('b', negative=True)
assert laplace_transform(1/(I*x - b), x, s) == \
(-I*exp(I*b*s)*expint(1, b*s*exp_polar(I*pi/2)), 0, True)
|
|
import asyncio
import calendar
import datetime
import functools
import time
from typing import Any, Dict
from .base import MovingWindowSupport, Storage
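# Decorator used below: it ensures the TTL indices on the counters and windows
# collections exist before the wrapped storage coroutine runs; index creation
# is a no-op after the first successful call.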
def ensure_indices(func):
@functools.wraps(func)
async def wrapped(self, *args, **kwargs):
await self.create_indices()
return await func(self, *args, **kwargs)
return wrapped
class MongoDBStorage(Storage, MovingWindowSupport):
"""
Rate limit storage with MongoDB as backend.
Depends on :pypi:`motor`
.. warning:: This is a beta feature
.. versionadded:: 2.1
"""
STORAGE_SCHEME = ["async+mongodb", "async+mongodb+srv"]
"""
The storage scheme for MongoDB for use in an async context
"""
DEFAULT_OPTIONS = {
"serverSelectionTimeoutMS": 1000,
"socketTimeoutMS": 1000,
"connectTimeoutMS": 1000,
}
"Default options passed to :class:`~motor.motor_asyncio.AsyncIOMotorClient`"
DEPENDENCIES = ["motor.motor_asyncio", "pymongo"]
def __init__(self, uri: str, database_name: str = "limits", **options):
"""
        :param uri: uri of the form ``async+mongodb://[user:password]@host:port?...``.
         This uri is passed directly to :class:`~motor.motor_asyncio.AsyncIOMotorClient`
:param database_name: The database to use for storing the rate limit
collections.
:param options: all remaining keyword arguments are merged with
:data:`DEFAULT_OPTIONS` and passed to the constructor of
:class:`~motor.motor_asyncio.AsyncIOMotorClient`
:raise ConfigurationError: when the :pypi:`motor` or :pypi:`pymongo` are
not available
"""
mongo_opts = options.copy()
        for key, value in self.DEFAULT_OPTIONS.items():
            mongo_opts.setdefault(key, value)
uri = uri.replace("async+mongodb", "mongodb", 1)
super(MongoDBStorage, self).__init__(uri, **options)
self.dependency = self.dependencies["motor.motor_asyncio"]
self.proxy_dependency = self.dependencies["pymongo"]
self.storage = self.dependency.AsyncIOMotorClient(uri, **mongo_opts)
# TODO: Fix this hack. It was noticed when running a benchmark
# with FastAPI - however - doesn't appear in unit tests or in an isolated
# use. Reference: https://jira.mongodb.org/browse/MOTOR-822
self.storage.get_io_loop = asyncio.get_running_loop
self.__database_name = database_name
self.__indices_created = False
@property
def database(self):
return self.storage.get_database(self.__database_name)
async def create_indices(self):
if not self.__indices_created:
await asyncio.gather(
self.database.counters.create_index("expireAt", expireAfterSeconds=0),
self.database.windows.create_index("expireAt", expireAfterSeconds=0),
)
self.__indices_created = True
async def reset(self) -> int:
"""
Delete all rate limit keys in the rate limit collections (counters, windows)
"""
num_keys = sum(
await asyncio.gather(
self.database.counters.count_documents({}),
self.database.windows.count_documents({}),
)
)
await asyncio.gather(
self.database.counters.drop(), self.database.windows.drop()
)
return num_keys
async def clear(self, key: str):
"""
:param key: the key to clear rate limits for
"""
await asyncio.gather(
self.database.counters.find_one_and_delete({"_id": key}),
self.database.windows.find_one_and_delete({"_id": key}),
)
async def get_expiry(self, key: str) -> int:
"""
:param key: the key to get the expiry for
"""
counter = await self.database.counters.find_one({"_id": key})
expiry = counter["expireAt"] if counter else datetime.datetime.utcnow()
return calendar.timegm(expiry.timetuple())
async def get(self, key: str):
"""
:param key: the key to get the counter value for
"""
counter = await self.database.counters.find_one(
{"_id": key, "expireAt": {"$gte": datetime.datetime.utcnow()}},
projection=["count"],
)
        return counter["count"] if counter else 0
@ensure_indices
async def incr(
self, key: str, expiry: int, elastic_expiry=False, amount: int = 1
) -> int:
"""
increments the counter for a given rate limit key
:param key: the key to increment
:param expiry: amount in seconds for the key to expire in
:param elastic_expiry: whether to keep extending the rate limit
window every hit.
:param amount: the number to increment by
"""
expiration = datetime.datetime.utcnow() + datetime.timedelta(seconds=expiry)
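        # The update below is an aggregation-pipeline update: if the stored
        # document has already expired ($expireAt < $$NOW) the count is reset
        # to `amount` and a fresh expiry is set; otherwise the count is
        # incremented and the expiry is only pushed forward when
        # elastic_expiry is requested. upsert=True creates the document on
        # first use.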
response = await self.database.counters.find_one_and_update(
{"_id": key},
[
{
"$set": {
"count": {
"$cond": {
"if": {"$lt": ["$expireAt", "$$NOW"]},
"then": amount,
"else": {"$add": ["$count", amount]},
}
},
"expireAt": {
"$cond": {
"if": {"$lt": ["$expireAt", "$$NOW"]},
"then": expiration,
"else": (expiration if elastic_expiry else "$expireAt"),
}
},
}
},
],
upsert=True,
projection=["count"],
return_document=self.proxy_dependency.ReturnDocument.AFTER,
)
return response["count"]
async def check(self) -> bool:
"""
Check if storage is healthy by calling
:meth:`motor.motor_asyncio.AsyncIOMotorClient.server_info`
"""
try:
await self.storage.server_info()
return True
except: # noqa: E722
return False
async def get_moving_window(self, key, limit, expiry):
"""
returns the starting point and the number of entries in the moving
window
:param str key: rate limit key
:param int expiry: expiry of entry
:return: (start of window, number of acquired entries)
"""
timestamp = time.time()
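        # Aggregation pipeline: select this key's window document, keep only
        # the entries that still fall inside the window (newer than
        # now - expiry), then unwind and group them to obtain the newest
        # entry timestamp ("max") and the number of acquired entries ("count").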
result = await self.database.windows.aggregate(
[
{"$match": {"_id": key}},
{
"$project": {
"entries": {
"$filter": {
"input": "$entries",
"as": "entry",
"cond": {"$gte": ["$$entry", timestamp - expiry]},
}
}
}
},
{"$unwind": "$entries"},
{
"$group": {
"_id": "$_id",
"max": {"$max": "$entries"},
"count": {"$sum": 1},
}
},
]
).to_list(length=1)
if result:
return (int(result[0]["max"]), result[0]["count"])
return (int(timestamp), 0)
@ensure_indices
async def acquire_entry(
self, key: str, limit: int, expiry: int, amount: int = 1
) -> bool:
"""
:param key: rate limit key to acquire an entry in
:param limit: amount of entries allowed
:param expiry: expiry of the entry
:param amount: the number of entries to acquire
"""
timestamp = time.time()
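        # Entries are stored newest-first. The filter only matches when the
        # entry at position (limit - amount) is missing or already outside the
        # window, i.e. when there is room for `amount` new entries; $push with
        # $position 0 and $slice then prepends the new timestamps and trims
        # the list back to `limit` items. A DuplicateKeyError from the upsert
        # means the window is full and the entry is not acquired.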
try:
updates: Dict[str, Any] = {
"$push": {"entries": {"$each": [], "$position": 0, "$slice": limit}}
}
updates["$set"] = {
"expireAt": (
datetime.datetime.utcnow() + datetime.timedelta(seconds=expiry)
)
}
updates["$push"]["entries"]["$each"] = [timestamp] * amount
await self.database.windows.update_one(
{
"_id": key,
"entries.%d"
% (limit - amount): {"$not": {"$gte": timestamp - expiry}},
},
updates,
upsert=True,
)
return True
except self.proxy_dependency.errors.DuplicateKeyError:
return False
|
|
import os
import unittest
from conans.client.importer import IMPORTS_MANIFESTS
from conans.model.manifest import FileTreeManifest
from conans.test.utils.test_files import temp_folder
from conans.test.utils.tools import TestClient
from conans.util.files import load, mkdir
conanfile = """
from conans import ConanFile
from conans.util.files import save
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
build_policy = "missing"
def build(self):
save("file1.txt", "Hello")
save("file2.txt", "World")
def package(self):
self.copy("file1.txt")
self.copy("file2.txt")
"""
test1 = """[requires]
Hello/0.1@lasote/stable
[imports]
., file* -> .
"""
test2 = """
from conans import ConanFile
from conans.util.files import save
class HelloReuseConan(ConanFile):
requires = "Hello/0.1@lasote/stable"
def imports(self):
self.copy("*1.txt")
"""
test3 = """
from conans import ConanFile
from conans.util.files import save
class HelloReuseConan(ConanFile):
requires = "Hello/0.1@lasote/stable"
def imports(self):
self.copy("*2.txt")
"""
class ImportsTest(unittest.TestCase):
def setUp(self):
self.client = TestClient()
self.client.save({"conanfile.py": conanfile})
self.client.run("export . lasote/stable")
def imports_global_path_removed_test(self):
""" Ensure that when importing files in a global path, outside the package build,
they are removed too
"""
dst_global_folder = temp_folder().replace("\\", "/")
conanfile2 = '''
from conans import ConanFile
class ConanLib(ConanFile):
name = "Say"
version = "0.1"
requires = "Hello/0.1@lasote/stable"
def imports(self):
self.copy("file*.txt", dst="%s")
''' % dst_global_folder
self.client.save({"conanfile.py": conanfile2}, clean_first=True)
self.client.run("export . lasote/stable")
self.client.current_folder = temp_folder()
self.client.run("install Say/0.1@lasote/stable --build=missing")
for filename in ["file1.txt", "file2.txt"]:
self.assertFalse(os.path.exists(os.path.join(dst_global_folder, filename)))
def imports_env_var_test(self):
conanfile2 = '''
from conans import ConanFile
import os
class ConanLib(ConanFile):
requires = "Hello/0.1@lasote/stable"
def imports(self):
self.copy("file*.txt", dst=os.environ["MY_IMPORT_PATH"])
'''
for folder in ("folder1", "folder2"):
self.client.save({"conanfile.py": conanfile2}, clean_first=True)
self.client.run("install conanfile.py -e MY_IMPORT_PATH=%s" % folder)
self.assertEqual("Hello",
load(os.path.join(self.client.current_folder, folder, "file1.txt")))
def imports_error_test(self):
self.client.save({"conanfile.txt": test1}, clean_first=True)
self.client.run("install . --no-imports")
self.assertNotIn("file1.txt", os.listdir(self.client.current_folder))
self.assertNotIn("file2.txt", os.listdir(self.client.current_folder))
self.client.run("imports .") # Automatic conanbuildinfo.txt
self.assertNotIn("conanbuildinfo.txt file not found", self.client.user_io.out)
def install_manifest_test(self):
self.client.save({"conanfile.txt": test1}, clean_first=True)
self.client.run("install ./conanfile.txt")
self.assertIn("imports(): Copied 2 '.txt' files", self.client.user_io.out)
self.assertIn("file1.txt", os.listdir(self.client.current_folder))
self.assertIn("file2.txt", os.listdir(self.client.current_folder))
self._check_manifest()
def install_manifest_without_install_test(self):
self.client.save({"conanfile.txt": test1}, clean_first=True)
self.client.run('imports . ', assert_error=True)
self.assertIn("You can generate it using 'conan install'", self.client.user_io.out)
def install_dest_test(self):
self.client.save({"conanfile.txt": test1}, clean_first=True)
self.client.run("install ./ --no-imports")
self.assertNotIn("file1.txt", os.listdir(self.client.current_folder))
self.assertNotIn("file2.txt", os.listdir(self.client.current_folder))
self.client.run("imports . -imf myfolder")
files = os.listdir(os.path.join(self.client.current_folder, "myfolder"))
self.assertIn("file1.txt", files)
self.assertIn("file2.txt", files)
def imports_build_folder_test(self):
self.client.save({"conanfile.txt": test1}, clean_first=True)
tmp = self.client.current_folder
self.client.current_folder = os.path.join(self.client.current_folder, "build")
mkdir(self.client.current_folder)
self.client.run("install .. --no-imports")
self.client.current_folder = tmp
self.client.run("imports . --install-folder=build --import-folder=.")
files = os.listdir(self.client.current_folder)
self.assertIn("file1.txt", files)
self.assertIn("file2.txt", files)
def install_abs_dest_test(self):
self.client.save({"conanfile.txt": test1}, clean_first=True)
self.client.run("install . --no-imports")
self.assertNotIn("file1.txt", os.listdir(self.client.current_folder))
self.assertNotIn("file2.txt", os.listdir(self.client.current_folder))
tmp_folder = temp_folder()
self.client.run('imports . -imf "%s"' % tmp_folder)
files = os.listdir(tmp_folder)
self.assertIn("file1.txt", files)
self.assertIn("file2.txt", files)
def undo_install_manifest_test(self):
self.client.save({"conanfile.txt": test1}, clean_first=True)
self.client.run("install conanfile.txt")
self.client.run("imports . --undo")
self.assertNotIn("file1.txt", os.listdir(self.client.current_folder))
self.assertNotIn("file2.txt", os.listdir(self.client.current_folder))
self.assertNotIn(IMPORTS_MANIFESTS, os.listdir(self.client.current_folder))
self.assertIn("Removed 2 imported files", self.client.user_io.out)
self.assertIn("Removed imports manifest file", self.client.user_io.out)
def _check_manifest(self):
manifest_content = load(os.path.join(self.client.current_folder, IMPORTS_MANIFESTS))
manifest = FileTreeManifest.loads(manifest_content)
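        # The expected file_sums below are the MD5 digests of the imported
        # file contents ("Hello" and "World" saved by the Hello recipe).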
self.assertEqual(manifest.file_sums,
{os.path.join(self.client.current_folder, "file1.txt"):
"8b1a9953c4611296a827abf8c47804d7",
os.path.join(self.client.current_folder, "file2.txt"):
"f5a7924e621e84c9280a9a27e1bcb7f6"})
def imports_test(self):
self.client.save({"conanfile.txt": test1}, clean_first=True)
self.client.run("install . --no-imports -g txt")
self.assertNotIn("file1.txt", os.listdir(self.client.current_folder))
self.assertNotIn("file2.txt", os.listdir(self.client.current_folder))
self.client.run("imports .")
self.assertIn("imports(): Copied 2 '.txt' files", self.client.user_io.out)
self.assertIn("file1.txt", os.listdir(self.client.current_folder))
self.assertIn("file2.txt", os.listdir(self.client.current_folder))
self._check_manifest()
def imports_filename_test(self):
self.client.save({"conanfile.txt": test1,
"conanfile.py": test2,
"conanfile2.py": test3}, clean_first=True)
self.client.run("install . --no-imports")
self.assertNotIn("file1.txt", os.listdir(self.client.current_folder))
self.assertNotIn("file2.txt", os.listdir(self.client.current_folder))
self.client.run("imports conanfile2.py")
self.assertNotIn("file1.txt", os.listdir(self.client.current_folder))
self.assertIn("file2.txt", os.listdir(self.client.current_folder))
os.unlink(os.path.join(self.client.current_folder, "file2.txt"))
self.client.run("imports .")
self.assertIn("file1.txt", os.listdir(self.client.current_folder))
self.assertNotIn("file2.txt", os.listdir(self.client.current_folder))
os.unlink(os.path.join(self.client.current_folder, "file1.txt"))
self.client.run("imports ./conanfile.txt")
self.assertIn("file1.txt", os.listdir(self.client.current_folder))
self.assertIn("file2.txt", os.listdir(self.client.current_folder))
|
|
import numpy as np
import models
import pytest
import matplotlib.pyplot as plt
import matplotlib as mpl
from abc import ABCMeta, abstractmethod
import copy
#====================================================================
# Define "slow" tests
# - indicated by @slow decorator
# - slow tests are run only if using --runslow cmd line arg
#====================================================================
slow = pytest.mark.skipif(
not pytest.config.getoption("--runslow"),
reason="need --runslow option to run"
)
#====================================================================
# SEC:1 Abstract Test Classes
#====================================================================
class BaseTestCompressPathMod(object):
@abstractmethod
def load_compress_path_mod(self, eos_d):
assert False, 'must implement load_compress_path_mod()'
def init_params(self,eos_d):
# Set model parameter values
E0 = 0.0 # eV/atom
V0 = 38.0 # 1e-5 m^3 / kg
K0 = 25.0 # GPa
KP0 = 9.0 # 1
param_key_a = ['V0','K0','KP0','E0']
param_val_a = np.array([ V0, K0, KP0, E0 ])
models.Control.set_consts( [], [], eos_d )
self.load_compress_path_mod( eos_d )
models.Control.set_params( param_key_a, param_val_a, eos_d )
return eos_d
def test_press(self):
TOL = 1e-4
Nsamp = 10001
eos_d = self.init_params({})
param_d = eos_d['param_d']
Vmod_a = np.linspace(.7,1.2,Nsamp)*param_d['V0']
dV = Vmod_a[1] - Vmod_a[0]
# print eos_d['modtype_d']
compress_path_mod = eos_d['modtype_d']['CompressPathMod']
press_a = compress_path_mod.press(Vmod_a,eos_d)
energy_a = compress_path_mod.energy(Vmod_a,eos_d)
press_num_a = -eos_d['const_d']['PV_ratio']*np.gradient(energy_a,dV)
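        # The analytic pressure must agree with the numerical derivative
        # P = -dE/dV (converted to pressure units via PV_ratio); the tolerance
        # scales with the sampled pressure range and the number of samples.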
Prange = np.max(press_a)-np.min(press_a)
press_diff_a = press_num_a-press_a
#Exclude 1st and last points to avoid numerical derivative errors
Perr = np.max(np.abs(press_diff_a/Prange))
PTOL = 3*Prange/Nsamp
# print self
# print PTOL*Prange
# def plot_press_mismatch(Vmod_a,press_a,press_num_a):
# plt.figure()
# plt.ion()
# plt.clf()
# plt.plot(Vmod_a,press_num_a,'bx',Vmod_a,press_a,'r-')
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
# plot_press_mismatch(Vmod_a,press_a,press_num_a)
assert np.abs(Perr) < PTOL, '(Press error)/Prange, ' + np.str(Perr) + \
', must be less than PTOL'
def do_test_energy_perturb_eval(self):
TOL = 1e-4
dxfrac = 1e-8
Nsamp = 10001
eos_d = self.init_params({})
param_d = eos_d['param_d']
Vmod_a = np.linspace(.7,1.3,Nsamp)*param_d['V0']
dV = Vmod_a[1] - Vmod_a[0]
compress_path_mod = eos_d['modtype_d']['CompressPathMod']
if compress_path_mod.expand_adj:
scale_a, paramkey_a = \
compress_path_mod.get_param_scale( eos_d,apply_expand_adj=True )
else:
scale_a, paramkey_a = compress_path_mod.get_param_scale( eos_d)
Eperturb_num_a = np.zeros((paramkey_a.size,Nsamp))
for ind,paramkey in enumerate(paramkey_a):
Eperturb_num_a[ind,:] = compress_path_mod.param_deriv\
( 'energy', paramkey, Vmod_a, eos_d, dxfrac=dxfrac)
# dEdV0_a = compress_path_mod.param_deriv( 'energy', 'V0', Vmod_a, eos_d, dxfrac=dxfrac)
# dEdK0_a = compress_path_mod.param_deriv( 'energy', 'K0', Vmod_a, eos_d, dxfrac=dxfrac)
# dEdKP0_a = compress_path_mod.param_deriv( 'energy', 'KP0', Vmod_a, eos_d, dxfrac=dxfrac)
# dEdKP20_a = compress_path_mod.param_deriv( 'energy', 'KP20', Vmod_a, eos_d, dxfrac=dxfrac)
# dEdE0_a = compress_path_mod.param_deriv( 'energy', 'E0', Vmod_a, eos_d, dxfrac=dxfrac)
Eperturb_a, scale_a, paramkey_a = compress_path_mod.energy_perturb(Vmod_a, eos_d)
# print paramkey_a
# Eperturb_num_a = np.vstack((dEdV0_a,dEdK0_a,dEdKP0_a,dEdKP20_a,dEdE0_a))
max_error_a = np.max(np.abs(Eperturb_a-Eperturb_num_a),axis=1)
# try:
# except:
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
# plt.plot(Vmod_a,Eperturb_a.T,'-',Vmod_a, Eperturb_num_a.T,'--')
# plt.ion()
# plt.figure()
# plt.clf()
# plt.plot(Vmod_a[::100], Eperturb_num_a[:,::100].T,'x',
# Vmod_a[::100], Eperturb_a[3,::100].T,'r-')
# plt.plot(Vmod_a[::100], Eperturb_num_a[:,::100].T,'x',
# Vmod_a, Eperturb_a.T,'-')
# plt.plot(Vmod_a[::100], Eperturb_a[3,::100].T,'r-')
# Eperturb_num_a-Eperturb_a
        assert np.all(max_error_a < TOL), 'Error in energy perturbation must be '\
            'less than TOL.'
#====================================================================
class BaseTestThermalPathMod(object):
@abstractmethod
def load_thermal_path_mod(self, eos_d):
assert False, 'must implement load_thermal_path_mod()'
@abstractmethod
def init_params(self,eos_d):
assert False, 'must implement init_params()'
return eos_d
def test_heat_capacity(self):
Nsamp = 10001
eos_d = self.init_params({})
param_d = eos_d['param_d']
Tmod_a = np.linspace(.7,1.3,Nsamp)*param_d['T0']
dT = Tmod_a[1] - Tmod_a[0]
# print eos_d['modtype_d']
thermal_path_mod = eos_d['modtype_d']['ThermalPathMod']
heat_capacity_a = thermal_path_mod.heat_capacity(Tmod_a,eos_d)
energy_a = thermal_path_mod.energy(Tmod_a,eos_d)
heat_capacity_num_a = np.gradient(energy_a,dT)
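        # The analytic heat capacity must agree with the numerical temperature
        # derivative Cv = dE/dT; the error is normalized by a characteristic
        # Cv scale (energy range / temperature range).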
E_range = np.max(energy_a)-np.min(energy_a)
T_range = Tmod_a[-1]-Tmod_a[0]
Cv_scl = E_range/T_range
# Cv_range = np.max(heat_capacity_a)-np.min(heat_capacity_a)
Cv_diff_a = heat_capacity_num_a-heat_capacity_a
# Cverr = np.max(np.abs(Cv_diff_a/Cv_range))
Cverr = np.max(np.abs(Cv_diff_a/Cv_scl))
CVTOL = 1.0/Nsamp
# print self
# print PTOL*Prange
# def plot_press_mismatch(Tmod_a,press_a,press_num_a):
# plt.figure()
# plt.ion()
# plt.clf()
# plt.plot(Tmod_a,press_num_a,'bx',Tmod_a,press_a,'r-')
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
# plot_press_mismatch(Tmod_a,press_a,press_num_a)
assert np.abs(Cverr) < CVTOL, '(Cv error)/Cv_scl, ' + np.str(Cverr) + \
', must be less than CVTOL, ' + np.str(CVTOL)
#====================================================================
class BaseTestThermalMod(object):
@abstractmethod
def load_thermal_mod(self, eos_d):
assert False, 'must implement load_thermal_mod()'
@abstractmethod
def init_params(self,eos_d):
assert False, 'must implement init_params()'
return eos_d
def test_heat_capacity_isochore(self):
Nsamp = 10001
eos_d = self.init_params({})
param_d = eos_d['param_d']
Viso = 0.7*param_d['V0']
Tmod_a = np.linspace(.7,1.3,Nsamp)*param_d['T0']
dT = Tmod_a[1] - Tmod_a[0]
# print eos_d['modtype_d']
thermal_mod = eos_d['modtype_d']['ThermalMod']
heat_capacity_a = thermal_mod.heat_capacity(Viso,Tmod_a,eos_d)
energy_a = np.squeeze( thermal_mod.energy(Viso,Tmod_a,eos_d) )
heat_capacity_num_a = np.gradient(energy_a,dT)
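        # Same check as above, but along an isochore: Cv(V, T) must match the
        # numerical dE/dT at fixed volume Viso.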
E_range = np.max(energy_a)-np.min(energy_a)
T_range = Tmod_a[-1]-Tmod_a[0]
Cv_scl = E_range/T_range
# Cv_range = np.max(heat_capacity_a)-np.min(heat_capacity_a)
Cv_diff_a = heat_capacity_num_a-heat_capacity_a
# Cverr = np.max(np.abs(Cv_diff_a/Cv_range))
Cverr = np.max(np.abs(Cv_diff_a/Cv_scl))
CVTOL = 1.0/Nsamp
# print self
# print PTOL*Prange
# def plot_press_mismatch(Tmod_a,press_a,press_num_a):
# plt.figure()
# plt.ion()
# plt.clf()
# plt.plot(Tmod_a,press_num_a,'bx',Tmod_a,press_a,'r-')
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
# plot_press_mismatch(Tmod_a,press_a,press_num_a)
assert np.abs(Cverr) < CVTOL, '(Cv error)/Cv_scl, ' + np.str(Cverr) + \
', must be less than CVTOL, ' + np.str(CVTOL)
#====================================================================
class BaseTest4thOrdCompressPathMod(BaseTestCompressPathMod):
def init_params(self,eos_d):
        # Use parent's init_params method
eos_d = super(BaseTest4thOrdCompressPathMod,self).init_params(eos_d)
# Add K''0 param
KP20 = -1.1*eos_d['param_d']['KP0']/eos_d['param_d']['K0']
models.Control.set_params( ['KP20'], [KP20], eos_d )
return eos_d
#====================================================================
#====================================================================
# SEC:2 Implemented Test Classes
#====================================================================
# 2.1: CompressPathMod Tests
#====================================================================
class TestVinetCompressPathMod(BaseTestCompressPathMod):
def load_compress_path_mod(self, eos_d):
compress_path_mod = models.Vinet(path_const='S')
models.Control.set_modtypes( ['CompressPathMod'], [compress_path_mod], eos_d )
pass
def test_energy_perturb_eval(self):
self.do_test_energy_perturb_eval()
pass
#====================================================================
class TestBM3CompressPathMod(BaseTestCompressPathMod):
def load_compress_path_mod(self, eos_d):
compress_path_mod = models.BirchMurn3(path_const='S')
models.Control.set_modtypes( ['CompressPathMod'], [compress_path_mod], eos_d )
pass
#====================================================================
class TestBM4CompressPathMod(BaseTest4thOrdCompressPathMod):
def load_compress_path_mod(self, eos_d):
compress_path_mod = models.BirchMurn4(path_const='S')
models.Control.set_modtypes( ['CompressPathMod'], [compress_path_mod], eos_d )
pass
#====================================================================
class TestGenFiniteStrainCompressPathMod(BaseTest4thOrdCompressPathMod):
def init_params(self,eos_d):
        # Use parent's init_params method
eos_d = super(TestGenFiniteStrainCompressPathMod,self).init_params(eos_d)
# Add nexp param
nexp = +2.0
models.Control.set_params( ['nexp'], [nexp], eos_d )
return eos_d
def load_compress_path_mod(self, eos_d):
compress_path_mod = models.GenFiniteStrain(path_const='S')
models.Control.set_modtypes( ['CompressPathMod'], [compress_path_mod], eos_d )
pass
#====================================================================
class TestTaitCompressPathMod(BaseTest4thOrdCompressPathMod):
def load_compress_path_mod(self, eos_d):
compress_path_mod = models.Tait(path_const='S')
models.Control.set_modtypes( ['CompressPathMod'], [compress_path_mod], eos_d )
pass
def test_energy_perturb_eval(self):
self.do_test_energy_perturb_eval()
pass
#====================================================================
class TestCompareCompressPathMods(object):
def init_params(self,eos_d):
# Set model parameter values
E0 = 0.0 # eV/atom
V0 = 38.0 # 1e-5 m^3 / kg
K0 = 25.0 # GPa
KP0 = 9.0 # 1
param_key_a = ['V0','K0','KP0','E0']
param_val_a = np.array([ V0, K0, KP0, E0 ])
models.Control.set_consts( [], [], eos_d )
models.Control.set_params( param_key_a, param_val_a, eos_d )
return eos_d
def get_eos_mods(self):
eos_vinet_d = self.init_params({})
eos_tait_d = self.init_params({})
models.Control.set_modtypes( ['CompressPathMod'], [models.Vinet(path_const='S')],
eos_vinet_d )
models.Control.set_modtypes( ['CompressPathMod'], [models.Tait(path_const='S')],
eos_tait_d )
return eos_vinet_d, eos_tait_d
def calc_energy_perturb( self, eos_d ):
dxfrac = 1e-6
Nsamp = 10001
param_d = eos_d['param_d']
Vmod_a = np.linspace(.7,1.1,Nsamp)*param_d['V0']
dV = Vmod_a[1] - Vmod_a[0]
compress_path_mod = eos_d['modtype_d']['CompressPathMod']
scale_a, paramkey_a = compress_path_mod.get_param_scale( eos_d )
Eperturb_num_a = np.zeros((paramkey_a.size,Nsamp))
for ind,paramkey in enumerate(paramkey_a):
Eperturb_num_a[ind,:] = compress_path_mod.param_deriv\
( 'energy', paramkey, Vmod_a, eos_d, dxfrac=dxfrac)
Eperturb_a, scale_a, paramkey_a = compress_path_mod.energy_perturb(Vmod_a, eos_d)
Eperturb_num_a = np.zeros((paramkey_a.size,Nsamp))
for ind,paramkey in enumerate(paramkey_a):
Eperturb_num_a[ind,:] = compress_path_mod.param_deriv\
( 'energy', paramkey, Vmod_a, eos_d, dxfrac=dxfrac)
return Eperturb_a, Eperturb_num_a, Vmod_a, scale_a, paramkey_a
def calc_energy( self, eos_d ):
dxfrac = 1e-6
Nsamp = 10001
param_d = eos_d['param_d']
Vmod_a = np.linspace(.7,1.1,Nsamp)*param_d['V0']
dV = Vmod_a[1] - Vmod_a[0]
compress_path_mod = eos_d['modtype_d']['CompressPathMod']
scale_a, paramkey_a = compress_path_mod.get_param_scale( eos_d )
energy_a = compress_path_mod.energy( Vmod_a, eos_d )
return energy_a, Vmod_a
def test_compare(self):
TOL = 1e-4
eos_vinet_d, eos_tait_d = self.get_eos_mods()
KP20 = -1.1*eos_tait_d['param_d']['KP0']/eos_tait_d['param_d']['K0']
models.Control.set_params( ['KP20'], [KP20], eos_tait_d )
energy_vin_a, Vmod_vin_a = self.calc_energy( eos_vinet_d )
energy_tait_a, Vmod_tait_a = self.calc_energy( eos_tait_d )
# plt.ion()
# plt.figure()
# plt.clf()
# plt.plot(Vmod_vin_a, energy_vin_a,'k-',
# Vmod_tait_a, energy_tait_a, 'r-')
Eperturb_vin_a, Eperturb_num_vin_a, Vmod_vin_a, scale_vin_a, \
paramkey_vin_a = self.calc_energy_perturb( eos_vinet_d )
Eperturb_tait_a, Eperturb_num_tait_a, Vmod_tait_a, scale_tait_a, \
paramkey_tait_a = self.calc_energy_perturb( eos_tait_d )
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
# plt.ion()
# plt.figure()
# plt.clf()
# plt.plot(Vmod_vin_a[::100], Eperturb_vin_a[:,::100].T,'x',
# Vmod_tait_a, Eperturb_tait_a.T,'-')
dV = Vmod_vin_a[1] - Vmod_vin_a[0]
V0 = eos_tait_d['param_d']['V0']
indV0 = np.where(Vmod_vin_a==V0)[0][0]
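        # The Tait perturbation array has an extra KP20 row (index 3); compare
        # only the shared V0, K0, KP0, E0 perturbations at V0.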
Eperturb_diff = Eperturb_vin_a[:,indV0] - Eperturb_tait_a[[0,1,2,4],indV0]
assert np.all(np.abs(Eperturb_diff)<TOL), \
'Energy perturbations for Vinet and Tait EOS at V0 must agree to within TOL'
# Calc numerical volume derivs
# Some of these curves take very small values, making numerical
# comparison difficult, but comparison by eye checks out
dE1_perturb_vin_a = np.gradient(Eperturb_vin_a,dV)[1]
dE2_perturb_vin_a = np.gradient(dE1_perturb_vin_a,dV)[1]
dE3_perturb_vin_a = np.gradient(dE2_perturb_vin_a,dV)[1]
dE1_perturb_tait_a = np.gradient(Eperturb_tait_a,dV)[1]
dE2_perturb_tait_a = np.gradient(dE1_perturb_tait_a,dV)[1]
dE3_perturb_tait_a = np.gradient(dE2_perturb_tait_a,dV)[1]
# plt.clf()
# plt.plot(Vmod_vin_a[::100], dE1_perturb_vin_a[:,::100].T,'x',
# Vmod_tait_a, dE1_perturb_tait_a.T,'-')
# plt.clf()
# plt.plot(Vmod_vin_a[::100], dE2_perturb_vin_a[:,::100].T,'x',
# Vmod_tait_a, dE2_perturb_tait_a.T,'-')
# Eperturb_vin_a[:,indV0]-Eperturb_tait_a[[0,1,2,4],indV0]
# Eperturb_vin_a[:,indV0]
# dE1_perturb_vin_a[:,indV0]-dE1_perturb_tait_a[[0,1,2,4],indV0]
# dE1_perturb_vin_a[:,indV0]
# plt.clf()
# plt.plot(Vmod_vin_a[::100], dE3_perturb_vin_a[:,::100].T,'x',
# Vmod_tait_a, dE3_perturb_tait_a.T,'-')
pass
#====================================================================
class TestExpandCompressPathMod(BaseTest4thOrdCompressPathMod):
def load_compress_path_mod(self, eos_d):
compress_path_mod = models.Vinet(path_const='S',expand_adj_mod=models.Tait())
models.Control.set_modtypes(['CompressPathMod'],[compress_path_mod], eos_d )
pass
def test_press_components(self):
TOL = 1e-4
dxfrac = 1e-8
Nsamp = 10001
eos_d = self.init_params({})
param_d = eos_d['param_d']
Vmod_a = np.linspace(.7,1.3,Nsamp)*param_d['V0']
dV = Vmod_a[1] - Vmod_a[0]
compress_path_mod = eos_d['modtype_d']['CompressPathMod']
press_a = compress_path_mod.press( Vmod_a, eos_d )
press_pos_a = compress_path_mod.press( Vmod_a, eos_d, apply_expand_adj=False)
press_neg_a = compress_path_mod.expand_adj_mod.press( Vmod_a, eos_d )
# press_pos_a = expand_pos_mod.press( Vmod_a, eos_d )
# press_neg_a = expand_neg_mod.press( Vmod_a, eos_d )
ind_neg = Vmod_a>param_d['V0']
ind_pos = Vmod_a<param_d['V0']
assert np.all(press_a[ind_neg]==press_neg_a[ind_neg]),\
'The expansion corrected press must match ExpandNegMod for negative pressure values'
assert np.all(press_a[ind_pos]==press_pos_a[ind_pos]),\
'The expansion corrected press must match ExpandPosMod for positive pressure values'
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
# plt.ion()
# plt.figure()
# plt.clf()
# plt.plot(Vmod_a, press_pos_a, 'r--', Vmod_a, press_neg_a, 'b--',
# Vmod_a, press_a, 'k-')
pass
def test_energy_components(self):
TOL = 1e-4
dxfrac = 1e-8
Nsamp = 10001
eos_d = self.init_params({})
param_d = eos_d['param_d']
Vmod_a = np.linspace(.7,1.3,Nsamp)*param_d['V0']
dV = Vmod_a[1] - Vmod_a[0]
compress_path_mod = eos_d['modtype_d']['CompressPathMod']
energy_a = compress_path_mod.energy( Vmod_a, eos_d )
energy_pos_a = compress_path_mod.energy( Vmod_a, eos_d, apply_expand_adj=False )
energy_neg_a = compress_path_mod.expand_adj_mod.energy( Vmod_a, eos_d )
ind_neg = Vmod_a>param_d['V0']
ind_pos = Vmod_a<param_d['V0']
assert np.all(energy_a[ind_neg]==energy_neg_a[ind_neg]),\
'The expansion corrected energy must match ExpandNegMod for negative pressure values'
assert np.all(energy_a[ind_pos]==energy_pos_a[ind_pos]),\
'The expansion corrected energy must match ExpandPosMod for positive pressure values'
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
# plt.ion()
# plt.figure()
# plt.clf()
# plt.plot(Vmod_a, energy_pos_a, 'r--', Vmod_a, energy_neg_a, 'b--',
# Vmod_a, energy_a, 'k-')
pass
def test_energy_perturb_eval(self):
self.do_test_energy_perturb_eval()
pass
#====================================================================
# 2.2: ThermalPathMod Tests
#====================================================================
class TestGenRosenfeldTaranzona(BaseTestThermalPathMod):
def load_thermal_path_mod(self, eos_d):
thermal_path_mod = models.GenRosenfeldTaranzona(path_const='V')
models.Control.set_modtypes( ['ThermalPathMod'], [thermal_path_mod], eos_d )
pass
def init_params(self,eos_d):
# Set model parameter values
acoef = -158.2
bcoef = .042
mexp = 3.0/5
lognfac = 0.0
T0 = 5000.0
param_key_a = ['acoef','bcoef','mexp','lognfac','T0']
param_val_a = np.array([acoef,bcoef,mexp,lognfac,T0])
models.Control.set_consts( [], [], eos_d )
self.load_thermal_path_mod( eos_d )
models.Control.set_params( param_key_a, param_val_a, eos_d )
return eos_d
#====================================================================
class TestRosenfeldTaranzonaPoly(BaseTestThermalMod):
def load_thermal_mod(self, eos_d):
thermal_mod = models.RosenfeldTaranzonaPoly()
models.Control.set_modtypes( ['ThermalMod'], [thermal_mod], eos_d )
pass
def load_compress_path_mod(self, eos_d):
T0, = models.Control.get_params(['T0'],eos_d)
compress_path_mod = models.Vinet(path_const='T',level_const=T0,
supress_energy=True,
supress_press=True)
        # NOTE that supress_press is included to implement all terms according
        # to Spera2011
        # (but the current implementation actually uses the compress path
        # pressure unscaled)
models.Control.set_modtypes( ['CompressPathMod'], [compress_path_mod], eos_d )
pass
def load_eos_mod(self, eos_d):
self.load_thermal_mod(eos_d)
self.load_compress_path_mod(eos_d)
full_mod = models.ThermalPressMod()
models.Control.set_modtypes( ['FullMod'], [full_mod], eos_d )
pass
def init_params(self,eos_d):
models.Control.set_consts( [], [], eos_d )
# Set model parameter values
mexp = 3.0/5
T0 = 4000.0
V0_ccperg = 0.408031 # cc/g
K0 = 13.6262
KP0= 7.66573
E0 = 0.0
# nfac = 5.0
# mass = (24.31+28.09+3*16.0) # g/(mol atom)
# V0 = V0_ccperg
# NOTE that units are all per atom
# requires conversion from values reported in Spera2011
lognfac = 0.0
mass = (24.31+28.09+3*16.0)/5.0 # g/(mol atom)
Vconv_fac = mass*eos_d['const_d']['ang3percc']/eos_d['const_d']['Nmol']
V0 = V0_ccperg*Vconv_fac
param_key_a = ['mexp','lognfac','T0','V0','K0','KP0','E0','mass']
param_val_a = np.array([mexp,lognfac,T0,V0,K0,KP0,E0,mass])
models.Control.set_params( param_key_a, param_val_a, eos_d )
# Set parameter values from Spera et al. (2011)
# for MgSiO3 melt using (Oganov potential)
# Must convert energy units from kJ/g to eV/atom
energy_conv_fac = mass/eos_d['const_d']['kJ_molpereV']
models.Control.set_consts( ['energy_conv_fac'], [energy_conv_fac],
eos_d )
# change coefficients to relative
# acoef_a = energy_conv_fac*\
# np.array([127.116,-3503.98,20724.4,-60212.0,86060.5,-48520.4])
# bcoef_a = energy_conv_fac*\
# np.array([-0.371466,7.09542,-45.7362,139.020,-201.487,112.513])
Vconv_a = (1.0/Vconv_fac)**np.arange(6)
unit_conv = energy_conv_fac*Vconv_a
# Reported vol-dependent polynomial coefficients for a and b
# in Spera2011
acoef_unscl_a = np.array([127.116,-3503.98,20724.4,-60212.0,\
86060.5,-48520.4])
bcoef_unscl_a = np.array([-0.371466,7.09542,-45.7362,139.020,\
-201.487,112.513])
# Convert units and transfer to normalized version of RT model
acoef_a = unit_conv*(acoef_unscl_a+bcoef_unscl_a*T0**mexp)
bcoef_a = unit_conv*bcoef_unscl_a*T0**mexp
models.Control.set_array_params( 'acoef', acoef_a, eos_d )
models.Control.set_array_params( 'bcoef', bcoef_a, eos_d )
self.load_eos_mod( eos_d )
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
return eos_d
def test_RT_potenergy_curves_Spera2011(self):
Nsamp = 101
eos_d = self.init_params({})
param_d = eos_d['param_d']
Vgrid_a = np.linspace(0.5,1.1,Nsamp)*param_d['V0']
Tgrid_a = np.linspace(100.0**(5./3),180.0**(5./3),11)
full_mod = eos_d['modtype_d']['FullMod']
thermal_mod = eos_d['modtype_d']['ThermalMod']
energy_conv_fac, = models.Control.get_consts(['energy_conv_fac'],eos_d)
potenergy_mod_a = []
for iV in Vgrid_a:
ipotenergy_a = thermal_mod.calc_energy_pot(iV,Tgrid_a,eos_d)
potenergy_mod_a.append(ipotenergy_a)
# energy_mod_a = np.array( energy_mod_a )
potenergy_mod_a = np.array( potenergy_mod_a )
plt.ion()
plt.figure()
plt.plot(Tgrid_a**(3./5), potenergy_mod_a.T/energy_conv_fac,'-')
plt.xlim(100,180)
plt.ylim(-102,-95)
print 'Compare this plot with Spera2011 Fig 1b (Oganov potential):'
print 'Do the figures agree (y/n or k for keyboard)?'
s = raw_input('--> ')
if s=='k':
from IPython import embed; embed(); import ipdb; ipdb.set_trace()
assert s=='y', 'Figure must match published figure'
pass
def test_energy_curves_Spera2011(self):
Nsamp = 101
eos_d = self.init_params({})
param_d = eos_d['param_d']
Vgrid_a = np.linspace(0.4,1.1,Nsamp)*param_d['V0']
Tgrid_a = np.array([2500,3000,3500,4000,4500,5000])
full_mod = eos_d['modtype_d']['FullMod']
energy_conv_fac, = models.Control.get_consts(['energy_conv_fac'],eos_d)
energy_mod_a = []
press_mod_a = []
for iT in Tgrid_a:
ienergy_a = full_mod.energy(Vgrid_a,iT,eos_d)
ipress_a = full_mod.press(Vgrid_a,iT,eos_d)
energy_mod_a.append(ienergy_a)
press_mod_a.append(ipress_a)
# energy_mod_a = np.array( energy_mod_a )
energy_mod_a = np.array( energy_mod_a )
press_mod_a = np.array( press_mod_a )
plt.ion()
plt.figure()
plt.plot(press_mod_a.T, energy_mod_a.T/energy_conv_fac,'-')
plt.legend(Tgrid_a,loc='lower right')
plt.xlim(-5,165)
plt.ylim(-100.5,-92)
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
print 'Compare this plot with Spera2011 Fig 2b (Oganov potential):'
print 'Do the figures agree (y/n or k for keyboard)?'
s = raw_input('--> ')
if s=='k':
from IPython import embed; embed(); import ipdb; ipdb.set_trace()
assert s=='y', 'Figure must match published figure'
pass
def test_heat_capacity_curves_Spera2011(self):
Nsamp = 101
eos_d = self.init_params({})
param_d = eos_d['param_d']
Vgrid_a = np.linspace(0.4,1.2,Nsamp)*param_d['V0']
Tgrid_a = np.array([2500,3000,3500,4000,4500,5000])
full_mod = eos_d['modtype_d']['FullMod']
thermal_mod = eos_d['modtype_d']['ThermalMod']
heat_capacity_mod_a = []
energy_conv_fac, = models.Control.get_consts(['energy_conv_fac'],eos_d)
energy_mod_a = []
press_mod_a = []
for iT in Tgrid_a:
iheat_capacity_a = thermal_mod.heat_capacity(Vgrid_a,iT,eos_d)
ienergy_a = full_mod.energy(Vgrid_a,iT,eos_d)
ipress_a = full_mod.press(Vgrid_a,iT,eos_d)
heat_capacity_mod_a.append(iheat_capacity_a)
energy_mod_a.append(ienergy_a)
press_mod_a.append(ipress_a)
# energy_mod_a = np.array( energy_mod_a )
heat_capacity_mod_a = np.array( heat_capacity_mod_a )
energy_mod_a = np.array( energy_mod_a )
press_mod_a = np.array( press_mod_a )
plt.ion()
plt.figure()
plt.plot(press_mod_a.T,1e3*heat_capacity_mod_a.T/energy_conv_fac,'-')
plt.legend(Tgrid_a,loc='lower right')
# plt.ylim(1.2,1.9)
plt.xlim(-5,240)
print 'Compare this plot with Spera2011 Fig 2b (Oganov potential):'
print 'Do the figures agree (y/n or k for keyboard)?'
s = raw_input('--> ')
if s=='k':
from IPython import embed; embed(); import ipdb; ipdb.set_trace()
assert s=='y', 'Figure must match published figure'
pass
#====================================================================
class TestRosenfeldTaranzonaPerturb(BaseTestThermalMod):
def load_thermal_mod(self, eos_d):
thermal_mod = models.RosenfeldTaranzonaPerturb()
models.Control.set_modtypes( ['ThermalMod'], [thermal_mod], eos_d )
pass
def load_gamma_mod(self, eos_d):
gamma_mod = models.GammaPowLaw()
models.Control.set_modtypes( ['GammaMod'], [gamma_mod], eos_d )
pass
def load_compress_path_mod(self, eos_d):
S0, = models.Control.get_params(['S0'],eos_d)
compress_path_mod = models.Vinet(path_const='S',level_const=S0,
supress_energy=False,
supress_press=False)
models.Control.set_modtypes( ['CompressPathMod'], [compress_path_mod], eos_d )
pass
def load_eos_mod(self, eos_d):
self.load_compress_path_mod(eos_d)
self.load_gamma_mod(eos_d)
self.load_thermal_mod(eos_d)
full_mod = models.ThermalPressMod()
models.Control.set_modtypes( ['FullMod'], [full_mod], eos_d )
pass
def init_params(self,eos_d):
models.Control.set_consts( [], [], eos_d )
# EOS Parameter values initially set by Mosenfelder2009
# Set model parameter values
mass_avg = (24.31+28.09+3*16.0)/5.0 # g/(mol atom)
T0 = 1673.0
S0 = 0.0 # must adjust
param_key_a = ['T0','S0','mass_avg']
param_val_a = np.array([T0,S0,mass_avg])
models.Control.set_params( param_key_a, param_val_a, eos_d )
V0 = (38.575*1e-5)*mass_avg/eos_d['const_d']['Nmol']/1e3*1e30 # ang^3/atom
K0 = 20.8
KP0= 10.2
# KP20 = -2.86 # Not actually used!
E0 = 0.0
param_key_a = ['V0','K0','KP0','E0']
param_val_a = np.array([V0,K0,KP0,E0])
models.Control.set_params( param_key_a, param_val_a, eos_d )
VR = V0
gammaR = 0.46
qR = -1.35
param_key_a = ['VR','gammaR','qR']
param_val_a = np.array([VR,gammaR,qR])
models.Control.set_params( param_key_a, param_val_a, eos_d )
dE0th = +1.0
dV0th = -0.02
dK0th = +0.1
dKP0th = -0.00
# dE0th = +0.4
# dV0th = -0.0
# dK0th = -0.01
# dKP0th = -0.03
lognfac = 0.0
mexp = 3.0/5
param_key_a = ['dE0th','dV0th','dK0th','dKP0th','lognfac','mexp']
param_val_a = np.array([dE0th,dV0th,dK0th,dKP0th,lognfac,mexp])
models.Control.set_params( param_key_a, param_val_a, eos_d )
# Must convert energy units from kJ/g to eV/atom
energy_conv_fac = mass_avg/eos_d['const_d']['kJ_molpereV']
models.Control.set_consts( ['energy_conv_fac'], [energy_conv_fac],
eos_d )
self.load_eos_mod( eos_d )
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
return eos_d
def test_energy_curves_Spera2011(self):
Nsamp = 101
eos_d = self.init_params({})
param_d = eos_d['param_d']
Vgrid_a = np.linspace(0.4,1.1,Nsamp)*param_d['V0']
Tgrid_a = np.array([2500,3000,3500,4000,4500,5000])
full_mod = eos_d['modtype_d']['FullMod']
# energy_conv_fac, = models.Control.get_consts(['energy_conv_fac'],eos_d)
energy_mod_a = []
press_mod_a = []
for iT in Tgrid_a:
ienergy_a = full_mod.energy(Vgrid_a,iT,eos_d)
ipress_a = full_mod.press(Vgrid_a,iT,eos_d)
energy_mod_a.append(ienergy_a)
press_mod_a.append(ipress_a)
# energy_mod_a = np.array( energy_mod_a )
energy_mod_a = np.array( energy_mod_a )
press_mod_a = np.array( press_mod_a )
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
cmap=plt.get_cmap('coolwarm')
col_a = cmap(1.0*(Tgrid_a-Tgrid_a[0])/np.ptp(Tgrid_a))[:,:3]
plt.ion()
plt.figure()
[plt.plot(ipress_a, ienergy_a,'-',color=icol_a,label=iT) \
for ipress_a,ienergy_a,icol_a,iT in zip(press_mod_a,energy_mod_a,col_a,Tgrid_a)]
ax = plt.axes()
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1],labels[::-1],loc='upper left')
plt.xlim(-5,165)
ybnd = [np.min(energy_mod_a[press_mod_a<165]), np.max(energy_mod_a[press_mod_a<165])]
plt.ylim(ybnd[0],ybnd[1])
# plt.ylim(-100.5,-92)
print 'Compare this plot with Spera2011 Fig 2b (Oganov potential):'
print 'Do the figures agree (y/n or k for keyboard)?'
s = raw_input('--> ')
if s=='k':
from IPython import embed; embed(); import ipdb; ipdb.set_trace()
assert s=='y', 'Figure must match published figure'
pass
def test_kinetic_contribution(self):
Nsamp = 1001
eos_d = self.init_params({})
eos_d['param_d']['E0'] = -21.3
eos_d['param_d']['dE0th'] = 0.5
V0 = eos_d['param_d']['V0']
Vgrid_a = V0*np.arange(0.4,1.11,0.1)
Tgrid_a = np.linspace( 2500, 5000, Nsamp)
dT = Tgrid_a[1]-Tgrid_a[0]
kboltz = eos_d['const_d']['kboltz']
# Test entropy
TOL = 1e-4
iV = Vgrid_a[0]
genRT_mod = models.GenRosenfeldTaranzona()
thermal_mod = eos_d['modtype_d']['ThermalMod']
full_mod = eos_d['modtype_d']['FullMod']
Cvkin_a = genRT_mod.calc_heat_capacity_kin( Tgrid_a ,eos_d )
Ekin_a = genRT_mod.calc_energy_kin( Tgrid_a ,eos_d )
Cvkin_dE_err_a = ( Cvkin_a - np.gradient( Ekin_a, dT ) )/kboltz
assert np.all( np.abs(Cvkin_dE_err_a[1:-1]) < TOL ), \
'Cvkin must match numerical energy deriv'
Skin_a = genRT_mod.calc_entropy_kin( Tgrid_a ,eos_d, Tref=eos_d['param_d']['T0'] )
Cvkin_dS_err_a = ( Cvkin_a - Tgrid_a*np.gradient( Skin_a, dT ) )/kboltz
assert np.all( np.abs(Cvkin_dS_err_a[1:-1]) < TOL ), \
'Cvkin must match numerical entropy deriv'
Fkin_a = Ekin_a-Tgrid_a*Skin_a
Skin_dF_err_a = ( Skin_a + np.gradient( Fkin_a, dT ) )/kboltz
assert np.all( np.abs(Skin_dF_err_a[1:-1]) < TOL ), \
'Skin must match numerical free energy deriv'
def test_potential_contribution(self):
Nsamp = 1001
eos_d = self.init_params({})
eos_d['param_d']['E0'] = -21.3
eos_d['param_d']['dE0th'] = 0.5
V0 = eos_d['param_d']['V0']
Vgrid_a = V0*np.arange(0.4,1.11,0.1)
Tgrid_a = np.linspace( 2500, 5000, Nsamp)
dT = Tgrid_a[1]-Tgrid_a[0]
kboltz = eos_d['const_d']['kboltz']
# Test entropy
TOL = 1e-4
iV = Vgrid_a[0]
genRT_mod = models.GenRosenfeldTaranzona()
thermal_mod = eos_d['modtype_d']['ThermalMod']
full_mod = eos_d['modtype_d']['FullMod']
# verify potential heat capacity (energy deriv)
acoef_a, bcoef_a = thermal_mod.calc_RT_coef( iV, eos_d )
Cvpot_a = np.squeeze( genRT_mod.calc_heat_capacity_pot( Tgrid_a, eos_d,
bcoef_a=bcoef_a ) )
Epot_a = np.squeeze( genRT_mod.calc_energy_pot( Tgrid_a, eos_d,
acoef_a=acoef_a,
bcoef_a=bcoef_a ) )
Cvpot_dE_a = (Cvpot_a - np.gradient( Epot_a, dT ))/kboltz
assert np.all( np.abs(Cvpot_dE_a[1:-1]) < TOL ), \
'Cvpot must match numerical energy deriv'
Spot_a = np.squeeze( genRT_mod.calc_entropy_pot( Tgrid_a, eos_d,
bcoef_a=bcoef_a ) )
Cvpot_dS_a = ( Cvpot_a - Tgrid_a*np.gradient( Spot_a, dT ) )/kboltz
assert np.all( np.abs(Cvpot_dS_a[1:-1]) < TOL ), \
'Cvpot must match numerical entropy deriv'
Fpot_a = Epot_a-Tgrid_a*Spot_a
Spot_dF_err_a = ( Spot_a + np.gradient( Fpot_a, dT ) )/kboltz
assert np.all( np.abs(Spot_dF_err_a[1:-1]) < TOL ), \
'Spot must match numerical free energy deriv'
def test_total_entropy(self):
Nsamp = 1001
eos_d = self.init_params({})
eos_d['param_d']['E0'] = -21.3
eos_d['param_d']['dE0th'] = 0.5
V0 = eos_d['param_d']['V0']
Vgrid_a = V0*np.arange(0.4,1.11,0.1)
Tgrid_a = np.linspace( 2500, 5000, Nsamp)
dT = Tgrid_a[1]-Tgrid_a[0]
kboltz = eos_d['const_d']['kboltz']
# Test entropy
TOL = 1e-4
iV = Vgrid_a[0]
genRT_mod = models.GenRosenfeldTaranzona()
thermal_mod = eos_d['modtype_d']['ThermalMod']
full_mod = eos_d['modtype_d']['FullMod']
# verify total entropy
iFtot = np.squeeze( full_mod.free_energy( Vgrid_a[0], Tgrid_a, eos_d ) )
iStot = np.squeeze( full_mod.entropy( Vgrid_a[0], Tgrid_a, eos_d ) )
iSnum = -np.gradient( iFtot, dT )
Stot_dF_err_a = ( iStot - iSnum )/kboltz
assert np.all( np.abs(Stot_dF_err_a[1:-1]) < TOL ), \
'Spot must match numerical free energy deriv'
#====================================================================
class TestGammaComparison():
def init_params(self,eos_d):
VR = 1.0
gammaR = 1.0
gammapR = -1.0
qR = gammapR/gammaR
# qR = +1.0
# qR = +0.5
param_key_a = ['VR','gammaR','gammapR','qR']
param_val_a = np.array([VR,gammaR,gammapR,qR])
models.Control.set_params( param_key_a, param_val_a, eos_d )
return eos_d
def load_gamma_mod(self, eos_d):
gamma_mod = models.GammaPowLaw()
models.Control.set_modtypes( ['GammaMod'], [gamma_mod], eos_d )
pass
def test_gamma(self):
eos_d = self.init_params({})
VR = eos_d['param_d']['VR']
TR = 1000.0
eos_pow_d = copy.deepcopy(eos_d)
eos_str_d = copy.deepcopy(eos_d)
models.Control.set_modtypes( ['GammaMod'], [models.GammaPowLaw],
eos_pow_d )
models.Control.set_modtypes( ['GammaMod'], [models.GammaFiniteStrain],
eos_str_d )
gammaR = eos_d['param_d']['gammaR']
qR = eos_d['param_d']['qR']
N = 1001
V_a = VR*np.linspace(0.4,1.3,N)
dV = V_a[1]-V_a[0]
gam_pow_mod = eos_pow_d['modtype_d']['GammaMod']()
gam_str_mod = eos_str_d['modtype_d']['GammaMod']()
gam_pow_a = gam_pow_mod.gamma(V_a,eos_pow_d)
gam_str_a = gam_str_mod.gamma(V_a,eos_str_d)
temp_pow_a = gam_pow_mod.temp(V_a,TR,eos_pow_d)
temp_str_a = gam_str_mod.temp(V_a,TR,eos_str_d)
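        # q = dln(gamma)/dln(V), evaluated numerically as (V/gamma)*dgamma/dV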
q_pow_a = V_a/gam_pow_a*np.gradient(gam_pow_a,dV)
q_str_a = V_a/gam_str_a*np.gradient(gam_str_a,dV)
# mpl.rcParams(fontsize=16)
plt.ion()
plt.figure()
plt.clf()
hleg = plt.plot(V_a,q_pow_a,'k--',V_a,q_str_a,'r-',lw=2)
plt.legend(hleg,['Power-Law','Finite Strain'], loc='upper right',fontsize=16)
plt.xlabel('$V / V_0$',fontsize=16)
plt.ylabel('$q$',fontsize=16)
plt.text(.9,1.1*qR,'$(\gamma_0,q_0) = ('+np.str(gammaR)+','+np.str(qR)+')$',fontsize=20)
plt.savefig('test/figs/gamma-q-comparison.png',dpi=450)
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
# plt.clf()
# hleg = plt.plot(1.0/V_a,gam_str_a,'r-',lw=2)
# eos_str_d['param_d']['gammapR'] = -0.5
# eos_str_d['param_d']['gammapR'] = -2
# eos_str_d['param_d']['gammapR'] = -1.0
# eos_str_d['param_d']['gammaR'] = 0.5
# eos_str_d['param_d']['gammapR'] = -2.0
# eos_str_d['param_d']['gammapR'] = -10.0
# eos_str_d['param_d']['gammaR'] = 0.75
# eos_str_d['param_d']['gammapR'] = -10.0
# eos_str_d['param_d']['gammapR'] = -30.0
gam_str_a = gam_str_mod.gamma(V_a,eos_str_d)
eos_str_d['param_d']['gammapR'] = -0.5
plt.clf()
hleg = plt.plot(V_a,gam_pow_a,'k--',V_a,gam_str_a,'r-',lw=2)
plt.legend(hleg,['Power-Law','Finite Strain'], loc='upper right',fontsize=16)
plt.xlabel('$V / V_0$',fontsize=16)
plt.ylabel('$\gamma$',fontsize=16)
plt.text(.9,1.1*gammaR,'$(\gamma_0,q_0) = ('+np.str(gammaR)+','+np.str(qR)+')$',fontsize=20)
plt.savefig('test/figs/gamma-comparison.png',dpi=450)
plt.clf()
hleg = plt.plot(V_a,temp_pow_a,'k--',V_a,temp_str_a,'r-',lw=2)
plt.legend(hleg,['Power-Law','Finite Strain'], loc='upper right',
fontsize=16)
plt.xlabel('$V / V_0$',fontsize=16)
plt.ylabel('$T\; [K]$',fontsize=16)
plt.text(.9,1.1*TR,'$(\gamma_0,q_0) = ('+np.str(gammaR)+','+np.str(qR)+')$',fontsize=20)
plt.savefig('test/figs/gamma-temp-comparison.png',dpi=450)
#====================================================================
class TestRosenfeldTaranzonaPerturbExpand(TestRosenfeldTaranzonaPerturb):
def load_compress_path_mod(self, eos_d):
S0, = models.Control.get_params(['S0'],eos_d)
expand_adj_mod=models.Tait()
compress_path_mod = models.Vinet(path_const='S',level_const=S0,
supress_energy=False,
supress_press=False,
expand_adj_mod=expand_adj_mod)
models.Control.set_modtypes( ['CompressPathMod'], [compress_path_mod], eos_d )
pass
def init_params(self,eos_d):
models.Control.set_consts( [], [], eos_d )
# EOS Parameter values initially set by Mosenfelder2009
# Set model parameter values
mass_avg = (24.31+28.09+3*16.0)/5.0 # g/(mol atom)
T0 = 1673.0
S0 = 0.0 # must adjust
param_key_a = ['T0','S0','mass_avg']
param_val_a = np.array([T0,S0,mass_avg])
models.Control.set_params( param_key_a, param_val_a, eos_d )
V0 = (38.575*1e-5)*mass_avg/eos_d['const_d']['Nmol']/1e3*1e30 # ang^3/atom
K0 = 20.8
KP0= 10.2
KP20 = -2.86 # Not actually used!
E0 = 0.0
param_key_a = ['V0','K0','KP0','KP20','E0']
param_val_a = np.array([V0,K0,KP0,KP20,E0])
models.Control.set_params( param_key_a, param_val_a, eos_d )
VR = V0
gammaR = 0.46
qR = -1.35
param_key_a = ['VR','gammaR','qR']
param_val_a = np.array([VR,gammaR,qR])
models.Control.set_params( param_key_a, param_val_a, eos_d )
dE0th = +1.0
dV0th = -0.02
dK0th = +0.1
dKP0th = -0.00
dKP20th = +1.0
# dE0th = +0.4
# dV0th = -0.0
# dK0th = -0.01
# dKP0th = -0.03
lognfac = 0.0
mexp = 3.0/5
param_key_a = ['dE0th','dV0th','dK0th','dKP0th','dKP20th','lognfac','mexp']
param_val_a = np.array([dE0th,dV0th,dK0th,dKP0th,dKP20th,lognfac,mexp])
models.Control.set_params( param_key_a, param_val_a, eos_d )
# Must convert energy units from kJ/g to eV/atom
energy_conv_fac = mass_avg/eos_d['const_d']['kJ_molpereV']
models.Control.set_consts( ['energy_conv_fac'], [energy_conv_fac],
eos_d )
self.load_eos_mod( eos_d )
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
return eos_d
def test_energy_curves_Spera2011_exp(self):
Nsamp = 101
eos_d = self.init_params({})
param_d = eos_d['param_d']
Vgrid_a = np.linspace(0.4,1.1,Nsamp)*param_d['V0']
Tgrid_a = np.array([2500,3000,3500,4000,4500,5000])
full_mod = eos_d['modtype_d']['FullMod']
compress_path_mod = eos_d['modtype_d']['CompressPathMod']
thermal_mod = eos_d['modtype_d']['ThermalMod']
# energy_conv_fac, = models.Control.get_consts(['energy_conv_fac'],eos_d)
energy_mod_a = []
press_mod_a = []
for iT in Tgrid_a:
ienergy_a = full_mod.energy(Vgrid_a,iT,eos_d)
ipress_a = full_mod.press(Vgrid_a,iT,eos_d)
energy_mod_a.append(ienergy_a)
press_mod_a.append(ipress_a)
# energy_mod_a = np.array( energy_mod_a )
energy_mod_a = np.array( energy_mod_a )
press_mod_a = np.array( press_mod_a )
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
cmap=plt.get_cmap('coolwarm')
col_a = cmap(1.0*(Tgrid_a-Tgrid_a[0])/np.ptp(Tgrid_a))[:,:3]
plt.ion()
plt.figure()
[plt.plot(ipress_a, ienergy_a,'-',color=icol_a,label=iT) \
for ipress_a,ienergy_a,icol_a,iT in zip(press_mod_a,energy_mod_a,col_a,Tgrid_a)]
ax = plt.axes()
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1],labels[::-1],loc='upper left')
plt.xlim(-5,165)
ybnd = [np.min(energy_mod_a[press_mod_a<165]), np.max(energy_mod_a[press_mod_a<165])]
plt.ylim(ybnd[0],ybnd[1])
# plt.ylim(-100.5,-92)
print 'Compare this plot with Spera2011 Fig 2b (Oganov potential):'
print 'Do the figures agree (y/n or k for keyboard)?'
s = raw_input('--> ')
if s=='k':
from IPython import embed; embed(); import ipdb; ipdb.set_trace()
assert s=='y', 'Figure must match published figure'
pass
#====================================================================
# def test_RT_potenergy_curves_Spera2011(self):
# Nsamp = 101
# eos_d = self.init_params({})
#
# param_d = eos_d['param_d']
# Vgrid_a = np.linspace(0.5,1.1,Nsamp)*param_d['V0']
# Tgrid_a = np.linspace(100.0**(5./3),180.0**(5./3),11)
#
# full_mod = eos_d['modtype_d']['FullMod']
# thermal_mod = eos_d['modtype_d']['ThermalMod']
#
# energy_conv_fac, = models.Control.get_consts(['energy_conv_fac'],eos_d)
#
# potenergy_mod_a = []
#
# for iV in Vgrid_a:
# ipotenergy_a = thermal_mod.calc_potential_energy(iV,Tgrid_a,eos_d)
# potenergy_mod_a.append(ipotenergy_a)
#
# # energy_mod_a = np.array( energy_mod_a )
# potenergy_mod_a = np.array( potenergy_mod_a )
#
# plt.ion()
# plt.figure()
# plt.plot(Tgrid_a**(3./5), potenergy_mod_a.T/energy_conv_fac,'-')
# plt.xlim(100,180)
# plt.ylim(-102,-95)
#
# print 'Compare this plot with Spera2011 Fig 1b (Oganov potential):'
# print 'Do the figures agree (y/n or k for keyboard)?'
# s = raw_input('--> ')
# if s=='k':
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
#
# assert s=='y', 'Figure must match published figure'
# pass
#
# def test_heat_capacity_curves_Spera2011(self):
# Nsamp = 101
# eos_d = self.init_params({})
#
# param_d = eos_d['param_d']
# Vgrid_a = np.linspace(0.4,1.2,Nsamp)*param_d['V0']
# Tgrid_a = np.array([2500,3000,3500,4000,4500,5000])
#
# full_mod = eos_d['modtype_d']['FullMod']
# thermal_mod = eos_d['modtype_d']['ThermalMod']
#
# heat_capacity_mod_a = []
# energy_conv_fac, = models.Control.get_consts(['energy_conv_fac'],eos_d)
#
# energy_mod_a = []
# press_mod_a = []
#
# for iT in Tgrid_a:
# iheat_capacity_a = thermal_mod.heat_capacity(Vgrid_a,iT,eos_d)
# ienergy_a = full_mod.energy(Vgrid_a,iT,eos_d)
# ipress_a = full_mod.press(Vgrid_a,iT,eos_d)
#
# heat_capacity_mod_a.append(iheat_capacity_a)
# energy_mod_a.append(ienergy_a)
# press_mod_a.append(ipress_a)
#
#
# # energy_mod_a = np.array( energy_mod_a )
# heat_capacity_mod_a = np.array( heat_capacity_mod_a )
# energy_mod_a = np.array( energy_mod_a )
# press_mod_a = np.array( press_mod_a )
#
# plt.ion()
# plt.figure()
# plt.plot(press_mod_a.T,1e3*heat_capacity_mod_a.T/energy_conv_fac,'-')
# plt.legend(Tgrid_a,loc='lower right')
# # plt.ylim(1.2,1.9)
# plt.xlim(-5,240)
#
# print 'Compare this plot with Spera2011 Fig 2b (Oganov potential):'
# print 'Do the figures agree (y/n or k for keyboard)?'
# s = raw_input('--> ')
# if s=='k':
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
#
# assert s=='y', 'Figure must match published figure'
# pass
#====================================================================
#====================================================================
# SEC:3 Test Admin Funcs
#====================================================================
class TestControl(object):
def test_get_array_params(self):
TOL = 1e-6
eos_d, acoef_a = self.init_params()
param_a = models.Control.get_array_params('acoef',eos_d)
assert np.all(np.abs(param_a-acoef_a)<TOL), 'Stored and retrieved parameter array do not match within TOL'
param_a = models.Control.get_array_params('V0',eos_d)
assert param_a.size==0, 'non-array parameter should not be retrievable with get_array_params()'
pass
def test_set_array_params(self):
TOL = 1e-6
eos_d = {}
# Set model parameter values
E0 = 0.0 # eV/atom
V0 = 38.0 # 1e-5 m^3 / kg
K0 = 25.0 # GPa
KP0 = 9.0 # 1
acoef_a = np.array([1.3,-.23,9.99,-88])
param_key_a = ['V0','K0','KP0','E0']
param_val_a = np.array([ V0, K0, KP0, E0 ])
models.Control.set_params( param_key_a, param_val_a, eos_d )
models.Control.set_array_params( 'acoef', acoef_a, eos_d )
models.Control.set_consts( [], [], eos_d )
param_a = models.Control.get_array_params( 'acoef', eos_d )
assert np.all(np.abs(param_a-acoef_a)<TOL), 'Stored and retrieved parameter array do not match within TOL'
pass
def init_params(self):
eos_d = {}
# Set model parameter values
E0 = 0.0 # eV/atom
V0 = 38.0 # 1e-5 m^3 / kg
K0 = 25.0 # GPa
KP0 = 9.0 # 1
acoef = np.array([1.3,-.23,9.99,-88])
param_key_a = ['V0','K0','KP0','E0','acoef_0','acoef_1','acoef_2','acoef_3']
param_val_a = np.array([ V0, K0, KP0, E0, acoef[0], acoef[1], acoef[2], acoef[3] ])
models.Control.set_consts( [], [], eos_d )
models.Control.set_params( param_key_a, param_val_a, eos_d )
return eos_d, acoef
#====================================================================
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import iso8601
import mox
from oslo.config import cfg
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.tests.compute import test_compute
from nova.tests.image import fake as fake_image
from nova import utils
CONF = cfg.CONF
class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
def test_shelve(self):
CONF.shelved_offload_time = -1
db_instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=db_instance)
instance = instance_obj.Instance.get_by_uuid(
self.context, db_instance['uuid'],
expected_attrs=['metadata', 'system_metadata'])
image_id = 'fake_image_id'
host = 'fake-mini'
cur_time = timeutils.utcnow()
timeutils.set_time_override(cur_time)
instance.task_state = task_states.SHELVING
instance.save()
sys_meta = dict(instance.system_metadata)
sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
sys_meta['shelved_image_id'] = image_id
sys_meta['shelved_host'] = host
db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta)
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute.driver, 'snapshot')
self.mox.StubOutWithMock(self.compute.driver, 'power_off')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.compute._notify_about_instance_usage(self.context, instance,
'shelve.start')
self.compute.driver.power_off(instance)
self.compute._get_power_state(self.context,
instance).AndReturn(123)
self.compute.driver.snapshot(self.context, instance, 'fake_image_id',
mox.IgnoreArg())
db.instance_update_and_get_original(self.context, instance['uuid'],
{'power_state': 123,
'vm_state': vm_states.SHELVED,
'task_state': None,
'expected_task_state': [task_states.SHELVING,
task_states.SHELVING_IMAGE_UPLOADING],
'system_metadata': sys_meta},
update_cells=False,
columns_to_join=['metadata', 'system_metadata'],
).AndReturn((db_instance,
db_instance))
self.compute._notify_about_instance_usage(self.context,
instance, 'shelve.end')
self.mox.ReplayAll()
self.compute.shelve_instance(self.context, instance,
image_id=image_id)
def test_shelve_volume_backed(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=db_instance)
instance = instance_obj.Instance.get_by_uuid(
self.context, db_instance['uuid'],
expected_attrs=['metadata', 'system_metadata'])
instance.task_state = task_states.SHELVING
instance.save()
host = 'fake-mini'
cur_time = timeutils.utcnow()
timeutils.set_time_override(cur_time)
sys_meta = dict(instance.system_metadata)
sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
sys_meta['shelved_image_id'] = None
sys_meta['shelved_host'] = host
db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta)
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute.driver, 'power_off')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.start')
self.compute.driver.power_off(instance)
self.compute._get_power_state(self.context,
instance).AndReturn(123)
db.instance_update_and_get_original(self.context, instance['uuid'],
{'power_state': 123, 'host': None, 'node': None,
'vm_state': vm_states.SHELVED_OFFLOADED,
'task_state': None,
'expected_task_state': [task_states.SHELVING,
task_states.SHELVING_OFFLOADING]},
update_cells=False,
columns_to_join=['metadata', 'system_metadata'],
).AndReturn((db_instance, db_instance))
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.end')
self.mox.ReplayAll()
self.compute.shelve_offload_instance(self.context, instance)
def test_unshelve(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=db_instance)
instance = instance_obj.Instance.get_by_uuid(
self.context, db_instance['uuid'],
expected_attrs=['metadata', 'system_metadata'])
instance.task_state = task_states.UNSHELVING
instance.save()
image = {'id': 'fake_id'}
host = 'fake-mini'
cur_time = timeutils.utcnow()
cur_time_tz = cur_time.replace(tzinfo=iso8601.iso8601.Utc())
timeutils.set_time_override(cur_time)
sys_meta = dict(instance.system_metadata)
sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
sys_meta['shelved_image_id'] = image['id']
sys_meta['shelved_host'] = host
hypervisor_hostname = 'fake_hypervisor_hostname'
fake_compute_info = {'hypervisor_hostname': hypervisor_hostname}
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.compute, '_get_compute_info')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.deleted_image_id = None
def fake_delete(self2, ctxt, image_id):
self.deleted_image_id = image_id
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.start')
self.compute._get_compute_info(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
fake_compute_info)
db.instance_update_and_get_original(self.context, instance['uuid'],
{'task_state': task_states.SPAWNING, 'host': host,
'node': hypervisor_hostname},
update_cells=False,
columns_to_join=['metadata', 'system_metadata'],
).AndReturn((db_instance, db_instance))
self.compute._prep_block_device(self.context, instance,
[]).AndReturn('fake_bdm')
db_instance['key_data'] = None
db_instance['auto_disk_config'] = None
self.compute.driver.spawn(self.context, instance, image,
injected_files=[], admin_password=None,
network_info=[],
block_device_info='fake_bdm')
self.compute._get_power_state(self.context, instance).AndReturn(123)
db.instance_update_and_get_original(self.context, instance['uuid'],
{'power_state': 123,
'vm_state': vm_states.ACTIVE,
'task_state': None,
'key_data': None,
'auto_disk_config': False,
'expected_task_state': task_states.SPAWNING,
'launched_at': cur_time_tz},
update_cells=False,
columns_to_join=['metadata', 'system_metadata']
).AndReturn((db_instance, db_instance))
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.end')
self.mox.ReplayAll()
self.compute.unshelve_instance(self.context, instance,
image=image)
self.assertEqual(image['id'], self.deleted_image_id)
self.assertEqual(instance.host, self.compute.host)
def test_unshelve_volume_backed(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
host = 'fake-mini'
cur_time = timeutils.utcnow()
cur_time_tz = cur_time.replace(tzinfo=iso8601.iso8601.Utc())
timeutils.set_time_override(cur_time)
self.compute.run_instance(self.context, instance=db_instance)
instance = instance_obj.Instance.get_by_uuid(
self.context, db_instance['uuid'],
expected_attrs=['metadata', 'system_metadata'])
instance.task_state = task_states.UNSHELVING
instance.save()
sys_meta = dict(instance.system_metadata)
sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
sys_meta['shelved_image_id'] = None
sys_meta['shelved_host'] = host
hypervisor_hostname = 'fake_hypervisor_hostname'
fake_compute_info = {'hypervisor_hostname': hypervisor_hostname}
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.compute, '_get_compute_info')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.start')
self.compute._get_compute_info(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
fake_compute_info)
db.instance_update_and_get_original(self.context, instance['uuid'],
{'task_state': task_states.SPAWNING, 'host': host,
'node': hypervisor_hostname},
update_cells=False,
columns_to_join=['metadata', 'system_metadata']
).AndReturn((db_instance, db_instance))
self.compute._prep_block_device(self.context, instance,
[]).AndReturn('fake_bdm')
db_instance['key_data'] = None
db_instance['auto_disk_config'] = None
self.compute.driver.spawn(self.context, instance, None,
injected_files=[], admin_password=None,
network_info=[],
block_device_info='fake_bdm')
self.compute._get_power_state(self.context, instance).AndReturn(123)
db.instance_update_and_get_original(self.context, instance['uuid'],
{'power_state': 123,
'vm_state': vm_states.ACTIVE,
'task_state': None,
'key_data': None,
'auto_disk_config': False,
'expected_task_state': task_states.SPAWNING,
'launched_at': cur_time_tz},
update_cells=False,
columns_to_join=['metadata', 'system_metadata']
).AndReturn((db_instance, db_instance))
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.end')
self.mox.ReplayAll()
self.compute.unshelve_instance(self.context, instance, image=None)
def test_shelved_poll_none_exist(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.mox.StubOutWithMock(timeutils, 'is_older_than')
self.mox.ReplayAll()
self.compute._poll_shelved_instances(self.context)
def test_shelved_poll_not_timedout(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
sys_meta = utils.metadata_to_dict(instance['system_metadata'])
shelved_time = timeutils.utcnow()
timeutils.set_time_override(shelved_time)
timeutils.advance_time_seconds(CONF.shelved_offload_time - 1)
sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
db.instance_update_and_get_original(self.context, instance['uuid'],
{'vm_state': vm_states.SHELVED, 'system_metadata': sys_meta})
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.mox.ReplayAll()
self.compute._poll_shelved_instances(self.context)
def test_shelved_poll_timedout(self):
active_instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=active_instance)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
sys_meta = utils.metadata_to_dict(instance['system_metadata'])
shelved_time = timeutils.utcnow()
timeutils.set_time_override(shelved_time)
timeutils.advance_time_seconds(CONF.shelved_offload_time + 1)
sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
(old, instance) = db.instance_update_and_get_original(self.context,
instance['uuid'], {'vm_state': vm_states.SHELVED,
'system_metadata': sys_meta})
def fake_destroy(inst, nw_info, bdm):
# NOTE(alaski) There are too many differences between an instance
# as returned by instance_update_and_get_original and
# instance_get_all_by_filters so just compare the uuid.
self.assertEqual(instance['uuid'], inst['uuid'])
self.stubs.Set(self.compute.driver, 'destroy', fake_destroy)
self.compute._poll_shelved_instances(self.context)
class ShelveComputeAPITestCase(test_compute.BaseTestCase):
def test_shelve(self):
# Ensure instance can be shelved.
fake_instance = self._create_fake_instance({'display_name': 'vm01'})
instance = jsonutils.to_primitive(fake_instance)
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
self.assertEqual(instance['task_state'], None)
def fake_init(self2):
            # In the original _FakeImageService.__init__(), some fake images
            # are created. To verify only the snapshot name in this test, we
            # replace it with a stub that starts with an empty image store.
self2.images = {}
def fake_create(self2, ctxt, metadata):
self.assertEqual(metadata['name'], 'vm01-shelved')
metadata['id'] = '8b24ed3f-ee57-43bc-bc2e-fb2e9482bc42'
return metadata
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, '__init__', fake_init)
self.stubs.Set(fake_image._FakeImageService, 'create', fake_create)
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
instance_uuid)
self.compute_api.shelve(self.context, inst_obj)
inst_obj.refresh()
self.assertEqual(inst_obj.task_state, task_states.SHELVING)
db.instance_destroy(self.context, instance['uuid'])
def test_unshelve(self):
# Ensure instance can be unshelved.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
self.assertEqual(instance['task_state'], None)
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
instance_uuid)
self.compute_api.shelve(self.context, inst_obj)
inst_obj.refresh()
inst_obj.task_state = None
inst_obj.vm_state = vm_states.SHELVED
inst_obj.save()
self.compute_api.unshelve(self.context, inst_obj)
inst_obj.refresh()
self.assertEqual(inst_obj.task_state, task_states.UNSHELVING)
db.instance_destroy(self.context, instance['uuid'])
|
|
from __future__ import division
import numpy as np
import pandas as pd
from six.moves.urllib.parse import urlparse, parse_qs
from toolz import flip, identity
from toolz.curried import merge_with, operator as op
from zipline.data.bundles.core import _make_bundle_core
from zipline.data.bundles import yahoo_equities
from zipline.lib.adjustment import Float64Multiply
from zipline.testing import test_resource_path, tmp_dir, read_compressed
from zipline.testing.fixtures import WithResponses, ZiplineTestCase
from zipline.testing.predicates import assert_equal
from zipline.utils.calendars import get_calendar
class YahooBundleTestCase(WithResponses, ZiplineTestCase):
symbols = 'AAPL', 'IBM', 'MSFT'
columns = 'open', 'high', 'low', 'close', 'volume'
asset_start = pd.Timestamp('2014-01-02', tz='utc')
asset_end = pd.Timestamp('2014-12-31', tz='utc')
calendar = get_calendar('NYSE')
sessions = calendar.sessions_in_range(asset_start, asset_end)
@classmethod
def init_class_fixtures(cls):
super(YahooBundleTestCase, cls).init_class_fixtures()
(cls.bundles,
cls.register,
cls.unregister,
cls.ingest,
cls.load,
cls.clean) = map(staticmethod, _make_bundle_core())
def _expected_data(self):
sids = 0, 1, 2
modifier = {
'low': 0,
'open': 1,
'close': 2,
'high': 3,
'volume': 0,
}
pricing = [
np.hstack((
np.arange(252, dtype='float64')[:, np.newaxis] +
1 +
sid * 10000 +
modifier[column] * 1000
for sid in sorted(sids)
))
for column in self.columns
]
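        # Each synthetic price is day_index + 1 + sid * 10000 +
        # modifier[column] * 1000, so the sid and the column can be read off
        # from the magnitude of the value.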
# There are two dividends and 1 split for each company.
def dividend_adjustment(sid, which):
"""The dividends occur at indices 252 // 4 and 3 * 252 / 4
with a cash amount of sid + 1 / 10 and sid + 2 / 10
"""
if which == 'first':
idx = 252 // 4
else:
idx = 3 * 252 // 4
return {
idx: [Float64Multiply(
first_row=0,
last_row=idx,
first_col=sid,
last_col=sid,
value=float(
1 -
((sid + 1 + (which == 'second')) / 10) /
(idx - 1 + sid * 10000 + 2000)
),
)],
}
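        # Worked example of the value above: for sid=0 and the 'first'
        # dividend, idx = 252 // 4 = 63, so the multiplier is
        # 1 - (0.1 / (63 - 1 + 0 + 2000)) = 1 - 0.1 / 2062, i.e. rows before
        # the ex-date are scaled down by the dividend ratio.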
def split_adjustment(sid, volume):
"""The splits occur at index 252 // 2 with a ratio of (sid + 1):1
"""
idx = 252 // 2
return {
idx: [Float64Multiply(
first_row=0,
last_row=idx,
first_col=sid,
last_col=sid,
value=(identity if volume else op.truediv(1))(sid + 2),
)],
}
merge_adjustments = merge_with(flip(sum, []))
adjustments = [
# ohlc
merge_adjustments(
*tuple(dividend_adjustment(sid, 'first') for sid in sids) +
tuple(dividend_adjustment(sid, 'second') for sid in sids) +
tuple(split_adjustment(sid, volume=False) for sid in sids)
)
] * (len(self.columns) - 1) + [
# volume
merge_adjustments(
split_adjustment(sid, volume=True) for sid in sids
),
]
return pricing, adjustments
def test_bundle(self):
def get_symbol_from_url(url):
params = parse_qs(urlparse(url).query)
symbol, = params['s']
return symbol
def pricing_callback(request):
headers = {
'content-encoding': 'gzip',
'content-type': 'text/csv',
}
path = test_resource_path(
'yahoo_samples',
get_symbol_from_url(request.url) + '.csv.gz',
)
with open(path, 'rb') as f:
return (
200,
headers,
f.read(),
)
for _ in range(3):
self.responses.add_callback(
self.responses.GET,
'http://ichart.finance.yahoo.com/table.csv',
pricing_callback,
)
def adjustments_callback(request):
path = test_resource_path(
'yahoo_samples',
get_symbol_from_url(request.url) + '.adjustments.gz',
)
return 200, {}, read_compressed(path)
for _ in range(3):
self.responses.add_callback(
self.responses.GET,
'http://ichart.finance.yahoo.com/x',
adjustments_callback,
)
self.register(
'bundle',
yahoo_equities(self.symbols),
calendar=self.calendar,
start_session=self.asset_start,
end_session=self.asset_end,
)
zipline_root = self.enter_instance_context(tmp_dir()).path
environ = {
'ZIPLINE_ROOT': zipline_root,
}
self.ingest('bundle', environ=environ, show_progress=False)
bundle = self.load('bundle', environ=environ)
sids = 0, 1, 2
equities = bundle.asset_finder.retrieve_all(sids)
for equity, expected_symbol in zip(equities, self.symbols):
assert_equal(equity.symbol, expected_symbol)
for equity in bundle.asset_finder.retrieve_all(sids):
assert_equal(equity.start_date, self.asset_start, msg=equity)
assert_equal(equity.end_date, self.asset_end, msg=equity)
sessions = self.sessions
actual = bundle.equity_daily_bar_reader.load_raw_arrays(
self.columns,
sessions[sessions.get_loc(self.asset_start, 'bfill')],
sessions[sessions.get_loc(self.asset_end, 'ffill')],
sids,
)
expected_pricing, expected_adjustments = self._expected_data()
assert_equal(actual, expected_pricing, array_decimal=2)
adjustments_for_cols = bundle.adjustment_reader.load_adjustments(
self.columns,
self.sessions,
pd.Index(sids),
)
for column, adjustments, expected in zip(self.columns,
adjustments_for_cols,
expected_adjustments):
assert_equal(
adjustments,
expected,
msg=column,
decimal=4,
)
|
|
import time
from datetime import datetime
import os
import sys
import dateutil.parser
import requests
try:
from urlparse import urlparse
except:
from urllib.parse import urlparse
try:
import json
except:
import simplejson as json
class IronTokenProvider(object):
def __init__(self, token):
self.token = token
def getToken(self):
return self.token
class KeystoneTokenProvider(object):
def __init__(self, keystone):
self.server = keystone["server"] + ("" if keystone["server"].endswith("/") else "/")
self.tenant = keystone["tenant"]
self.username = keystone["username"]
self.password = keystone["password"]
self.token = None
self.local_expires_at_timestamp = 0
def getToken(self):
date_diff = time.mktime(datetime.now().timetuple()) - self.local_expires_at_timestamp
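        # Refresh when no token is cached or when fewer than 10 seconds remain
        # before the locally estimated expiry time.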
if self.token is None or date_diff > -10:
payload = {
'auth': {
'tenantName': self.tenant,
'passwordCredentials': {
'username': self.username,
'password': self.password
}
}
}
headers = {'content-type': 'application/json', 'Accept': 'application/json'}
response = requests.post(self.server + 'tokens', data=json.dumps(payload), headers=headers)
response.raise_for_status()
result = response.json()
token_data = result['access']['token']
issued_at = dateutil.parser.parse(token_data['issued_at']).replace(tzinfo=None)
expires = dateutil.parser.parse(token_data['expires']).replace(tzinfo=None)
duration = expires - issued_at
self.local_expires_at_timestamp = time.mktime((datetime.now() + duration).timetuple())
self.token = token_data['id']
return self.token
class IronClient(object):
__version__ = "1.2.0"
def __init__(self, name, version, product, host=None, project_id=None,
token=None, protocol=None, port=None, api_version=None,
config_file=None, keystone=None, cloud=None, path_prefix=''):
"""Prepare a Client that can make HTTP calls and return it.
Keyword arguments:
name -- the name of the client. Required.
version -- the version of the client. Required.
product -- the name of the product the client will access. Required.
host -- the default domain the client will be requesting. Defaults
to None.
project_id -- the project ID the client will be requesting. Can be
found on http://hud.iron.io. Defaults to None.
token -- an API token found on http://hud.iron.io. Defaults to None.
protocol -- The default protocol the client will use for its requests.
Defaults to None.
port -- The default port the client will use for its requests. Defaults
to None.
api_version -- The version of the API the client will use for its
requests. Defaults to None.
config_file -- The config file to load configuration from. Defaults to
None.
"""
config = {
"host": None,
"protocol": "https",
"port": 443,
"api_version": None,
"project_id": None,
"token": None,
"keystone": None,
"path_prefix": None,
"cloud": None,
}
products = {
"iron_worker": {
"host": "worker-aws-us-east-1.iron.io",
"version": 2
},
"iron_mq": {
"host": "mq-aws-us-east-1-1.iron.io",
"version": 3
},
"iron_cache": {
"host": "cache-aws-us-east-1.iron.io",
"version": 1
}
}
if product in products:
config["host"] = products[product]["host"]
config["api_version"] = products[product]["version"]
try:
config = configFromFile(config,
os.path.expanduser("~/.iron.json"), product)
except:
pass
config = configFromEnv(config)
config = configFromEnv(config, product)
config = configFromFile(config, "iron.json", product)
config = configFromFile(config, config_file, product)
config = configFromArgs(config, host=host, project_id=project_id,
token=token, protocol=protocol, port=port,
api_version=api_version, keystone=keystone, cloud=cloud, path_prefix=path_prefix)
required_fields = ["project_id"]
for field in required_fields:
if config[field] is None:
raise ValueError("No %s set. %s is a required field." % (field, field))
keystone_configured = False
if config["keystone"] is not None:
keystone_required_keys = ["server", "tenant", "username", "password"]
if len(intersect(keystone_required_keys, config["keystone"].keys())) == len(keystone_required_keys):
self.token_provider = KeystoneTokenProvider(config["keystone"])
keystone_configured = True
else:
raise ValueError("Missing keystone keys.")
elif config["token"] is not None:
self.token_provider = IronTokenProvider(config["token"])
if config["token"] is None and not keystone_configured:
raise ValueError("At least one of token or keystone should be specified.")
self.name = name
self.version = version
self.product = product
self.host = config["host"]
self.project_id = config["project_id"]
self.token = config["token"]
self.keystone = config["keystone"]
self.protocol = config["protocol"]
self.port = config["port"]
self.api_version = config["api_version"]
self.cloud = config["cloud"]
self.headers = {
"Accept": "application/json",
"User-Agent": "%s (version: %s)" % (self.name, self.version)
}
self.path_prefix = config["path_prefix"]
if self.cloud is not None:
url = urlparse(self.cloud)
self.protocol = url.scheme
self.host = url.netloc.split(":")[0]
if url.port:
self.port = url.port
self.path_prefix = url.path.rstrip("/")
if self.protocol == "https" and self.port == 443:
self.base_url = "%s://%s%s/%s/" % (self.protocol, self.host, self.path_prefix, self.api_version)
else:
self.base_url = "%s://%s:%s%s/%s/" % (self.protocol, self.host,
self.port, self.path_prefix, self.api_version)
if self.project_id:
self.base_url += "projects/%s/" % self.project_id
def _doRequest(self, url, method, body="", headers={}):
if self.token or self.keystone:
headers["Authorization"] = "OAuth %s" % self.token_provider.getToken()
if method == "GET":
r = requests.get(url, headers=headers)
elif method == "POST":
r = requests.post(url, data=body, headers=headers)
elif method == "PUT":
r = requests.put(url, data=body, headers=headers)
elif method == "DELETE":
r = requests.delete(url, data=body, headers=headers)
elif method == "PATCH":
r = requests.patch(url, data=body, headers=headers)
else:
raise ValueError("Invalid HTTP method")
return r
def request(self, url, method, body="", headers={}, retry=True):
"""Execute an HTTP request and return a dict containing the response
and the response status code.
Keyword arguments:
url -- The path to execute the result against, not including the API
version or project ID, with no leading /. Required.
method -- The HTTP method to use. Required.
body -- A string or file object to send as the body of the request.
Defaults to an empty string.
headers -- HTTP Headers to send with the request. Can overwrite the
defaults. Defaults to {}.
retry -- Whether exponential backoff should be employed. Defaults
to True.
"""
if headers:
headers = dict(list(headers.items()) + list(self.headers.items()))
else:
headers = self.headers
if not sys.version_info >= (3,) and headers:
headers = dict((k.encode('ascii') if isinstance(k, unicode) else k,
v.encode('ascii') if isinstance(v, unicode) else v)
for k, v in headers.items())
url = self.base_url + url
if not sys.version_info >= (3,):
if isinstance(url, unicode):
url = url.encode('ascii')
r = self._doRequest(url, method, body, headers)
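        # 503/504 responses are retried with exponential backoff: up to five
        # retries, sleeping 0.5s, 1s, 2s, 4s and 8s between attempts.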
retry_http_codes = [503, 504]
if r.status_code in retry_http_codes and retry:
tries = 5
delay = .5
backoff = 2
while r.status_code in retry_http_codes and tries > 0:
tries -= 1
time.sleep(delay)
delay *= backoff
r = self._doRequest(url, method, body, headers)
r.raise_for_status()
result = {}
contentType = r.headers["Content-Type"]
if contentType is None:
contentType = "text/plain"
else:
contentType = contentType.split(";")[0]
if contentType.lower() == "application/json":
try:
result["body"] = json.loads(r.text)
except:
result["body"] = r.text
else:
result["body"] = r.text
result["status"] = r.status_code
result["resp"] = r
result["content-type"] = contentType
return result
def get(self, url, headers={}, retry=True):
"""Execute an HTTP GET request and return a dict containing the
response and the response status code.
Keyword arguments:
url -- The path to execute the result against, not including the API
version or project ID, with no leading /. Required.
headers -- HTTP Headers to send with the request. Can overwrite the
defaults. Defaults to {}.
retry -- Whether exponential backoff should be employed. Defaults
to True.
"""
return self.request(url=url, method="GET", headers=headers,
retry=retry)
def post(self, url, body="", headers={}, retry=True):
"""Execute an HTTP POST request and return a dict containing the
response and the response status code.
Keyword arguments:
url -- The path to execute the result against, not including the API
version or project ID, with no leading /. Required.
body -- A string or file object to send as the body of the request.
Defaults to an empty string.
headers -- HTTP Headers to send with the request. Can overwrite the
defaults. Defaults to {}.
retry -- Whether exponential backoff should be employed. Defaults
to True.
"""
headers["Content-Length"] = str(len(body))
return self.request(url=url, method="POST", body=body, headers=headers,
retry=retry)
def delete(self, url, headers={}, retry=True, body=""):
"""Execute an HTTP DELETE request and return a dict containing the
response and the response status code.
Keyword arguments:
url -- The path to execute the result against, not including the API
version or project ID, with no leading /. Required.
headers -- HTTP Headers to send with the request. Can overwrite the
defaults. Defaults to an empty dict.
retry -- Whether exponential backoff should be employed. Defaults
to True.
body -- A string or file object to send as the body of the request.
Defaults to an empty string.
"""
return self.request(url=url, method="DELETE", headers=headers,
retry=retry, body=body)
def put(self, url, body="", headers={}, retry=True):
"""Execute an HTTP PUT request and return a dict containing the
response and the response status code.
Keyword arguments:
url -- The path to execute the result against, not including the API
version or project ID, with no leading /. Required.
body -- A string or file object to send as the body of the request.
Defaults to an empty string.
headers -- HTTP Headers to send with the request. Can overwrite the
defaults. Defaults to {}.
retry -- Whether exponential backoff should be employed. Defaults
to True.
"""
return self.request(url=url, method="PUT", body=body, headers=headers,
retry=retry)
def patch(self, url, body="", headers={}, retry=True):
"""Execute an HTTP PATCH request and return a dict containing the
response and the response status code.
Keyword arguments:
url -- The path to execute the result against, not including the API
version or project ID, with no leading /. Required.
body -- A string or file object to send as the body of the request.
Defaults to an empty string.
headers -- HTTP Headers to send with the request. Can overwrite the
defaults. Defaults to {}.
retry -- Whether exponential backoff should be employed. Defaults
to True.
"""
return self.request(url=url, method="PATCH", body=body, headers=headers,
retry=retry)
@staticmethod
def fromRfc3339(timestamp=None):
if timestamp is None:
timestamp = datetime.now()
return timestamp
return dateutil.parser.parse(timestamp)
@staticmethod
def toRfc3339(timestamp=None):
if timestamp is None:
timestamp = datetime.now()
return timestamp.isoformat()
@staticmethod
def fromTimestamp(timestamp=None):
if timestamp is None:
            timestamp = datetime.now()  # time.now() does not exist; mirror fromRfc3339
return timestamp
return datetime.fromtimestamp(float(timestamp))
def configFromFile(config, path, product=None):
if path is None:
return config
if not os.path.exists(path):
return config
try:
file = open(path, "r")
except IOError:
return config
raw = json.loads(file.read())
file.close()
for k in raw.keys():
if k in config:
config[k] = raw[k]
if product is not None:
if product in raw:
for k in raw[product].keys():
config[k] = raw[product][k]
return config
def configFromEnv(config, product=None):
if product is None:
product = "iron"
for k in config.keys():
key = "%s_%s" % (product, k)
if key.upper() in os.environ:
config[k] = os.environ[key.upper()]
return config
def configFromArgs(config, **kwargs):
for k in kwargs:
if kwargs[k] is not None:
config[k] = kwargs[k]
return config
def intersect(a, b):
return list(set(a) & set(b))
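# Illustrative usage sketch (not part of the library). Configuration is merged
# in order from the product defaults, ~/.iron.json, environment variables
# (e.g. IRON_PROJECT_ID, IRON_TOKEN), ./iron.json, an explicit config_file and
# finally keyword arguments, with later sources overriding earlier ones.
# A hypothetical instantiation and request:
#
#   client = IronClient(name="my-app", version="0.1", product="iron_mq",
#                       project_id="<project id>", token="<api token>")
#   resp = client.get("queues")  # GET <base_url>queues
#   print(resp["status"], resp["body"])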
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for when the training and inference graphs are different."""
import os
import tempfile
import tensorflow as tf
from tensorflow_model_optimization.python.core.common.keras.compression.algorithms import different_training_and_inference as svd
from tensorflow_model_optimization.python.core.keras.testing import test_utils_mnist
# TODO(tfmot): dedup.
def _build_model():
i = tf.keras.layers.Input(shape=(28, 28), name='input')
x = tf.keras.layers.Reshape((28, 28, 1))(i)
x = tf.keras.layers.Conv2D(
20, 5, activation='relu', padding='valid', name='conv1')(
x)
x = tf.keras.layers.MaxPool2D(2, 2)(x)
x = tf.keras.layers.Conv2D(
50, 5, activation='relu', padding='valid', name='conv2')(
x)
x = tf.keras.layers.MaxPool2D(2, 2)(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(500, activation='relu', name='fc1')(x)
output = tf.keras.layers.Dense(10, name='fc2')(x)
model = tf.keras.Model(inputs=[i], outputs=[output])
return model
def _get_dataset():
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
  # Use a subset of the 60000 examples to keep the unit test fast.
x_train = x_train[0:1000]
y_train = y_train[0:1000]
return (x_train, y_train), (x_test, y_test)
def _train_model(model):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])
(x_train, y_train), _ = _get_dataset()
model.fit(x_train, y_train, epochs=1)
def _save_as_saved_model(model):
saved_model_dir = tempfile.mkdtemp()
model.save(saved_model_dir)
return saved_model_dir
# TODO(tfmot): reuse existing test utilities.
def _convert_to_tflite(saved_model_dir):
_, tflite_file = tempfile.mkstemp()
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
with open(tflite_file, 'wb') as f:
f.write(tflite_model)
return tflite_file
def _get_directory_size_in_bytes(directory):
total = 0
try:
for entry in os.scandir(directory):
if entry.is_file():
# if it's a file, use stat() function
total += entry.stat().st_size
elif entry.is_dir():
# if it's a directory, recursively call this function
total += _get_directory_size_in_bytes(entry.path)
except NotADirectoryError:
    # if `directory` isn't a directory, return the file size instead
return os.path.getsize(directory)
except PermissionError:
# if for whatever reason we can't open the folder, return 0
return 0
return total
class FunctionalTest(tf.test.TestCase):
# TODO(tfmot): can simplify to single layer test that checks exact
# dimensions of weights.
def testSVD_ReducesSavedModelSize(self):
model = _build_model()
original_saved_model_dir = _save_as_saved_model(model)
model_for_inference = svd.SVD(rank=16).compress_model(model)
saved_model_dir = _save_as_saved_model(model_for_inference)
original_size = _get_directory_size_in_bytes(original_saved_model_dir)
compressed_size = _get_directory_size_in_bytes(saved_model_dir)
self.assertLess(compressed_size, original_size / 3)
def testSVD_HasReasonableAccuracy_TF(self):
model = _build_model()
_train_model(model)
model_for_inference = svd.SVD(rank=16).compress_model(model)
_, (x_test, y_test) = _get_dataset()
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model_for_inference.compile(
optimizer='adam', loss=loss_fn, metrics=['accuracy'])
results = model_for_inference.evaluate(x_test, y_test)
self.assertGreater(results[1], 0.60)
def testSVD_ReducesTFLiteModelSize(self):
model = _build_model()
original_saved_model_dir = _save_as_saved_model(model)
original_tflite_file = _convert_to_tflite(original_saved_model_dir)
model_for_inference = svd.SVD(rank=16).compress_model(model)
saved_model_dir = _save_as_saved_model(model_for_inference)
compressed_tflite_file = _convert_to_tflite(saved_model_dir)
original_size = os.path.getsize(original_tflite_file)
compressed_size = os.path.getsize(compressed_tflite_file)
self.assertLess(compressed_size, original_size / 6)
def testSVD_HasReasonableAccuracy_TFLite(self):
model = _build_model()
_train_model(model)
model_for_inference = svd.SVD(rank=16).compress_model(model)
saved_model_dir = _save_as_saved_model(model_for_inference)
compressed_tflite_file = _convert_to_tflite(saved_model_dir)
accuracy = test_utils_mnist.eval_tflite(compressed_tflite_file)
self.assertGreater(accuracy, 0.60)
# TODO(tfmot): can simplify to single layer test.
def testSVD_BreaksDownLayerWeights(self):
model = _build_model()
first_conv_layer = model.layers[2]
self.assertLen(first_conv_layer.weights, 2)
model_for_inference = svd.SVD(rank=16).compress_model(model)
first_conv_layer = model_for_inference.layers[2]
self.assertLen(first_conv_layer.weights, 3)
# TODO(tfmot): can simplify to single layer test.
def testSVD_PreservesPretrainedWeights(self):
i = tf.keras.layers.Input(shape=(2), name='input')
output = tf.keras.layers.Dense(3, name='fc1')(i)
model = tf.keras.Model(inputs=[i], outputs=[output])
dense_layer_weights = model.layers[1].get_weights()
algorithm = svd.SVD(rank=1)
model_for_inference = algorithm.compress_model(model)
dense_layer_compressed_weights = model_for_inference.layers[1].get_weights()
# kernel
w1, w2 = algorithm.compress_training_weights(
tf.constant(dense_layer_weights[0]))
assert (w1 == dense_layer_compressed_weights[0]).numpy().all()
assert (w2 == dense_layer_compressed_weights[1]).numpy().all()
# bias
assert (dense_layer_weights[1] == dense_layer_compressed_weights[2]).all()
if __name__ == '__main__':
tf.test.main()
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.layers as layers
pos_enc_param_names = (
"src_pos_enc_table",
"trg_pos_enc_table", )
batch_size = 64
def position_encoding_init(n_position, d_pos_vec):
"""
Generate the initial values for the sinusoid position encoding table.
"""
position_enc = np.array([[
        pos / np.power(10000, 2. * (j // 2) / d_pos_vec)  # float exponent avoids integer division on Python 2
for j in range(d_pos_vec)
] if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)])
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i
position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1
return position_enc.astype("float32")
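# Worked example (illustrative, not used by the model): with d_pos_vec=4,
# row 0 of the table is all zeros and row 1 is
#   [sin(1 / 10000**0), cos(1 / 10000**0), sin(1 / 10000**0.5), cos(1 / 10000**0.5)]
#   = [sin(1.0), cos(1.0), sin(0.01), cos(0.01)]
# i.e. even dimensions use sin and odd dimensions use cos of
# pos / 10000**(2 * (j // 2) / d_pos_vec).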
def multi_head_attention(queries,
keys,
values,
attn_bias,
d_key,
d_value,
d_model,
n_head=1,
dropout_rate=0.):
"""
    Multi-Head Attention. Note that attn_bias is added to the logits before
    computing the softmax activation, to mask certain selected positions so
    that they will not be considered in the attention weights.
"""
if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
raise ValueError(
"Inputs: quries, keys and values should all be 3-D tensors.")
def __compute_qkv(queries, keys, values, n_head, d_key, d_value):
"""
Add linear projection to queries, keys, and values.
"""
q = layers.fc(input=queries,
size=d_key * n_head,
param_attr=fluid.initializer.Xavier(
uniform=False,
fan_in=d_model * d_key,
fan_out=n_head * d_key),
bias_attr=False,
num_flatten_dims=2)
k = layers.fc(input=keys,
size=d_key * n_head,
param_attr=fluid.initializer.Xavier(
uniform=False,
fan_in=d_model * d_key,
fan_out=n_head * d_key),
bias_attr=False,
num_flatten_dims=2)
v = layers.fc(input=values,
size=d_value * n_head,
param_attr=fluid.initializer.Xavier(
uniform=False,
fan_in=d_model * d_value,
fan_out=n_head * d_value),
bias_attr=False,
num_flatten_dims=2)
return q, k, v
def __split_heads(x, n_head):
"""
        Reshape the last dimension of the input tensor x so that it becomes
        two dimensions, and then transpose. Specifically, a tensor with shape
        [bs, max_sequence_length, n_head * hidden_dim] is transformed into a
        tensor with shape [bs, n_head, max_sequence_length, hidden_dim].
"""
if n_head == 1:
return x
hidden_size = x.shape[-1]
# FIXME(guosheng): Decouple the program desc with batch_size.
reshaped = layers.reshape(
x=x, shape=[batch_size, -1, n_head, hidden_size // n_head])
        # permute the dimensions into:
# [batch_size, n_head, max_sequence_len, hidden_size_per_head]
return layers.transpose(x=reshaped, perm=[0, 2, 1, 3])
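    # Shape walkthrough for __split_heads (illustrative numbers): with
    # n_head=8 and hidden_size=512, an input of shape [bs, len, 512] is
    # reshaped to [bs, len, 8, 64] and transposed to [bs, 8, len, 64], so
    # each head attends over its own 64-dimensional slice.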
def __combine_heads(x):
"""
        Transpose and then reshape the last two dimensions of the input tensor
        x so that they become one dimension; this is the reverse of __split_heads.
"""
if len(x.shape) == 3: return x
if len(x.shape) != 4:
raise ValueError("Input(x) should be a 4-D Tensor.")
trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
# FIXME(guosheng): Decouple the program desc with batch_size.
return layers.reshape(
x=trans_x,
            shape=list(map(
                int, [batch_size, -1, trans_x.shape[2] * trans_x.shape[3]])))
def scaled_dot_product_attention(q, k, v, attn_bias, d_model, dropout_rate):
"""
Scaled Dot-Product Attention
"""
        # FIXME(guosheng): Optimize the shape in reshape_op or softmax_op.
        # The current implementation of softmax_op only supports 2-D tensors,
        # so it cannot be used directly here. The reshape_op is not an option
        # either, because the shape of the product inferred at compile time
        # differs from the actual run-time shape and cannot be used to set
        # the reshape_op attribute. A local softmax is therefore defined below
        # as a temporary workaround.
def __softmax(x, eps=1e-9):
exp_out = layers.exp(x=x)
sum_out = layers.reduce_sum(exp_out, dim=-1, keep_dim=False)
return layers.elementwise_div(x=exp_out, y=sum_out, axis=0)
scaled_q = layers.scale(x=q, scale=d_model**-0.5)
product = layers.matmul(x=scaled_q, y=k, transpose_y=True)
weights = __softmax(layers.elementwise_add(x=product, y=attn_bias))
if dropout_rate:
weights = layers.dropout(
weights, dropout_prob=dropout_rate, is_test=False)
out = layers.matmul(weights, v)
return out
q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)
q = __split_heads(q, n_head)
k = __split_heads(k, n_head)
v = __split_heads(v, n_head)
ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_model,
dropout_rate)
out = __combine_heads(ctx_multiheads)
# Project back to the model size.
proj_out = layers.fc(input=out,
size=d_model,
param_attr=fluid.initializer.Xavier(uniform=False),
bias_attr=False,
num_flatten_dims=2)
return proj_out
def positionwise_feed_forward(x, d_inner_hid, d_hid):
"""
Position-wise Feed-Forward Networks.
This module consists of two linear transformations with a ReLU activation
in between, which is applied to each position separately and identically.
"""
hidden = layers.fc(input=x,
size=d_inner_hid,
num_flatten_dims=2,
param_attr=fluid.initializer.Uniform(
low=-(d_hid**-0.5), high=(d_hid**-0.5)),
act="relu")
out = layers.fc(input=hidden,
size=d_hid,
num_flatten_dims=2,
param_attr=fluid.initializer.Uniform(
low=-(d_inner_hid**-0.5), high=(d_inner_hid**-0.5)))
return out
def pre_post_process_layer(prev_out, out, process_cmd, dropout=0.):
"""
    Add residual connection, layer normalization and dropout to the out tensor,
    as selected by the value of process_cmd.
This will be used before or after multi-head attention and position-wise
feed-forward networks.
"""
for cmd in process_cmd:
if cmd == "a": # add residual connection
out = out + prev_out if prev_out else out
elif cmd == "n": # add layer normalization
out = layers.layer_norm(
out,
begin_norm_axis=len(out.shape) - 1,
param_attr=fluid.initializer.Constant(1.),
bias_attr=fluid.initializer.Constant(0.))
elif cmd == "d": # add dropout
if dropout:
out = layers.dropout(out, dropout_prob=dropout, is_test=False)
return out
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
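# Illustrative reading of process_cmd (derived from the loop above): commands
# are applied in string order, so post_process_layer(prev, out, "dan", 0.1)
# applies dropout to `out`, then adds the residual `prev`, then layer-normalizes
# the result. pre_process_layer binds prev_out=None, so the residual add ("a")
# becomes a no-op when it is used for pre-processing.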
def prepare_encoder(src_word,
src_pos,
src_vocab_size,
src_emb_dim,
src_pad_idx,
src_max_len,
dropout=0.,
pos_pad_idx=0,
pos_enc_param_name=None):
"""Add word embeddings and position encodings.
The output tensor has a shape of:
[batch_size, max_src_length_in_batch, d_model].
This module is used at the bottom of the encoder stacks.
"""
src_word_emb = layers.embedding(
src_word,
size=[src_vocab_size, src_emb_dim],
padding_idx=src_pad_idx,
param_attr=fluid.initializer.Normal(0., 1.))
src_pos_enc = layers.embedding(
src_pos,
size=[src_max_len, src_emb_dim],
padding_idx=pos_pad_idx,
param_attr=fluid.ParamAttr(
name=pos_enc_param_name, trainable=False))
enc_input = src_word_emb + src_pos_enc
# FIXME(guosheng): Decouple the program desc with batch_size.
enc_input = layers.reshape(x=enc_input, shape=[batch_size, -1, src_emb_dim])
return layers.dropout(
enc_input, dropout_prob=dropout,
is_test=False) if dropout else enc_input
prepare_encoder = partial(
prepare_encoder, pos_enc_param_name=pos_enc_param_names[0])
prepare_decoder = partial(
prepare_encoder, pos_enc_param_name=pos_enc_param_names[1])
def encoder_layer(enc_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate=0.):
"""The encoder layers that can be stacked to form a deep encoder.
    This module consists of a multi-head (self-)attention sub-layer followed by
    a position-wise feed-forward network, each of the two components wrapped
    with post_process_layer to add the residual connection, layer normalization
    and dropout.
"""
attn_output = multi_head_attention(enc_input, enc_input, enc_input,
attn_bias, d_key, d_value, d_model,
n_head, dropout_rate)
attn_output = post_process_layer(enc_input, attn_output, "dan",
dropout_rate)
ffd_output = positionwise_feed_forward(attn_output, d_inner_hid, d_model)
return post_process_layer(attn_output, ffd_output, "dan", dropout_rate)
def encoder(enc_input,
attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate=0.):
"""
The encoder is composed of a stack of identical layers returned by calling
encoder_layer.
"""
for i in range(n_layer):
enc_output = encoder_layer(enc_input, attn_bias, n_head, d_key, d_value,
d_model, d_inner_hid, dropout_rate)
enc_input = enc_output
return enc_output
def decoder_layer(dec_input,
enc_output,
slf_attn_bias,
dec_enc_attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate=0.):
""" The layer to be stacked in decoder part.
    The structure of this module is similar to that of the encoder part, except
    that an additional multi-head attention sub-layer is added to implement
    encoder-decoder attention.
"""
slf_attn_output = multi_head_attention(
dec_input,
dec_input,
dec_input,
slf_attn_bias,
d_key,
d_value,
d_model,
n_head,
dropout_rate, )
slf_attn_output = post_process_layer(
dec_input,
slf_attn_output,
"dan", # residual connection + dropout + layer normalization
dropout_rate, )
enc_attn_output = multi_head_attention(
slf_attn_output,
enc_output,
enc_output,
dec_enc_attn_bias,
d_key,
d_value,
d_model,
n_head,
dropout_rate, )
enc_attn_output = post_process_layer(
slf_attn_output,
enc_attn_output,
"dan", # residual connection + dropout + layer normalization
dropout_rate, )
ffd_output = positionwise_feed_forward(
enc_attn_output,
d_inner_hid,
d_model, )
dec_output = post_process_layer(
enc_attn_output,
ffd_output,
"dan", # residual connection + dropout + layer normalization
dropout_rate, )
return dec_output
def decoder(dec_input,
enc_output,
dec_slf_attn_bias,
dec_enc_attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate=0.):
"""
The decoder is composed of a stack of identical decoder_layer layers.
"""
for i in range(n_layer):
dec_output = decoder_layer(
dec_input,
enc_output,
dec_slf_attn_bias,
dec_enc_attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate, )
dec_input = dec_output
return dec_output
def transformer(
src_vocab_size,
trg_vocab_size,
max_length,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate,
src_pad_idx,
trg_pad_idx,
pos_pad_idx, ):
file_obj = fluid.layers.open_recordio_file(
filename='/tmp/wmt16.recordio',
shapes=[
[batch_size * max_length, 1],
[batch_size * max_length, 1],
[batch_size * max_length, 1],
[batch_size * max_length, 1],
[batch_size, n_head, max_length, max_length],
[batch_size, n_head, max_length, max_length],
[batch_size, n_head, max_length, max_length],
[batch_size * max_length, 1],
[batch_size * max_length, 1],
],
dtypes=[
'int64',
'int64',
'int64',
'int64',
'float32',
'float32',
'float32',
'int64',
'float32',
],
lod_levels=[0] * 9)
src_word, src_pos, trg_word, trg_pos, src_slf_attn_bias, trg_slf_attn_bias, trg_src_attn_bias, gold, weights = fluid.layers.read_file(
file_obj)
enc_input = prepare_encoder(
src_word,
src_pos,
src_vocab_size,
d_model,
src_pad_idx,
max_length,
dropout_rate, )
enc_output = encoder(
enc_input,
src_slf_attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate, )
dec_input = prepare_decoder(
trg_word,
trg_pos,
trg_vocab_size,
d_model,
trg_pad_idx,
max_length,
dropout_rate, )
dec_output = decoder(
dec_input,
enc_output,
trg_slf_attn_bias,
trg_src_attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate, )
# TODO(guosheng): Share the weight matrix between the embedding layers and
# the pre-softmax linear transformation.
predict = layers.reshape(
x=layers.fc(input=dec_output,
size=trg_vocab_size,
param_attr=fluid.initializer.Xavier(uniform=False),
bias_attr=False,
num_flatten_dims=2),
shape=[-1, trg_vocab_size],
act="softmax")
cost = layers.cross_entropy(input=predict, label=gold)
weighted_cost = cost * weights
return layers.reduce_sum(weighted_cost)
|
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: psimakov@google.com (Pavel Simakov)
"""Enforces schema and verifies course files for referential integrity.
Use this script to verify referential integrity of your course definition files
before you import them into the production instance of Google AppEngine.
Here is how to use the script:
- prepare your course files
- edit the data/unit.csv file
- edit the data/lesson.csv file
- edit the assets/js/activity-*.*.js files
- edit the assets/js/assessment-*.js files
- run the script from a command line by navigating to the root
directory of the app and then typing "python tools/verify.py"
- review the report printed to the console for errors and warnings
Good luck!
"""
import csv
import json
import os
import re
import sys
BOOLEAN = object()
STRING = object()
FLOAT = object()
INTEGER = object()
CORRECT = object()
REGEX = object()
SCHEMA = {
'assessment': {
'assessmentName': STRING,
'preamble': STRING,
'checkAnswers': BOOLEAN,
'questionsList': [{
'questionHTML': STRING,
'lesson': STRING,
'choices': [STRING, CORRECT],
'correctAnswerNumeric': FLOAT,
'correctAnswerString': STRING,
'correctAnswerRegex': REGEX}]
}, 'activity': [
STRING,
{
'questionType': 'multiple choice',
'choices': [[STRING, BOOLEAN, STRING]]
}, {
'questionType': 'multiple choice group',
'questionsList': [{
'questionHTML': STRING,
'choices': [STRING],
'correctIndex': INTEGER}],
'allCorrectOutput': STRING,
'someIncorrectOutput': STRING
}, {
'questionType': 'freetext',
'correctAnswerRegex': REGEX,
'correctAnswerOutput': STRING,
'incorrectAnswerOutput': STRING,
'showAnswerOutput': STRING,
'showAnswerPrompt': STRING,
'outputHeight': STRING
}]}
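# An illustrative (hypothetical) activity that satisfies the schema above:
#
#   activity = [
#       'Read the lesson, then answer the question below.',
#       {'questionType': 'multiple choice',
#        'choices': [['Option A', True, 'Correct!'],
#                    ['Option B', False, 'Please try again.']]},
#   ]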
UNITS_HEADER = (
'id,type,unit_id,title,release_date,now_available')
LESSONS_HEADER = (
'unit_id,unit_title,lesson_id,lesson_title,lesson_activity,'
'lesson_activity_name,lesson_notes,lesson_video_id,lesson_objectives')
UNIT_CSV_TO_DB_CONVERTER = {
'id': ('id', int),
'type': ('type', unicode),
'unit_id': ('unit_id', unicode),
'title': ('title', unicode),
'release_date': ('release_date', unicode),
'now_available': ('now_available', bool)
}
LESSON_CSV_TO_DB_CONVERTER = {
'unit_id': ('unit_id', int),
# Field 'unit_title' is a duplicate of Unit.title. We enforce that both
# values are the same and ignore this value altogether.
'unit_title': None,
'lesson_id': ('id', int),
'lesson_title': ('title', unicode),
'lesson_activity': ('activity', unicode),
'lesson_activity_name': ('activity_title', unicode),
'lesson_video_id': ('video', unicode),
'lesson_objectives': ('objectives', unicode),
'lesson_notes': ('notes', unicode)
}
# pylint: disable-msg=anomalous-backslash-in-string
NO_VERIFY_TAG_NAME_OPEN = '<gcb-no-verify>\s*\n'
# pylint: enable-msg=anomalous-backslash-in-string
NO_VERIFY_TAG_NAME_CLOSE = '</gcb-no-verify>'
OUTPUT_FINE_LOG = False
OUTPUT_DEBUG_LOG = False
class Term(object):
def __init__(self, term_type, value=None):
self.term_type = term_type
self.value = value
def __eq__(self, other):
if type(other) is not Term:
return False
else:
return ((self.term_type == other.term_type) and
(self.value == other.value))
class SchemaException(Exception):
"""A class to represent a schema error."""
def format_primitive_value_name(self, name):
if name == REGEX:
return 'REGEX(...)'
if name == CORRECT:
return 'CORRECT(...)'
if name == BOOLEAN:
return 'BOOLEAN'
return name
def format_primitive_type_name(self, name):
"""Formats a name for a primitive type."""
if name == BOOLEAN:
return 'BOOLEAN'
if name == REGEX:
return 'REGEX(...)'
if name == CORRECT:
return 'CORRECT(...)'
if name == STRING or isinstance(name, str):
return 'STRING'
if name == FLOAT:
return 'FLOAT'
if name == INTEGER:
return 'INTEGER'
if isinstance(name, dict):
return '{...}'
if isinstance(name, list):
return '[...]'
return 'Unknown type name \'%s\'' % name.__class__.__name__
def format_type_names(self, names):
if isinstance(names, list):
captions = []
for name in names:
captions.append(self.format_primitive_type_name(name))
return captions
else:
return self.format_primitive_type_name(names)
def __init__(self, message, value=None, types=None, path=None):
prefix = ''
if path:
prefix = 'Error at %s\n' % path
if types:
if value:
message = prefix + message % (
self.format_primitive_value_name(value),
self.format_type_names(types))
else:
message = prefix + message % self.format_type_names(types)
else:
if value:
message = prefix + (
message % self.format_primitive_value_name(value))
else:
message = prefix + message
super(SchemaException, self).__init__(message)
class Context(object):
""""A class that manages a stack of traversal contexts."""
def __init__(self):
self.parent = None
self.path = ['/']
def new(self, names):
""""Derives a new context from the current one."""
context = Context()
context.parent = self
context.path = list(self.path)
if names:
if isinstance(names, list):
for name in names:
if name:
context.path.append('/' + '%s' % name)
else:
context.path.append('/' + '%s' % names)
return context
def format_path(self):
"""Formats the canonical name of this context."""
return ''.join(self.path)
class SchemaHelper(object):
"""A class that knows how to apply the schema."""
def __init__(self):
self.type_stats = {}
def visit_element(self, atype, value, context, is_terminal=True):
"""Callback for each schema element being traversed."""
if atype in self.type_stats:
count = self.type_stats[atype]
else:
count = 0
self.type_stats[atype] = count + 1
if is_terminal:
self.parse_log.append(' TERMINAL: %s %s = %s' % (
atype, context.format_path(), value))
else:
self.parse_log.append(' NON-TERMINAL: %s %s' % (
atype, context.format_path()))
def extract_all_terms_to_depth(self, key, values, type_map):
"""Walks schema type map recursively to depth."""
# Walks schema type map recursively to depth and creates a list of all
# possible {key: value} pairs. The latter is a list of all non-terminal
# and terminal terms allowed in the schema. The list of terms from this
# method can be bound to an execution context for evaluating whether a
# given instance's map complies with the schema.
if key:
type_map.update({key: key})
if values == REGEX:
type_map.update({'regex': lambda x: Term(REGEX, x)})
return
if values == CORRECT:
type_map.update({'correct': lambda x: Term(CORRECT, x)})
return
if values == BOOLEAN:
type_map.update(
{'true': Term(BOOLEAN, True), 'false': Term(BOOLEAN, False)})
return
if values == STRING or values == INTEGER:
return
if isinstance(values, dict):
for new_key, new_value in values.items():
self.extract_all_terms_to_depth(new_key, new_value, type_map)
return
if isinstance(values, list):
for new_value in values:
self.extract_all_terms_to_depth(None, new_value, type_map)
return
def find_selectors(self, type_map):
"""Finds all type selectors."""
# Finds all elements in the type map where both a key and a value are
# strings. These elements are used to find one specific type map among
# several alternative type maps.
selector = {}
for akey, avalue in type_map.items():
if isinstance(akey, str) and isinstance(avalue, str):
selector.update({akey: avalue})
return selector
def find_compatible_dict(self, value_map, type_map, unused_context):
"""Find the type map most compatible with the value map."""
        # A value map is considered compatible with a type map when the former
        # contains the same key names and value types as the type map.
# special case when we have just one type; check name and type are the
# same
if len(type_map) == 1:
for value_key in value_map.keys():
for key in type_map[0].keys():
if value_key == key:
return key, type_map[0]
raise SchemaException(
"Expected: '%s'\nfound: %s", type_map[0].keys()[0], value_map)
# case when we have several types to choose from
for adict in type_map:
dict_selector = self.find_selectors(adict)
for akey, avalue in dict_selector.items():
if value_map[akey] == avalue:
return akey, adict
return None, None
def check_single_value_matches_type(self, value, atype, context):
"""Checks if a single value matches a specific (primitive) type."""
if atype == BOOLEAN:
if isinstance(value, bool) or value.term_type == BOOLEAN:
self.visit_element('BOOLEAN', value, context)
return True
else:
raise SchemaException(
'Expected: \'true\' or \'false\'\nfound: %s', value)
if isinstance(atype, str):
if isinstance(value, str):
self.visit_element('str', value, context)
return True
else:
raise SchemaException('Expected: \'string\'\nfound: %s', value)
if atype == STRING:
if isinstance(value, str):
self.visit_element('STRING', value, context)
return True
else:
raise SchemaException('Expected: \'string\'\nfound: %s', value)
if atype == REGEX and value.term_type == REGEX:
self.visit_element('REGEX', value, context)
return True
if atype == CORRECT and value.term_type == CORRECT:
self.visit_element('CORRECT', value, context)
return True
if atype == FLOAT:
if is_number(value):
self.visit_element('NUMBER', value, context)
return True
else:
raise SchemaException('Expected: \'number\'\nfound: %s', value)
if atype == INTEGER:
if is_integer(value):
self.visit_element('INTEGER', value, context)
return True
else:
raise SchemaException(
'Expected: \'integer\'\nfound: %s', value,
path=context.format_path())
raise SchemaException(
'Unexpected value \'%s\'\n'
'for type %s', value, atype, path=context.format_path())
def check_value_list_matches_type(self, value, atype, context):
"""Checks if all items in value list match a specific type."""
for value_item in value:
found = False
for atype_item in atype:
if isinstance(atype_item, list):
for atype_item_item in atype_item:
if self.does_value_match_type(
value_item, atype_item_item, context):
found = True
break
else:
if self.does_value_match_type(
value_item, atype_item, context):
found = True
break
if not found:
raise SchemaException(
'Expected: \'%s\'\nfound: %s', atype, value)
return True
def check_value_matches_type(self, value, atype, context):
"""Checks if single value or a list of values match a specific type."""
if isinstance(atype, list) and isinstance(value, list):
return self.check_value_list_matches_type(value, atype, context)
else:
return self.check_single_value_matches_type(value, atype, context)
def does_value_match_type(self, value, atype, context):
"""Same as other method, but does not throw an exception."""
try:
return self.check_value_matches_type(value, atype, context)
except SchemaException:
return False
def does_value_match_one_of_types(self, value, types, context):
"""Checks if a value matches to one of the types in the list."""
type_names = None
if isinstance(types, list):
type_names = types
if type_names:
for i in range(0, len(type_names)):
if self.does_value_match_type(value, type_names[i], context):
return True
return False
def does_value_match_map_of_type(self, value, types, context):
"""Checks if value matches any variation of {...} type."""
# find all possible map types
maps = []
for atype in types:
if isinstance(atype, dict):
maps.append(atype)
if not maps and isinstance(types, dict):
maps.append(types)
# check if the structure of value matches one of the maps
if isinstance(value, dict):
aname, adict = self.find_compatible_dict(value, maps, context)
if adict:
self.visit_element('dict', value, context.new(aname), False)
for akey, avalue in value.items():
if akey not in adict:
raise SchemaException(
'Unknown term \'%s\'', akey,
path=context.format_path())
self.check_value_of_valid_type(
avalue, adict[akey], context.new([aname, akey]))
return True
raise SchemaException(
'The value:\n %s\n'
'is incompatible with expected type(s):\n %s',
value, types, path=context.format_path())
return False
def format_name_with_index(self, alist, aindex):
"""A function to format a context name with an array element index."""
if len(alist) == 1:
return ''
else:
return '[%s]' % aindex
def does_value_match_list_of_types_in_order(
self, value, types, context, target):
"""Iterates the value and types in given order and checks for match."""
all_values_are_lists = True
for avalue in value:
if not isinstance(avalue, list):
all_values_are_lists = False
if all_values_are_lists:
for i in range(0, len(value)):
self.check_value_of_valid_type(value[i], types, context.new(
self.format_name_with_index(value, i)), True)
else:
if len(target) != len(value):
raise SchemaException(
'Expected: \'%s\' values\nfound: %s' % (len(target), value),
path=context.format_path())
for i in range(0, len(value)):
self.check_value_of_valid_type(value[i], target[i], context.new(
self.format_name_with_index(value, i)))
return True
def does_value_match_list_of_types_any_order(self, value, types,
context, lists):
"""Iterates the value and types, checks if they match in any order."""
target = lists
if not target:
if not isinstance(types, list):
raise SchemaException(
'Unsupported type %s',
None, types, path=context.format_path())
target = types
for i in range(0, len(value)):
found = False
for atarget in target:
try:
self.check_value_of_valid_type(
value[i], atarget,
context.new(self.format_name_with_index(value, i)))
found = True
break
except SchemaException as unused_e:
continue
if not found:
raise SchemaException(
'The value:\n %s\n'
'is incompatible with expected type(s):\n %s',
value, types, path=context.format_path())
return True
def does_value_match_list_of_type(self, value, types, context, in_order):
"""Checks if a value matches a variation of [...] type."""
# Extra argument controls whether matching must be done in a specific
# or in any order. A specific order is demanded by the [[...]] construct,
# i.e. [[STRING, INTEGER, BOOLEAN]], while sub elements inside {...} and
# [...] can be matched in any order.
# prepare a list of list types
lists = []
for atype in types:
if isinstance(atype, list):
lists.append(atype)
if len(lists) > 1:
raise SchemaException(
'Unable to validate types with multiple alternative '
'lists %s', None, types, path=context.format_path())
if isinstance(value, list):
if len(lists) > 1:
raise SchemaException(
'Allowed at most one list\nfound: %s.',
None, types, path=context.format_path())
# determine if list is in order or not as hinted by double array
# [[..]]; [STRING, NUMBER] is in any order, but [[STRING, NUMBER]]
# demands order
ordered = len(lists) == 1 and isinstance(types, list)
if in_order or ordered:
return self.does_value_match_list_of_types_in_order(
value, types, context, lists[0])
else:
return self.does_value_match_list_of_types_any_order(
value, types, context, lists)
return False
def check_value_of_valid_type(self, value, types, context, in_order=None):
"""Check if a value matches any of the given types."""
if not (isinstance(types, list) or isinstance(types, dict)):
self.check_value_matches_type(value, types, context)
return
if (self.does_value_match_list_of_type(value, types,
context, in_order) or
self.does_value_match_map_of_type(value, types, context) or
self.does_value_match_one_of_types(value, types, context)):
return
raise SchemaException(
'Unknown type %s', value, path=context.format_path())
def check_instances_match_schema(self, values, types, name):
"""Recursively decompose 'values' to see if they match schema types."""
self.parse_log = []
context = Context().new(name)
self.parse_log.append(' ROOT %s' % context.format_path())
# pylint: disable-msg=protected-access
values_class = values.__class__
# pylint: enable-msg=protected-access
# handle {..} containers
if isinstance(types, dict):
if not isinstance(values, dict):
raise SchemaException(
'Error at \'/\': expected {...}, found %s' % (
values_class.__name__))
self.check_value_of_valid_type(values, types, context.new([]))
return
# handle [...] containers
if isinstance(types, list):
if not isinstance(values, list):
raise SchemaException(
'Error at \'/\': expected [...], found %s' % (
values_class.__name__))
for i in range(0, len(values)):
self.check_value_of_valid_type(
values[i], types, context.new('[%s]' % i))
return
raise SchemaException(
'Expected an array or a dictionary.', None,
path=context.format_path())
def escape_quote(value):
return unicode(value).replace('\'', r'\'')
class Unit(object):
"""A class to represent a Unit."""
def __init__(self):
self.id = 0
self.type = ''
self.unit_id = ''
self.title = ''
self.release_date = ''
self.now_available = False
def list_properties(self, name, output):
"""Outputs all properties of the unit."""
output.append('%s[\'id\'] = %s;' % (name, self.id))
output.append('%s[\'type\'] = \'%s\';' % (
name, escape_quote(self.type)))
output.append('%s[\'unit_id\'] = \'%s\';' % (
name, escape_quote(self.unit_id)))
output.append('%s[\'title\'] = \'%s\';' % (
name, escape_quote(self.title)))
output.append('%s[\'release_date\'] = \'%s\';' % (
name, escape_quote(self.release_date)))
output.append('%s[\'now_available\'] = %s;' % (
name, str(self.now_available).lower()))
class Lesson(object):
"""A class to represent a Lesson."""
def __init__(self):
self.unit_id = 0
self.unit_title = ''
self.lesson_id = 0
self.lesson_title = ''
self.lesson_activity = ''
self.lesson_activity_name = ''
self.lesson_notes = ''
self.lesson_video_id = ''
self.lesson_objectives = ''
def list_properties(self, name, output):
"""Outputs all properties of the lesson."""
activity = 'false'
if self.lesson_activity == 'yes':
activity = 'true'
output.append('%s[\'unit_id\'] = %s;' % (name, self.unit_id))
output.append('%s[\'unit_title\'] = \'%s\';' % (
name, escape_quote(self.unit_title)))
output.append('%s[\'lesson_id\'] = %s;' % (name, self.lesson_id))
output.append('%s[\'lesson_title\'] = \'%s\';' % (
name, escape_quote(self.lesson_title)))
output.append('%s[\'lesson_activity\'] = %s;' % (name, activity))
output.append('%s[\'lesson_activity_name\'] = \'%s\';' % (
name, escape_quote(self.lesson_activity_name)))
output.append('%s[\'lesson_notes\'] = \'%s\';' % (
name, escape_quote(self.lesson_notes)))
output.append('%s[\'lesson_video_id\'] = \'%s\';' % (
name, escape_quote(self.lesson_video_id)))
output.append('%s[\'lesson_objectives\'] = \'%s\';' % (
name, escape_quote(self.lesson_objectives)))
def to_id_string(self):
return '%s.%s.%s' % (self.unit_id, self.lesson_id, self.lesson_title)
class Assessment(object):
"""A class to represent a Assessment."""
def __init__(self):
self.scope = {}
SchemaHelper().extract_all_terms_to_depth(
'assessment', SCHEMA['assessment'], self.scope)
class Activity(object):
"""A class to represent a Activity."""
def __init__(self):
self.scope = {}
SchemaHelper().extract_all_terms_to_depth(
'activity', SCHEMA['activity'], self.scope)
def silent_echo(unused_message):
pass
def echo(message):
print message
def is_integer(s):
try:
return int(s) == float(s)
except ValueError:
return False
def is_boolean(s):
try:
return s == 'True' or s == 'False'
except ValueError:
return False
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def is_one_of(value, values):
for current in values:
if value == current:
return True
return False
def text_to_line_numbered_text(text):
"""Adds line numbers to the provided text."""
lines = text.split('\n')
results = []
i = 1
for line in lines:
results.append(str(i) + ': ' + line)
i += 1
return '\n '.join(results)
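# Illustrative note (not part of the original file):
#     text_to_line_numbered_text('a = 1\nb = 2')
# returns '1: a = 1\n 2: b = 2', i.e. every source line prefixed with its
# 1-based line number, which the parse-error reporting below relies on.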
def set_object_attributes(target_object, names, values):
"""Sets object attributes from provided values."""
if len(names) != len(values):
raise SchemaException(
'The number of elements must match: %s and %s' % (names, values))
for i in range(0, len(names)):
if is_integer(values[i]):
# if we are setting an attribute of an object that support
# metadata, try to infer the target type and convert 'int' into
# 'str' here
target_type = None
if hasattr(target_object.__class__, names[i]):
attribute = getattr(target_object.__class__, names[i])
if hasattr(attribute, 'data_type'):
target_type = attribute.data_type.__name__
if target_type and (target_type == 'str' or
target_type == 'basestring'):
setattr(target_object, names[i], str(values[i]))
else:
setattr(target_object, names[i], int(values[i]))
continue
if is_boolean(values[i]):
# Note: bool('False') would be True, so compare against the literal string.
setattr(target_object, names[i], values[i] == 'True')
continue
setattr(target_object, names[i], values[i])
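# Illustrative sketch (assumed values, not part of the original file): for a
# CSV-sourced row the string values are coerced per attribute, e.g.
#     unit = Unit()
#     set_object_attributes(unit, ['id', 'now_available'], ['3', 'False'])
# leaves unit.id == 3 (int) and, given the boolean handling above,
# unit.now_available == False.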
def read_objects_from_csv_file(fname, header, new_object):
return read_objects_from_csv(csv.reader(open(fname)), header, new_object)
def read_objects_from_csv(value_rows, header, new_object):
"""Reads objects from the rows of a CSV file."""
values = []
for row in value_rows:
if not row:
continue
values.append(row)
names = header.split(',')
if names != values[0]:
raise SchemaException(
'Error reading CSV header.\n '
'Header row had %s element(s): %s\n '
'Expected header row with %s element(s): %s' % (
len(values[0]), values[0], len(names), names))
items = []
for i in range(1, len(values)):
if len(names) != len(values[i]):
raise SchemaException(
'Error reading CSV data row.\n '
'Row #%s had %s element(s): %s\n '
'Expected %s element(s): %s' % (
i, len(values[i]), values[i], len(names), names))
decoded_values = []
for value in values[i]:
if isinstance(value, basestring):
value = unicode(value.decode('utf-8'))
decoded_values.append(value)
item = new_object()
set_object_attributes(item, names, decoded_values)
items.append(item)
return items
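# Illustrative sketch (assumed rows, mirroring the CSV tests further below):
#     read_objects_from_csv([['id', 'type'], ['1', 'U']], 'id,type', Unit)
# checks the header row against 'id,type' and returns one Unit per data row,
# with id coerced to 1 and type set to 'U'. A header/row length mismatch
# raises SchemaException.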
def escape_javascript_regex(text):
return re.sub(
r'([:][ ]*)([/])(.*)([/][ismx]*)', r': regex("\2\3\4")', text)
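# Illustrative note (not part of the original file), mirroring the regex unit
# tests further below: a JavaScript regex literal after a colon is wrapped so
# it survives Python evaluation, e.g.
#     escape_javascript_regex('blah regex: /site:bls.gov?/i, blah')
#     == 'blah regex: regex("/site:bls.gov?/i"), blah'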
def remove_javascript_single_line_comment(text):
text = re.sub(re.compile('^(.*?)[ ]+//(.*)$', re.MULTILINE), r'\1', text)
text = re.sub(re.compile('^//(.*)$', re.MULTILINE), r'', text)
return text
def remove_javascript_multi_line_comment(text):
# pylint: disable-msg=anomalous-backslash-in-string
return re.sub(
re.compile('/\*(.*)\*/', re.MULTILINE + re.DOTALL), r'', text)
# pylint: enable-msg=anomalous-backslash-in-string
def parse_content_marked_no_verify(content):
"""Parses and returns a tuple of real content and no-verify text."""
# If you have any free-form JavaScript in the activity file, you need
# to place it between //<gcb-no-verify> ... //</gcb-no-verify> tags
# so that the verifier can selectively ignore it.
pattern = re.compile('%s(.*)%s' % (
NO_VERIFY_TAG_NAME_OPEN, NO_VERIFY_TAG_NAME_CLOSE), re.DOTALL)
m = pattern.search(content)
noverify_text = None
if m:
noverify_text = m.group(1)
return (re.sub(pattern, '', content), noverify_text)
def convert_javascript_to_python(content, root_name):
"""Removes JavaScript specific syntactic constructs and returns a tuple."""
# Reads the content and removes JavaScript comments, var's, and escapes
# regular expressions.
(content, noverify_text) = parse_content_marked_no_verify(content)
content = remove_javascript_multi_line_comment(content)
content = remove_javascript_single_line_comment(content)
content = content.replace('var %s = ' % root_name, '%s = ' % root_name)
content = escape_javascript_regex(content)
return (content, noverify_text)
def convert_javascript_file_to_python(fname, root_name):
return convert_javascript_to_python(
''.join(open(fname, 'r').readlines()), root_name)
def evaluate_python_expression_from_text(content, root_name, scope,
noverify_text):
"""Compiles and evaluates a Python script in a restricted environment."""
# First compiles and then evaluates a Python script text in a restricted
# environment using provided bindings. Returns the resulting bindings if
# evaluation completed.
# create a new execution scope that has only the schema terms defined;
# remove all other languages constructs including __builtins__
restricted_scope = {}
restricted_scope.update(scope)
restricted_scope.update({'__builtins__': {}})
code = compile(content, '<string>', 'exec')
# pylint: disable-msg=exec-statement
exec code in restricted_scope
# pylint: enable-msg=exec-statement
if noverify_text:
restricted_scope['noverify'] = noverify_text
if not restricted_scope[root_name]:
raise Exception('Unable to find \'%s\'' % root_name)
return restricted_scope
def evaluate_javascript_expression_from_file(fname, root_name, scope, error):
(content, noverify_text) = convert_javascript_file_to_python(fname,
root_name)
try:
return evaluate_python_expression_from_text(content, root_name, scope,
noverify_text)
except:
error('Unable to parse %s in file %s\n %s' % (
root_name, fname, text_to_line_numbered_text(content)))
for message in sys.exc_info():
error(str(message))
raise
class Verifier(object):
"""Verifies Units, Lessons, Assessments, Activities and their relations."""
def __init__(self):
self.echo_func = silent_echo
self.schema_helper = SchemaHelper()
self.errors = 0
self.warnings = 0
self.export = []
def verify_unit_fields(self, units):
self.export.append('units = Array();')
for unit in units:
if not is_one_of(unit.now_available, [True, False]):
self.error(
'Bad now_available \'%s\' for unit id %s; expected '
'\'True\' or \'False\'' % (unit.now_available, unit.id))
if not is_one_of(unit.type, ['U', 'A', 'O']):
self.error(
'Bad type \'%s\' for unit id %s; '
'expected \'U\', \'A\', or \'O\'' % (unit.type, unit.id))
if unit.type == 'A':
if not is_one_of(unit.unit_id, (
'A11', 'A12', 'A21', 'A22', 'A31', 'A32', 'A41', 'A42',
'A51', 'A52', 'A61', 'A62', 'A71', 'A72', 'A81', 'A82',
'A91', 'A92', 'A101', 'A102', 'Fin')):
self.error(
'Bad unit_id \'%s\'; expected \'A11\', \'A12\', \'A21\', \'A22\', '
'\'A31\', \'A32\', \'A41\', \'A42\', \'A51\', \'A52\', \'A61\', '
'\'A62\', \'A71\', \'A72\', \'A81\', \'A82\', \'A91\', \'A92\', '
'\'A101\', \'A102\' or \'Fin\' for unit id %s' % (
unit.unit_id, unit.id))
if unit.type == 'U':
if not is_integer(unit.unit_id):
self.error(
'Expected integer unit_id, found %s in unit id '
' %s' % (unit.unit_id, unit.id))
self.export.append('')
self.export.append('units[%s] = Array();' % unit.id)
self.export.append('units[%s][\'lessons\'] = Array();' % unit.id)
unit.list_properties('units[%s]' % unit.id, self.export)
def verify_lesson_fields(self, lessons):
for lesson in lessons:
if not is_one_of(lesson.lesson_activity, ['yes', '']):
self.error('Bad lesson_activity \'%s\' for lesson_id %s' % (
lesson.lesson_activity, lesson.lesson_id))
self.export.append('')
self.export.append('units[%s][\'lessons\'][%s] = Array();' % (
lesson.unit_id, lesson.lesson_id))
lesson.list_properties('units[%s][\'lessons\'][%s]' % (
lesson.unit_id, lesson.lesson_id), self.export)
def verify_unit_lesson_relationships(self, units, lessons):
"""Checks each lesson points to a unit and all lessons are in use."""
used_lessons = []
units.sort(key=lambda x: x.id)
# for unit in units:
for i in range(0, len(units)):
unit = units[i]
# check that unit ids are 1-based and sequential
if unit.id != i + 1:
self.error('Unit out of order: %s' % (unit.id))
# get the list of lessons for each unit
self.fine('Unit %s: %s' % (unit.id, unit.title))
unit_lessons = []
for lesson in lessons:
if lesson.unit_id == unit.unit_id:
if not lesson.unit_title == unit.title:
raise Exception(''.join([
'A unit_title of a lesson (id=%s) must match ',
'title of a unit (id=%s) the lesson belongs to.'
]) % (lesson.lesson_id, lesson.unit_id))
unit_lessons.append(lesson)
used_lessons.append(lesson)
# inspect all lessons for the current unit
unit_lessons.sort(key=lambda x: x.lesson_id)
for j in range(0, len(unit_lessons)):
lesson = unit_lessons[j]
# check that lesson_ids are 1-based and sequential
if lesson.lesson_id != j + 1:
self.warn(
'Lesson lesson_id is out of order: expected %s, found '
' %s (%s)' % (
j + 1, lesson.lesson_id, lesson.to_id_string()))
self.fine(' Lesson %s: %s' % (
lesson.lesson_id, lesson.lesson_title))
# find lessons not used by any of the units
unused_lessons = list(lessons)
for lesson in used_lessons:
unused_lessons.remove(lesson)
for lesson in unused_lessons:
self.warn('Unused lesson_id %s (%s)' % (
lesson.lesson_id, lesson.to_id_string()))
# check all lessons point to known units
for lesson in lessons:
has = False
for unit in units:
if lesson.unit_id == unit.unit_id:
has = True
break
if not has:
self.error('Lesson has unknown unit_id %s (%s)' % (
lesson.unit_id, lesson.to_id_string()))
def verify_activities(self, lessons):
"""Loads and verifies all activities."""
self.info('Loading activities:')
count = 0
for lesson in lessons:
if lesson.lesson_activity == 'yes':
count += 1
fname = os.path.join(
os.path.dirname(__file__),
'../assets/js/activity-' + str(lesson.unit_id) + '.' +
str(lesson.lesson_id) + '.js')
if not os.path.exists(fname):
self.error(' Missing activity: %s' % fname)
else:
activity = evaluate_javascript_expression_from_file(
fname, 'activity', Activity().scope, self.error)
self.verify_activity_instance(activity, fname)
self.export.append('')
self.encode_activity_json(
activity, lesson.unit_id, lesson.lesson_id)
self.info('Read %s activities' % count)
def verify_assessment(self, units):
"""Loads and verifies all assessments."""
self.export.append('')
self.export.append('assessments = Array();')
self.info('Loading assessment:')
count = 0
for unit in units:
if unit.type == 'A':
count += 1
assessment_name = str(unit.unit_id)
fname = os.path.join(
os.path.dirname(__file__),
'../assets/js/assessment-' + assessment_name + '.js')
if not os.path.exists(fname):
self.error(' Missing assessment: %s' % fname)
else:
assessment = evaluate_javascript_expression_from_file(
fname, 'assessment', Assessment().scope, self.error)
self.verify_assessment_instance(assessment, fname)
self.export.append('')
self.encode_assessment_json(assessment, assessment_name)
self.info('Read %s assessments' % count)
# NB: The exported script needs to define a gcb_regex() wrapper function
@staticmethod
def encode_regex(regex_str):
"""Encodes a JavaScript-style regex into a Python gcb_regex call."""
# parse the regex into the base and modifiers. e.g., for /foo/i
# base is 'foo' and modifiers is 'i'
assert regex_str[0] == '/'
# find the LAST '/' in regex_str (because there might be other
# escaped '/' characters in the middle of regex_str)
final_slash_index = regex_str.rfind('/')
assert final_slash_index > 0
base = regex_str[1:final_slash_index]
modifiers = regex_str[final_slash_index+1:]
func_str = 'gcb_regex(' + repr(base) + ', ' + repr(modifiers) + ')'
return func_str
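# Illustrative note (not part of the original file), mirroring the regex unit
# tests further below: the leading and trailing slashes are stripped and the
# modifiers are passed separately, e.g.
#     Verifier.encode_regex('/white?/i')  ==  "gcb_regex('white?', 'i')"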
def encode_activity_json(self, activity_dict, unit_id, lesson_id):
"""Encodes an activity dictionary into JSON."""
output = []
for elt in activity_dict['activity']:
t = type(elt)
encoded_elt = None
if t is str:
encoded_elt = {'type': 'string', 'value': elt}
elif t is dict:
qt = elt['questionType']
encoded_elt = {'type': qt}
if qt == 'multiple choice':
choices = elt['choices']
encoded_choices = [[x, y.value, z] for x, y, z in choices]
encoded_elt['choices'] = encoded_choices
elif qt == 'multiple choice group':
# everything inside are primitive types that can be encoded
elt_copy = dict(elt)
del elt_copy['questionType'] # redundant
encoded_elt['value'] = elt_copy
elif qt == 'freetext':
for k in elt.keys():
if k == 'questionType':
continue
elif k == 'correctAnswerRegex':
encoded_elt[k] = Verifier.encode_regex(elt[k].value)
else:
# ordinary string
encoded_elt[k] = elt[k]
else:
assert False
else:
assert False
assert encoded_elt
output.append(encoded_elt)
# N.B.: make sure to get the string quoting right!
code_str = "units[%s]['lessons'][%s]['activity'] = " % (
unit_id, lesson_id) + repr(json.dumps(output)) + ';'
self.export.append(code_str)
if 'noverify' in activity_dict:
self.export.append('')
noverify_code_str = "units[%s]['lessons'][%s]['code'] = " % (
unit_id, lesson_id) + repr(activity_dict['noverify']) + ';'
self.export.append(noverify_code_str)
def encode_assessment_json(self, assessment_dict, assessment_name):
"""Encodes an assessment dictionary into JSON."""
real_dict = assessment_dict['assessment']
output = {}
output['assessmentName'] = real_dict['assessmentName']
if 'preamble' in real_dict:
output['preamble'] = real_dict['preamble']
output['checkAnswers'] = real_dict['checkAnswers'].value
encoded_questions_list = []
for elt in real_dict['questionsList']:
encoded_elt = {}
encoded_elt['questionHTML'] = elt['questionHTML']
if 'lesson' in elt:
encoded_elt['lesson'] = elt['lesson']
if 'correctAnswerNumeric' in elt:
encoded_elt['correctAnswerNumeric'] = elt[
'correctAnswerNumeric']
if 'correctAnswerString' in elt:
encoded_elt['correctAnswerString'] = elt['correctAnswerString']
if 'correctAnswerRegex' in elt:
encoded_elt['correctAnswerRegex'] = Verifier.encode_regex(
elt['correctAnswerRegex'].value)
if 'choices' in elt:
encoded_choices = []
correct_answer_index = None
for (ind, e) in enumerate(elt['choices']):
if type(e) is str:
encoded_choices.append(e)
elif e.term_type == CORRECT:
encoded_choices.append(e.value)
correct_answer_index = ind
else:
raise Exception("Invalid type in 'choices'")
encoded_elt['choices'] = encoded_choices
encoded_elt['correctAnswerIndex'] = correct_answer_index
encoded_questions_list.append(encoded_elt)
output['questionsList'] = encoded_questions_list
# N.B.: make sure to get the string quoting right!
code_str = 'assessments[\'' + assessment_name + '\'] = ' + repr(
json.dumps(output)) + ';'
self.export.append(code_str)
if 'noverify' in assessment_dict:
self.export.append('')
noverify_code_str = ('assessments[\'' + assessment_name +
'\'] = ' + repr(assessment_dict['noverify']) +
';')
self.export.append(noverify_code_str)
def format_parse_log(self):
return 'Parse log:\n%s' % '\n'.join(self.schema_helper.parse_log)
def verify_assessment_instance(self, scope, fname):
"""Verifies compliance of assessment with schema."""
if scope:
try:
self.schema_helper.check_instances_match_schema(
scope['assessment'], SCHEMA['assessment'], 'assessment')
self.info(' Verified assessment %s' % fname)
if OUTPUT_DEBUG_LOG:
self.info(self.format_parse_log())
except SchemaException as e:
self.error(' Error in assessment %s\n%s' % (
fname, self.format_parse_log()))
raise e
else:
self.error(' Unable to evaluate \'assessment =\' in %s' % fname)
def verify_activity_instance(self, scope, fname):
"""Verifies compliance of activity with schema."""
if scope:
try:
self.schema_helper.check_instances_match_schema(
scope['activity'], SCHEMA['activity'], 'activity')
self.info(' Verified activity %s' % fname)
if OUTPUT_DEBUG_LOG:
self.info(self.format_parse_log())
except SchemaException as e:
self.error(' Error in activity %s\n%s' % (
fname, self.format_parse_log()))
raise e
else:
self.error(' Unable to evaluate \'activity =\' in %s' % fname)
def fine(self, x):
if OUTPUT_FINE_LOG:
self.echo_func('FINE: ' + x)
def info(self, x):
self.echo_func('INFO: ' + x)
def warn(self, x):
self.warnings += 1
self.echo_func('WARNING: ' + x)
def error(self, x):
self.errors += 1
self.echo_func('ERROR: ' + x)
def load_and_verify_model(self, echo_func):
"""Loads, parses and verifies all content for a course."""
self.echo_func = echo_func
self.info('Started verification in: %s' % __file__)
unit_file = os.path.join(os.path.dirname(__file__), '../data/unit.csv')
lesson_file = os.path.join(
os.path.dirname(__file__), '../data/lesson.csv')
self.info('Loading units from: %s' % unit_file)
units = read_objects_from_csv_file(unit_file, UNITS_HEADER, Unit)
self.info('Read %s units' % len(units))
self.info('Loading lessons from: %s' % lesson_file)
lessons = read_objects_from_csv_file(
lesson_file, LESSONS_HEADER, Lesson)
self.info('Read %s lessons' % len(lessons))
self.verify_unit_fields(units)
self.verify_lesson_fields(lessons)
self.verify_unit_lesson_relationships(units, lessons)
try:
self.verify_activities(lessons)
self.verify_assessment(units)
except SchemaException as e:
self.error(str(e))
self.info('Schema usage statistics: %s' % self.schema_helper.type_stats)
self.info('Completed verification: %s warnings, %s errors.' % (
self.warnings, self.errors))
return self.warnings, self.errors
def run_all_regex_unit_tests():
"""Executes all tests related to regular expressions."""
# pylint: disable-msg=anomalous-backslash-in-string
assert escape_javascript_regex(
'blah regex: /site:bls.gov?/i, blah') == (
'blah regex: regex(\"/site:bls.gov?/i\"), blah')
assert escape_javascript_regex(
'blah regex: /site:http:\/\/www.google.com?q=abc/i, blah') == (
'blah regex: regex(\"/site:http:\/\/www.google.com?q=abc/i\"), '
'blah')
assert remove_javascript_multi_line_comment(
'blah\n/*\ncomment\n*/\nblah') == 'blah\n\nblah'
assert remove_javascript_multi_line_comment(
'blah\nblah /*\ncomment\nblah */\nblah') == ('blah\nblah \nblah')
assert remove_javascript_single_line_comment(
'blah\n// comment\nblah') == 'blah\n\nblah'
assert remove_javascript_single_line_comment(
'blah\nblah http://www.foo.com\nblah') == (
'blah\nblah http://www.foo.com\nblah')
assert remove_javascript_single_line_comment(
'blah\nblah // comment\nblah') == 'blah\nblah\nblah'
assert remove_javascript_single_line_comment(
'blah\nblah // comment http://www.foo.com\nblah') == (
'blah\nblah\nblah')
assert parse_content_marked_no_verify(
'blah1\n// <gcb-no-verify>\n/blah2\n// </gcb-no-verify>\nblah3')[0] == (
'blah1\n// \nblah3')
# pylint: enable-msg=anomalous-backslash-in-string
assert Verifier.encode_regex('/white?/i') == """gcb_regex('white?', 'i')"""
assert (Verifier.encode_regex('/jane austen (book|books) \\-price/i') ==
r"""gcb_regex('jane austen (book|books) \\-price', 'i')""")
assert (Verifier.encode_regex('/Kozanji|Kozan-ji|Kosanji|Kosan-ji/i') ==
r"""gcb_regex('Kozanji|Kozan-ji|Kosanji|Kosan-ji', 'i')""")
assert (Verifier.encode_regex('/Big Time College Sport?/i') ==
"gcb_regex('Big Time College Sport?', 'i')")
assert (Verifier.encode_regex('/354\\s*[+]\\s*651/') ==
r"""gcb_regex('354\\s*[+]\\s*651', '')""")
def run_all_schema_helper_unit_tests():
"""Executes all tests related to schema validation."""
def assert_same(a, b):
if a != b:
raise Exception('Expected:\n %s\nFound:\n %s' % (a, b))
def assert_pass(instances, types, expected_result=None):
try:
schema_helper = SchemaHelper()
result = schema_helper.check_instances_match_schema(
instances, types, 'test')
if OUTPUT_DEBUG_LOG:
print '\n'.join(schema_helper.parse_log)
if expected_result:
assert_same(expected_result, result)
except SchemaException as e:
if OUTPUT_DEBUG_LOG:
print str(e)
print '\n'.join(schema_helper.parse_log)
raise
def assert_fails(func):
try:
func()
raise Exception('Expected to fail')
except SchemaException as e:
if OUTPUT_DEBUG_LOG:
print str(e)
def assert_fail(instances, types):
assert_fails(lambda: assert_pass(instances, types))
def create_python_dict_from_js_object(js_object):
python_str, noverify = convert_javascript_to_python(
'var x = ' + js_object, 'x')
ret = evaluate_python_expression_from_text(
python_str, 'x', Assessment().scope, noverify)
return ret['x']
# CSV tests
read_objects_from_csv(
[['id', 'type'], [1, 'none']], 'id,type', Unit)
def reader_one():
return read_objects_from_csv(
[['id', 'type'], [1, 'none']], 'id,type,title', Unit)
assert_fails(reader_one)
def reader_two():
read_objects_from_csv(
[['id', 'type', 'title'], [1, 'none']], 'id,type,title', Unit)
assert_fails(reader_two)
# context tests
assert_same(Context().new([]).new(['a']).new(['b', 'c']).format_path(),
('//a/b/c'))
# simple map tests
assert_pass({'name': 'Bob'}, {'name': STRING}, None)
assert_fail('foo', 'bar')
assert_fail({'name': 'Bob'}, {'name': INTEGER})
assert_fail({'name': 12345}, {'name': STRING})
assert_fail({'amount': 12345}, {'name': INTEGER})
assert_fail({'regex': Term(CORRECT)}, {'regex': Term(REGEX)})
assert_pass({'name': 'Bob'}, {'name': STRING, 'phone': STRING})
assert_pass({'name': 'Bob'}, {'phone': STRING, 'name': STRING})
assert_pass({'name': 'Bob'},
{'phone': STRING, 'name': STRING, 'age': INTEGER})
# mixed attributes tests
assert_pass({'colors': ['red', 'blue']}, {'colors': [STRING]})
assert_pass({'colors': []}, {'colors': [STRING]})
assert_fail({'colors': {'red': 'blue'}}, {'colors': [STRING]})
assert_fail({'colors': {'red': 'blue'}}, {'colors': [FLOAT]})
assert_fail({'colors': ['red', 'blue', 5.5]}, {'colors': [STRING]})
assert_fail({'colors': ['red', 'blue', {'foo': 'bar'}]},
{'colors': [STRING]})
assert_fail({'colors': ['red', 'blue'], 'foo': 'bar'},
{'colors': [STRING]})
assert_pass({'colors': ['red', 1]}, {'colors': [[STRING, INTEGER]]})
assert_fail({'colors': ['red', 'blue']}, {'colors': [[STRING, INTEGER]]})
assert_fail({'colors': [1, 2, 3]}, {'colors': [[STRING, INTEGER]]})
assert_fail({'colors': ['red', 1, 5.3]}, {'colors': [[STRING, INTEGER]]})
assert_pass({'colors': ['red', 'blue']}, {'colors': [STRING]})
assert_fail({'colors': ['red', 'blue']}, {'colors': [[STRING]]})
assert_fail({'colors': ['red', ['blue']]}, {'colors': [STRING]})
assert_fail({'colors': ['red', ['blue', 'green']]}, {'colors': [STRING]})
# required attribute tests
assert_pass({'colors': ['red', 5]}, {'colors': [[STRING, INTEGER]]})
assert_fail({'colors': ['red', 5]}, {'colors': [[INTEGER, STRING]]})
assert_pass({'colors': ['red', 5]}, {'colors': [STRING, INTEGER]})
assert_pass({'colors': ['red', 5]}, {'colors': [INTEGER, STRING]})
assert_fail({'colors': ['red', 5, 'FF0000']},
{'colors': [[STRING, INTEGER]]})
# an array and a map of primitive type tests
assert_pass({'color': {'name': 'red', 'rgb': 'FF0000'}},
{'color': {'name': STRING, 'rgb': STRING}})
assert_fail({'color': {'name': 'red', 'rgb': ['FF0000']}},
{'color': {'name': STRING, 'rgb': STRING}})
assert_fail({'color': {'name': 'red', 'rgb': 'FF0000'}},
{'color': {'name': STRING, 'rgb': INTEGER}})
assert_fail({'color': {'name': 'red', 'rgb': 'FF0000'}},
{'color': {'name': STRING, 'rgb': {'hex': STRING}}})
assert_pass({'color': {'name': 'red', 'rgb': 'FF0000'}},
{'color': {'name': STRING, 'rgb': STRING}})
assert_pass({'colors':
[{'name': 'red', 'rgb': 'FF0000'},
{'name': 'blue', 'rgb': '0000FF'}]},
{'colors': [{'name': STRING, 'rgb': STRING}]})
assert_fail({'colors':
[{'name': 'red', 'rgb': 'FF0000'},
{'phone': 'blue', 'rgb': '0000FF'}]},
{'colors': [{'name': STRING, 'rgb': STRING}]})
# boolean type tests
assert_pass({'name': 'Bob', 'active': True},
{'name': STRING, 'active': BOOLEAN})
assert_pass({'name': 'Bob', 'active': [5, True, False]},
{'name': STRING, 'active': [INTEGER, BOOLEAN]})
assert_pass({'name': 'Bob', 'active': [5, True, 'false']},
{'name': STRING, 'active': [STRING, INTEGER, BOOLEAN]})
assert_fail({'name': 'Bob', 'active': [5, True, 'False']},
{'name': STRING, 'active': [[INTEGER, BOOLEAN]]})
# optional attribute tests
assert_pass({'points':
[{'x': 1, 'y': 2, 'z': 3}, {'x': 3, 'y': 2, 'z': 1},
{'x': 2, 'y': 3, 'z': 1}]},
{'points': [{'x': INTEGER, 'y': INTEGER, 'z': INTEGER}]})
assert_pass({'points':
[{'x': 1, 'z': 3}, {'x': 3, 'y': 2}, {'y': 3, 'z': 1}]},
{'points': [{'x': INTEGER, 'y': INTEGER, 'z': INTEGER}]})
assert_pass({'account':
[{'name': 'Bob', 'age': 25, 'active': True}]},
{'account':
[{'age': INTEGER, 'name': STRING, 'active': BOOLEAN}]})
assert_pass({'account':
[{'name': 'Bob', 'active': True}]},
{'account':
[{'age': INTEGER, 'name': STRING, 'active': BOOLEAN}]})
# nested array tests
assert_fail({'name': 'Bob', 'active': [5, True, 'false']},
{'name': STRING, 'active': [[BOOLEAN]]})
assert_fail({'name': 'Bob', 'active': [True]},
{'name': STRING, 'active': [[STRING]]})
assert_pass({'name': 'Bob', 'active': ['true']},
{'name': STRING, 'active': [[STRING]]})
assert_pass({'name': 'flowers', 'price': ['USD', 9.99]},
{'name': STRING, 'price': [[STRING, FLOAT]]})
assert_pass({'name': 'flowers', 'price':
[['USD', 9.99], ['CAD', 11.79], ['RUB', 250.23]]},
{'name': STRING, 'price': [[STRING, FLOAT]]})
# selector tests
assert_pass({'likes': [{'state': 'CA', 'food': 'cheese'},
{'state': 'NY', 'drink': 'wine'}]},
{'likes': [{'state': 'CA', 'food': STRING},
{'state': 'NY', 'drink': STRING}]})
assert_pass({'likes': [{'state': 'CA', 'food': 'cheese'},
{'state': 'CA', 'food': 'nuts'}]},
{'likes': [{'state': 'CA', 'food': STRING},
{'state': 'NY', 'drink': STRING}]})
assert_fail({'likes': {'state': 'CA', 'drink': 'cheese'}},
{'likes': [{'state': 'CA', 'food': STRING},
{'state': 'NY', 'drink': STRING}]})
# creating from dict tests
assert_same(create_python_dict_from_js_object('{"active": true}'),
{'active': Term(BOOLEAN, True)})
assert_same(create_python_dict_from_js_object(
'{"a": correct("hello world")}'),
{'a': Term(CORRECT, 'hello world')})
assert_same(create_python_dict_from_js_object('{"a": /hello/i}'),
{'a': Term(REGEX, '/hello/i')})
def run_all_unit_tests():
run_all_regex_unit_tests()
run_all_schema_helper_unit_tests()
run_all_unit_tests()
if __name__ == '__main__':
Verifier().load_and_verify_model(echo)
|
|
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module cleans up CombinedRPAssetVersion config files in current assetroot.
It deletes PacketLevelAssetVersion <listener> items in CombinedRPAssetVersion
config files.
"""
import getopt
import getpass
import glob
import math
import os
import pwd
import re
import shutil
import subprocess
import sys
import tempfile
BIN_DIR = "/opt/google/bin"
SYSTEMRC_PATH = "/opt/google/etc/systemrc"
GEAPACHEUSER = "geapacheuser"
def Die(msg):
print >> sys.stderr, msg
exit(1)
def ExecuteCmd(os_cmd, err2out=False):
"""Execute shell command.
If the shell command fails, exit(1).
Args:
os_cmd: (string) linux shell command to run.
err2out: whether to send stderr to the same file handle as for stdout.
Returns:
(output_data, return_code) of the linux shell command running.
"""
print "Executing: %s" % os_cmd
try:
p = subprocess.Popen(
os_cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT if err2out else subprocess.PIPE)
out_data, err_data = p.communicate()
if (not err2out) and err_data:
print "ERROR: %s" % err_data
Die("Unable to execute %s" % os_cmd)
return out_data, p.returncode
except Exception as e:
Die("FAILED: %s" % str(e))
def DiscoverAssetRoot():
"""Grep the installed systemrc for the assetroot."""
rcpath = SYSTEMRC_PATH
systemrc = open(rcpath)
contents = systemrc.read()
systemrc.close()
match = re.search(r"<assetroot>(.+)</assetroot>", contents)
if not match:
Die("No assetroot found in %s" % rcpath)
assetroot = match.groups()[0]
if not os.path.exists(assetroot):
Die("Invalid assetroot path %s found in %s" % (assetroot, rcpath))
return assetroot
def FindAssets(assetroot, top_asset_pattern, report_abs_path=False):
"""Traverse the assetroot to find the names of assets.
Args:
assetroot: the assetroot path.
top_asset_pattern: a top asset pattern for regular expression.
report_abs_path: whether to report absolute path for assets.
Returns:
a list of asset names/paths.
"""
top_asset_regex = re.compile(top_asset_pattern)
top_assets = []
for root, dirs, unused_ in os.walk(assetroot, topdown=True):
# Use either absolute or relative path (path under assetroot).
asset_path = root if report_abs_path else root[len(assetroot)+1:]
for dname in dirs:
if re.search(top_asset_regex, dname):
full_path = os.path.join(asset_path, dname)
top_assets.append(full_path)
return top_assets
def FindCombinedRpAssets(assetroot):
"""Traverse the assetroot to find CobminedRP assets.
Args:
assetroot: the assetroot path.
Returns:
a list of CombinedRPAsset paths.
"""
top_asset_pattern = r"(CombinedRP\.k[it]a)$"
return FindAssets(assetroot, top_asset_pattern)
def GetAssetVersionConfigFiles(assetroot, asset_path):
"""Find all AssetVersion config files of the given asset in filesystem.
Args:
assetroot: the assetroot path.
asset_path: the asset path.
Returns:
a list of AssetVersion config files paths.
"""
# query list of versions
pathname = "ver[0-9][0-9][0-9]/khassetver.xml"
asset_abs_path = os.path.join(assetroot, asset_path)
return glob.iglob("%s/%s" % (asset_abs_path, pathname))
def ListenersFilter(file_path):
"""File lines generator filtering packlevel's <listener> items.
Reads specified version config file and filters packlevel's <listener> items.
Args:
file_path: assetversion config file path.
Yields:
All the lines except the <listener/> ones or Exception object.
"""
pattern = r"<listener>.+packlevel\d+\.k[it]a.+</listener>$"
try:
with open(file_path, "r") as f:
prog = re.compile(pattern)
for line in f:
match = prog.search(line)
if not match:
yield line
except IOError as e:
yield e
except Exception as e:
yield e
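# Illustrative sketch (hypothetical line content, not from the original
# file): a packet-level listener entry such as
#     <listener>khasset://host/packlevel003.kta?version=1</listener>
# matches the pattern above and is dropped, while every other line of the
# version config file is yielded unchanged.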
def DoClean(assetroot, assets):
"""Executes listeners cleaning in AssetVersion config files.
Args:
assetroot: the assetroot path.
assets: a list of assets.
"""
tmp_file = None
total_cleaned = 0
try:
tmp_file = tempfile.NamedTemporaryFile(
mode="w", prefix="tmp_gecleancombinedrp", delete=False)
tmp_file.close()
# Traverse all versions of CombinedRPAssets and clean listeners.
print ""
for asset in assets:
for version_filepath in GetAssetVersionConfigFiles(assetroot, asset):
print "Executing:", version_filepath
# Note: backup could be expensive in terms of disk space usage.
# shutil.copyfile(version_filepath, "%s.bak" % version_filepath)
with open(tmp_file.name, mode="w") as tmp_fd:
for item in ListenersFilter(version_filepath):
if isinstance(item, Exception):
raise item
else:
tmp_fd.write(item)
# Rewrite source version config file with cleaned one.
shutil.copyfile(tmp_file.name, version_filepath)
total_cleaned += 1
print "SUCCESS"
print "%s CombinedRPAssetVersion config files CLEANED." % total_cleaned
# Delete temp. file.
if os.path.exists(tmp_file.name):
os.unlink(tmp_file.name)
except Exception as e:
if tmp_file:
# Close and delete temp. file
tmp_file.close()
if os.path.exists(tmp_file.name):
os.unlink(tmp_file.name)
Die("Error: %s" % str(e))
def GetPrintSize(size_bytes):
"""Convert size in bytes to readable representation in bytes/KB/MB/GB/TB/PB.
Args:
size_bytes: size in bytes
Returns:
a string representation of size either in bytes or KB/MB/GB/TB/PB.
"""
size_names = ("bytes", "KB", "MB", "GB", "TB", "PB")
if size_bytes == 0:
return "%s %s" % (size_bytes, size_names[0])
i = int(math.floor(math.log(size_bytes, 1024)))
i = min(i, len(size_names) - 1)
p = math.pow(1024, i)
s = round(size_bytes/p, 3)
return "%s %s" % (s, size_names[i])
def PrintAssetVersions(assetroot, assets):
"""Prints asset versions references.
Args:
assetroot: the assetroot path.
assets: a list of assets.
"""
total_config_files = 0
total_config_files_size = 0
for asset in assets:
print ""
print "Asset:", asset
for version_config_filepath in GetAssetVersionConfigFiles(assetroot, asset):
print " ", version_config_filepath
total_config_files_size += os.path.getsize(version_config_filepath)
total_config_files += 1
print ""
print "Total AssetVersion config files (count/size): %s/%s" % (
total_config_files, GetPrintSize(total_config_files_size))
def IsFusionRunning():
"""Checks whether Fusion daemons are running.
Returns:
whether Fusion daemons are running.
"""
print ""
unused_output, error_code = ExecuteCmd(
"%s --checkrunning" % os.path.join(BIN_DIR, "gefdaemoncheck"))
return error_code == 0
def SwitchEffectiveUserToThis(user_name):
"""Switches effective user to specified.
Args:
user_name: a user name.
"""
print ""
print "Switching effective user to:", user_name
try:
user_entry = pwd.getpwnam(user_name)
user_id = user_entry.pw_uid
group_id = user_entry.pw_gid
os.setegid(group_id)
os.seteuid(user_id)
except KeyError as e:
Die("Couldn't get uid/gid for user '%s'. Error: %s" % (user_name, str(e)))
except Exception as e:
Die("Couldn't switch effective user to '%s'. Error: %s" % (
user_name, str(e)))
def Usage(msg=None):
"""Prints usage message.
Args:
msg: additional info to print, e.g. error message.
"""
if msg:
print ""
print msg
print ""
print "Usage: gecleancombinedrpasset.py [--help] [--dryrun] [assetroot_path]"
print ""
print "Will clean up listeners in all CombinedRPAssetVersion config files."
print ""
print (" --dryrun just reports a list of asset versions that would have"
" been cleaned.")
print ""
print (" assetroot_path the assetroot path to use. If uncpecified, the"
" current assetroot")
print " path is used."
assetroot = DiscoverAssetRoot()
print " Current assetroot:", assetroot
def main(dryrun, assetroot):
if not assetroot:
assetroot = DiscoverAssetRoot()
print ""
print "The assetroot path:", assetroot
combinedrp_assets = FindCombinedRpAssets(assetroot)
if dryrun:
PrintAssetVersions(assetroot, combinedrp_assets)
exit(0)
if getpass.getuser() != "root":
Die("You must run as root.")
if IsFusionRunning():
Die("Please stop fusion before proceeding: /etc/init.d/gefusion stop")
geapacheuser = GEAPACHEUSER
SwitchEffectiveUserToThis(geapacheuser)
# Do listeners cleaning in AssetVersion config files.
DoClean(assetroot, combinedrp_assets)
if __name__ == "__main__":
opt_dryrun = 0
assetroot_path = None
try:
opts, args = getopt.getopt(sys.argv[1:], "hd", ["help", "dryrun"])
except getopt.GetoptError as e:
Usage("Error: %s" % str(e))
exit(1)
for opt, var in opts:
if opt in ("-h", "--help"):
Usage()
exit(0)
elif opt in ("-d", "--dryrun"):
opt_dryrun = 1
if args:
assetroot_path = args[0]
main(opt_dryrun, assetroot_path)
|
|
# Copyright (c) 2012 Citrix Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Aggregate admin API extension."""
from webob import exc
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova import exception
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'aggregates')
def _get_context(req):
return req.environ['nova.context']
def get_host_from_body(fn):
"""Makes sure that the host exists."""
def wrapped(self, req, id, body, *args, **kwargs):
if len(body) == 1 and "host" in body:
host = body['host']
else:
raise exc.HTTPBadRequest
return fn(self, req, id, host, *args, **kwargs)
return wrapped
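# Illustrative note (hypothetical request body, not part of the original
# file): an action such as
#     {"add_host": {"host": "compute-01"}}
# reaches the decorated method with host == "compute-01"; any body that is
# not exactly a single "host" key results in HTTPBadRequest.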
class AggregateController(object):
"""The Host Aggregates API controller for the OpenStack API."""
def __init__(self):
self.api = compute_api.AggregateAPI()
def index(self, req):
"""Returns a list a host aggregate's id, name, availability_zone."""
context = _get_context(req)
authorize(context)
aggregates = self.api.get_aggregate_list(context)
return {'aggregates': aggregates}
def create(self, req, body):
"""Creates an aggregate, given its name and availability_zone."""
context = _get_context(req)
authorize(context)
if len(body) != 1:
raise exc.HTTPBadRequest
try:
host_aggregate = body["aggregate"]
name = host_aggregate["name"]
avail_zone = host_aggregate["availability_zone"]
except KeyError:
raise exc.HTTPBadRequest
if len(host_aggregate) != 2:
raise exc.HTTPBadRequest
try:
aggregate = self.api.create_aggregate(context, name, avail_zone)
except (exception.AggregateNameExists,
exception.InvalidAggregateAction):
LOG.exception(_("Cannot create aggregate with name %(name)s and "
"availability zone %(avail_zone)s") % locals())
raise exc.HTTPConflict
return self._marshall_aggregate(aggregate)
def show(self, req, id):
"""Shows the details of an aggregate, hosts and metadata included."""
context = _get_context(req)
authorize(context)
try:
aggregate = self.api.get_aggregate(context, id)
except exception.AggregateNotFound:
LOG.exception(_("Cannot show aggregate: %(id)s") % locals())
raise exc.HTTPNotFound
return self._marshall_aggregate(aggregate)
def update(self, req, id, body):
"""Updates the name and/or availability_zone of given aggregate."""
context = _get_context(req)
authorize(context)
if len(body) != 1:
raise exc.HTTPBadRequest
try:
updates = body["aggregate"]
except KeyError:
raise exc.HTTPBadRequest
if len(updates) < 1:
raise exc.HTTPBadRequest
for key in updates.keys():
if key not in ["name", "availability_zone"]:
raise exc.HTTPBadRequest
try:
aggregate = self.api.update_aggregate(context, id, updates)
except exception.AggregateNotFound:
LOG.exception(_("Cannot update aggregate: %(id)s") % locals())
raise exc.HTTPNotFound
return self._marshall_aggregate(aggregate)
def delete(self, req, id):
"""Removes an aggregate by id."""
context = _get_context(req)
authorize(context)
try:
self.api.delete_aggregate(context, id)
except exception.AggregateNotFound:
LOG.exception(_("Cannot delete aggregate: %(id)s") % locals())
raise exc.HTTPNotFound
def action(self, req, id, body):
_actions = {
'add_host': self._add_host,
'remove_host': self._remove_host,
'set_metadata': self._set_metadata,
}
for action, data in body.iteritems():
try:
return _actions[action](req, id, data)
except KeyError:
msg = _("Aggregates does not have %s action") % action
raise exc.HTTPBadRequest(explanation=msg)
raise exc.HTTPBadRequest(explanation=_("Invalid request body"))
@get_host_from_body
def _add_host(self, req, id, host):
"""Adds a host to the specified aggregate."""
context = _get_context(req)
authorize(context)
try:
aggregate = self.api.add_host_to_aggregate(context, id, host)
except (exception.AggregateNotFound, exception.ComputeHostNotFound):
LOG.exception(_("Cannot add host %(host)s in aggregate "
"%(id)s") % locals())
raise exc.HTTPNotFound
except (exception.AggregateHostExists,
exception.InvalidAggregateAction):
LOG.exception(_("Cannot add host %(host)s in aggregate "
"%(id)s") % locals())
raise exc.HTTPConflict
return self._marshall_aggregate(aggregate)
@get_host_from_body
def _remove_host(self, req, id, host):
"""Removes a host from the specified aggregate."""
context = _get_context(req)
authorize(context)
try:
aggregate = self.api.remove_host_from_aggregate(context, id, host)
except (exception.AggregateNotFound, exception.AggregateHostNotFound):
LOG.exception(_("Cannot remove host %(host)s in aggregate "
"%(id)s") % locals())
raise exc.HTTPNotFound
except exception.InvalidAggregateAction:
LOG.exception(_("Cannot remove host %(host)s in aggregate "
"%(id)s") % locals())
raise exc.HTTPConflict
return self._marshall_aggregate(aggregate)
def _set_metadata(self, req, id, body):
"""Replaces the aggregate's existing metadata with new metadata."""
context = _get_context(req)
authorize(context)
if len(body) != 1:
raise exc.HTTPBadRequest
try:
metadata = body["metadata"]
except KeyError:
raise exc.HTTPBadRequest
try:
aggregate = self.api.update_aggregate_metadata(context,
id, metadata)
except exception.AggregateNotFound:
LOG.exception(_("Cannot set metadata %(metadata)s in aggregate "
"%(id)s") % locals())
raise exc.HTTPNotFound
return self._marshall_aggregate(aggregate)
def _marshall_aggregate(self, aggregate):
return {"aggregate": aggregate}
class Aggregates(extensions.ExtensionDescriptor):
"""Admin-only aggregate administration"""
name = "Aggregates"
alias = "os-aggregates"
namespace = "http://docs.openstack.org/compute/ext/aggregates/api/v1.1"
updated = "2012-01-12T00:00:00+00:00"
def __init__(self, ext_mgr):
ext_mgr.register(self)
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-aggregates',
AggregateController(),
member_actions={"action": "POST", })
resources.append(res)
return resources
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SecurityPartnerProvidersOperations(object):
"""SecurityPartnerProvidersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
security_partner_provider_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
security_partner_provider_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified Security Partner Provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param security_partner_provider_name: The name of the Security Partner Provider.
:type security_partner_provider_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
security_partner_provider_name=security_partner_provider_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
security_partner_provider_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.SecurityPartnerProvider"
"""Gets the specified Security Partner Provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param security_partner_provider_name: The name of the Security Partner Provider.
:type security_partner_provider_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityPartnerProvider, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_05_01.models.SecurityPartnerProvider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityPartnerProvider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecurityPartnerProvider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
security_partner_provider_name, # type: str
parameters, # type: "_models.SecurityPartnerProvider"
**kwargs # type: Any
):
# type: (...) -> "_models.SecurityPartnerProvider"
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityPartnerProvider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'SecurityPartnerProvider')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SecurityPartnerProvider', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SecurityPartnerProvider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
security_partner_provider_name, # type: str
parameters, # type: "_models.SecurityPartnerProvider"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.SecurityPartnerProvider"]
"""Creates or updates the specified Security Partner Provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param security_partner_provider_name: The name of the Security Partner Provider.
:type security_partner_provider_name: str
:param parameters: Parameters supplied to the create or update Security Partner Provider
operation.
:type parameters: ~azure.mgmt.network.v2021_05_01.models.SecurityPartnerProvider
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either SecurityPartnerProvider or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2021_05_01.models.SecurityPartnerProvider]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityPartnerProvider"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
security_partner_provider_name=security_partner_provider_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('SecurityPartnerProvider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'} # type: ignore
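    # Illustrative usage of the long-running create/update operation (a minimal
    # sketch, not part of the generated code): it assumes a NetworkManagementClient
    # named `client` whose operations group is exposed as
    # `client.security_partner_providers`, a `SecurityPartnerProvider` model
    # imported from this package's models, and hypothetical resource names.
    #
    #     provider = models.SecurityPartnerProvider(location="westus")
    #     poller = client.security_partner_providers.begin_create_or_update(
    #         resource_group_name="example-rg",                  # hypothetical
    #         security_partner_provider_name="example-provider", # hypothetical
    #         parameters=provider,
    #     )
    #     result = poller.result()  # blocks until the LRO completes, returns the model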
def update_tags(
self,
resource_group_name, # type: str
security_partner_provider_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.SecurityPartnerProvider"
"""Updates tags of a Security Partner Provider resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param security_partner_provider_name: The name of the Security Partner Provider.
:type security_partner_provider_name: str
:param parameters: Parameters supplied to update Security Partner Provider tags.
:type parameters: ~azure.mgmt.network.v2021_05_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityPartnerProvider, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_05_01.models.SecurityPartnerProvider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityPartnerProvider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecurityPartnerProvider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SecurityPartnerProviderListResult"]
"""Lists all Security Partner Providers in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SecurityPartnerProviderListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_05_01.models.SecurityPartnerProviderListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityPartnerProviderListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SecurityPartnerProviderListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders'} # type: ignore
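    # Illustrative usage of the paged list operation (sketch only, assuming the
    # same hypothetical `client` as above):
    #
    #     for provider in client.security_partner_providers.list_by_resource_group("example-rg"):
    #         print(provider.name)
    #
    # The returned ItemPaged iterator follows `next_link` lazily, so the loop
    # transparently spans multiple result pages.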
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SecurityPartnerProviderListResult"]
"""Gets all the Security Partner Providers in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SecurityPartnerProviderListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_05_01.models.SecurityPartnerProviderListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityPartnerProviderListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SecurityPartnerProviderListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/securityPartnerProviders'} # type: ignore
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a directory with with the unpacked contents of the remoting webapp.
The directory will contain a copy-of or a link-to to all remoting webapp
resources. This includes HTML/JS and any plugin binaries. The script also
massages resulting files appropriately with host plugin data. Finally,
a zip archive for all of the above is produced.
"""
# Python 2.5 compatibility
from __future__ import with_statement
import argparse
import io
import os
import platform
import re
import shutil
import subprocess
import sys
import time
import zipfile
# Update the module path, assuming that this script is in src/remoting/webapp,
# and that the google_api_keys module is in src/google_apis. Note that
# sys.path[0] refers to the directory containing this script.
if __name__ == '__main__':
sys.path.append(
os.path.abspath(os.path.join(sys.path[0], '../../google_apis')))
import google_api_keys
def findAndReplace(filepath, findString, replaceString):
"""Does a search and replace on the contents of a file."""
oldFilename = os.path.basename(filepath) + '.old'
oldFilepath = os.path.join(os.path.dirname(filepath), oldFilename)
os.rename(filepath, oldFilepath)
with open(oldFilepath) as input:
with open(filepath, 'w') as output:
for s in input:
output.write(s.replace(findString, replaceString))
os.remove(oldFilepath)
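# For example, the plugin-settings substitutions later in this script boil down
# to calls like the following (the output path is illustrative):
#   findAndReplace('out/webapp/plugin_settings.js',
#                  "'XMPP_SERVER'", "'talk.google.com:443'")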
def createZip(zip_path, directory):
"""Creates a zipfile at zip_path for the given directory."""
zipfile_base = os.path.splitext(os.path.basename(zip_path))[0]
zip = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
for (root, dirs, files) in os.walk(directory):
for f in files:
full_path = os.path.join(root, f)
rel_path = os.path.relpath(full_path, directory)
zip.write(full_path, os.path.join(zipfile_base, rel_path))
zip.close()
def replaceString(destination, placeholder, value):
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"'" + placeholder + "'", "'" + value + "'")
def replaceBool(destination, placeholder, value):
# Look for a "!!" in the source code so the expession we're
# replacing looks like a boolean to the compiler. A single "!"
# would satisfy the compiler but might confused human readers.
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"!!'" + placeholder + "'", 'true' if value else 'false')
def parseBool(boolStr):
"""Tries to parse a string as a boolean value.
Returns a bool on success; raises ValueError on failure.
"""
lower = boolStr.lower()
if lower in ['0', 'false']: return False
if lower in ['1', 'true']: return True
raise ValueError('not a boolean string {!r}'.format(boolStr))
def getenvBool(name, defaultValue):
"""Gets an environment value as a boolean."""
rawValue = os.environ.get(name)
if rawValue is None:
return defaultValue
try:
return parseBool(rawValue)
except ValueError:
raise Exception('Value of ${} must be boolean!'.format(name))
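# Example: with XMPP_SERVER_USE_TLS unset, getenvBool('XMPP_SERVER_USE_TLS', True)
# returns True; with XMPP_SERVER_USE_TLS=0 it returns False, and a value such as
# "maybe" raises an Exception.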
def processJinjaTemplate(input_file, include_paths, output_file, context):
jinja2_path = os.path.normpath(
os.path.join(os.path.abspath(__file__),
'../../../third_party/jinja2'))
sys.path.append(os.path.split(jinja2_path)[0])
import jinja2
(template_path, template_name) = os.path.split(input_file)
include_paths = [template_path] + include_paths
env = jinja2.Environment(loader=jinja2.FileSystemLoader(include_paths))
template = env.get_template(template_name)
rendered = template.render(context)
io.open(output_file, 'w', encoding='utf-8').write(rendered)
def buildWebApp(buildtype, version, destination, zip_path,
manifest_template, webapp_type, appid, app_name,
app_description, app_capabilities, files, locales_listfile,
jinja_paths, service_environment, use_gcd):
"""Does the main work of building the webapp directory and zipfile.
Args:
buildtype: the type of build ("Official", "Release" or "Dev").
version: the full version string to embed in the generated manifest.
destination: A string with path to directory where the webapp will be
written.
zip_path: A string with path to the zipfile to create containing the
contents of |destination|.
manifest_template: jinja2 template file for manifest.
webapp_type: webapp type ("v1", "v2", "v2_pnacl" or "app_remoting").
appid: A string with the Remoting Application Id (only used for app
remoting webapps). If supplied, it defaults to using the
test API server.
app_name: A string with the name of the application.
app_description: A string with the description of the application.
app_capabilities: A set of strings naming the capabilities that should be
enabled for this application.
files: An array of strings listing the paths for resources to include
in this webapp.
locales_listfile: The name of a file containing a list of locales, one per
line, which are copied, along with their directory structure, from
the _locales directory down.
jinja_paths: An array of paths to search for {%include} directives in
addition to the directory containing the manifest template.
service_environment: Used to point the webApp to one of the
dev/test/staging/prod environments
use_gcd: True if GCD support should be enabled.
"""
# Load the locales files from the locales_listfile.
if not locales_listfile:
raise Exception('You must specify a locales_listfile')
locales = []
with open(locales_listfile) as input:
for s in input:
locales.append(s.rstrip())
# Ensure a fresh directory.
try:
shutil.rmtree(destination)
except OSError:
if os.path.exists(destination):
raise
else:
pass
os.mkdir(destination, 0775)
if buildtype != 'Official' and buildtype != 'Release' and buildtype != 'Dev':
raise Exception('Unknown buildtype: ' + buildtype)
jinja_context = {
'webapp_type': webapp_type,
'buildtype': buildtype,
}
# Copy all the files.
for current_file in files:
destination_file = os.path.join(destination, os.path.basename(current_file))
# Process *.jinja2 files as jinja2 templates
if current_file.endswith(".jinja2"):
destination_file = destination_file[:-len(".jinja2")]
processJinjaTemplate(current_file, jinja_paths,
destination_file, jinja_context)
else:
shutil.copy2(current_file, destination_file)
# Copy all the locales, preserving directory structure
destination_locales = os.path.join(destination, '_locales')
os.mkdir(destination_locales, 0775)
remoting_locales = os.path.join(destination, 'remoting_locales')
os.mkdir(remoting_locales, 0775)
for current_locale in locales:
extension = os.path.splitext(current_locale)[1]
if extension == '.json':
locale_id = os.path.split(os.path.split(current_locale)[0])[1]
destination_dir = os.path.join(destination_locales, locale_id)
destination_file = os.path.join(destination_dir,
os.path.split(current_locale)[1])
os.mkdir(destination_dir, 0775)
shutil.copy2(current_locale, destination_file)
elif extension == '.pak':
destination_file = os.path.join(remoting_locales,
os.path.split(current_locale)[1])
shutil.copy2(current_locale, destination_file)
else:
raise Exception('Unknown extension: ' + current_locale)
# Set client plugin type.
# TODO(wez): Use 'native' in app_remoting until b/17441659 is resolved.
client_plugin = 'pnacl' if webapp_type == 'v2_pnacl' else 'native'
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"'CLIENT_PLUGIN_TYPE'", "'" + client_plugin + "'")
# Allow host names for google services/apis to be overridden via env vars.
oauth2AccountsHost = os.environ.get(
'OAUTH2_ACCOUNTS_HOST', 'https://accounts.google.com')
oauth2ApiHost = os.environ.get(
'OAUTH2_API_HOST', 'https://www.googleapis.com')
directoryApiHost = os.environ.get(
'DIRECTORY_API_HOST', 'https://www.googleapis.com')
if webapp_type == 'app_remoting':
appRemotingApiHost = os.environ.get(
'APP_REMOTING_API_HOST', None)
appRemotingApplicationId = os.environ.get(
'APP_REMOTING_APPLICATION_ID', None)
# Release/Official builds are special because they are what we will upload
# to the web store. The checks below will validate that prod builds are
# being generated correctly (no overrides) and with the correct buildtype.
# They also verify that folks are not accidentally building dev/test/staging
# apps for release (no impersonation) instead of dev.
if service_environment == 'prod' and buildtype == 'Dev':
raise Exception("Prod environment cannot be built for 'dev' builds")
if buildtype != 'Dev':
if service_environment != 'prod':
raise Exception('Invalid service_environment targeted for '
+ buildtype + ': ' + service_environment)
if 'out/Release' not in destination and 'out\Release' not in destination:
raise Exception('Prod builds must be placed in the out/Release folder')
if appid != None:
raise Exception('Cannot pass in an appid for '
+ buildtype + ' builds: ' + service_environment)
if appRemotingApiHost != None:
raise Exception('Cannot set APP_REMOTING_API_HOST env var for '
+ buildtype + ' builds')
if appRemotingApplicationId != None:
raise Exception('Cannot set APP_REMOTING_APPLICATION_ID env var for '
+ buildtype + ' builds')
# If an Application ID was set (either from an environment variable or
# from a command line argument), hardcode it, otherwise get it at runtime.
effectiveAppId = appRemotingApplicationId or appid
if effectiveAppId:
appRemotingApplicationId = "'" + effectiveAppId + "'"
else:
appRemotingApplicationId = "chrome.i18n.getMessage('@@extension_id')"
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"'APP_REMOTING_APPLICATION_ID'", appRemotingApplicationId)
oauth2BaseUrl = oauth2AccountsHost + '/o/oauth2'
oauth2ApiBaseUrl = oauth2ApiHost + '/oauth2'
directoryApiBaseUrl = directoryApiHost + '/chromoting/v1'
if webapp_type == 'app_remoting':
# Set the apiary endpoint and then set the endpoint version
if not appRemotingApiHost:
if service_environment == 'prod':
appRemotingApiHost = 'https://www.googleapis.com'
else:
appRemotingApiHost = 'https://www-googleapis-test.sandbox.google.com'
if service_environment == 'dev':
appRemotingServicePath = '/appremoting/v1beta1_dev'
elif service_environment == 'test':
appRemotingServicePath = '/appremoting/v1beta1'
elif service_environment == 'staging':
appRemotingServicePath = '/appremoting/v1beta1_staging'
elif service_environment == 'prod':
appRemotingServicePath = '/appremoting/v1beta1'
else:
raise Exception('Unknown service environment: ' + service_environment)
appRemotingApiBaseUrl = appRemotingApiHost + appRemotingServicePath
else:
appRemotingApiBaseUrl = ''
replaceBool(destination, 'USE_GCD', use_gcd)
replaceString(destination, 'OAUTH2_BASE_URL', oauth2BaseUrl)
replaceString(destination, 'OAUTH2_API_BASE_URL', oauth2ApiBaseUrl)
replaceString(destination, 'DIRECTORY_API_BASE_URL', directoryApiBaseUrl)
if webapp_type == 'app_remoting':
replaceString(destination, 'APP_REMOTING_API_BASE_URL',
appRemotingApiBaseUrl)
# Substitute hosts in the manifest's CSP list.
# Ensure we list the API host only once if it's the same for multiple APIs.
googleApiHosts = ' '.join(set([oauth2ApiHost, directoryApiHost]))
# WCS and the OAuth trampoline are both hosted on talkgadget. Split them into
# separate suffix/prefix variables to allow for wildcards in manifest.json.
talkGadgetHostSuffix = os.environ.get(
'TALK_GADGET_HOST_SUFFIX', 'talkgadget.google.com')
talkGadgetHostPrefix = os.environ.get(
'TALK_GADGET_HOST_PREFIX', 'https://chromoting-client.')
oauth2RedirectHostPrefix = os.environ.get(
'OAUTH2_REDIRECT_HOST_PREFIX', 'https://chromoting-oauth.')
# Use a wildcard in the manifest.json host specs if the prefixes differ.
talkGadgetHostJs = talkGadgetHostPrefix + talkGadgetHostSuffix
talkGadgetBaseUrl = talkGadgetHostJs + '/talkgadget/'
if talkGadgetHostPrefix == oauth2RedirectHostPrefix:
talkGadgetHostJson = talkGadgetHostJs
else:
talkGadgetHostJson = 'https://*.' + talkGadgetHostSuffix
# Set the correct OAuth2 redirect URL.
oauth2RedirectHostJs = oauth2RedirectHostPrefix + talkGadgetHostSuffix
oauth2RedirectHostJson = talkGadgetHostJson
oauth2RedirectPath = '/talkgadget/oauth/chrome-remote-desktop'
oauth2RedirectBaseUrlJs = oauth2RedirectHostJs + oauth2RedirectPath
oauth2RedirectBaseUrlJson = oauth2RedirectHostJson + oauth2RedirectPath
if buildtype == 'Official':
oauth2RedirectUrlJs = ("'" + oauth2RedirectBaseUrlJs +
"/rel/' + chrome.i18n.getMessage('@@extension_id')")
oauth2RedirectUrlJson = oauth2RedirectBaseUrlJson + '/rel/*'
else:
oauth2RedirectUrlJs = "'" + oauth2RedirectBaseUrlJs + "/dev'"
oauth2RedirectUrlJson = oauth2RedirectBaseUrlJson + '/dev*'
thirdPartyAuthUrlJs = oauth2RedirectBaseUrlJs + '/thirdpartyauth'
thirdPartyAuthUrlJson = oauth2RedirectBaseUrlJson + '/thirdpartyauth*'
replaceString(destination, 'TALK_GADGET_URL', talkGadgetBaseUrl)
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"'OAUTH2_REDIRECT_URL'", oauth2RedirectUrlJs)
# Configure xmpp server and directory bot settings in the plugin.
replaceBool(
destination, 'XMPP_SERVER_USE_TLS',
getenvBool('XMPP_SERVER_USE_TLS', True))
xmppServer = os.environ.get('XMPP_SERVER',
'talk.google.com:443')
replaceString(destination, 'XMPP_SERVER', xmppServer)
replaceString(destination, 'DIRECTORY_BOT_JID',
os.environ.get('DIRECTORY_BOT_JID',
'remoting@bot.talk.google.com'))
replaceString(destination, 'THIRD_PARTY_AUTH_REDIRECT_URL',
thirdPartyAuthUrlJs)
# Set the correct API keys.
# For overriding the client ID/secret via env vars, see google_api_keys.py.
apiClientId = google_api_keys.GetClientID('REMOTING')
apiClientSecret = google_api_keys.GetClientSecret('REMOTING')
apiClientIdV2 = google_api_keys.GetClientID('REMOTING_IDENTITY_API')
replaceString(destination, 'API_CLIENT_ID', apiClientId)
replaceString(destination, 'API_CLIENT_SECRET', apiClientSecret)
# Write the application capabilities.
appCapabilities = ','.join(
['remoting.ClientSession.Capability.' + x for x in app_capabilities])
findAndReplace(os.path.join(destination, 'app_capabilities.js'),
"'APPLICATION_CAPABILITIES'", appCapabilities)
# Use a consistent extension id for dev builds.
# AppRemoting builds always use the dev app id - the correct app id gets
# written into the manifest later.
if buildtype != 'Official' or webapp_type == 'app_remoting':
manifestKey = '"key": "remotingdevbuild",'
else:
manifestKey = ''
# Generate manifest.
if manifest_template:
context = {
'webapp_type': webapp_type,
'FULL_APP_VERSION': version,
'MANIFEST_KEY_FOR_UNOFFICIAL_BUILD': manifestKey,
'OAUTH2_REDIRECT_URL': oauth2RedirectUrlJson,
'TALK_GADGET_HOST': talkGadgetHostJson,
'THIRD_PARTY_AUTH_REDIRECT_URL': thirdPartyAuthUrlJson,
'REMOTING_IDENTITY_API_CLIENT_ID': apiClientIdV2,
'OAUTH2_BASE_URL': oauth2BaseUrl,
'OAUTH2_API_BASE_URL': oauth2ApiBaseUrl,
'DIRECTORY_API_BASE_URL': directoryApiBaseUrl,
'APP_REMOTING_API_BASE_URL': appRemotingApiBaseUrl,
'OAUTH2_ACCOUNTS_HOST': oauth2AccountsHost,
'GOOGLE_API_HOSTS': googleApiHosts,
'APP_NAME': app_name,
'APP_DESCRIPTION': app_description,
'OAUTH_GDRIVE_SCOPE': '',
'USE_GCD': use_gcd,
'XMPP_SERVER': xmppServer,
}
if 'GOOGLE_DRIVE' in app_capabilities:
context['OAUTH_GDRIVE_SCOPE'] = ('https://docs.google.com/feeds/ '
'https://www.googleapis.com/auth/drive')
processJinjaTemplate(manifest_template,
jinja_paths,
os.path.join(destination, 'manifest.json'),
context)
# Make the zipfile.
createZip(zip_path, destination)
return 0
def main():
parser = argparse.ArgumentParser()
parser.add_argument('buildtype')
parser.add_argument('version')
parser.add_argument('destination')
parser.add_argument('zip_path')
parser.add_argument('manifest_template')
parser.add_argument('webapp_type')
parser.add_argument('files', nargs='*', metavar='file', default=[])
parser.add_argument('--app_name', metavar='NAME')
parser.add_argument('--app_description', metavar='TEXT')
parser.add_argument('--app_capabilities',
nargs='*', default=[], metavar='CAPABILITY')
parser.add_argument('--appid')
parser.add_argument('--locales_listfile', default='', metavar='PATH')
parser.add_argument('--jinja_paths', nargs='*', default=[], metavar='PATH')
parser.add_argument('--service_environment', default='', metavar='ENV')
parser.add_argument('--use_gcd', choices=['0', '1'], default='0')
args = parser.parse_args()
args.use_gcd = (args.use_gcd != '0')
args.app_capabilities = set(args.app_capabilities)
return buildWebApp(**vars(args))
if __name__ == '__main__':
sys.exit(main())
|
|
"""
Module to set up run time parameters for Clawpack -- AMRClaw code.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
#-----------------------------------------------
# Set these parameters for adjoint flagging....
# location of output from computing adjoint:
adjoint_output = os.path.abspath('adjoint/_output')
print('Will flag using adjoint solution from %s' % adjoint_output)
# Time period of interest:
t1 = 3.5*3600.
t2 = 11*3600.
# tolerance for adjoint flagging:
adjoint_flag_tolerance = 0.004
#-----------------------------------------------
t_shelf = 3.2*3600 # time approaching continental slope
t_harbor = 3.5*3600 # time approaching harbor
try:
CLAW = os.environ['CLAW']
except:
raise Exception("*** Must first set CLAW enviornment variable")
# Scratch directory for storing topo and dtopo files:
scratch_dir = os.path.join(CLAW, 'geoclaw', 'scratch')
#------------------------------
def setrun(claw_pkg='geoclaw'):
#------------------------------
"""
Define the parameters used for running Clawpack.
INPUT:
claw_pkg expected to be "geoclaw" for this setrun.
OUTPUT:
rundata - object of class ClawRunData
"""
from clawpack.clawutil import data
assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
num_dim = 2
rundata = data.ClawRunData(claw_pkg, num_dim)
#------------------------------------------------------------------
# GeoClaw specific parameters:
#------------------------------------------------------------------
rundata = setgeo(rundata)
#------------------------------------------------------------------
# Standard Clawpack parameters to be written to claw.data:
#------------------------------------------------------------------
clawdata = rundata.clawdata # initialized when rundata instantiated
# Set single grid parameters first.
# See below for AMR parameters.
# ---------------
# Spatial domain:
# ---------------
# Number of space dimensions:
clawdata.num_dim = num_dim
# Lower and upper edge of computational domain:
clawdata.lower[0] = 140.0 # xlower
clawdata.upper[0] = 250.0 # xupper
clawdata.lower[1] = 10.0 # ylower
clawdata.upper[1] = 62.0 # yupper
# Number of grid cells:
clawdata.num_cells[0] = 110 # mx
clawdata.num_cells[1] = 52 # my
# ---------------
# Size of system:
# ---------------
# Number of equations in the system:
clawdata.num_eqn = 3
# Number of auxiliary variables in the aux array (initialized in setaux)
# see setadjoint
# Index of aux array corresponding to capacity function, if there is one:
clawdata.capa_index = 2
# -------------
# Initial time:
# -------------
clawdata.t0 = 0.0
# Restart from checkpoint file of a previous run?
# Note: If restarting, you must also change the Makefile to set:
# RESTART = True
# If restarting, t0 above should be from original run, and the
# restart_file 'fort.chkNNNNN' specified below should be in
# the OUTDIR indicated in Makefile.
clawdata.restart = False # True to restart from prior results
clawdata.restart_file = 'fort.chk00006' # File to use for restart data
# -------------
# Output times:
#--------------
# Specify at what times the results should be written to fort.q files.
# Note that the time integration stops after the final output time.
clawdata.output_style = 1
if clawdata.output_style==1:
# Output ntimes frames at equally spaced times up to tfinal:
# Can specify num_output_times = 0 for no output
clawdata.num_output_times = 22
clawdata.tfinal = 11*3600.
clawdata.output_t0 = False # output at initial (or restart) time?
elif clawdata.output_style == 2:
# Specify a list or numpy array of output times:
# Include t0 if you want output at the initial time.
clawdata.output_times = list(np.linspace(3600,3600*9,9))
elif clawdata.output_style == 3:
# Output every step_interval timesteps over total_steps timesteps:
clawdata.output_step_interval = 1
clawdata.total_steps = 1
clawdata.output_t0 = True # output at initial (or restart) time?
clawdata.output_format = 'binary' # 'ascii', 'binary', 'netcdf'
clawdata.output_q_components = 'all' # could be list such as [True,True]
clawdata.output_aux_components = 'none' # could be list
clawdata.output_aux_onlyonce = True # output aux arrays only at t0
# ---------------------------------------------------
# Verbosity of messages to screen during integration:
# ---------------------------------------------------
# The current t, dt, and cfl will be printed every time step
# at AMR levels <= verbosity. Set verbosity = 0 for no printing.
# (E.g. verbosity == 2 means print only on levels 1 and 2.)
clawdata.verbosity = 0
# --------------
# Time stepping:
# --------------
# if dt_variable==True: variable time steps used based on cfl_desired,
# if dt_variable==False: fixed time steps dt = dt_initial always used.
clawdata.dt_variable = True
# Initial time step for variable dt.
# (If dt_variable==0 then dt=dt_initial for all steps)
clawdata.dt_initial = 1
# Max time step to be allowed if variable dt used:
clawdata.dt_max = 1e+99
# Desired Courant number if variable dt used
clawdata.cfl_desired = 0.75
# max Courant number to allow without retaking step with a smaller dt:
clawdata.cfl_max = 1.0
# Maximum number of time steps to allow between output times:
clawdata.steps_max = 5000
# ------------------
# Method to be used:
# ------------------
# Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
clawdata.order = 2
# Use dimensional splitting? (not yet available for AMR)
clawdata.dimensional_split = 'unsplit'
# For unsplit method, transverse_waves can be
# 0 or 'none' ==> donor cell (only normal solver used)
# 1 or 'increment' ==> corner transport of waves
# 2 or 'all' ==> corner transport of 2nd order corrections too
clawdata.transverse_waves = 2
# Number of waves in the Riemann solution:
clawdata.num_waves = 3
# List of limiters to use for each wave family:
# Required: len(limiter) == num_waves
# Some options:
# 0 or 'none' ==> no limiter (Lax-Wendroff)
# 1 or 'minmod' ==> minmod
# 2 or 'superbee' ==> superbee
# 3 or 'vanleer' ==> van Leer
# 4 or 'mc' ==> MC limiter
clawdata.limiter = ['vanleer', 'vanleer', 'vanleer']
clawdata.use_fwaves = True # True ==> use f-wave version of algorithms
# Source terms splitting:
# src_split == 0 or 'none' ==> no source term (src routine never called)
# src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
# src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
clawdata.source_split = 1
# --------------------
# Boundary conditions:
# --------------------
# Number of ghost cells (usually 2)
clawdata.num_ghost = 2
# Choice of BCs at xlower and xupper:
# 0 or 'user' => user specified (must modify bcNamr.f to use this option)
# 1 or 'extrap' => extrapolation (non-reflecting outflow)
# 2 or 'periodic' => periodic (must specify this at both boundaries)
# 3 or 'wall' => solid wall for systems where q(2) is normal velocity
clawdata.bc_lower[0] = 'extrap' # at xlower
clawdata.bc_upper[0] = 'extrap' # at xupper
clawdata.bc_lower[1] = 'extrap' # at ylower
clawdata.bc_upper[1] = 'extrap' # at yupper
# ---------------
# gauges:
# ---------------
gauges = rundata.gaugedata.gauges
# for gauges append lines of the form [gaugeno, x, y, t1, t2]
# Outside harbor:
gauges.append([1, 235.536, 41.67, t_shelf, 1.e10])
# Inside harbor:
gauges.append([2, 235.80917,41.74111,t_harbor, 1.e10])
# --------------
# Checkpointing:
# --------------
# Specify when checkpoint files should be created that can be
# used to restart a computation.
clawdata.checkpt_style = 0
if clawdata.checkpt_style == 0:
# Do not checkpoint at all
pass
elif clawdata.checkpt_style == 1:
# Checkpoint only at tfinal.
pass
elif clawdata.checkpt_style == 2:
# Specify a list of checkpoint times.
clawdata.checkpt_times = [0.1,0.15]
elif clawdata.checkpt_style == 3:
# Checkpoint every checkpt_interval timesteps (on Level 1)
# and at the final time.
clawdata.checkpt_interval = 5
# ---------------
# AMR parameters: (written to amr.data)
# ---------------
amrdata = rundata.amrdata
# max number of refinement levels:
amrdata.amr_levels_max = 4
# List of refinement ratios at each level (length at least amr_level_max-1)
amrdata.refinement_ratios_x = [5, 6, 6, 3, 30]
amrdata.refinement_ratios_y = [5, 6, 6, 3, 30]
amrdata.refinement_ratios_t = [5, 6, 6, 3, 4]
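# Quick sanity check (not a setting): level-1 cells are 1 degree on a side
# (110 cells across the 110-degree x-domain), so these ratios give roughly
# 12' cells on level 2, 2' on level 3 and 20" on level 4; the remaining entries
# only come into play if amr_levels_max above is raised beyond 4.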
# Specify type of each aux variable in amrdata.auxtype.
# This must be a list of length num_aux, each element of which is one of:
# 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
# need 4 values, set in setadjoint
# Flag for refinement based on Richardson error estimator:
amrdata.flag_richardson = False # use Richardson?
amrdata.flag_richardson_tol = 1.0 # Richardson tolerance
# Flag for refinement using routine flag2refine:
amrdata.flag2refine = True # use this?
# see setadjoint to set tolerance for adjoint flagging
# steps to take on each level L between regriddings of level L+1:
amrdata.regrid_interval = 3
# width of buffer zone around flagged points:
# (typically the same as regrid_interval so waves don't escape):
amrdata.regrid_buffer_width = 2
# clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
# (closer to 1.0 => more small grids may be needed to cover flagged cells)
amrdata.clustering_cutoff = 0.7
# print info about each regridding up to this level:
amrdata.verbosity_regrid = 0
# ---------------
# Regions:
# ---------------
regions = rundata.regiondata.regions
# to specify regions of refinement append lines of the form
# [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
regions.append([1, 1, 0., 1e9, 0, 360, -90, 90]) #whole world
regions.append([1, 3, 0., 7*3600., 0, 360, -90, 90]) #whole world
regions.append([1, 3, 7*3600.,10*3600., 170., 360, 18, 90])
regions.append([1, 3, 10*3600.,1e9, 195., 360, -90, 90])
regions.append([4, 4, 0., 1800, 175, 195, 50, 54]) #earthquake source AASZ04
regions.append([3, 4, t_shelf, 1e9, 235, 238, 34, 43]) # between shelf and CC
regions.append([4, 4, t_shelf, 1e9, 235, 236, 41, 42])
regions.append([5, 5, t_shelf, 1e9, 235.5,235.83,41.6,41.8]) #only harbor
regions.append([5, 6, t_harbor, 1e9, 235.78,235.84,41.735,41.775]) #only harbor
#------------------------------------------------------------------
# Adjoint specific data:
#------------------------------------------------------------------
# Do this last since it resets some parameters such as num_aux
# as needed for adjoint flagging.
rundata = setadjoint(rundata)
# ----- For developers -----
# Toggle debugging print statements:
amrdata.dprint = False # print domain flags
amrdata.eprint = False # print err est flags
amrdata.edebug = False # even more err est flags
amrdata.gprint = False # grid bisection/clustering
amrdata.nprint = False # proper nesting output
amrdata.pprint = False # proj. of tagged points
amrdata.rprint = False # print regridding summary
amrdata.sprint = False # space/memory output
amrdata.tprint = False # time step reporting each level
amrdata.uprint = False # update/upbnd reporting
return rundata
# end of function setrun
# ----------------------
#-------------------
def setgeo(rundata):
#-------------------
"""
Set GeoClaw specific runtime parameters.
"""
try:
geo_data = rundata.geo_data
except:
print "*** Error, this rundata has no geo_data attribute"
raise AttributeError("Missing geo_data attribute")
# == Physics ==
geo_data.gravity = 9.81
geo_data.coordinate_system = 2
geo_data.earth_radius = 6367500.0
# == Forcing Options
geo_data.coriolis_forcing = False
# == Algorithm and Initial Conditions ==
tide_stage = 77.
geo_data.sea_level = (tide_stage - 77.)/100. # m relative to MHW
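# Worked example: with tide_stage = 77 (as above) sea_level = 0.0, i.e. the run
# is at MHW; a tide_stage of 102 would give (102 - 77)/100 = 0.25 m above MHW.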
geo_data.dry_tolerance = 0.001
geo_data.friction_forcing = True
geo_data.manning_coefficient = 0.025
geo_data.friction_depth = 100.0
# Refinement settings
refinement_data = rundata.refinement_data
refinement_data.variable_dt_refinement_ratios = True
refinement_data.wave_tolerance = 0.09
refinement_data.deep_depth = 100.0
refinement_data.max_level_deep = 4
# == settopo.data values ==
topo_data = rundata.topo_data
# for topography, append lines of the form
# [topotype, minlevel, maxlevel, t1, t2, fname]
topo_path = os.path.join(scratch_dir, 'etopo1min170E124W40N61N.asc')
topo_data.topofiles.append([3, 1, 1, 0., 1.e10, topo_path])
topo_path = os.path.join(scratch_dir, 'etopo4min120E110W0N62N.asc')
topo_data.topofiles.append([3, 1, 1, 0., 1.e10, topo_path])
topo_path = os.path.join(scratch_dir, 'cc-1sec-c.asc')
topo_data.topofiles.append([-3, 1, 1, 32000., 1.e10, topo_path])
topo_path = os.path.join(scratch_dir, 'cc-1_3sec-c_pierless.asc')
topo_data.topofiles.append([3, 1, 1, 32000., 1.e10, topo_path])
# == setdtopo.data values ==
dtopo_data = rundata.dtopo_data
# for moving topography, append lines of the form :
# [topotype, minlevel,maxlevel,fname]
dtopo_path = os.path.join(scratch_dir, 'AASZ04v2.tt3')
dtopo_data.dtopofiles.append([3,3,3,dtopo_path])
dtopo_data.dt_max_dtopo = 0.2
# == setqinit.data values ==
rundata.qinit_data.qinit_type = 0
rundata.qinit_data.qinitfiles = []
qinitfiles = rundata.qinit_data.qinitfiles
# for qinit perturbations, append lines of the form: (<= 1 allowed for now!)
# [minlev, maxlev, fname]
# == fixedgrids.data values ==
rundata.fixed_grid_data.fixedgrids = []
fixedgrids = rundata.fixed_grid_data.fixedgrids
# for fixed grids append lines of the form
# [t1,t2,noutput,x1,x2,y1,y2,xpoints,ypoints,\
# ioutarrivaltimes,ioutsurfacemax]
# == fgmax.data values ==
fgmax_files = rundata.fgmax_data.fgmax_files
return rundata
# end of function setgeo
# ----------------------
#-------------------
def setadjoint(rundata):
#-------------------
"""
Set parameters used for adjoint flagging.
Also reads in all of the checkpointed Adjoint files.
"""
import glob
# Set these parameters at top of this file:
# adjoint_flag_tolerance, t1, t2, adjoint_output
# Then you don't need to modify this function...
rundata.amrdata.flag2refine = True # for adjoint flagging
rundata.amrdata.flag2refine_tol = adjoint_flag_tolerance
rundata.clawdata.num_aux = 4
rundata.amrdata.aux_type = ['center', 'capacity', 'yleft', 'center']
adjointdata = rundata.new_UserData(name='adjointdata',fname='adjoint.data')
adjointdata.add_param('adjoint_output',adjoint_output,'adjoint_output')
adjointdata.add_param('t1',t1,'t1, start time of interest')
adjointdata.add_param('t2',t2,'t2, final time of interest')
files = glob.glob(os.path.join(adjoint_output,"fort.b*"))
files.sort()
if (len(files) == 0):
print("No binary files found for adjoint output!")
adjointdata.add_param('numadjoints', len(files), 'Number of adjoint output files.')
adjointdata.add_param('innerprod_index', 4, 'Index for innerproduct data in aux array.')
counter = 1
for fname in files:
f = open(fname)
time = f.readline().split()[-1]
adjointdata.add_param('file' + str(counter), fname, 'Binary file' + str(counter))
counter = counter + 1
return rundata
# end of function setadjoint
# ----------------------
if __name__ == '__main__':
# Set up run-time parameters and write all data files.
import sys
rundata = setrun(*sys.argv[1:])
rundata.write()
|
|
#!/usr/bin/env python
import bsplines
import numpy
import scipy.interpolate.fitpack as fp
import scipy.integrate as si
import unittest
def createUniformKnotBSpline(order,segments,dim,knotSpacing=1.0):
aspl = bsplines.EuclideanBSpline(order, dim)
# Choose a uniform knot sequence.
aspl.initConstantUniformSpline(0, segments * knotSpacing, segments, numpy.zeros((dim, 1)))
kc = aspl.getNumControlVertices();
cp = numpy.random.random([dim,kc])
aspl.setControlVertices(cp)
return (aspl,(aspl.getKnotsVector(),cp,order-1))
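# Note: the second element of the returned tuple mirrors scipy's (t, c, k) "tck"
# representation, so the wrapped spline can be cross-checked against
# scipy.interpolate.fitpack (the tests below do this with spalde and splint).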
def createExponentialKnotBSpline(order,segments,dim,knotSpacing=1.0):
aspl = bsplines.EuclideanBSpline(order, dim)
kr = aspl.numKnotsRequired(segments)
kc = aspl.numCoefficientsRequired(segments);
# Choose an exponentially spaced knot sequence.
knots = numpy.zeros(kr)
for i in range(0,kr):
knots[i] = knotSpacing * 2**i
cp = numpy.random.random([dim,kc])
aspl.setKnotVectorAndCoefficients(knots, cp)
return (aspl,(knots,cp,order-1))
def createRandomKnotBSpline(order,segments,dim):
aspl = bsplines.EuclideanBSpline(order, dim)
kr = aspl.numKnotsRequired(segments)
kc = aspl.numCoefficientsRequired(segments);
# Choose a random knot sequence.
knots = numpy.random.random(kr)*10
knots.sort()
cp = numpy.random.random([dim,kc])
aspl.setKnotVectorAndCoefficients(knots, cp)
return (aspl,(knots,cp,order-1))
def createRandomRepeatedKnotBSpline(order,segments,dim):
aspl = bsplines.EuclideanBSpline(order, dim)
kr = aspl.numKnotsRequired(segments)
kc = aspl.numCoefficientsRequired(segments);
# Choose a random knot sequence, then duplicate knots pairwise to create repeats.
knots = numpy.random.random(kr)*10
knots.sort()
for i in range(0,len(knots)):
if i&1:
knots[i-1] = knots[i]
cp = numpy.random.random([dim,kc])
aspl.setKnotVectorAndCoefficients(knots, cp)
return (aspl,(knots,cp,order-1))
class BSplineTestCase(unittest.TestCase):
def runTest(self):
x=0
def assertMatricesEqual(self,M1, M2, tolerance, msg):
d1 = numpy.array(M1.shape)
d2 = numpy.array(M2.shape)
self.assertEqual(d1.size,d2.size)
for i in range(0,d1.size):
self.assertEqual(M1.shape[i], M2.shape[i])
md = numpy.max(numpy.abs(M1 - M2))
self.assertTrue(md < tolerance, msg= "The matrices\n%s\nand\n%s\nwere not equal to within tolerance %e [%e > %e]: %s" % (M1,M2,tolerance,md,tolerance, msg))
class TestBSplines(BSplineTestCase):
def test_bounds(self):
numpy.random.seed(3)
for order in range(2,10):
A = createUniformKnotBSpline(order,3,1);
aspl = A[0]
# Now, test that the bounds checking works.
# These shouldn't raise an exception.
aspl.eval(aspl.getMinTime())
aspl.eval(aspl.getMaxTime())
# These boundary cases should.
self.assertRaises(RuntimeError, lambda: aspl.eval(aspl.getMinTime() - 1e-15))
self.assertRaises(RuntimeError, lambda: aspl.eval(aspl.getMaxTime() + 1e-15))
aspl.eval(aspl.getMaxTime() - 1e-15)
def test_init(self):
numpy.random.seed(5)
# Test the initialization from two times and two positions.
p_0 = numpy.array([1,2,3]);
p_1 = numpy.array([2,4,6]);
t_0 = 0.0
t_1 = 0.1
dt = t_1 - t_0
v = (p_1 - p_0)/dt
for order in range(2,10):
aspl = bsplines.EuclideanBSpline(order, 3)
self.assertEqual(order, aspl.splineOrder())
#print "order: %d" % order
#print "p_0: %s" % p_0
#print "p_1: %s" % p_1
# Initialize the spline with these two times
aspl.initUniformSpline(numpy.array([t_0, t_1]), numpy.array([p_0,p_1]).transpose(), 1, 0.1);
b_0 = aspl.eval(t_0)
b_1 = aspl.eval(t_1)
v_0 = aspl.evalD(t_0,1)
v_1 = aspl.evalD(t_1,1)
#print "b_0: %s" % b_0
#print "b_1: %s" % b_1
for j in range(0,p_0.size):
# Keep the threshold low for even power cases.
self.assertAlmostEqual(p_0[j],b_0[j],places=2)
self.assertAlmostEqual(p_1[j],b_1[j],places=2)
self.assertAlmostEqual(v_0[j],v[j],places=2)
self.assertAlmostEqual(v_1[j],v[j],places=2)
def test_time_interval(self):
numpy.random.seed(6)
# Test two functions:
for order in range(2,10):
A = createUniformKnotBSpline(order,3,3)
aspl = A[0]
# Check that the time interval function works.
ti = aspl.timeInterval()
self.assertEqual(ti[0], aspl.getMinTime())
self.assertEqual(ti[1], aspl.getMaxTime())
def test_time_interval2(self):
numpy.random.seed(6)
# Test two functions:
for order in range(2,10):
nSegments = 3
aspl = bsplines.EuclideanBSpline(order, 3)
kr = aspl.numKnotsRequired(nSegments)
kc = aspl.numCoefficientsRequired(nSegments);
# Choose a uniform knot sequence at 0.0, 1.0, ...
knots = numpy.linspace(0.0,kr-1, kr)
cp = numpy.linspace(1.0,kc,kc)
# build a vector-valued spline
cpa = numpy.array([cp,cp*cp,cp*cp*cp])
aspl.initWithKnotsAndControlVertices(knots, cpa)
# Check that the time interval function works.
ti = aspl.timeInterval()
self.assertEqual(ti[0], aspl.getMinTime())
self.assertEqual(ti[1], aspl.getMaxTime())
def test_uniform(self):
numpy.random.seed(1)
for order in range(2,10):
aspl = bsplines.EuclideanBSpline(order, 1)
kr = aspl.numKnotsRequired(3)
kc = aspl.numCoefficientsRequired(3);
# Choose a uniform knot sequence.
knots = numpy.linspace(0.0,kr*1.0, kr)
cp = numpy.random.random([kc])
cpa = numpy.array([cp])
aspl.initWithKnotsAndControlVertices(knots, cpa)
fspl = (knots,cp,order-1)
for i in numpy.linspace(aspl.getMinTime(),aspl.getMaxTime()-1e-15,10):
f = fp.spalde(float(i),fspl)
a = aspl.eval(i)
for j in range(0,f.shape[0]):
a = aspl.evalD(i,j)
self.assertAlmostEqual(a, f[j])
def test_random(self):
numpy.random.seed(3)
for order in range(2,10):
aspl = bsplines.EuclideanBSpline(order, 1)
kr = aspl.numKnotsRequired(3)
kc = aspl.numCoefficientsRequired(3);
knots = numpy.random.random([kr]) * 10
knots.sort()
cp = numpy.random.random([kc])
cpa = numpy.array([cp])
aspl.initWithKnotsAndControlVertices(knots, cpa)
fspl = (knots,cp,order-1)
for i in numpy.linspace(aspl.getMinTime(),aspl.getMaxTime(),10):
f = fp.spalde(float(i),fspl)
a = aspl.eval(i)
for j in range(0,f.shape[0]):
a = aspl.evalD(i,j)
self.assertAlmostEqual(a, f[j])
def test_integral(self):
for order in range(2,8,2):
for dt in numpy.arange(0.1,2.0,0.1):
# Create a spline with three segments
aspl = bsplines.EuclideanBSpline(order, 1)
kr = aspl.numKnotsRequired(4)
kc = aspl.numCoefficientsRequired(4);
# Choose a uniform knot sequence.
knots = numpy.linspace(0.0, (kr - 1)*dt, kr)
cp = numpy.random.random(kc);
cpa = numpy.array([cp])
aspl = bsplines.EuclideanBSpline(order, 1);
aspl.initWithKnotsAndControlVertices(knots, cpa);
fspl = (knots,cp,order-1)
for a in numpy.arange(aspl.getMinTime(),aspl.getMaxTime()-1e-15,0.4*dt):
for i in numpy.arange(aspl.getMinTime(), aspl.getMaxTime()-1e-15, 0.4*dt):
#print "Eval at %f\n" % (i)
f = fp.splint(a,float(i),fspl)
b = aspl.evalI(a,i)
self.assertAlmostEqual(b, f, msg="order %d spline integral evaluated on [%f,%f] (%f != %f) was not right" % (order, a,i,float(b),f))
def test_integral_non_uniform(self):
for order in range(2,8,2):
# Create a spline with three segments
aspl = bsplines.EuclideanBSpline(order, 1)
kr = aspl.numKnotsRequired(4)
kc = aspl.numCoefficientsRequired(4);
# Choose a non-uniform knot sequence.
knots = numpy.linspace(0.0, (kr - 1), kr)
knots = knots*knots
cp = numpy.random.random(kc);
cpa = numpy.array([cp])
aspl = bsplines.EuclideanBSpline(order, 1);
aspl.initWithKnotsAndControlVertices(knots, cpa);
fspl = (knots,cp,order-1)
for a in numpy.arange(aspl.getMinTime(),aspl.getMaxTime()-1e-15,0.4):
for i in numpy.arange(aspl.getMinTime(), aspl.getMaxTime()-1e-15, 0.4):
#print "Eval at %f\n" % (i)
f = fp.splint(a,float(i),fspl)
b = aspl.evalI(a,i)
self.assertAlmostEqual(b, f, msg="order %d spline integral evaluated on [%f,%f] (%f != %f) was not right" % (order, a,i,float(b),f))
def test_constant_init(self):
tmin = 0.0
tmax = 5.0
for order in range(2,6):
for dim in range(1,4):
for segs in range(1,4):
c = numpy.random.random([dim])
# Initialize a constant spline
aspl = bsplines.EuclideanBSpline(order, dim)
aspl.initConstantSpline(tmin,tmax,segs,c)
# Test the time boundaries
self.assertAlmostEqual(tmin,aspl.getMinTime())
self.assertAlmostEqual(tmax,aspl.getMaxTime())
# Test the value.
for t in numpy.arange(aspl.getMinTime(),aspl.getMaxTime(),0.1):
self.assertMatricesEqual(aspl.evalD(t,0),c,1e-15,"Error getting back the constant value")
if __name__ == '__main__':
import rostest
rostest.rosrun('splines', 'bspline', TestBSplines)
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
#
"""Update Notifier
This script helps you keep track of software updates. It can be handy for
software without an effective update mechanism and for rarely used software you
still want to keep up to date.
The Update Notifier script doesn't update software itself. It just simplifies
the way you check for available updates.
[Update Notifier](http://github.com/samuelspiza/updatenotifier) is hosted on
Github.
Usage
The script uses two data resources.
Input file
A host specific input file contains the tools you want to check and their
currently installed version. This [template](http://gist.github.com/488675)
contains an example JSON structure for this file.
Toolslist
The second file contains all supported tools, URLs to their corresponding
download pages and a regexp to match the version string on that page.
[This](http://gist.github.com/616971) is an example for the JSON structure for
this file. In addition to the default way of storing this file locally, there
are currently two ways to access remote files. If '--resource web' is set, the
parameter of '--tools' will be interpreted as a URL. If '--resource gist' is
set, it will be interpreted as 'ID:FILE_NAME' with 'ID' being the Gist ID and
'FILE_NAME' the name of the file in the gist repository.
"""
__author__ = "Samuel Spiza <sam.spiza@gmail.com>"
__version__ = "0.6.3"
import re
import codecs
import os
import json
import logging
import logging.handlers
import urllib.request
import urllib.parse
import urllib.error
import gzip
import optparse
import sys
import threading
def getOptions(argv):
"""A method for parsing the argument list."""
installDirectory = os.path.dirname(os.path.abspath(__file__))
parser = optparse.OptionParser()
parser.add_option("-o", "--output",
dest="output", metavar="PATH",
default="updatenotifications.htm",
help="Change the path of the output file.")
parser.add_option("-i", "--input",
dest="input", metavar="PATH",
default=os.path.expanduser("~/updatenotifier.json"),
help="Change the path of the input file.")
parser.add_option("-r", "--resource",
dest="resource", metavar="TYPE", default="local",
help="Change the resource type to 'web' or 'gist'.")
parser.add_option("-t", "--tools",
dest="tools", metavar="PATH",
default=installDirectory + "/toolslist.json",
help="Change the path of the tools list file.")
parser.add_option("-l", "--log",
dest="log", action="store_true", default=False,
help="Write a log.")
parser.add_option("-m", "--logPath",
dest="logpath", metavar="PATH",
default=installDirectory + "/updatenotifier.log",
help="Change the path of the log file.")
return parser.parse_args(argv)[0]
HEADER = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',
'Accept-Language': 'de',
'Accept-Encoding': 'utf-8'}
def absUrl(site, href):
"""Returns an absolute URL.
It takes a site and a path (e.g. the argument of an 'href' or 'src'
attribute of an HTML tag). The absolute URL is composed the same way a
web browser would compose it.
"""
href = href.replace("\\", "/")
if href.startswith("http://") or href.startswith("https://"):
return href
comps = href.split("/")
if href[:1] == "/":
comps[0:1] = site.split("/")[:3]
else:
comps[0:0] = site.split("/")[:-1]
i = 2
while i < len(comps):
if comps[i] == '.':
del comps[i]
elif comps[i] == '..':
if i > 0 and comps[i-1] != '..':
del comps[i-1:i+1]
i -= 1
else:
i += 1
else:
i += 1
return "/".join(comps)
def getResponse(url, postData=None):
"""Opens an URL with POST data.
The POST data must be a dictionary.
"""
if postData is not None:
postData = urllib.parse.urlencode(postData).encode("utf-8")
req = urllib.request.Request(url, postData)
for key in HEADER:
req.add_header(key, HEADER[key])
return urllib.request.urlopen(req)
def safeGetResponse(url, postData=None):
"""Opens an URL with POST data and handles exceptions.
Returns None if an error occurs. Catches HTTPError and URLError.
"""
try:
return getResponse(url, postData=postData)
except urllib.error.HTTPError as e:
if e.code == 302:
return getResponse(absUrl(url, e.info().get("Location")))
else:
print(url, " Error Code: ", e.code)
except urllib.error.URLError as e:
print(url, " Reason: ", e.reason)
return None
def getContentFromResponse(response, bytes=False, encoding="utf-8"):
if response is None:
return None
if response.info().get("Content-Encoding") == "gzip":
data = gzip.decompress(response.read())
else:
data = response.read()
if bytes:
return data
else:
return data.decode(encoding)
def safeGetContent(url, postData=None, bytes=False, encoding="utf-8"):
"""Opens an URL with POST data and returns decoded UTF-8 string.
Supports GZIP encoded resposes.
"""
response = safeGetResponse(url, postData)
return getContentFromResponse(response, bytes=bytes, encoding=encoding)
class ContentAsFileObjectWrapper:
def __init__(self, content):
self.content = content
def read(self):
return self.content
def __exit__(self, errorType, value, traceback):
pass
def __enter__(self):
return self
class FormaterSkeleton:
"""A skeleton for a Formater."""
def __init__(self):
self.lock = threading.Lock()
def webError(self, name):
pass
def failed(self, name, url):
pass
def update(self, name, url, installed, version):
pass
def upToDate(self, name, installed):
pass
def close(self):
pass
class StreamFormater(FormaterSkeleton):
"""A class for formating the console output.
Provides methods for different results of the update check. The output is
formated in a table layout. The width of the cols can be passed to the
Formater on creation and changed via a method.
"""
def __init__(self, width=(1, 1)):
"""The constructor.
The minimum width of the columns defaults to one.
"""
super().__init__()
self.setColWidth(width)
def setColWidth(self, width=(1, 1)):
"""Method to change the width of the cols.
Width is a tupel of two integers that set the minimum width of first
and second col. The default values are one.
"""
self.strWebError = "{0:%s} {1:%s} No HTTP response." % width
self.strFailed = "{0:%s} {1:%s} No Match." % width
self.strUpdate = "{0:%s} {1:%s} Version {2} available." % width
self.strUpToDate = "{0:%s} {1:%s}" % width
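# For example, width=(12, 8) yields format strings such as
# "{0:12} {1:8} Version {2} available.", i.e. the name and version columns
# are padded to at least 12 and 8 characters respectively.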
def webError(self, name):
print(self.strWebError.format(name, "Error:"))
def failed(self, name, url):
print(self.strFailed.format(name, "Error:"))
def update(self, name, url, installed, version):
print(self.strUpdate.format(name, installed, version))
def upToDate(self, name, installed):
print(self.strUpToDate.format(name, installed))
class HtmlFormater(FormaterSkeleton):
"""A class for formating the notification file.
Provides methods for different results of the update check. The output is
formated as a HTML table cell.
"""
def __init__(self, outputFile):
"""The constructor."""
super().__init__()
self.outputFile = outputFile
self.output = ""
self.strFailed = """ <tr>
<td><a href="{0}">{1}</a></td>
<td>Error:</td>
<td>No Match.</td>
</tr>
"""
self.strUpdate = """ <tr>
<td><a href="{0}">{1}</a></td>
<td>{2}</td>
<td>Version {3} available.</td>
</tr>
"""
self.htmlHead = """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<title>Updatenotification</title>
</head>
<body>
<table>
"""
self.htmlTail = """ <table>
<body>
<html>
"""
def failed(self, name, url):
self.output += self.strFailed.format(url, name)
def update(self, name, url, installed, version):
self.output += self.strUpdate.format(url, name, installed, version)
def close(self):
if 0 < len(self.output):
with codecs.open(self.outputFile, encoding="utf-8", mode="w") as f:
f.write(self.htmlHead + self.output + self.htmlTail)
class Tool(threading.Thread):
"""A class for each tool to be check.
It provides a method to initiate the check.
"""
def __init__(self, name, url, regexp, installed):
"""The constructor.
The name is the human-readable name of the tool. The URL points to the
download page. The regexp matches a version string in the content of
the download page. Installed is the version string of the currently
installed version of the tool on the host. The Formater objects will
be used to format the different outputs.
"""
super().__init__()
self.name = name
self.url = url
self.regexp = regexp
self.installed = installed
self.encoding = "utf-8"
self.formaters = []
def setEncoding(self, encoding):
self.encoding = encoding
def run(self):
"""Method to check if a newer version is available.
It prints the result of the check to the console and updates
self.notification except the check is successful and no new version is
available.
"""
content = safeGetContent(self.url, encoding=self.encoding)
if content is None:
self.webError()
else:
m = re.search(self.regexp, content)
if m is None:
self.failed()
elif self.installed != m.group(0):
self.update(self.installed, m.group(0))
else:
self.upToDate(self.installed)
def attachFormater(self, formater):
self.formaters.append(formater)
def webError(self):
logger = logging.getLogger('Tool.check')
logger.debug("Failed to retrieve HTTP response for %s", self.name)
for f in self.formaters:
f.lock.acquire()
f.webError(self.name)
f.lock.release()
def failed(self):
logger = logging.getLogger('Tool.check')
logger.debug("Failed to match version string for %s",
self.name)
for f in self.formaters:
f.lock.acquire()
f.failed(self.name, self.url)
f.lock.release()
def update(self, installed, new):
logger = logging.getLogger('Tool.check')
logger.info("%s @ %s -> %s", self.name, installed, new)
for f in self.formaters:
f.lock.acquire()
f.update(self.name, self.url, installed, new)
f.lock.release()
def upToDate(self, installed):
logger = logging.getLogger('Tool.check')
logger.debug("%s @ %s", self.name, installed)
for f in self.formaters:
f.lock.acquire()
f.upToDate(self.name, installed)
f.lock.release()
class UpdateNotifier:
"""A class to perform a check for software updates."""
def __init__(self, outputFile, toolsList, toolsToCheck):
"""The constructor.
OutputFile is the path of the file that will be written if at least one
available update was found or if not every version string could be
matched. ToolsList and ToolsToCheck are dictionaries parsed from the
corresponding JSON objects.
"""
logger = logging.getLogger('UpdateNotifier')
self.outputFile = outputFile
self.toolsList = toolsList
self.toolsToCheck = {}
for tool in toolsToCheck:
if tool in self.toolsList:
self.toolsToCheck[tool] = toolsToCheck[tool]
else:
logger.warning("Unknown tool '%s'.", tool)
self.formater = [StreamFormater(self.getRowWidth()),
HtmlFormater(self.outputFile)]
def __enter__(self):
return self
def __exit__(self, errorType, value, traceback):
self.closeFormater()
def closeFormater(self):
for f in self.formater:
f.close()
def getRowWidth(self):
"""Determines the width of the cols for the output."""
names = [len(self.toolsList[t]['name']) for t in self.toolsToCheck]
nameLen = max(names)
# add [6] for "ERROR:"
versionLen = max([len(v) for v in self.toolsToCheck.values()] + [6])
return (nameLen, versionLen)
def check(self):
"""Initiates the check for updates for each tool."""
tools = []
for tool in sorted(self.toolsToCheck):
t = Tool(self.toolsList[tool]['name'],
self.toolsList[tool]['url'],
self.toolsList[tool]['regexp'],
self.toolsToCheck[tool])
if "encoding" in self.toolsList[tool]:
t.setEncoding(self.toolsList[tool]['encoding'])
for f in self.formater:
t.attachFormater(f)
t.start()
tools.append(t)
for tool in tools:
tool.join()
class Gist:
"""A class to use files in a gist as FileObjects."""
def __init__(self, resource):
"""The constructor.
The resource is the gist ID and the name of the file in the gist,
separated by a colon.
"""
self.id, self.fileName = resource.split(":")
self.repoContent = None
self.url = None
self.fileObject = None
def __enter__(self):
return self.getFileObject()
def __exit__(self, errorType, value, traceback):
pass
def getRepoContent(self):
if self.repoContent is None:
url = "http://gist.github.com/" + self.id
self.repoContent = safeGetContent(url)
return self.repoContent
def getUrl(self):
if self.url is None:
regexp = "/raw/%s/[0-9a-f]*/%s" % (self.id, self.fileName)
m = re.search(regexp, self.getRepoContent())
self.url = "http://gist.github.com" + m.group(0)
return self.url
def getFileObject(self):
if self.fileObject is None:
content = safeGetContent(self.getUrl())
self.fileObject = ContentAsFileObjectWrapper(content)
return self.fileObject
def main(argv):
options = getOptions(argv)
# Configure the logging.
logging.getLogger().setLevel(logging.INFO)
if options.log:
handler = logging.handlers.RotatingFileHandler(
options.logpath, maxBytes=65000, backupCount=1)
formatString = "%(asctime)s %(name)-20s %(levelname)-8s %(message)s"
handler.setFormatter(logging.Formatter(formatString))
else:
handler = logging.NullHandler()
logging.getLogger().addHandler(handler)
logger = logging.getLogger()
logger.info("updatenotifier.py START")
# Read the tools and their installed versions from the input file
with open(options.input, "r") as file:
toolsToCheck = json.load(file)
# Create the FileObject based on the resource option. The content is a JSON
# object that contains all supported tools, URLs to their corresponding
# download pages and a regexp to match the version string on that page.
if options.resource.lower() == "web":
fo = ContentAsFileObjectWrapper(safeGetContent(options.tools))
elif options.resource.lower() == "gist":
fo = Gist(options.tools)
else:
fo = open(options.tools, "r")
with fo as file:
toolsList = json.load(file)
# Check all installed tools for updates
with UpdateNotifier(options.output, toolsList, toolsToCheck) as un:
un.check()
logger.info("updatenotifier.py END")
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
|
import time, logging
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.mail import mail_admins
from djwepay.utils import make_batch_key, make_callback_key
from wepay import calls, WePay as PythonWePay
from wepay.exceptions import WePayError, WePayHTTPError, WePayConnectionError
from wepay.utils import cached_property
__all__ = ['WePay']
DEBUG = getattr(settings, 'WEPAY_DEBUG', getattr(settings, 'DEBUG'))
THROTTLE_PROTECT = getattr(settings, 'WEPAY_THROTTLE_PROTECT', True)
THROTTLE_CALL_LIMIT = getattr(settings, 'WEPAY_THROTTLE_CALL_LIMIT', 30)
THROTTLE_TIMEOUT = getattr(settings, 'WEPAY_THROTTLE_TIMEOUT', 10)
THROTTLE_CALL_KEY = getattr(settings, 'WEPAY_THROTTLE_CALL_KEY', 'wepay-throttle-call')
BLOCKING_KEY = THROTTLE_CALL_KEY + '-blocked'
DOMAIN = getattr(settings, 'WEPAY_SITE_DOMAIN', None)
WEPAY_MAIL_ADMIN = getattr(settings, 'WEPAY_MAIL_ADMINS', not DEBUG)
BATCH_CALLS_NUMBER = getattr(settings, 'WEPAY_BATCH_CALLS_NUMBER', 50)
BATCH_CALLS_CACHE = {}
BATCH_CALLBACKS = {}
class Call(calls.base.Call):
def make_call(self, func, params, extra_kwargs):
callback = extra_kwargs.pop('callback', None)
if extra_kwargs.get('batch_mode', False):
batch_key = make_batch_key(extra_kwargs.pop('batch_id'))
reference_id = extra_kwargs.get('batch_reference_id', None)
call = super(Call, self).make_call(func, params, extra_kwargs)
if callback is not None and callable(callback):
# put callback in the cache
assert reference_id is not None, \
"'batch_reference_id' is required when 'callback' is provided"
callbacks = BATCH_CALLBACKS.get(batch_key, {})
callbacks[reference_id] = callback
BATCH_CALLBACKS[batch_key] = callbacks
# put the actual call in the cache
calls = BATCH_CALLS_CACHE.get(batch_key, [])
calls.append(call)
BATCH_CALLS_CACHE[batch_key] = calls
return (None, call) # all api calls are expected to return a tuple
else:
response = super(Call, self).make_call(func, params, extra_kwargs)
processed = None
if callback is not None and callable(callback):
processed = callback(response)
return (processed, response)
def complete_uris(self, names, kwargs):
"""Converts to full uri and updates kwargs"""
for name in names:
if name in kwargs:
kwargs[name] = self._api.get_full_uri(kwargs[name])
class OAuth2(Call, calls.OAuth2):
def authorize(self, client_id, redirect_uri, scope, **kwargs):
return super(OAuth2, self).authorize(
client_id, self._api.get_full_uri(redirect_uri), scope, **kwargs)
def token(self, *args, **kwargs):
self.complete_uris(['redirect_uri', 'callback_uri'], kwargs)
return super(OAuth2, self).token(*args, **kwargs)
class App(Call, calls.App):
pass
class User(Call, calls.User):
def modify(self, *args, **kwargs):
self.complete_uris(['callback_uri'], kwargs)
return super(User, self).modify(*args, **kwargs)
def register(self, *args, **kwargs):
self.complete_uris(['redirect_uri', 'callback_uri'], kwargs)
return super(User, self).register(*args, **kwargs)
class Account(Call, calls.Account):
def create(self, *args, **kwargs):
self.complete_uris(['image_uri', 'callback_uri'], kwargs)
return super(Account, self).create(*args, **kwargs)
def modify(self, *args, **kwargs):
self.complete_uris(['image_uri', 'callback_uri'], kwargs)
return super(Account, self).modify(*args, **kwargs)
def get_update_uri(self, *args, **kwargs):
self.complete_uris(['redirect_uri'], kwargs)
return super(Account, self).get_update_uri(*args, **kwargs)
class Checkout(Call, calls.Checkout):
def create(self, *args, **kwargs):
self.complete_uris(
['redirect_uri', 'callback_uri', 'fallback_uri'], kwargs)
return super(Checkout, self).create(*args, **kwargs)
def modify(self, *args, **kwargs):
self.complete_uris(['callback_uri'], kwargs)
return super(Checkout, self).modify(*args, **kwargs)
class Preapproval(Call, calls.Preapproval):
def create(self, *args, **kwargs):
self.complete_uris(
['redirect_uri', 'callback_uri', 'fallback_uri'], kwargs)
return super(Preapproval, self).create(*args, **kwargs)
def modify(self, *args, **kwargs):
self.complete_uris(['callback_uri'], kwargs)
return super(Preapproval, self).modify(*args, **kwargs)
class Withdrawal(Call, calls.Withdrawal):
def create(self, *args, **kwargs):
self.complete_uris(
['redirect_uri', 'callback_uri', 'fallback_uri'], kwargs)
return super(Withdrawal, self).create(*args, **kwargs)
def modify(self, *args, **kwargs):
self.complete_uris(['callback_uri'], kwargs)
return super(Withdrawal, self).modify(*args, **kwargs)
class CreditCard(Call, calls.CreditCard):
pass
class SubscriptionPlan(Call, calls.SubscriptionPlan):
def create(self, *args, **kwargs):
self.complete_uris(['callback_uri'], kwargs)
return super(SubscriptionPlan, self).create(*args, **kwargs)
def modify(self, *args, **kwargs):
self.complete_uris(['callback_uri'], kwargs)
return super(SubscriptionPlan, self).modify(*args, **kwargs)
class Subscription(Call, calls.Subscription):
def create(self, *args, **kwargs):
self.complete_uris(['redirect_uri', 'callback_uri'], kwargs)
return super(Subscription, self).create(*args, **kwargs)
def modify(self, *args, **kwargs):
self.complete_uris(['redirect_uri', 'callback_uri'], kwargs)
return super(Subscription, self).modify(*args, **kwargs)
class SubscriptionCharge(Call, calls.SubscriptionCharge):
pass
class Batch(Call, calls.Batch):
def process_calls(self, batch_key, calls):
"""Checks if there are any callbacks associated with calls'
reference_id in cache. Invokes if such present.
"""
processed_calls = []
for call in calls:
reference_id = call.get('reference_id', None)
response = call['response']
processed = None
if 'error' in response:
processed = WePayError(response['error'], response['error_code'],
response['error_description'])
elif reference_id is not None:
callbacks = BATCH_CALLBACKS.get(batch_key, None)
callback = None
if callbacks:
callback = callbacks.get(reference_id, None)
if callback is not None and callable(callback):
processed = callback(response)
call['processed'] = processed
processed_calls.append(call)
return processed_calls
def create(self, batch_id, client_id, client_secret, max_calls=None, **kwargs):
"""Retrieves queued calls, sequentially sends /batch/create API calls in
chunks of up to `max_calls`, which defaults to
`WEPAY_BATCH_CALLS_NUMBER` setting and then processes any callbacks
set. Consider raising `timeout` kwarg, since batch calls can take a
while, also if you start getting HTTP 404 errors, try lowering
`max_calls` value.
"""
max_calls = max_calls or BATCH_CALLS_NUMBER
assert 0 < max_calls and max_calls <= 50, \
"""max_calls should be a positive number no greater than 50, which is
also WePay's limit"""
batch_key = make_batch_key(batch_id)
calls = BATCH_CALLS_CACHE.get(batch_key)
calls_response = []
while calls:
cur_calls = calls[:max_calls]
response = super(Batch, self).create(
client_id, client_secret, cur_calls, **kwargs)[1]
calls_response.extend(response['calls'])
calls = calls[max_calls:]
response = (None, {'calls': self.process_calls(batch_key, calls_response)})
self.del_calls(batch_id)
return response
def del_calls(self, batch_id):
batch_key = make_batch_key(batch_id)
try:
del BATCH_CALLBACKS[batch_key]
except KeyError: pass
try:
del BATCH_CALLS_CACHE[batch_key]
except KeyError: pass
def get_calls(self, batch_id):
return BATCH_CALLS_CACHE.get(make_batch_key(batch_id))
def set_calls(self, batch_id, calls):
BATCH_CALLS_CACHE[make_batch_key(batch_id)] = calls
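# A minimal usage sketch for batch mode, assuming the surrounding django-wepay
# setup. Only Batch.create's signature above is taken from this module; the
# queued account call, its arguments, the access token kwarg and all ids/uris
# are hypothetical:
#
#   api = WePay(access_token=ACCESS_TOKEN)
#   # queue a call instead of sending it immediately
#   api.account.modify(account_id,
#                      callback_uri='/wepay/ipn/',
#                      batch_mode=True,
#                      batch_id='nightly',
#                      batch_reference_id='acct-%s' % account_id,
#                      callback=handle_account_response)
#   # ...queue more calls, then flush them in chunks and run the callbacks:
#   _, result = api.batch.create('nightly', CLIENT_ID, CLIENT_SECRET, timeout=120)
#   for call in result['calls']:
#       print(call.get('reference_id'), call.get('processed'))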
class WePay(PythonWePay):
def __init__(self, **kwargs):
domain = DOMAIN
if domain is None:
domain = Site.objects.get_current().domain
self.site_uri = "https://%s" % domain
kwargs['timeout'] = kwargs.get('timeout', 45)
super(WePay, self).__init__(**kwargs)
@cached_property
def oauth2(self):
return OAuth2(self)
@cached_property
def app(self):
return App(self)
@cached_property
def user(self):
return User(self)
@cached_property
def account(self):
return Account(self)
@cached_property
def checkout(self):
return Checkout(self)
@cached_property
def preapproval(self):
return Preapproval(self)
@cached_property
def withdrawal(self):
return Withdrawal(self)
@cached_property
def credit_card(self):
return CreditCard(self)
@cached_property
def subscription_plan(self):
return SubscriptionPlan(self)
@cached_property
def subscription(self):
return Subscription(self)
@cached_property
def subscription_charge(self):
return SubscriptionCharge(self)
@cached_property
def batch(self):
return Batch(self)
def _log_error(self, error, uri, params):
logger = logging.getLogger('djwepay.api.error')
logger.error("\nCall: '%s' with params: '%s' produced an error: '%s'"
"\n%s" % (uri, params, error, '='*70))
def _log_debug(self, uri, params, response):
logger = logging.getLogger('djwepay.api.debug')
logger.debug(
"\nCall: '%s' was placed with params: '%s' and received a response: "
"'%s'\n%s" % (uri, params, response, '='*70))
def _call_protected(self, uri, **kwargs):
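# Throttle bookkeeping: cache.add() only succeeds when BLOCKING_KEY is not
# already present, so it acts as a best-effort lock around the shared list of
# recent call timestamps. If the current THROTTLE_TIMEOUT window already holds
# THROTTLE_CALL_LIMIT calls, release the lock, sleep until the oldest call
# expires and retry; otherwise record this call and proceed.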
blocked = cache.add(BLOCKING_KEY, True)
if not blocked:
time.sleep(1)
return self._call_protected(uri, **kwargs)
now = int(time.time())
unexpired_timestamp = now - THROTTLE_TIMEOUT
unexpired_calls = [x for x in cache.get(THROTTLE_CALL_KEY, [])
if x >= unexpired_timestamp]
if len(unexpired_calls) >= THROTTLE_CALL_LIMIT:
cache.delete(BLOCKING_KEY)
sleep_time = THROTTLE_TIMEOUT + unexpired_calls[0] - now + 1
time.sleep(sleep_time)
return self._call_protected(uri, **kwargs)
else:
unexpired_calls.append(now)
cache.set(
THROTTLE_CALL_KEY, unexpired_calls, THROTTLE_TIMEOUT)
cache.delete(BLOCKING_KEY)
return super(WePay, self).call(uri, **kwargs)
def call(self, uri, **kwargs):
try:
if THROTTLE_PROTECT:
response = self._call_protected(uri, **kwargs)
else:
response = super(WePay, self).call(uri, **kwargs)
except (WePayHTTPError, WePayConnectionError) as e:
self._log_error(e, uri, kwargs.get('params', {}))
mail_admins(
"WePayError", """
There was a problem with making an API call: %s
Params: %s
Timeout: %s
Error received: %s""" % (
uri, kwargs.get('params', None), kwargs.get('timeout', None), e),
fail_silently=not DEBUG
)
raise
if DEBUG:
self._log_debug(uri, kwargs.get('params', {}), response)
return response
def get_full_uri(self, uri):
"""
Used to build callback URIs. Make sure you have WEPAY_SITE_DOMAIN in
settings or the Sites app enabled and configured.
:param str uri: last part of the URL
"""
if uri.startswith('http'):
return uri
return '%s%s' % (self.site_uri, uri)
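# e.g. with WEPAY_SITE_DOMAIN = 'example.com' (a hypothetical setting value),
# get_full_uri('/wepay/ipn/') returns 'https://example.com/wepay/ipn/', while
# already-absolute 'http(s)://...' uris are returned unchanged.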
def get_login_uri(self):
"""
Returns the WePay login URL, in case someone needs it.
"""
return '%s/login' % self.browser_uri
|
|
# vim:ts=4:sw=4:et:
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
from os import path
from watchman.integration.lib import WatchmanEdenTestCase
class TestEdenJournal(WatchmanEdenTestCase.WatchmanEdenTestCase):
def test_eden_journal(self):
def populate(repo):
repo.write_file("hello", "hola\n")
repo.commit("initial commit.")
root = self.makeEdenMount(populate)
repo = self.repoForPath(root)
initial_commit = repo.get_head_hash()
res = self.watchmanCommand("watch", root)
self.assertEqual("eden", res["watcher"])
clock = self.watchmanCommand("clock", root)
self.touchRelative(root, "newfile")
res = self.watchmanCommand("query", root, {"fields": ["name"], "since": clock})
clock = res["clock"]
self.assertFileListsEqual(res["files"], ["newfile"])
repo.add_file("newfile")
repo.commit(message="add newfile")
res = self.watchmanCommand(
"query",
root,
{
"expression": [
"not",
[
"anyof",
["dirname", ".hg"],
["match", "checklink*"],
["match", "hg-check*"],
],
],
"fields": ["name"],
"since": clock,
},
)
clock = res["clock"]
self.assertFileListsEqual(
res["files"],
["newfile"],
message="We expect to report the files changed in the commit",
)
# Test that the journal has the correct contents across a "reset"-like
# operation where the parents are poked directly. This is using
# debugsetparents rather than reset because the latter isn't enabled
# by default for hg in the watchman test machinery.
self.touchRelative(root, "unclean")
repo.hg("debugsetparents", initial_commit)
res = self.watchmanCommand(
"query",
root,
{
"expression": ["not", ["dirname", ".hg"]],
"fields": ["name"],
"since": clock,
},
)
self.assertFileListsEqual(
res["files"],
["newfile", "unclean"],
message=(
"We expect to report the file changed in the commit "
"as well as the unclean file"
),
)
# make sure that we detect eden getting unmounted. This sleep is unfortunate
# and ugly. Without it, the unmount will fail because something is accessing
# the filesystem. I haven't been able to find out what it is because fuser
# takes too long to run and by the time it has run, whatever that blocker
# was is not longer there. Ordinarily I'd prefer to poll on some condition
# in a loop rather than just sleeping an arbitrary amount, but I just don't
# know what the offending thing is and running the unmount in a loop is prone
# to false negatives.
time.sleep(1)
self.eden.remove(root)
watches = self.watchmanCommand("watch-list")
self.assertNotIn(root, watches["roots"])
def test_two_rapid_checkouts_show_briefly_changed_files(self):
initial_commit = None
add_commit = None
remove_commit = None
def populate(repo):
nonlocal initial_commit, add_commit, remove_commit
repo.write_file("hello", "hola\n")
initial_commit = repo.commit("initial commit.")
repo.write_file("newfile", "contents\n")
add_commit = repo.commit("add newfile")
repo.remove_file("newfile")
remove_commit = repo.commit("remove newfile")
root = self.makeEdenMount(populate)
repo = self.repoForPath(root)
# Synchronize to the initial commit.
repo.update(initial_commit, clean=True)
res = self.watchmanCommand("watch", root)
self.assertEqual("eden", res["watcher"])
clock = self.watchmanCommand("clock", root)
res = self.watchmanCommand("query", root, {"fields": ["name"], "since": clock})
clock = res["clock"]
# Update to the latest commit, through the intermediate.
repo.update(add_commit)
repo.update(remove_commit)
res = self.watchmanCommand(
"query",
root,
{
"expression": [
"not",
[
"anyof",
["dirname", ".hg"],
["match", "checklink*"],
["match", "hg-check*"],
],
],
"fields": ["name", "new"],
"since": clock,
},
)
res = self.normalizeFiles(res)
self.assertCountEqual(
res["files"],
[{"name": "newfile", "new": False}],
"Files created and removed across the update operation should show up in the changed list",
)
def test_aba_checkouts_show_briefly_changed_files(self):
initial_commit = None
add_commit = None
def populate(repo):
nonlocal initial_commit, add_commit
repo.write_file("hello", "hola\n")
initial_commit = repo.commit("initial commit.")
repo.write_file("newfile", "contents\n")
add_commit = repo.commit("add newfile")
root = self.makeEdenMount(populate)
repo = self.repoForPath(root)
# Synchronize to the initial commit.
repo.update(initial_commit, clean=True)
res = self.watchmanCommand("watch", root)
self.assertEqual("eden", res["watcher"])
clock = self.watchmanCommand("clock", root)
res = self.watchmanCommand("query", root, {"fields": ["name"], "since": clock})
clock = res["clock"]
# Update to the commit that adds a new file and back to the initial commit, expecting to see the affected file show up in the change list.
repo.update(add_commit)
repo.update(initial_commit)
res = self.watchmanCommand(
"query",
root,
{
"expression": [
"not",
[
"anyof",
["dirname", ".hg"],
["match", "checklink*"],
["match", "hg-check*"],
],
],
"fields": ["name", "new"],
"since": clock,
},
)
res = self.normalizeFiles(res)
self.assertCountEqual(
res["files"],
[{"name": "newfile", "new": False}],
"Files created and removed across the update operation should show up in the changed list",
)
def test_querying_with_truncated_journal_returns_fresh_instance(self):
def populate(repo):
repo.write_file("hello", "hola\n")
repo.commit("initial commit.")
root = self.makeEdenMount(populate)
res = self.watchmanCommand("watch", root)
self.assertEqual("eden", res["watcher"])
clock = self.watchmanCommand("clock", root)
with self.eden.get_thrift_client() as thrift_client:
thrift_client.setJournalMemoryLimit(root, 0)
self.assertEqual(0, thrift_client.getJournalMemoryLimit(root))
# Eden's Journal always remembers at least one entry so we will
# do things in twos
self.touchRelative(root, "newfile")
self.touchRelative(root, "newfile2")
res = self.watchmanCommand(
"query",
root,
{
"expression": [
"not",
["anyof", ["dirname", ".hg"], ["dirname", ".eden"]],
],
"fields": ["name"],
"since": clock,
},
)
clock = res["clock"]
self.assertTrue(res["is_fresh_instance"])
self.assertFileListsEqual(
res["files"], ["hello", "newfile", "newfile2", ".hg", ".eden"]
)
self.removeRelative(root, "newfile")
self.removeRelative(root, "newfile2")
res = self.watchmanCommand(
"query",
root,
{
"expression": [
"not",
["anyof", ["dirname", ".hg"], ["dirname", ".eden"]],
],
"fields": ["name"],
"since": clock,
},
)
clock = res["clock"]
self.assertTrue(res["is_fresh_instance"])
self.assertFileListsEqual(res["files"], ["hello", ".hg", ".eden"])
def test_changing_root_tree(self):
def populate(repo):
repo.write_file("hello", "hola\n")
repo.commit("initial commit.")
root = self.makeEdenMount(populate)
res = self.watchmanCommand("watch", root)
self.assertEqual("eden", res["watcher"])
clock = self.watchmanCommand("clock", root)
# When the root tree inode changes, EdenFS will report an empty path
# for such change. This test ensures we handle this case well.
self.touchRelative(root, "")
self.touchRelative(root, "")
# This should not throw
res = self.watchmanCommand(
"query",
root,
{
"expression": [
"not",
["anyof", ["dirname", ".hg"], ["dirname", ".eden"]],
],
"fields": ["name", "exists"],
"since": clock,
},
)
self.assertEqual(res["files"][0], {"name": path.basename(root), "exists": True})
|
|
"""
<Program Name>
test_invalid_input.py
<Started>
December 2010
<Author>
Alex Hanson
<Purpose>
A battery of tests to run against the XML-RPC interface of the Custom
Installer Builder, testing its ability to detect invalid input. Requires the Seattle
integration test libraries, which should be in the Python path. The
corresponding 'seattle_gmail_info' file should be present in the directory in
which this script is run.
Note that the XMLRPC_PROXY_URL variable must be modified below.
"""
import sys
import xmlrpclib
import integrationtestlib
import send_gmail
# The URL of the XML-RPC interface. Note the trailing slash.
XMLRPC_PROXY_URL = 'http://example.com/custominstallerbuilder/xmlrpc/'
# Email addresses which should be notified in case of failure, in addition to
# the default list. For example:
# NOTIFY_LIST = ['user@example.com', 'user2@example.com']
NOTIFY_LIST = []
# We will want to access the proxy across several functions, so make it global.
XMLRPC_PROXY = None
# We'll store any errors in a dictionary and send them off at the end.
ERRORS = dict()
# Inserts an error into the ERRORS dictionary.
def log_error(function_name, message):
if function_name not in ERRORS:
ERRORS[function_name] = list()
ERRORS[function_name].append(message)
# Serves the dual purpose of testing against valid input and verifying the
# return value is of the right type.
def test_valid_input(test_function, return_type, function_name):
integrationtestlib.log(
('Verifying that the \'' + function_name + '\' function returns object of ' +
'type \'' + return_type.__name__ + '\'...'))
results = None
try:
results = test_function()
except:
log_error(function_name, 'Failed against valid input.')
return False, None
if not isinstance(results, return_type):
log_error(function_name,
('Returned object of type \'' + type(results).__name__ +
'\' rather than expected type \'' + return_type.__name__) + '\'.')
return False, None
return True, results
def test_invalid_input(test_function, function_name, reason_invalid):
integrationtestlib.log(('Verifying that the \'' + function_name +
'\' function fails against invalid input.'))
try:
test_function()
except:
# An error occurred, so the invalid input was detected.
return True
# We didn't want success here!
log_error(function_name, 'Function succeeded with invalid input: ' + reason_invalid)
return False
def test_api_version():
test_function = lambda: XMLRPC_PROXY.api_version()
test_valid_input(test_function, str, 'api_version')
# Any argument should cause problems...
test_function = lambda: XMLRPC_PROXY.api_version('')
test_invalid_input(test_function, 'api_version', 'extra argument')
def test_build_installers():
vessels = [{'percentage': 80, 'owner': 'owner'}]
test_function = lambda: XMLRPC_PROXY.build_installers(vessels)
success, results = test_valid_input(test_function, dict, 'build_installers')
# A user's vessels might not add up to 80%.
vessels = [{'percentage': 100, 'owner': 'owner'}]
test_function = lambda: XMLRPC_PROXY.build_installers(vessels)
test_invalid_input(test_function, 'build_installers', 'vessels add to 100%')
# A user might neglect to give all vessels an owner.
vessels = [{'percentage': 80}]
test_function = lambda: XMLRPC_PROXY.build_installers(vessels)
test_invalid_input(test_function, 'build_installers', 'vessel lacks owner')
# A user might give an invalid cryptographic key.
vessels = [{'percentage': 80, 'owner': 'owner'}]
user_data = {'owner': {'public_key': 'INVALID'}}
test_function = lambda: XMLRPC_PROXY.build_installers(vessels, user_data)
test_invalid_input(test_function, 'build_installers', 'invalid cryptographic key')
return success, results
def test_get_urls(build_id):
test_function = lambda: XMLRPC_PROXY.get_urls(build_id)
test_valid_input(test_function, dict, 'get_urls')
# A user might give an invalid build ID.
test_function = lambda: XMLRPC_PROXY.get_urls('INVALID')
test_invalid_input(test_function, 'get_urls', 'invalid build ID')
# A user might give a build ID that (probably) does not exist.
test_function = lambda: XMLRPC_PROXY.get_urls('0123456789012345678901234567890123456789')
test_invalid_input(test_function, 'get_urls', 'non-existent build ID')
def report_results():
# If there are no entries in the dictionary, then no errors occurred.
if len(ERRORS) == 0:
integrationtestlib.log('All tests successful!')
return
# Otherwise, errors occurred...
error_string = 'The following errors occurred:\n'
for function in ERRORS:
for error in ERRORS[function]:
error_string += '\n[' + function + '] ' + error
integrationtestlib.log(error_string)
integrationtestlib.notify(error_string, 'Custom Installer Builder test failure')
def run_tests():
test_results = dict()
test_api_version()
success, results = test_build_installers()
if success:
test_get_urls(results['build_id'])
def main():
# Make the XML-RPC proxy accessible across the whole program.
global XMLRPC_PROXY
# Each test can reuse this proxy.
XMLRPC_PROXY = xmlrpclib.ServerProxy(XMLRPC_PROXY_URL)
# Set up the integration test library.
success, explanation = send_gmail.init_gmail()
if not success:
integrationtestlib.log('Failed to execute init_gmail(): ' + explanation)
sys.exit(1)
# Add any extra error log recipients.
integrationtestlib.notify_list.extend(NOTIFY_LIST)
# The main event!
run_tests()
report_results()
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
import SoftLayer.API
from zenjsonclient import router, ZenJsonClientError
from sets import Set
from time import sleep
import sys
###
DOMAIN_TO_APPEND = ""
if (len(sys.argv) == 2 ):
print sys.argv[1]
DOMAIN_TO_APPEND = sys.argv[1]
if (DOMAIN_TO_APPEND[0] != "."):
DOMAIN_TO_APPEND="."+DOMAIN_TO_APPEND
###
api_username = 'set me'
api_key = 'set me'
client = SoftLayer.API.Client('SoftLayer_Account', None, api_username, api_key)
object_mask = {
'hardware' : {
'operatingSystem' : {
'passwords' : {},
},
'networkComponents' : {},
'datacenter' : {},
'processorCount' : {},
}
}
client.set_object_mask(object_mask)
hardware = client.getHardware()
output_list = []
for eachDev in hardware:
ip=""
hostname=""
loc=""
if ("privateIpAddress" in eachDev.keys()):
ip=eachDev["privateIpAddress"]
if ("hostname" in eachDev.keys()):
hostname=eachDev["hostname"]
if ("datacenter" in eachDev.keys()):
loc=eachDev["datacenter"]
print "IP: %s, HOSTNAME: %s, LOCATION: %s" % (ip,hostname,loc)
output_list.append({
"IP": ip,
"HOSTNAME": hostname,
"LOCATION": loc
})
print "Got some data from softlayer:"
print repr(output_list)
'''output_list = eval(open("output.txt","r").read())'''
########################################################################################################################
#Injector part
########################################################################################################################
#Fake adapter
srcdata = output_list
for each in srcdata:
each['HOSTNAME']+=DOMAIN_TO_APPEND
print "SOURCE DATA:"
print srcdata
print '\n\n\n'
#Step 1: Get the list of locations in the zenoss
resp = router.device.getLocations()
zen_locations_dict_list = resp.result["locations"]
zen_locations_list = []
for each in zen_locations_dict_list:
if ('name' in each.keys()):
nm = each['name']
if (nm[0] == "/"):
nm=nm[1:]
zen_locations_list.append(nm)
print "Locations found in zenoss:"
for each in zen_locations_list:
print "\t",each
print "\n"
#Step 2: Get the list of locations from source data
src_locations_list = Set()
for each in srcdata:
if ('LOCATION' in each.keys()):
if ('name' in each['LOCATION'].keys()):
src_locations_list.add(each['LOCATION']['name'])
print "Locations found in the source file:"
for each in src_locations_list:
print "\t",each
print "\n"
#Step 3: Determine which locations are missing in zenoss
zen_locations_set = Set()
for each in zen_locations_list:
zen_locations_set.add(each)
src_locations_set = src_locations_list
locations_difference = src_locations_list - zen_locations_set
if (len(locations_difference)):
print "These locations from source data are missing in zenoss:"
for each in locations_difference:
print "\t",each
#Step 4: Add missing locations to zenoss
for each_location in locations_difference:
resp = router.device.addLocationNode(type='organizer', contextUid='/zport/dmd/Devices/Locations', id=each_location)
if (resp.result['success'] == True):
print "Succesfully added new location \"",each_location,"\" to zenoss."
#Step 5: Get hostnames/devices from zenoss
resp = router.device.getDevices(uid="/zport/dmd/Devices", sort="name", limit=10000000)
dev_response = resp.result['devices']
#Step 6: Collect devices hostnames from response and source data
dev_response_hostnames_set = Set()
for each in dev_response:
if ('name' in each):
dev_response_hostnames_set.add(each['name'])
print "Hostnames found in zenoss:"
for each in dev_response_hostnames_set:
print "\t",each
print
dev_source_hostnames_set = Set()
for each in srcdata:
if ('HOSTNAME' in each.keys()):
dev_source_hostnames_set.add(each['HOSTNAME'])
print "Hostnames found in source data:"
for each in dev_source_hostnames_set:
print "\t",each
print
dev_hostnames_difference_set_missing_in_zenoss = dev_source_hostnames_set - dev_response_hostnames_set
if (len(dev_hostnames_difference_set_missing_in_zenoss)):
print "These hostnames are missing in zenoss:"
for each in dev_hostnames_difference_set_missing_in_zenoss:
print "\t",each
print
dev_hostnames_difference_set_existing_in_zenoss = dev_response_hostnames_set & dev_source_hostnames_set
if (len(dev_hostnames_difference_set_existing_in_zenoss)):
print "These hostnames are present in zenoss and in the source file both:"
for each in dev_hostnames_difference_set_existing_in_zenoss:
print "\t",each
print
#Step 7: Insert missing hostnames
hosts_locs_table = []
for each in dev_hostnames_difference_set_missing_in_zenoss:
for each1 in srcdata:
if (each == each1['HOSTNAME']):
hosts_locs_table.append(each1)
break
print "Internal table created."
print "Going to insert hostnames/devices into zenoss."
print
#Step 7.2: Insert each device with its location into zenoss
for each_dev in hosts_locs_table:
locationP = each_dev['LOCATION']['name']
resp = router.device.addDevice(deviceName=each_dev['HOSTNAME'],
deviceClass='/Discovered',
locationPath=locationP)
print "Add Device, zenoss says: ", repr(resp.result)
#Step ?: Just printing
print "Now we have added new devices. Usually, zenoss puts it in his background queue,"
print "so we need to make sure that all backgorund task has been finished."
print "I'm going to pull you zenoss instance every 5 secs and check if added devices already in list."
#Step 8: Wait until all devices will be added to zenoss.
polling_finished_flag = False
pulling_list = {}
for each in srcdata:
pulling_list[each['HOSTNAME']]=False
while (not polling_finished_flag):
print "Pulling zenoos..."
resp = router.device.getDevices(uid="/zport/dmd/Devices", sort="name", limit=10000000)
dev_list_from_zen = resp.result['devices']
for each_device in dev_list_from_zen:
if (each_device['name'] in pulling_list.keys()):
pulling_list[each_device['name']]=True
polling_finished_flag=True
for eachD in pulling_list.keys():
if (pulling_list[eachD] == False):
polling_finished_flag = False
if (not polling_finished_flag):
print pulling_list
print "Sleeping 5 seconds..."
sleep(5)
print "Seems that background jobs has been done... Going to do resetIp for devices."
#Step 9: Seems that all missing devices has been added to zenoss, so now we can do resetIP for all devices,
#mentioned in source data (recently added and already existing)
#Step 9.1: Collect devices uids and constract UID<->IP table. Also filter another devices which isn't mentioned in srcdata.
uids_list = []
uid_ip_dict = {}
resp = router.device.getDevices(uid="/zport/dmd/Devices", sort="name", limit=10000000)
for each in resp.result['devices']:
if ('uid' in each):
uids_list.append(each['uid'])
nm_to_search = each['name']
for dev_in_source in srcdata:
if dev_in_source['HOSTNAME'] == nm_to_search:
uid_ip_dict[each['uid']]=dev_in_source['IP']
#Step 9.2: Just doing resetIP
print "\n\n\n"
for eachDeviceToResetIP in uid_ip_dict.keys():
uid = eachDeviceToResetIP
new_ip = uid_ip_dict[uid]
if (new_ip):
resp = router.device.resetIp(uids=[uid],hashcheck=1,ip=new_ip)
print "Resetting ip for UID %s, zenoss says: %s" % (uid, repr(resp.result))
#Step 10: Just print some interesting info
print "Device count in the source data: %d" % (len(srcdata))
flag=False
for each in uid_ip_dict.keys():
if (uid_ip_dict[each] == ""):
flag=True
if (flag):
print "Interesting thing! Some devices in the source data doesn't have ip:"
for each in uid_ip_dict.keys():
if (uid_ip_dict[each] == ""):
name = each.split("/")
print "\t",name[len(name)-1]
#Step 11: DONE!!!
print "\n\n\n\n\n"
print "DONE!"
|
|
from __future__ import division
from builtins import zip, range
from future.utils import with_metaclass
import numpy as np
import abc
import scipy.stats as stats
import scipy.special as special
from scipy.special import logsumexp
try:
from ..util.cstats import sample_markov
except ImportError:
from ..util.stats import sample_markov
from ..util.general import top_eigenvector, cumsum
from .hmm_states import HMMStatesPython, HMMStatesEigen, _SeparateTransMixin
from .hsmm_states import HSMMStatesEigen
# TODO these classes are currently backed by HMM message passing, but they can
# be made much more time and memory efficient. I have the code to do it in some
# other branches, but dense matrix multiplies are actually competitive.
class _HSMMStatesIntegerNegativeBinomialBase(with_metaclass(abc.ABCMeta, HSMMStatesEigen, HMMStatesEigen)):
@property
def rs(self):
return np.array([d.r for d in self.dur_distns])
@property
def ps(self):
return np.array([d.p for d in self.dur_distns])
### HMM embedding parameters
@abc.abstractproperty
def hmm_trans_matrix(self):
pass
@property
def hmm_aBl(self):
if self._hmm_aBl is None:
self._hmm_aBl = self.aBl.repeat(self.rs,axis=1)
return self._hmm_aBl
@property
def hmm_pi_0(self):
if not self.left_censoring:
rs = self.rs
starts = np.concatenate(((0,),rs.cumsum()[:-1]))
pi_0 = np.zeros(rs.sum())
pi_0[starts] = self.pi_0
return pi_0
else:
return top_eigenvector(self.hmm_trans_matrix)
def clear_caches(self):
super(_HSMMStatesIntegerNegativeBinomialBase,self).clear_caches()
self._hmm_aBl = None
def _map_states(self):
themap = np.arange(self.num_states).repeat(self.rs).astype('int32')
self.stateseq = themap[self.stateseq]
def generate_states(self):
self.stateseq = sample_markov(
T=self.T,trans_matrix=self.hmm_trans_matrix,
init_state_distn=self.hmm_pi_0)
self._map_states()
def Viterbi_hmm(self):
from .hmm_messages_interface import viterbi
self.stateseq = viterbi(
self.hmm_trans_matrix,self.hmm_aBl,self.hmm_pi_0,
np.empty(self.hmm_aBl.shape[0],dtype='int32'))
self._map_states()
def resample_hmm(self):
alphan, self._normalizer = \
HMMStatesEigen._messages_forwards_normalized(
self.hmm_trans_matrix,self.hmm_pi_0,self.hmm_aBl)
self.stateseq = HMMStatesEigen._sample_backwards_normalized(
alphan,self.hmm_trans_matrix.T.copy())
self._map_states()
self.alphan = alphan # TODO remove
def resample_hsmm(self):
betal, betastarl = HSMMStatesEigen.messages_backwards(self)
HMMStatesEigen.sample_forwards(betal,betastarl)
def resample(self):
self.resample_hmm()
def Viterbi(self):
self.Viterbi_hmm()
def hmm_messages_forwards_log(self):
return HMMStatesEigen._messages_forwards_log(
self.hmm_trans_matrix,self.hmm_pi_0,self.hmm_aBl)
class HSMMStatesIntegerNegativeBinomial(_HSMMStatesIntegerNegativeBinomialBase):
@property
def hmm_trans_matrix(self):
return self.hmm_bwd_trans_matrix
@property
def hmm_bwd_trans_matrix(self):
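# HMM embedding of the integer negative binomial durations: each HSMM state i
# is expanded into rs[i] pseudo-states. Inside a block the chain self-loops
# with probability ps[i] and advances with probability 1-ps[i]; the last
# pseudo-state exits to state j with probability (1-ps[i])*A[i,j], entering
# j's block according to the binomial row from bwd_enter_rows.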
rs, ps = self.rs, self.ps
starts, ends = cumsum(rs,strict=True), cumsum(rs,strict=False)
trans_matrix = np.zeros((ends[-1],ends[-1]))
enters = self.bwd_enter_rows
for (i,j), Aij in np.ndenumerate(self.trans_matrix):
block = trans_matrix[starts[i]:ends[i],starts[j]:ends[j]]
block[-1,:] = Aij * (1-ps[i]) * enters[j]
if i == j:
block[...] += np.diag(np.repeat(ps[i],rs[i])) \
+ np.diag(np.repeat(1-ps[i],rs[i]-1),k=1)
assert np.allclose(trans_matrix.sum(1),1) or self.trans_matrix.shape == (1,1)
return trans_matrix
@property
def bwd_enter_rows(self):
return [stats.binom.pmf(np.arange(r)[::-1],r-1,p) for r,p in zip(self.rs,self.ps)]
@property
def hmm_fwd_trans_matrix(self):
rs, ps = self.rs, self.ps
starts, ends = cumsum(rs,strict=True), cumsum(rs,strict=False)
trans_matrix = np.zeros((ends[-1],ends[-1]))
exits = self.fwd_exit_cols
for (i,j), Aij in np.ndenumerate(self.trans_matrix):
block = trans_matrix[starts[i]:ends[i],starts[j]:ends[j]]
block[:,0] = Aij * exits[i] * (1-ps[i])
if i == j:
block[...] += \
np.diag(np.repeat(ps[i],rs[i])) \
+ np.diag(np.repeat(1-ps[i],rs[i]-1) * (1-exits[i][:-1]),k=1)
assert np.allclose(trans_matrix.sum(1),1)
assert (0 <= trans_matrix).all() and (trans_matrix <= 1.).all()
return trans_matrix
@property
def fwd_exit_cols(self):
return [(1-p)**(np.arange(r)[::-1]) for r,p in zip(self.rs,self.ps)]
def messages_backwards2(self):
# this method is just for numerical testing
# returns HSMM messages using HMM embedding. the way of the future!
Al = np.log(self.trans_matrix)
T, num_states = self.T, self.num_states
betal = np.zeros((T,num_states))
betastarl = np.zeros((T,num_states))
starts = cumsum(self.rs,strict=True)
ends = cumsum(self.rs,strict=False)
foo = np.zeros((num_states,ends[-1]))
for idx, row in enumerate(self.bwd_enter_rows):
foo[idx,starts[idx]:ends[idx]] = row
bar = np.zeros_like(self.hmm_bwd_trans_matrix)
for start, end in zip(starts,ends):
bar[start:end,start:end] = self.hmm_bwd_trans_matrix[start:end,start:end]
pmess = np.zeros(ends[-1])
# betal[-1] is 0
for t in range(T-1,-1,-1):
pmess += self.hmm_aBl[t]
betastarl[t] = logsumexp(np.log(foo) + pmess, axis=1)
betal[t-1] = logsumexp(Al + betastarl[t], axis=1)
pmess = logsumexp(np.log(bar) + pmess, axis=1)
pmess[ends-1] = np.logaddexp(pmess[ends-1],betal[t-1] + np.log(1-self.ps))
betal[-1] = 0.
return betal, betastarl
### NEW
def meanfieldupdate(self):
return self.meanfieldupdate_sampling()
# return self.meanfieldupdate_Estep()
def meanfieldupdate_sampling(self):
from ..util.general import count_transitions
num_r_samples = self.model.mf_num_samples \
if hasattr(self.model,'mf_num_samples') else 10
self.expected_states = np.zeros((self.T,self.num_states))
self.expected_transcounts = np.zeros((self.num_states,self.num_states))
self.expected_durations = np.zeros((self.num_states,self.T))
eye = np.eye(self.num_states)/num_r_samples
for i in range(num_r_samples):
self.model._resample_from_mf()
self.clear_caches()
self.resample()
self.expected_states += eye[self.stateseq]
self.expected_transcounts += \
count_transitions(self.stateseq_norep,minlength=self.num_states)\
/ num_r_samples
for state in range(self.num_states):
self.expected_durations[state] += \
np.bincount(
self.durations_censored[self.stateseq_norep == state],
minlength=self.T)[:self.T].astype(np.double)/num_r_samples
def meanfieldupdate_Estep(self):
# TODO bug in here? it's not as good as sampling
num_r_samples = self.model.mf_num_samples \
if hasattr(self.model,'mf_num_samples') else 10
num_stateseq_samples_per_r = self.model.mf_num_stateseq_samples_per_r \
if hasattr(self.model,'mf_num_stateseq_samples_per_r') else 1
self.expected_states = np.zeros((self.T,self.num_states))
self.expected_transcounts = np.zeros((self.num_states,self.num_states))
self.expected_durations = np.zeros((self.num_states,self.T))
mf_aBl = self.mf_aBl
for i in range(num_r_samples):
for d in self.dur_distns:
d._resample_r_from_mf()
self.clear_caches()
trans = self.mf_bwd_trans_matrix # TODO check this
init = self.hmm_mf_bwd_pi_0
aBl = mf_aBl.repeat(self.rs,axis=1)
hmm_alphal, hmm_betal = HMMStatesEigen._messages_log(self,trans,init,aBl)
# collect stateseq and transitions statistics from messages
hmm_expected_states, hmm_expected_transcounts, normalizer = \
HMMStatesPython._expected_statistics_from_messages(
trans,aBl,hmm_alphal,hmm_betal)
expected_states, expected_transcounts, _ \
= self._hmm_stats_to_hsmm_stats(
hmm_expected_states, hmm_expected_transcounts, normalizer)
self.expected_states += expected_states / num_r_samples
self.expected_transcounts += expected_transcounts / num_r_samples
# collect duration statistics by sampling from messages
for j in range(num_stateseq_samples_per_r):
self._resample_from_mf(trans,init,aBl,hmm_alphal,hmm_betal)
for state in range(self.num_states):
self.expected_durations[state] += \
np.bincount(
self.durations_censored[self.stateseq_norep == state],
minlength=self.T)[:self.T].astype(np.double) \
/(num_r_samples*num_stateseq_samples_per_r)
def _hmm_stats_to_hsmm_stats(self,hmm_expected_states,hmm_expected_transcounts,normalizer):
rs = self.rs
starts = np.concatenate(((0,),np.cumsum(rs[:-1])))
dotter = np.zeros((rs.sum(),len(rs)))
for idx, (start, length) in enumerate(zip(starts,rs)):
dotter[start:start+length,idx] = 1.
expected_states = hmm_expected_states.dot(dotter)
expected_transcounts = dotter.T.dot(hmm_expected_transcounts).dot(dotter)
expected_transcounts.flat[::expected_transcounts.shape[0]+1] = 0
return expected_states, expected_transcounts, normalizer
def _resample_from_mf(self,trans,init,aBl,hmm_alphal,hmm_betal):
self.stateseq = HMMStatesEigen._sample_forwards_log(
hmm_betal,trans,init,aBl)
self._map_states()
@property
def hmm_mf_bwd_pi_0(self):
rs = self.rs
starts = np.concatenate(((0,),rs.cumsum()[:-1]))
mf_pi_0 = np.zeros(rs.sum())
mf_pi_0[starts] = self.mf_pi_0
return mf_pi_0
@property
def mf_bwd_trans_matrix(self):
rs = self.rs
starts, ends = cumsum(rs,strict=True), cumsum(rs,strict=False)
trans_matrix = np.zeros((ends[-1],ends[-1]))
Elnps, Eln1mps = zip(*[d._fixedr_distns[d.ridx]._mf_expected_statistics() for d in self.dur_distns])
Eps, E1mps = np.exp(Elnps), np.exp(Eln1mps) # NOTE: actually exp(E[ln(p)]) etc
enters = self.mf_bwd_enter_rows(rs,Elnps,Eln1mps)
for (i,j), Aij in np.ndenumerate(self.mf_trans_matrix):
block = trans_matrix[starts[i]:ends[i],starts[j]:ends[j]]
block[-1,:] = Aij * E1mps[i] * enters[j]
if i == j:
block[...] += np.diag(np.repeat(Eps[i],rs[i])) \
+ np.diag(np.repeat(E1mps[i],rs[i]-1),k=1)
assert np.all(trans_matrix >= 0)
return trans_matrix
def mf_bwd_enter_rows(self,rs,Elnps,Eln1mps):
return [self._mf_binom(np.arange(r)[::-1],r-1,Elnp,Eln1mp)
for r,Elnp,Eln1mp in zip(rs,Elnps,Eln1mps)]
@staticmethod
def _mf_binom(k,n,p1,p2):
return np.exp(special.gammaln(n+1) - special.gammaln(k+1) - special.gammaln(n-k+1) \
+ k*p1 + (n-k)*p2)
class HSMMStatesIntegerNegativeBinomialVariant(_HSMMStatesIntegerNegativeBinomialBase):
@property
def hmm_trans_matrix(self):
return self.hmm_bwd_trans_matrix
@property
def hmm_bwd_trans_matrix(self):
rs, ps = self.rs, self.ps
starts, ends = cumsum(rs,strict=True), cumsum(rs,strict=False)
trans_matrix = np.zeros((rs.sum(),rs.sum()))
for (i,j), Aij in np.ndenumerate(self.trans_matrix):
block = trans_matrix[starts[i]:ends[i],starts[j]:ends[j]]
block[-1,0] = Aij * (1-ps[i])
if i == j:
block[...] += np.diag(np.repeat(ps[i],rs[i])) \
+ np.diag(np.repeat(1-ps[i],rs[i]-1),k=1)
assert np.allclose(trans_matrix.sum(1),1)
return trans_matrix
class HSMMStatesIntegerNegativeBinomialSeparateTrans(
_SeparateTransMixin,
HSMMStatesIntegerNegativeBinomial):
pass
class HSMMStatesDelayedIntegerNegativeBinomial(HSMMStatesIntegerNegativeBinomial):
@property
def hmm_trans_matrix(self):
# return self.hmm_trans_matrix_orig
return self.hmm_trans_matrix_2
@property
def hmm_trans_matrix_orig(self):
rs, ps, delays = self.rs, self.ps, self.delays
starts, ends = cumsum(rs+delays,strict=True), cumsum(rs+delays,strict=False)
trans_matrix = np.zeros((ends[-1],ends[-1]))
enters = self.bwd_enter_rows
for (i,j), Aij in np.ndenumerate(self.trans_matrix):
block = trans_matrix[starts[i]:ends[i],starts[j]:ends[j]]
if delays[i] == 0:
block[-1,:rs[j]] = Aij * enters[j] * (1-ps[i])
else:
block[-1,:rs[j]] = Aij * enters[j]
if i == j:
block[:rs[i],:rs[i]] += \
np.diag(np.repeat(ps[i],rs[i])) + np.diag(np.repeat(1-ps[i],rs[i]-1),k=1)
if delays[i] > 0:
block[rs[i]-1,rs[i]] = (1-ps[i])
block[rs[i]:,rs[i]:] = np.eye(delays[i],k=1)
assert np.allclose(trans_matrix.sum(1),1.)
return trans_matrix
@property
def hmm_trans_matrix_1(self):
rs, ps, delays = self.rs, self.ps, self.delays
starts, ends = cumsum(rs+delays,strict=True), cumsum(rs+delays,strict=False)
trans_matrix = np.zeros((ends[-1],ends[-1]))
enters = self.bwd_enter_rows
for (i,j), Aij in np.ndenumerate(self.trans_matrix):
block = trans_matrix[starts[i]:ends[i],starts[j]:ends[j]]
block[-1,:rs[j]] = Aij * enters[j] * (1-ps[i])
if i == j:
block[-rs[i]:,-rs[i]:] += \
np.diag(np.repeat(ps[i],rs[i])) + np.diag(np.repeat(1-ps[i],rs[i]-1),k=1)
if delays[i] > 0:
block[:delays[i],:delays[i]] = np.eye(delays[i],k=1)
block[delays[i]-1,delays[i]] = 1
assert np.allclose(trans_matrix.sum(1),1.)
return trans_matrix
@property
def hmm_trans_matrix_2(self):
rs, ps, delays = self.rs, self.ps, self.delays
starts, ends = cumsum(rs+delays,strict=True), cumsum(rs+delays,strict=False)
trans_matrix = np.zeros((ends[-1],ends[-1]))
enters = self.bwd_enter_rows
for (i,j), Aij in np.ndenumerate(self.trans_matrix):
block = trans_matrix[starts[i]:ends[i],starts[j]:ends[j]]
block[-1,0] = Aij * (1-ps[i])
if i == j:
block[-rs[i]:,-rs[i]:] += \
np.diag(np.repeat(ps[i],rs[i])) + np.diag(np.repeat(1-ps[i],rs[i]-1),k=1)
if delays[i] > 0:
block[:delays[i],:delays[i]] = np.eye(delays[i],k=1)
block[delays[i]-1,-rs[i]:] = enters[i]
assert np.allclose(trans_matrix.sum(1),1.)
return trans_matrix
@property
def hmm_aBl(self):
if self._hmm_aBl is None:
self._hmm_aBl = self.aBl.repeat(self.rs+self.delays,axis=1)
return self._hmm_aBl
@property
def hmm_pi_0(self):
if self.left_censoring:
raise NotImplementedError
else:
rs, delays = self.rs, self.delays
starts = np.concatenate(((0,),(rs+delays).cumsum()[:-1]))
pi_0 = np.zeros((rs+delays).sum())
pi_0[starts] = self.pi_0
return pi_0
@property
def delays(self):
return np.array([d.delay for d in self.dur_distns])
def _map_states(self):
themap = np.arange(self.num_states).repeat(self.rs+self.delays).astype('int32')
self.stateseq = themap[self.stateseq]
class HSMMStatesTruncatedIntegerNegativeBinomial(HSMMStatesDelayedIntegerNegativeBinomial):
@property
def bwd_enter_rows(self):
As = [np.diag(np.repeat(p,r)) + np.diag(np.repeat(1-p,r-1),k=1) for r,p in zip(self.rs,self.ps)]
enters = [stats.binom.pmf(np.arange(r)[::-1],r-1,p) for A,r,p in zip(As,self.rs,self.ps)]
# norms = [sum(v.dot(np.linalg.matrix_power(A,d))[-1]*(1-p) for d in range(delay))
# for A,v,p,delay in zip(As,enters,self.ps,self.delays)]
# enters = [v.dot(np.linalg.matrix_power(A,self.delays[state])) / (1.-norm)
enters = [v.dot(np.linalg.matrix_power(A,self.delays[state]))
for state, (A,v) in enumerate(zip(As,enters))]
return [v / v.sum() for v in enters] # this should just be for numerical purposes
class HSMMStatesDelayedIntegerNegativeBinomialSeparateTrans(
_SeparateTransMixin,
HSMMStatesDelayedIntegerNegativeBinomial):
pass
class HSMMStatesTruncatedIntegerNegativeBinomialSeparateTrans(
_SeparateTransMixin,
HSMMStatesTruncatedIntegerNegativeBinomial):
pass
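# Illustrative sketch (all parameter values are made up): build the embedded
# HMM transition matrix for a toy two-state model by hand, mirroring
# HSMMStatesIntegerNegativeBinomialVariant.hmm_bwd_trans_matrix above, and
# check that every row sums to one.
if __name__ == '__main__':
    import numpy as np

    rs = np.array([2, 3])           # negative binomial r parameters
    ps = np.array([0.3, 0.6])       # negative binomial p parameters
    A = np.array([[0., 1.],
                  [1., 0.]])        # HSMM transition matrix (no self-transitions)

    starts = np.concatenate(((0,), np.cumsum(rs)[:-1]))
    ends = np.cumsum(rs)
    M = np.zeros((rs.sum(), rs.sum()))
    for (i, j), Aij in np.ndenumerate(A):
        block = M[starts[i]:ends[i], starts[j]:ends[j]]
        # leaving state i (probability 1 - ps[i]) enters the first substate of j
        block[-1, 0] = Aij * (1 - ps[i])
        if i == j:
            # within state i: self-loop with ps[i], advance a substate with 1 - ps[i]
            block[...] += (np.diag(np.repeat(ps[i], rs[i]))
                           + np.diag(np.repeat(1 - ps[i], rs[i] - 1), k=1))
    assert np.allclose(M.sum(1), 1.)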
|
|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import web_idl
from . import name_style
from .blink_v8_bridge import blink_class_name
from .code_node import EmptyNode
from .code_node import ListNode
from .code_node import TextNode
from .code_node_cxx import CxxClassDefNode
from .code_node_cxx import CxxFuncDeclNode
from .code_node_cxx import CxxFuncDefNode
from .code_node_cxx import CxxNamespaceNode
from .codegen_accumulator import CodeGenAccumulator
from .codegen_context import CodeGenContext
from .codegen_format import format_template as _format
from .codegen_utils import component_export
from .codegen_utils import component_export_header
from .codegen_utils import enclose_with_header_guard
from .codegen_utils import make_copyright_header
from .codegen_utils import make_forward_declarations
from .codegen_utils import make_header_include_directives
from .codegen_utils import write_code_node_to_file
from .mako_renderer import MakoRenderer
from .package_initializer import package_initializer
from .path_manager import PathManager
from .task_queue import TaskQueue
def make_factory_methods(cg_context):
assert isinstance(cg_context, CodeGenContext)
T = TextNode
decls = ListNode()
defs = ListNode()
func_decl = CxxFuncDeclNode(name="Create",
arg_decls=[
"v8::Isolate* isolate",
"v8::Local<v8::Value> value",
"ExceptionState& exception_state",
],
return_type="${class_name}",
static=True)
func_def = CxxFuncDefNode(name="Create",
arg_decls=[
"v8::Isolate* isolate",
"v8::Local<v8::Value> value",
"ExceptionState& exception_state",
],
return_type="${class_name}",
class_name=cg_context.class_name)
func_def.set_base_template_vars(cg_context.template_bindings())
decls.append(func_decl)
defs.append(func_def)
func_def.body.extend([
T("const auto& result = bindings::FindIndexInEnumStringTable("
"isolate, value, string_table_, \"${enumeration.identifier}\", "
"exception_state);"),
T("return result.has_value() ? "
"${class_name}(static_cast<Enum>(result.value())) : "
"${class_name}();"),
])
func_decl = CxxFuncDeclNode(name="Create",
arg_decls=["const String& value"],
return_type="absl::optional<${class_name}>",
static=True)
func_def = CxxFuncDefNode(name="Create",
arg_decls=["const String& value"],
return_type="absl::optional<${class_name}>",
class_name=cg_context.class_name)
func_def.set_base_template_vars(cg_context.template_bindings())
decls.append(func_decl)
defs.append(EmptyNode())
defs.append(func_def)
func_def.body.extend([
T("const auto& result = bindings::FindIndexInEnumStringTable"
"(value, string_table_);"),
T("if (!result)\n"
" return absl::nullopt;"),
T("return ${class_name}(static_cast<Enum>(result.value()));"),
])
return decls, defs
def make_default_constructor(cg_context):
assert isinstance(cg_context, CodeGenContext)
func_decl = CxxFuncDeclNode(name=cg_context.class_name,
arg_decls=[],
return_type="",
constexpr=True,
default=True)
return func_decl, None
def make_constructors(cg_context):
assert isinstance(cg_context, CodeGenContext)
T = TextNode
class_name = cg_context.class_name
decls = ListNode([
CxxFuncDefNode(
name=class_name,
arg_decls=["Enum value"],
return_type="",
explicit=True,
constexpr=True,
member_initializer_list=[
"${base_class_name}("
"static_cast<enum_int_t>(value), "
"string_table_[static_cast<enum_int_t>(value)])"
]),
CxxFuncDeclNode(
name=class_name,
arg_decls=["const ${class_name}&"],
return_type="",
constexpr=True,
default=True),
CxxFuncDeclNode(
name=class_name,
arg_decls=["${class_name}&&"],
return_type="",
constexpr=True,
default=True),
CxxFuncDeclNode(
name="~${class_name}", arg_decls=[], return_type="", default=True),
])
defs = ListNode([
T("static_assert("
"std::is_trivially_copyable<${class_name}>::value, \"\");"),
])
defs.set_base_template_vars(cg_context.template_bindings())
return decls, defs
def make_assignment_operators(cg_context):
assert isinstance(cg_context, CodeGenContext)
decls = ListNode([
CxxFuncDeclNode(
name="operator=",
arg_decls=["const ${class_name}&"],
return_type="${class_name}&",
default=True),
CxxFuncDeclNode(
name="operator=",
arg_decls=["${class_name}&&"],
return_type="${class_name}&",
default=True),
])
defs = ListNode()
# Migration adapter
func_decl = CxxFuncDeclNode(
name="operator=",
arg_decls=["const String&"],
return_type="${class_name}&")
func_def = CxxFuncDefNode(
name="operator=",
arg_decls=["const String& str_value"],
return_type="${class_name}&",
class_name=cg_context.class_name)
decls.append(func_decl)
defs.append(func_def)
func_def.set_base_template_vars(cg_context.template_bindings())
func_def.body.append(
TextNode("""\
const auto& index =
bindings::FindIndexInEnumStringTable(str_value, string_table_);
CHECK(index.has_value());
return operator=(${class_name}(static_cast<Enum>(index.value())));
"""))
return decls, defs
def make_equality_operators(cg_context):
assert isinstance(cg_context, CodeGenContext)
func1_def = CxxFuncDefNode(
name="operator==",
arg_decls=["const ${class_name}& lhs", "${class_name}::Enum rhs"],
return_type="bool",
inline=True)
func1_def.set_base_template_vars(cg_context.template_bindings())
func1_def.body.append(TextNode("return lhs.AsEnum() == rhs;"))
func2_def = CxxFuncDefNode(
name="operator==",
arg_decls=["${class_name}::Enum lhs", "const ${class_name}& rhs"],
return_type="bool",
inline=True)
func2_def.set_base_template_vars(cg_context.template_bindings())
func2_def.body.append(TextNode("return lhs == rhs.AsEnum();"))
decls = ListNode([func1_def, EmptyNode(), func2_def])
return decls, None
def make_as_enum_function(cg_context):
assert isinstance(cg_context, CodeGenContext)
func_def = CxxFuncDefNode(
name="AsEnum", arg_decls=[], return_type="Enum", const=True)
func_def.body.append(TextNode("return static_cast<Enum>(GetEnumValue());"))
return func_def, None
def make_nested_enum_class_def(cg_context):
assert isinstance(cg_context, CodeGenContext)
enum_values = [
TextNode(name_style.constant(value))
for value in cg_context.enumeration.values
]
return ListNode([
TextNode("enum class Enum : enum_int_t {"),
ListNode(enum_values, separator=", "),
TextNode("};"),
])
def make_enum_string_table(cg_context):
assert isinstance(cg_context, CodeGenContext)
decls = TextNode("static const char* const string_table_[];")
str_values = [
TextNode("\"{}\"".format(value))
for value in cg_context.enumeration.values
]
# Define the string table in *.cc so that there never exists a copy of
# the table (i.e. the strings in the table are interned strings in the
# scope of this IDL enumeration). This trick makes it possible to compare
# the strings by their address.
defs = ListNode([
TextNode("constexpr const char* const "
"${class_name}::string_table_[] = {"),
ListNode(str_values, separator=", "),
TextNode("};"),
])
defs.set_base_template_vars(cg_context.template_bindings())
return decls, defs
def generate_enumeration(enumeration_identifier):
assert isinstance(enumeration_identifier, web_idl.Identifier)
web_idl_database = package_initializer().web_idl_database()
enumeration = web_idl_database.find(enumeration_identifier)
path_manager = PathManager(enumeration)
assert path_manager.api_component == path_manager.impl_component
api_component = path_manager.api_component
for_testing = enumeration.code_generator_info.for_testing
# Class names
class_name = blink_class_name(enumeration)
cg_context = CodeGenContext(
enumeration=enumeration,
class_name=class_name,
base_class_name="bindings::EnumerationBase")
# Filepaths
header_path = path_manager.api_path(ext="h")
source_path = path_manager.api_path(ext="cc")
# Root nodes
header_node = ListNode(tail="\n")
header_node.set_accumulator(CodeGenAccumulator())
header_node.set_renderer(MakoRenderer())
source_node = ListNode(tail="\n")
source_node.set_accumulator(CodeGenAccumulator())
source_node.set_renderer(MakoRenderer())
# Namespaces
header_blink_ns = CxxNamespaceNode(name_style.namespace("blink"))
source_blink_ns = CxxNamespaceNode(name_style.namespace("blink"))
# Class definition
class_def = CxxClassDefNode(cg_context.class_name,
base_class_names=["bindings::EnumerationBase"],
final=True,
export=component_export(
api_component, for_testing))
class_def.set_base_template_vars(cg_context.template_bindings())
# Implementation parts
factory_decls, factory_defs = make_factory_methods(cg_context)
default_ctor_decls, default_ctor_defs = make_default_constructor(
cg_context)
ctor_decls, ctor_defs = make_constructors(cg_context)
assign_decls, assign_defs = make_assignment_operators(cg_context)
equal_decls, equal_defs = make_equality_operators(cg_context)
nested_enum_class_def = make_nested_enum_class_def(cg_context)
table_decls, table_defs = make_enum_string_table(cg_context)
as_enum_decl, as_enum_def = make_as_enum_function(cg_context)
# Header part (copyright, include directives, and forward declarations)
header_node.extend([
make_copyright_header(),
EmptyNode(),
enclose_with_header_guard(
ListNode([
make_header_include_directives(header_node.accumulator),
EmptyNode(),
header_blink_ns,
]), name_style.header_guard(header_path)),
])
header_blink_ns.body.extend([
make_forward_declarations(header_node.accumulator),
EmptyNode(),
])
source_node.extend([
make_copyright_header(),
EmptyNode(),
TextNode("#include \"{}\"".format(header_path)),
EmptyNode(),
make_header_include_directives(source_node.accumulator),
EmptyNode(),
source_blink_ns,
])
source_blink_ns.body.extend([
make_forward_declarations(source_node.accumulator),
EmptyNode(),
])
# Assemble the parts.
header_node.accumulator.add_class_decls([
"ExceptionState",
])
header_node.accumulator.add_include_headers([
component_export_header(api_component, for_testing),
"third_party/abseil-cpp/absl/types/optional.h",
"third_party/blink/renderer/platform/bindings/enumeration_base.h",
])
source_node.accumulator.add_include_headers([
"third_party/blink/renderer/bindings/core/v8/generated_code_helper.h",
])
header_blink_ns.body.append(class_def)
header_blink_ns.body.append(EmptyNode())
class_def.public_section.append(nested_enum_class_def)
class_def.public_section.append(EmptyNode())
class_def.private_section.append(table_decls)
class_def.private_section.append(EmptyNode())
source_blink_ns.body.append(table_defs)
source_blink_ns.body.append(EmptyNode())
class_def.public_section.append(factory_decls)
class_def.public_section.append(EmptyNode())
source_blink_ns.body.append(factory_defs)
source_blink_ns.body.append(EmptyNode())
class_def.private_section.append(default_ctor_decls)
class_def.private_section.append(EmptyNode())
source_blink_ns.body.append(default_ctor_defs)
source_blink_ns.body.append(EmptyNode())
class_def.public_section.append(ctor_decls)
class_def.public_section.append(EmptyNode())
source_blink_ns.body.append(ctor_defs)
source_blink_ns.body.append(EmptyNode())
class_def.public_section.append(assign_decls)
class_def.public_section.append(EmptyNode())
source_blink_ns.body.append(assign_defs)
source_blink_ns.body.append(EmptyNode())
class_def.public_section.append(as_enum_decl)
class_def.public_section.append(EmptyNode())
source_blink_ns.body.append(as_enum_def)
source_blink_ns.body.append(EmptyNode())
header_blink_ns.body.append(equal_decls)
header_blink_ns.body.append(EmptyNode())
source_blink_ns.body.append(equal_defs)
source_blink_ns.body.append(EmptyNode())
# Write down to the files.
write_code_node_to_file(header_node, path_manager.gen_path_to(header_path))
write_code_node_to_file(source_node, path_manager.gen_path_to(source_path))
def generate_enumerations(task_queue):
assert isinstance(task_queue, TaskQueue)
web_idl_database = package_initializer().web_idl_database()
for enumeration in web_idl_database.enumerations:
task_queue.post_task(generate_enumeration, enumeration.identifier)
|
|
'''
Window Pygame: windowing provider based on Pygame
'''
__all__ = ('WindowPygame', )
# fail early if possible
import pygame
from kivy.compat import PY2
from kivy.core.window import WindowBase
from kivy.core import CoreCriticalException
from os import environ
from os.path import exists, join
from kivy.config import Config
from kivy import kivy_home_dir
from kivy.base import ExceptionManager
from kivy.logger import Logger
from kivy.base import stopTouchApp, EventLoop
from kivy.utils import platform
# When we are generating documentation, Config doesn't exist
_exit_on_escape = True
if Config:
_exit_on_escape = Config.getboolean('kivy', 'exit_on_escape')
try:
android = None
if platform == 'android':
import android
except ImportError:
pass
# late binding
glReadPixels = GL_RGBA = GL_UNSIGNED_BYTE = None
class WindowPygame(WindowBase):
def create_window(self, *largs):
# release any pressed mouse button after window creation, otherwise we
# hit some weird bugs
self.dispatch('on_mouse_up', 0, 0, 'all', [])
# force display to show (available only for fullscreen)
displayidx = Config.getint('graphics', 'display')
if 'SDL_VIDEO_FULLSCREEN_HEAD' not in environ and displayidx != -1:
environ['SDL_VIDEO_FULLSCREEN_HEAD'] = '%d' % displayidx
# init some opengl, same as before.
self.flags = pygame.HWSURFACE | pygame.OPENGL | pygame.DOUBLEBUF
# right now, activate resizable window only on linux.
# on windows / macosx, the opengl context is lost, and we need to
# reconstruct everything. Check #168 for the state of the work.
if platform in ('linux', 'macosx', 'win') and \
Config.getint('graphics', 'resizable'):
self.flags |= pygame.RESIZABLE
try:
pygame.display.init()
except pygame.error as e:
raise CoreCriticalException(e.message)
multisamples = Config.getint('graphics', 'multisamples')
if multisamples > 0:
pygame.display.gl_set_attribute(pygame.GL_MULTISAMPLEBUFFERS, 1)
pygame.display.gl_set_attribute(pygame.GL_MULTISAMPLESAMPLES,
multisamples)
pygame.display.gl_set_attribute(pygame.GL_DEPTH_SIZE, 16)
pygame.display.gl_set_attribute(pygame.GL_STENCIL_SIZE, 1)
pygame.display.set_caption(self.title)
if self.position == 'auto':
self._pos = None
elif self.position == 'custom':
self._pos = self.left, self.top
else:
raise ValueError('position token in configuration accepts only '
'"auto" or "custom"')
if self.fullscreen == 'fake':
Logger.debug('WinPygame: Set window to fake fullscreen mode')
self.flags |= pygame.NOFRAME
# if no position is set in fake mode, we always need to set the
# position, so default to (0, 0).
if self._pos is None:
self._pos = (0, 0)
environ['SDL_VIDEO_WINDOW_POS'] = '%d,%d' % self._pos
elif self.fullscreen in ('auto', True):
Logger.debug('WinPygame: Set window to fullscreen mode')
self.flags |= pygame.FULLSCREEN
elif self._pos is not None:
environ['SDL_VIDEO_WINDOW_POS'] = '%d,%d' % self._pos
# never keep a None pos; applications that use w.center would break.
self._pos = (0, 0)
# prepare keyboard
repeat_delay = int(Config.get('kivy', 'keyboard_repeat_delay'))
repeat_rate = float(Config.get('kivy', 'keyboard_repeat_rate'))
pygame.key.set_repeat(repeat_delay, int(1000. / repeat_rate))
# set window icon before calling set_mode
try:
#filename_icon = Config.get('kivy', 'window_icon')
filename_icon = self.icon or Config.get('kivy', 'window_icon')
if filename_icon == '':
logo_size = 512 if platform == 'macosx' else 32
filename_icon = join(kivy_home_dir,
'icon', 'kivy-icon-%d.png' % logo_size)
self.set_icon(filename_icon)
except:
Logger.exception('Window: cannot set icon')
# try to use mode with multisamples
try:
self._pygame_set_mode()
except pygame.error as e:
if multisamples:
Logger.warning('WinPygame: Video: failed (multisamples=%d)' %
multisamples)
Logger.warning('WinPygame: trying without antialiasing')
pygame.display.gl_set_attribute(
pygame.GL_MULTISAMPLEBUFFERS, 0)
pygame.display.gl_set_attribute(
pygame.GL_MULTISAMPLESAMPLES, 0)
multisamples = 0
try:
self._pygame_set_mode()
except pygame.error as e:
raise CoreCriticalException(e.message)
else:
raise CoreCriticalException(e.message)
info = pygame.display.Info()
self._size = (info.current_w, info.current_h)
#self.dispatch('on_resize', *self._size)
# in order to debug future issues with pygame/display, let's show
# more debug output.
Logger.debug('Window: Display driver ' + pygame.display.get_driver())
Logger.debug('Window: Actual window size: %dx%d',
info.current_w, info.current_h)
if platform != 'android':
# android doesn't support gl_get_attribute, so skip this extra
# debug output on that platform.
Logger.debug(
'Window: Actual color bits r%d g%d b%d a%d',
pygame.display.gl_get_attribute(pygame.GL_RED_SIZE),
pygame.display.gl_get_attribute(pygame.GL_GREEN_SIZE),
pygame.display.gl_get_attribute(pygame.GL_BLUE_SIZE),
pygame.display.gl_get_attribute(pygame.GL_ALPHA_SIZE))
Logger.debug(
'Window: Actual depth bits: %d',
pygame.display.gl_get_attribute(pygame.GL_DEPTH_SIZE))
Logger.debug(
'Window: Actual stencil bits: %d',
pygame.display.gl_get_attribute(pygame.GL_STENCIL_SIZE))
Logger.debug(
'Window: Actual multisampling samples: %d',
pygame.display.gl_get_attribute(pygame.GL_MULTISAMPLESAMPLES))
super(WindowPygame, self).create_window()
# set mouse visibility
pygame.mouse.set_visible(
Config.getboolean('graphics', 'show_cursor'))
# if we are on the android platform, automatically create hooks
if android:
from kivy.support import install_android
install_android()
def close(self):
pygame.display.quit()
self.dispatch('on_close')
def on_title(self, instance, value):
if self.initialized:
pygame.display.set_caption(self.title)
def set_icon(self, filename):
try:
if not exists(filename):
return False
if PY2:
try:
im = pygame.image.load(filename)
except UnicodeEncodeError:
im = pygame.image.load(filename.encode('utf8'))
else:
im = pygame.image.load(filename)
if im is None:
raise Exception('Unable to load window icon (not found)')
pygame.display.set_icon(im)
super(WindowPygame, self).set_icon(filename)
except:
Logger.exception('WinPygame: unable to set icon')
def screenshot(self, *largs, **kwargs):
global glReadPixels, GL_RGBA, GL_UNSIGNED_BYTE
filename = super(WindowPygame, self).screenshot(*largs, **kwargs)
if filename is None:
return None
if glReadPixels is None:
from kivy.core.gl import glReadPixels, GL_RGBA, GL_UNSIGNED_BYTE
width, height = self.system_size
data = glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE)
data = str(buffer(data))
surface = pygame.image.fromstring(data, (width, height), 'RGBA', True)
pygame.image.save(surface, filename)
Logger.debug('Window: Screenshot saved at <%s>' % filename)
return filename
def on_keyboard(self, key, scancode=None, codepoint=None,
modifier=None, **kwargs):
codepoint = codepoint or kwargs.get('unicode')
# Quit if user presses ESC or the typical OSX shortcuts CMD+q or CMD+w
# TODO If just CMD+w is pressed, only the window should be closed.
is_osx = platform == 'darwin'
if _exit_on_escape and (key == 27 or
(is_osx and key in (113, 119) and
modifier == 1024)):
stopTouchApp()
self.close() # not sure what to do here
return True
super(WindowPygame, self).on_keyboard(
key, scancode, codepoint=codepoint, modifier=modifier)
def flip(self):
pygame.display.flip()
super(WindowPygame, self).flip()
def toggle_fullscreen(self):
if self.flags & pygame.FULLSCREEN:
self.flags &= ~pygame.FULLSCREEN
else:
self.flags |= pygame.FULLSCREEN
self._pygame_set_mode()
def _mainloop(self):
EventLoop.idle()
for event in pygame.event.get():
# kill application (SIG_TERM)
if event.type == pygame.QUIT:
EventLoop.quit = True
self.close()
# mouse move
elif event.type == pygame.MOUSEMOTION:
x, y = event.pos
self.mouse_pos = x, self.system_size[1] - y
# don't dispatch motion if no buttons are pressed
if event.buttons == (0, 0, 0):
continue
self._mouse_x = x
self._mouse_y = y
self._mouse_meta = self.modifiers
self.dispatch('on_mouse_move', x, y, self.modifiers)
# mouse action
elif event.type in (pygame.MOUSEBUTTONDOWN,
pygame.MOUSEBUTTONUP):
self._pygame_update_modifiers()
x, y = event.pos
btn = 'left'
if event.button == 3:
btn = 'right'
elif event.button == 2:
btn = 'middle'
elif event.button == 4:
btn = 'scrolldown'
elif event.button == 5:
btn = 'scrollup'
elif event.button == 6:
btn = 'scrollright'
elif event.button == 7:
btn = 'scrollleft'
eventname = 'on_mouse_down'
if event.type == pygame.MOUSEBUTTONUP:
eventname = 'on_mouse_up'
self._mouse_x = x
self._mouse_y = y
self._mouse_meta = self.modifiers
self._mouse_btn = btn
self._mouse_down = eventname == 'on_mouse_down'
self.dispatch(eventname, x, y, btn, self.modifiers)
# keyboard action
elif event.type in (pygame.KEYDOWN, pygame.KEYUP):
self._pygame_update_modifiers(event.mod)
# atm, don't handle keyup
if event.type == pygame.KEYUP:
self.dispatch('on_key_up', event.key,
event.scancode)
continue
# don't dispatch further key events if the down event was accepted
if self.dispatch('on_key_down', event.key,
event.scancode, event.unicode,
self.modifiers):
continue
self.dispatch('on_keyboard', event.key,
event.scancode, event.unicode,
self.modifiers)
# video resize
elif event.type == pygame.VIDEORESIZE:
self._size = event.size
self.update_viewport()
elif event.type == pygame.VIDEOEXPOSE:
self.canvas.ask_update()
# ignored event
elif event.type == pygame.ACTIVEEVENT:
pass
# drop file (pygame patch needed)
elif event.type == pygame.USEREVENT and \
hasattr(pygame, 'USEREVENT_DROPFILE') and \
event.code == pygame.USEREVENT_DROPFILE:
self.dispatch('on_dropfile', event.filename)
'''
# unhandled event !
else:
Logger.debug('WinPygame: Unhandled event %s' % str(event))
'''
def mainloop(self):
while not EventLoop.quit and EventLoop.status == 'started':
try:
self._mainloop()
if not pygame.display.get_active():
pygame.time.wait(100)
except BaseException as inst:
# use exception manager first
r = ExceptionManager.handle_exception(inst)
if r == ExceptionManager.RAISE:
stopTouchApp()
raise
else:
pass
#
# Pygame wrapper
#
def _pygame_set_mode(self, size=None):
if size is None:
size = self.size
if self.fullscreen == 'auto':
pygame.display.set_mode((0, 0), self.flags)
else:
pygame.display.set_mode(size, self.flags)
def _pygame_update_modifiers(self, mods=None):
# Available mod, from dir(pygame)
# 'KMOD_ALT', 'KMOD_CAPS', 'KMOD_CTRL', 'KMOD_LALT',
# 'KMOD_LCTRL', 'KMOD_LMETA', 'KMOD_LSHIFT', 'KMOD_META',
# 'KMOD_MODE', 'KMOD_NONE'
if mods is None:
mods = pygame.key.get_mods()
self._modifiers = []
if mods & (pygame.KMOD_SHIFT | pygame.KMOD_LSHIFT):
self._modifiers.append('shift')
if mods & (pygame.KMOD_ALT | pygame.KMOD_LALT):
self._modifiers.append('alt')
if mods & (pygame.KMOD_CTRL | pygame.KMOD_LCTRL):
self._modifiers.append('ctrl')
if mods & (pygame.KMOD_META | pygame.KMOD_LMETA):
self._modifiers.append('meta')
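# Illustrative example (bitmask value assumed): calling
# _pygame_update_modifiers(pygame.KMOD_LSHIFT | pygame.KMOD_LCTRL) would leave
# self._modifiers == ['shift', 'ctrl'].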
def request_keyboard(self, callback, target, input_type='text'):
keyboard = super(WindowPygame, self).request_keyboard(
callback, target, input_type)
if android and not self.allow_vkeyboard:
android.show_keyboard(target, input_type)
return keyboard
def release_keyboard(self, *largs):
super(WindowPygame, self).release_keyboard(*largs)
if android:
android.hide_keyboard()
return True
|
|
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit import conf as cloudbaseinit_conf
from cloudbaseinit.plugins.common import base
from cloudbaseinit.tests import testutils
CONF = cloudbaseinit_conf.CONF
class ConfigWinRMListenerPluginTests(unittest.TestCase):
def setUp(self):
self._mock_wintypes = mock.MagicMock()
self._mock_pywintypes = mock.MagicMock()
self._mock_win32 = mock.MagicMock()
self._moves_mock = mock.MagicMock()
self._module_patcher = mock.patch.dict(
'sys.modules',
{'ctypes': self._mock_wintypes,
'ctypes.wintypes': self._mock_wintypes,
'pywintypes': self._mock_pywintypes,
'win32com': self._mock_win32,
'six.moves': self._moves_mock
})
self._module_patcher.start()
self._winreg_mock = self._moves_mock.winreg
winrmlistener = importlib.import_module('cloudbaseinit.plugins.'
'windows.winrmlistener')
self._winrmlistener = winrmlistener.ConfigWinRMListenerPlugin()
def tearDown(self):
self._module_patcher.stop()
def _test_check_winrm_service(self, service_exists):
mock_osutils = mock.MagicMock()
mock_osutils.check_service_exists.return_value = service_exists
mock_osutils.SERVICE_START_MODE_MANUAL = 'fake start'
mock_osutils.SERVICE_START_MODE_DISABLED = 'fake start'
mock_osutils.SERVICE_STATUS_STOPPED = 'fake status'
mock_osutils.get_service_start_mode.return_value = 'fake start'
mock_osutils.get_service_status.return_value = 'fake status'
with testutils.LogSnatcher('cloudbaseinit.plugins.windows.'
'winrmlistener') as snatcher:
response = self._winrmlistener._check_winrm_service(mock_osutils)
if not service_exists:
expected_logging = [
"Cannot configure the WinRM listener as the service "
"is not available"
]
self.assertEqual(expected_logging, snatcher.output)
self.assertFalse(response)
else:
mock_osutils.get_service_start_mode.assert_called_once_with(
self._winrmlistener._winrm_service_name)
mock_osutils.set_service_start_mode.assert_called_once_with(
self._winrmlistener._winrm_service_name,
mock_osutils.SERVICE_START_MODE_AUTOMATIC)
mock_osutils.get_service_status.assert_called_once_with(
self._winrmlistener._winrm_service_name)
mock_osutils.start_service.assert_called_once_with(
self._winrmlistener._winrm_service_name)
self.assertTrue(response)
def test_check_winrm_service(self):
self._test_check_winrm_service(service_exists=True)
def test_check_winrm_service_no_service(self):
self._test_check_winrm_service(service_exists=False)
@mock.patch('cloudbaseinit.utils.windows.security.'
'WindowsSecurityUtils')
def _test_check_uac_remote_restrictions(self, mock_SecurityUtils,
disable_uac_remote_restrictions):
mock_security_utils = mock.MagicMock()
mock_SecurityUtils.return_value = mock_security_utils
mock_osutils = mock.Mock()
mock_osutils.check_os_version.side_effect = [True, False]
if disable_uac_remote_restrictions:
mock_security_utils.get_uac_remote_restrictions.return_value = \
disable_uac_remote_restrictions
with self._winrmlistener._check_uac_remote_restrictions(mock_osutils):
mock_SecurityUtils.assert_called_once_with()
mock_osutils.check_os_version.assert_has_calls(
[mock.call(6, 0), mock.call(6, 2)])
(mock_security_utils.get_uac_remote_restrictions.
assert_called_once_with())
if disable_uac_remote_restrictions:
expected_set_token_calls = [mock.call(enable=True)]
else:
expected_set_token_calls = [mock.call(enable=False),
mock.call(enable=True)]
mock_security_utils.set_uac_remote_restrictions.assert_has_calls(
expected_set_token_calls)
def test_check_uac_remote_restrictions(self):
self._test_check_uac_remote_restrictions(
disable_uac_remote_restrictions=True)
def test_check_uac_remote_restrictions_no_disable_restrictions(self):
self._test_check_uac_remote_restrictions(
disable_uac_remote_restrictions=False)
def _test_configure_winrm_listener(self, has_listener=True):
mock_listener_config = mock.MagicMock()
mock_winrm_config = mock.MagicMock()
mock_osutils = mock.MagicMock()
mock_osutils.PROTOCOL_TCP = mock.sentinel.PROTOCOL_TCP
mock_winrm_config.get_listener.side_effect = [
has_listener, mock_listener_config]
port = 9999
protocol = mock.sentinel.protocol
cert_thumbprint = mock.sentinel.cert_thumbprint
mock_listener_config.get.return_value = port
self._winrmlistener._configure_winrm_listener(
mock_osutils, mock_winrm_config, protocol, cert_thumbprint)
if has_listener:
mock_winrm_config.delete_listener.assert_called_once_with(
protocol=protocol)
mock_winrm_config.create_listener.assert_called_once_with(
cert_thumbprint=cert_thumbprint, protocol=protocol)
mock_listener_config.get.assert_called_once_with("Port")
mock_osutils.firewall_create_rule.assert_called_once_with(
"WinRM %s" % protocol, port, mock_osutils.PROTOCOL_TCP)
def test_configure_winrm_listener(self):
self._test_configure_winrm_listener()
def test_configure_winrm_listener_no_initial_listener(self):
self._test_configure_winrm_listener(has_listener=False)
def _test_get_winrm_listeners_config(self, listeners_config=None,
http_listener=None,
https_listener=None):
winrmconfig = importlib.import_module('cloudbaseinit.utils.'
'windows.winrmconfig')
mock_service = mock.MagicMock()
mock_service.get_winrm_listeners_configuration.return_value = \
listeners_config
expected_result = listeners_config
if listeners_config is None:
expected_result = []
if http_listener:
expected_result.append(
{"protocol": winrmconfig.LISTENER_PROTOCOL_HTTP})
if https_listener:
expected_result.append(
{"protocol": winrmconfig.LISTENER_PROTOCOL_HTTPS})
with testutils.ConfPatcher("winrm_configure_http_listener",
http_listener):
with testutils.ConfPatcher("winrm_configure_https_listener",
https_listener):
result = self._winrmlistener._get_winrm_listeners_config(
mock_service)
self.assertEqual(result, expected_result)
def test_get_winrm_listeners_config_has_listeners(self):
self._test_get_winrm_listeners_config(
listeners_config=mock.sentinel.listeners)
def test_get_winrm_listeners_config_http_listener(self):
self._test_get_winrm_listeners_config(http_listener=True)
def test_get_winrm_listeners_config_https_listener(self):
self._test_get_winrm_listeners_config(https_listener=True)
@mock.patch('cloudbaseinit.utils.windows.x509.CryptoAPICertManager')
def test_create_self_signed_certificate(self, mock_CryptoAPICertManager):
mock_cert_mgr = mock.MagicMock()
mock_CryptoAPICertManager.return_value = mock_cert_mgr
mock_cert_mgr.create_self_signed_cert.return_value = \
mock.sentinel.cert_thumbprint, mock.sentinel.cert_str
result = self._winrmlistener._create_self_signed_certificate()
self.assertEqual(result, mock.sentinel.cert_thumbprint)
mock_CryptoAPICertManager.assert_called_once_with()
mock_cert_mgr.create_self_signed_cert.assert_called_once_with(
self._winrmlistener._cert_subject)
@mock.patch('cloudbaseinit.plugins.windows.winrmlistener.'
'ConfigWinRMListenerPlugin._configure_winrm_listener')
@mock.patch('cloudbaseinit.plugins.windows.winrmlistener.'
'ConfigWinRMListenerPlugin._check_uac_remote_restrictions')
@mock.patch('cloudbaseinit.plugins.windows.winrmlistener.'
'ConfigWinRMListenerPlugin._get_winrm_listeners_config')
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
@mock.patch('cloudbaseinit.plugins.windows.winrmlistener.'
'ConfigWinRMListenerPlugin._check_winrm_service')
@mock.patch('cloudbaseinit.utils.windows.winrmconfig.WinRMConfig')
@mock.patch('cloudbaseinit.plugins.windows.winrmlistener'
'.ConfigWinRMListenerPlugin._create_self_signed_certificate')
def _test_execute(self, mock_create_cert, mock_WinRMConfig,
mock_check_winrm_service, mock_get_os_utils,
mock_get_winrm_listeners, mock_check_restrictions,
mock_configure_listener,
service_status=True, protocol=None,
listeners_config=True, certificate_thumbprint=None):
mock_winrm_config = mock.MagicMock()
mock_WinRMConfig.return_value = mock_winrm_config
mock_osutils = mock.MagicMock()
mock_get_os_utils.return_value = mock_osutils
mock_check_winrm_service.return_value = service_status
if not service_status:
expected_result = (base.PLUGIN_EXECUTE_ON_NEXT_BOOT, False)
elif not listeners_config:
mock_get_winrm_listeners.return_value = None
expected_result = (base.PLUGIN_EXECUTION_DONE, False)
else:
expected_result = (base.PLUGIN_EXECUTION_DONE, False)
if certificate_thumbprint is not None:
certificate_thumbprint = \
str(mock.sentinel.certificate_thumbprint)
listener_config = {
"protocol": protocol,
"certificate_thumbprint": certificate_thumbprint
}
mock_get_winrm_listeners.return_value = [listener_config]
winrm_enable_basic_auth = mock.Mock(spec=bool)
with testutils.ConfPatcher('winrm_enable_basic_auth',
winrm_enable_basic_auth):
result = self._winrmlistener.execute(
mock.sentinel.service, mock.sentinel.shared_data)
self.assertEqual(result, expected_result)
mock_get_os_utils.assert_called_once_with()
mock_check_winrm_service.assert_called_once_with(mock_osutils)
if service_status:
mock_get_winrm_listeners.assert_called_once_with(
mock.sentinel.service)
if listeners_config:
mock_check_restrictions.assert_called_once_with(mock_osutils)
mock_WinRMConfig.assert_called_once_with()
mock_winrm_config.set_auth_config.assert_called_once_with(
basic=winrm_enable_basic_auth)
winrmconfig = importlib.import_module('cloudbaseinit.utils.'
'windows.winrmconfig')
if (protocol == winrmconfig.LISTENER_PROTOCOL_HTTPS and
not certificate_thumbprint):
certificate_thumbprint = mock_create_cert.return_value
mock_create_cert.assert_called_once_with()
mock_configure_listener.assert_called_once_with(
mock_osutils, mock_winrm_config, protocol.upper(),
certificate_thumbprint)
def test_execute_service_status_is_false(self):
self._test_execute(service_status=False)
def test_execute_no_listeners_config(self):
self._test_execute(listeners_config=None)
def test_execute_http_protocol(self):
self._test_execute(protocol=str(mock.sentinel.http))
def test_execute_https_protocol(self):
self._test_execute(protocol="HTTPS")
|
|
# -*- coding: utf-8 -*-
"""
sphinx.util.stemmer
~~~~~~~~~~~~~~~~~~~
Porter Stemming Algorithm
This is the Porter stemming algorithm, ported to Python from the
version coded up in ANSI C by the author. It may be regarded
as canonical, in that it follows the algorithm presented in
Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14,
no. 3, pp 130-137,
only differing from it at the points marked --DEPARTURE-- below.
See also http://www.tartarus.org/~martin/PorterStemmer
The algorithm as described in the paper could be exactly replicated
by adjusting the points of DEPARTURE, but this is barely necessary,
because (a) the points of DEPARTURE are definitely improvements, and
(b) no encoding of the Porter stemmer I have seen is anything like
as exact as this version, even with the points of DEPARTURE!
Release 1: January 2001
:copyright: Copyright 2001 by Vivake Gupta <v@nano.com>.
:license: Public Domain ("can be used free of charge for any purpose").
"""
class PorterStemmer(object):
def __init__(self):
"""The main part of the stemming algorithm starts here.
b is a buffer holding a word to be stemmed. The letters are in b[k0],
b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is
readjusted downwards as the stemming progresses. Zero termination is
not in fact used in the algorithm.
Note that only lower case sequences are stemmed. Forcing to lower case
should be done before stem(...) is called.
"""
self.b = "" # buffer for word to be stemmed
self.k = 0
self.k0 = 0
self.j = 0 # j is a general offset into the string
def cons(self, i):
"""cons(i) is TRUE <=> b[i] is a consonant."""
if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' \
or self.b[i] == 'o' or self.b[i] == 'u':
return 0
if self.b[i] == 'y':
if i == self.k0:
return 1
else:
return (not self.cons(i - 1))
return 1
def m(self):
"""m() measures the number of consonant sequences between k0 and j.
if c is a consonant sequence and v a vowel sequence, and <..>
indicates arbitrary presence,
<c><v> gives 0
<c>vc<v> gives 1
<c>vcvc<v> gives 2
<c>vcvcvc<v> gives 3
....
"""
n = 0
i = self.k0
while 1:
if i > self.j:
return n
if not self.cons(i):
break
i = i + 1
i = i + 1
while 1:
while 1:
if i > self.j:
return n
if self.cons(i):
break
i = i + 1
i = i + 1
n = n + 1
while 1:
if i > self.j:
return n
if not self.cons(i):
break
i = i + 1
i = i + 1
def vowelinstem(self):
"""vowelinstem() is TRUE <=> k0,...j contains a vowel"""
for i in range(self.k0, self.j + 1):
if not self.cons(i):
return 1
return 0
def doublec(self, j):
"""doublec(j) is TRUE <=> j,(j-1) contain a double consonant."""
if j < (self.k0 + 1):
return 0
if (self.b[j] != self.b[j - 1]):
return 0
return self.cons(j)
def cvc(self, i):
"""cvc(i) is TRUE <=> i-2,i-1,i has the form
consonant - vowel - consonant
and also if the second c is not w,x or y. this is used when trying to
restore an e at the end of a short word, e.g.
cav(e), lov(e), hop(e), crim(e), but
snow, box, tray.
"""
if i < (self.k0 + 2) or not self.cons(i) or self.cons(i - 1) \
or not self.cons(i - 2):
return 0
ch = self.b[i]
if ch == 'w' or ch == 'x' or ch == 'y':
return 0
return 1
def ends(self, s):
"""ends(s) is TRUE <=> k0,...k ends with the string s."""
length = len(s)
if s[length - 1] != self.b[self.k]: # tiny speed-up
return 0
if length > (self.k - self.k0 + 1):
return 0
if self.b[self.k - length + 1:self.k + 1] != s:
return 0
self.j = self.k - length
return 1
def setto(self, s):
"""setto(s) sets (j+1),...k to the characters in the string s,
readjusting k."""
length = len(s)
self.b = self.b[:self.j + 1] + s + self.b[self.j + length + 1:]
self.k = self.j + length
def r(self, s):
"""r(s) is used further down."""
if self.m() > 0:
self.setto(s)
def step1ab(self):
"""step1ab() gets rid of plurals and -ed or -ing. e.g.
caresses -> caress
ponies -> poni
ties -> ti
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet
"""
if self.b[self.k] == 's':
if self.ends("sses"):
self.k = self.k - 2
elif self.ends("ies"):
self.setto("i")
elif self.b[self.k - 1] != 's':
self.k = self.k - 1
if self.ends("eed"):
if self.m() > 0:
self.k = self.k - 1
elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
self.k = self.j
if self.ends("at"):
self.setto("ate")
elif self.ends("bl"):
self.setto("ble")
elif self.ends("iz"):
self.setto("ize")
elif self.doublec(self.k):
self.k = self.k - 1
ch = self.b[self.k]
if ch == 'l' or ch == 's' or ch == 'z':
self.k = self.k + 1
elif (self.m() == 1 and self.cvc(self.k)):
self.setto("e")
def step1c(self):
"""step1c() turns terminal y to i when there is another vowel in
the stem."""
if (self.ends("y") and self.vowelinstem()):
self.b = self.b[:self.k] + 'i' + self.b[self.k + 1:]
def step2(self):
"""step2() maps double suffices to single ones.
so -ization ( = -ize plus -ation) maps to -ize etc. note that the
string before the suffix must give m() > 0.
"""
if self.b[self.k - 1] == 'a':
if self.ends("ational"):
self.r("ate")
elif self.ends("tional"):
self.r("tion")
elif self.b[self.k - 1] == 'c':
if self.ends("enci"):
self.r("ence")
elif self.ends("anci"):
self.r("ance")
elif self.b[self.k - 1] == 'e':
if self.ends("izer"):
self.r("ize")
elif self.b[self.k - 1] == 'l':
if self.ends("bli"):
self.r("ble") # --DEPARTURE--
# To match the published algorithm, replace this phrase with
# if self.ends("abli"): self.r("able")
elif self.ends("alli"):
self.r("al")
elif self.ends("entli"):
self.r("ent")
elif self.ends("eli"):
self.r("e")
elif self.ends("ousli"):
self.r("ous")
elif self.b[self.k - 1] == 'o':
if self.ends("ization"):
self.r("ize")
elif self.ends("ation"):
self.r("ate")
elif self.ends("ator"):
self.r("ate")
elif self.b[self.k - 1] == 's':
if self.ends("alism"):
self.r("al")
elif self.ends("iveness"):
self.r("ive")
elif self.ends("fulness"):
self.r("ful")
elif self.ends("ousness"):
self.r("ous")
elif self.b[self.k - 1] == 't':
if self.ends("aliti"):
self.r("al")
elif self.ends("iviti"):
self.r("ive")
elif self.ends("biliti"):
self.r("ble")
elif self.b[self.k - 1] == 'g': # --DEPARTURE--
if self.ends("logi"):
self.r("log")
# To match the published algorithm, delete this phrase
def step3(self):
"""step3() dels with -ic-, -full, -ness etc. similar strategy
to step2."""
if self.b[self.k] == 'e':
if self.ends("icate"):
self.r("ic")
elif self.ends("ative"):
self.r("")
elif self.ends("alize"):
self.r("al")
elif self.b[self.k] == 'i':
if self.ends("iciti"):
self.r("ic")
elif self.b[self.k] == 'l':
if self.ends("ical"):
self.r("ic")
elif self.ends("ful"):
self.r("")
elif self.b[self.k] == 's':
if self.ends("ness"):
self.r("")
def step4(self):
"""step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
if self.b[self.k - 1] == 'a':
if self.ends("al"):
pass
else:
return
elif self.b[self.k - 1] == 'c':
if self.ends("ance"):
pass
elif self.ends("ence"):
pass
else:
return
elif self.b[self.k - 1] == 'e':
if self.ends("er"):
pass
else:
return
elif self.b[self.k - 1] == 'i':
if self.ends("ic"):
pass
else:
return
elif self.b[self.k - 1] == 'l':
if self.ends("able"):
pass
elif self.ends("ible"):
pass
else:
return
elif self.b[self.k - 1] == 'n':
if self.ends("ant"):
pass
elif self.ends("ement"):
pass
elif self.ends("ment"):
pass
elif self.ends("ent"):
pass
else:
return
elif self.b[self.k - 1] == 'o':
if self.ends("ion") and (self.b[self.j] == 's' or
self.b[self.j] == 't'):
pass
elif self.ends("ou"):
pass
# takes care of -ous
else:
return
elif self.b[self.k - 1] == 's':
if self.ends("ism"):
pass
else:
return
elif self.b[self.k - 1] == 't':
if self.ends("ate"):
pass
elif self.ends("iti"):
pass
else:
return
elif self.b[self.k - 1] == 'u':
if self.ends("ous"):
pass
else:
return
elif self.b[self.k - 1] == 'v':
if self.ends("ive"):
pass
else:
return
elif self.b[self.k - 1] == 'z':
if self.ends("ize"):
pass
else:
return
else:
return
if self.m() > 1:
self.k = self.j
def step5(self):
"""step5() removes a final -e if m() > 1, and changes -ll to -l if
m() > 1.
"""
self.j = self.k
if self.b[self.k] == 'e':
a = self.m()
if a > 1 or (a == 1 and not self.cvc(self.k - 1)):
self.k = self.k - 1
if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
self.k = self.k - 1
def stem(self, p, i, j):
"""In stem(p,i,j), p is a char pointer, and the string to be stemmed
is from p[i] to p[j] inclusive. Typically i is zero and j is the
offset to the last character of a string, (p[j+1] == '\0'). The
stemmer adjusts the characters p[i] ... p[j] and returns the new
end-point of the string, k. Stemming never increases word length, so
i <= k <= j. To turn the stemmer into a module, declare 'stem' as
extern, and delete the remainder of this file.
"""
# copy the parameters into statics
self.b = p
self.k = j
self.k0 = i
if self.k <= self.k0 + 1:
return self.b # --DEPARTURE--
# With this line, strings of length 1 or 2 don't go through the
# stemming process, although no mention is made of this in the
# published algorithm. Remove the line to match the published
# algorithm.
self.step1ab()
self.step1c()
self.step2()
self.step3()
self.step4()
self.step5()
return self.b[self.k0:self.k + 1]
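if __name__ == '__main__':
    # Illustrative usage sketch: stem a few lower-case words with the class
    # above. The expected results in the trailing comment follow the examples
    # given in the step1ab() docstring.
    p = PorterStemmer()
    for word in ('caresses', 'ponies', 'meetings'):
        print('%s -> %s' % (word, p.stem(word, 0, len(word) - 1)))
    # caresses -> caress, ponies -> poni, meetings -> meet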
|
|
"""An abstract class for entities."""
import re
from collections import defaultdict
from homeassistant.const import (
ATTR_ASSUMED_STATE, ATTR_FRIENDLY_NAME, ATTR_HIDDEN, ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT, DEVICE_DEFAULT_NAME, STATE_OFF, STATE_ON,
STATE_UNAVAILABLE, STATE_UNKNOWN, TEMP_CELSIUS, TEMP_FAHRENHEIT,
ATTR_ENTITY_PICTURE)
from homeassistant.exceptions import NoEntitySpecifiedError
from homeassistant.util import ensure_unique_string, slugify
# Dict mapping entity_id to a dict of attribute overwrites (e.g. hidden)
_OVERWRITE = defaultdict(dict)
# Pattern for validating entity IDs (format: <domain>.<entity>)
ENTITY_ID_PATTERN = re.compile(r"^(\w+)\.(\w+)$")
def generate_entity_id(entity_id_format, name, current_ids=None, hass=None):
"""Generate a unique entity ID based on given entity IDs or used IDs."""
name = (name or DEVICE_DEFAULT_NAME).lower()
if current_ids is None:
if hass is None:
raise RuntimeError("Missing required parameter currentids or hass")
current_ids = hass.states.entity_ids()
return ensure_unique_string(
entity_id_format.format(slugify(name)), current_ids)
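# Illustrative example (values are hypothetical): with the helper above,
#   generate_entity_id('sensor.{}', 'Living Room', current_ids=['sensor.living_room'])
# lowercases and slugifies the name to 'living_room' and, since that ID is
# already taken, returns a uniquified ID such as 'sensor.living_room_2'.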
def split_entity_id(entity_id):
"""Split a state entity_id into domain, object_id."""
return entity_id.split(".", 1)
def valid_entity_id(entity_id):
"""Test if an entity ID is a valid format."""
return ENTITY_ID_PATTERN.match(entity_id) is not None
class Entity(object):
"""An abstract class for Home Assistant entities."""
# pylint: disable=no-self-use
# SAFE TO OVERWRITE
# The properties and methods here are safe to overwrite when inheriting
# this class. These may be used to customize the behavior of the entity.
@property
def should_poll(self):
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return True
@property
def unique_id(self):
"""Return an unique ID."""
return "{}.{}".format(self.__class__, id(self))
@property
def name(self):
"""Return the name of the entity."""
return None
@property
def state(self):
"""Return the state of the entity."""
return STATE_UNKNOWN
@property
def state_attributes(self):
"""Return the state attributes.
Implemented by component base class.
"""
return None
@property
def device_state_attributes(self):
"""Return device specific state attributes.
Implemented by platform classes.
"""
return None
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return None
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return None
@property
def entity_picture(self):
"""Return the entity picture to use in the frontend, if any."""
return None
@property
def hidden(self):
"""Return True if the entity should be hidden from UIs."""
return False
@property
def available(self):
"""Return True if entity is available."""
return True
@property
def assumed_state(self):
"""Return True if unable to access real state of the entity."""
return False
def update(self):
"""Retrieve latest state."""
pass
entity_id = None
# DO NOT OVERWRITE
# These properties and methods are either managed by Home Assistant or they
# are used to perform a very specific function. Overwriting these may
# produce undesirable effects in the entity's operation.
hass = None
def update_ha_state(self, force_refresh=False):
"""Update Home Assistant with current state of entity.
If force_refresh is True, the entity is updated before its state is set.
"""
if self.hass is None:
raise RuntimeError("Attribute hass is None for {}".format(self))
if self.entity_id is None:
raise NoEntitySpecifiedError(
"No entity id specified for entity {}".format(self.name))
if force_refresh:
self.update()
state = STATE_UNKNOWN if self.state is None else str(self.state)
attr = self.state_attributes or {}
device_attr = self.device_state_attributes
if device_attr is not None:
attr.update(device_attr)
self._attr_setter('unit_of_measurement', str, ATTR_UNIT_OF_MEASUREMENT,
attr)
if not self.available:
state = STATE_UNAVAILABLE
attr = {}
self._attr_setter('name', str, ATTR_FRIENDLY_NAME, attr)
self._attr_setter('icon', str, ATTR_ICON, attr)
self._attr_setter('entity_picture', str, ATTR_ENTITY_PICTURE, attr)
self._attr_setter('hidden', bool, ATTR_HIDDEN, attr)
self._attr_setter('assumed_state', bool, ATTR_ASSUMED_STATE, attr)
# Overwrite properties that have been set in the config file.
attr.update(_OVERWRITE.get(self.entity_id, {}))
# Remove hidden property if false so it won't show up.
if not attr.get(ATTR_HIDDEN, True):
attr.pop(ATTR_HIDDEN)
# Convert temperature if we detect one
if attr.get(ATTR_UNIT_OF_MEASUREMENT) in (TEMP_CELSIUS,
TEMP_FAHRENHEIT):
state, attr[ATTR_UNIT_OF_MEASUREMENT] = \
self.hass.config.temperature(
state, attr[ATTR_UNIT_OF_MEASUREMENT])
state = str(state)
return self.hass.states.set(self.entity_id, state, attr)
def _attr_setter(self, name, typ, attr, attrs):
"""Helper method to populate attributes based on properties."""
if attr in attrs:
return
value = getattr(self, name)
if not value:
return
try:
attrs[attr] = typ(value)
except (TypeError, ValueError):
pass
def __eq__(self, other):
"""Return the comparison."""
return (isinstance(other, Entity) and
other.unique_id == self.unique_id)
def __repr__(self):
"""Return the representation."""
return "<Entity {}: {}>".format(self.name, self.state)
@staticmethod
def overwrite_attribute(entity_id, attrs, vals):
"""Overwrite any attribute of an entity.
This function should receive a list of attributes and a
list of values. Set a value to None to remove any previously
overwritten value for that attribute.
"""
for attr, val in zip(attrs, vals):
if val is None:
_OVERWRITE[entity_id.lower()].pop(attr, None)
else:
_OVERWRITE[entity_id.lower()][attr] = val
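# Illustrative example (entity ID and values are hypothetical): hide an entity
# and override its friendly name, then drop the hidden override again:
#   Entity.overwrite_attribute('light.kitchen', ['hidden', 'friendly_name'],
#                              [True, 'Kitchen Light'])
#   Entity.overwrite_attribute('light.kitchen', ['hidden'], [None])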
class ToggleEntity(Entity):
"""An abstract class for entities that can be turned on and off."""
# pylint: disable=no-self-use
@property
def state(self):
"""Return the state."""
return STATE_ON if self.is_on else STATE_OFF
@property
def is_on(self):
"""Return True if entity is on."""
return False
def turn_on(self, **kwargs):
"""Turn the entity on."""
pass
def turn_off(self, **kwargs):
"""Turn the entity off."""
pass
def toggle(self, **kwargs):
"""Toggle the entity off."""
if self.is_on:
self.turn_off(**kwargs)
else:
self.turn_on(**kwargs)
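# Illustrative sketch (class name and values are made up): a minimal concrete
# entity built on the abstract class above, showing which properties a platform
# typically overrides. update_ha_state() would still require Home Assistant to
# assign `hass` and `entity_id` before it can be called.
class _ExampleTemperatureSensor(Entity):
    """A trivial push-style sensor used only for illustration."""

    @property
    def should_poll(self):
        # The sensor pushes its own updates, so Home Assistant need not poll it.
        return False

    @property
    def name(self):
        return 'Example Temperature'

    @property
    def state(self):
        return 21.5

    @property
    def unit_of_measurement(self):
        return TEMP_CELSIUS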
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.jvm.tasks.scalastyle import Scalastyle
from pants.base.address import BuildFileAddress
from pants.base.config import Config
from pants.base.exceptions import TaskError
from pants_test.jvm.nailgun_task_test_base import NailgunTaskTestBase
class ScalastyleTest(NailgunTaskTestBase):
"""Tests for the class Scalastyle."""
@classmethod
def task_type(cls):
return Scalastyle
#
# Internal test helper section
#
def _with_skip_option(self):
return {
self.options_scope: { 'skip': True }
}
def _with_no_skip_option(self):
return {
self.options_scope: { 'skip': False }
}
def _create_scalastyle_config_file(self, rules=None):
# put a default rule there if rules are not specified.
rules = rules or ['org.scalastyle.scalariform.ImportGroupingChecker']
rule_section_xml = ''
for rule in rules:
rule_section_xml += dedent('''
<check level="error" class="{rule}" enabled="true"></check>
'''.format(rule=rule))
return self.create_file(
relpath='scalastyle_config.xml',
contents=dedent('''
<scalastyle commentFilter="enabled">
<name>Test Scalastyle configuration</name>
{rule_section_xml}
</scalastyle>
'''.format(rule_section_xml=rule_section_xml)))
def _create_scalastyle_excludes_file(self, exclude_patterns=None):
return self.create_file(
relpath='scalastyle_excludes.txt',
contents='\n'.join(exclude_patterns) if exclude_patterns else '')
def _create_context(self, config=None, options=None, target_roots=None):
# If config is not specified, then we override pants.ini scalastyle such that
# we have a default scalastyle config xml but with empty excludes.
# Also by default, the task shouldn't be skipped, so use no skip option.
return self.context(
config=config or dedent('''
[scalastyle]
config: {config}
excludes:
'''.format(config=self._create_scalastyle_config_file())),
options=options or self._with_no_skip_option(),
target_roots=target_roots)
def _create_scalastyle_task(self, config=None, options=None):
return self.create_task(self._create_context(config, options), self.build_root)
def _create_scalastyle_task_from_context(self, context=None):
if context:
return self.create_task(context, self.build_root)
else:
return self._create_scalastyle_task()
#
# Test section
#
def test_initialize_config_no_config_settings(self):
with self.assertRaises(Config.ConfigError):
task = self._create_scalastyle_task(config=dedent('''
[scalastyle]
# override the default pants.ini [scalastyle].config with empty string
# to test the logic: if config setting not specified, we should throw.
config:
excludes: file_does_not_exist.xml
'''))
def test_initialize_config_config_setting_exist_but_invalid(self):
with self.assertRaises(Config.ConfigError):
self._create_scalastyle_task(config=dedent('''
[scalastyle]
config: file_does_not_exist.xml
excludes:
'''))
def test_initialize_config_no_excludes_setting(self):
task = self._create_scalastyle_task(config=dedent('''
[scalastyle]
config: {config}
excludes:
'''.format(config=self._create_scalastyle_config_file())))
# config file shouldn't be none and the task shouldn't be skipped.
self.assertIsNotNone(task._scalastyle_config)
self.assertFalse(task._should_skip)
# but the excludes pattern should remain none.
self.assertIsNone(task._scalastyle_excludes)
def test_initialize_config_excludes_setting_exist_but_invalid(self):
with self.assertRaises(Config.ConfigError):
self._create_scalastyle_task(config=dedent('''
[scalastyle]
config: {config}
excludes: file_does_not_exist.xml
'''.format(config=self._create_scalastyle_config_file())))
def test_initialize_config_excludes_parsed_loaded_correctly(self):
task = self._create_scalastyle_task(config=dedent('''
[scalastyle]
config: {config}
excludes: {excludes}
'''.format(
config=self._create_scalastyle_config_file(),
excludes=self._create_scalastyle_excludes_file(['.*\.cpp', '.*\.py']))))
self.assertEqual(2, len(task._scalastyle_excludes))
self.assertTrue(task._should_include_source('com/some/org/x.scala'))
self.assertFalse(task._should_include_source('com/some/org/y.cpp'))
self.assertFalse(task._should_include_source('z.py'))
def test_should_skip_if_skip_option_specified(self):
task = self._create_scalastyle_task(options=self._with_skip_option())
self.assertIsNotNone(task._scalastyle_config)
self.assertTrue(task._should_skip)
def test_get_non_synthetic_scala_targets(self):
# Create a custom context so we can manually inject multiple
# targets of different source types and synthetic vs non-synthetic
# to test the target filtering logic.
# scala_library - should remain.
scala_target_address = BuildFileAddress(
self.add_to_build_file(
'a/scala/BUILD', 'scala_library(name="s", sources=["Source.scala"])'),
's')
self.build_graph.inject_address_closure(scala_target_address)
scala_target = self.build_graph.get_target(scala_target_address)
# scala_library but with java sources - should be filtered
scala_target_java_source_address = BuildFileAddress(
self.add_to_build_file(
'a/scala_java/BUILD', 'scala_library(name="sj", sources=["Source.java"])'),
'sj')
self.build_graph.inject_address_closure(scala_target_java_source_address)
scala_target_with_java_source = self.build_graph.get_target(
scala_target_java_source_address)
# java_library - should be filtered
java_target_address = BuildFileAddress(
self.add_to_build_file(
'a/java/BUILD', 'java_library(name="j", sources=["Source.java"])'),
'j')
self.build_graph.inject_address_closure(java_target_address)
java_target = self.build_graph.get_target(java_target_address)
# synthetic scala_library - should be filtered
synthetic_scala_target = self.make_target('a/synthetic_scala:ss', ScalaLibrary)
context = self._create_context(
target_roots=[
java_target,
scala_target,
scala_target_with_java_source,
synthetic_scala_target
])
# scala_library would bring in the 'scala-library' target defined in BUILD.tools,
# so we have an extra target here.
self.assertEqual(5, len(context.targets()))
# Now create the task and run the non_synthetic scala-only filtering.
task = self._create_scalastyle_task_from_context(context)
result_targets = task._get_non_synthetic_scala_targets(context.targets())
# Only the scala target should remain
self.assertEqual(1, len(result_targets))
self.assertEqual(scala_target, result_targets[0])
def test_get_non_excluded_scala_sources(self):
# Create a custom context so we can manually inject scala targets
# with mixed sources in them to test the source filtering logic.
context = self._create_context(config=dedent('''
[scalastyle]
config: {config}
excludes: {excludes}
'''.format(
config=self._create_scalastyle_config_file(),
excludes=self._create_scalastyle_excludes_file(['a/scala_2/Source2.scala']))))
# this scala target has mixed *.scala and *.java sources.
# the *.java source should be filtered out.
scala_target_address_1 = BuildFileAddress(
self.add_to_build_file(
'a/scala_1/BUILD',
'scala_library(name="s1", sources=["Source1.java", "Source1.scala"])'),
's1')
self.build_graph.inject_address_closure(scala_target_address_1)
scala_target_1 = self.build_graph.get_target(scala_target_address_1)
# this scala target has a single *.scala source but will be excluded
# by the [scalastyle] excludes setting.
scala_target_address_2 = BuildFileAddress(
self.add_to_build_file(
'a/scala_2/BUILD', 'scala_library(name="s2", sources=["Source2.scala"])'),
's2')
self.build_graph.inject_address_closure(scala_target_address_2)
scala_target_2 = self.build_graph.get_target(scala_target_address_2)
context = self._create_context(
config=dedent('''
[scalastyle]
config: {config}
excludes: {excludes}
'''.format(config=self._create_scalastyle_config_file(),
excludes=self._create_scalastyle_excludes_file(['a/scala_2/Source2.scala']))),
target_roots=[
scala_target_1,
scala_target_2
])
# Remember, we have the extra 'scala-library-2.9.3' dep target.
self.assertEqual(3, len(context.targets()))
# Now create the task and run the scala source and exclusion filtering.
task = self._create_scalastyle_task_from_context(context)
result_sources = task._get_non_excluded_scala_sources(
task._get_non_synthetic_scala_targets(context.targets()))
# Only the scala source from target 1 should remain
self.assertEqual(1, len(result_sources))
self.assertEqual('a/scala_1/Source1.scala', result_sources[0])
def test_end_to_end_pass(self):
# Default scalastyle config (import grouping rule) and no excludes.
# Create a scala source that would PASS ImportGroupingChecker rule.
self.create_file(
relpath='a/scala/pass.scala',
contents=dedent('''
import java.util
object HelloWorld {
def main(args: Array[String]) {
println("Hello, world!")
}
}
'''))
scala_target_address = BuildFileAddress(
self.add_to_build_file(
'a/scala/BUILD', 'scala_library(name="pass", sources=["pass.scala"])'),
'pass')
self.build_graph.inject_address_closure(scala_target_address)
scala_target = self.build_graph.get_target(scala_target_address)
context = self._create_context(target_roots=[scala_target])
self.execute(context)
def test_fail(self):
# Default scalastyle config (import grouping rule) and no excludes.
# Create a scala source that would FAIL ImportGroupingChecker rule.
self.create_file(
relpath='a/scala/fail.scala',
contents=dedent('''
import java.io._
object HelloWorld {
def main(args: Array[String]) {
println("Hello, world!")
}
}
import java.util._
'''))
scala_target_address = BuildFileAddress(
self.add_to_build_file(
'a/scala/BUILD', 'scala_library(name="fail", sources=["fail.scala"])'),
'fail')
self.build_graph.inject_address_closure(scala_target_address)
scala_target = self.build_graph.get_target(scala_target_address)
context = self._create_context(target_roots=[scala_target])
with self.assertRaises(TaskError):
self.execute(context)
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class dnspolicylabel_policybinding_binding(base_resource) :
""" Binding class showing the policybinding that can be bound to dnspolicylabel.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._gotopriorityexpression = ""
self._invoke = False
self._labeltype = ""
self._invoke_labelname = ""
self._labelname = ""
self.___count = 0
@property
def priority(self) :
"""Specifies the priority of the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""Specifies the priority of the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def policyname(self) :
"""The dns policy name.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""The dns policy name.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labeltype(self) :
"""Type of policy label invocation.<br/>Possible values = policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
"""Type of policy label invocation.<br/>Possible values = policylabel
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the dns policy label.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
"""Name of the dns policy label.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def invoke_labelname(self) :
"""Name of the label to invoke if the current policy rule evaluates to TRUE.
"""
try :
return self._invoke_labelname
except Exception as e:
raise e
@invoke_labelname.setter
def invoke_labelname(self, invoke_labelname) :
"""Name of the label to invoke if the current policy rule evaluates to TRUE.
"""
try :
self._invoke_labelname = invoke_labelname
except Exception as e:
raise e
@property
def invoke(self) :
"""Invoke flag.
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
"""Invoke flag.
"""
try :
self._invoke = invoke
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(dnspolicylabel_policybinding_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.dnspolicylabel_policybinding_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.labelname) :
return str(self.labelname)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, labelname) :
""" Use this API to fetch dnspolicylabel_policybinding_binding resources.
"""
try :
obj = dnspolicylabel_policybinding_binding()
obj.labelname = labelname
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, labelname, filter_) :
""" Use this API to fetch filtered set of dnspolicylabel_policybinding_binding resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = dnspolicylabel_policybinding_binding()
obj.labelname = labelname
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
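	# Hedged usage sketch (illustrative, not part of the generated NITRO
	# bindings): assuming an authenticated nitro_service client `client`
	# created elsewhere, the bindings for a label could be fetched and
	# filtered with the classmethods above, e.g.:
	#     all_bindings = dnspolicylabel_policybinding_binding.get(client, "my_label")
	#     high_prio = dnspolicylabel_policybinding_binding.get_filtered(
	#         client, "my_label", "priority:10")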
@classmethod
def count(cls, service, labelname) :
""" Use this API to count dnspolicylabel_policybinding_binding resources configued on NetScaler.
"""
try :
obj = dnspolicylabel_policybinding_binding()
obj.labelname = labelname
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, labelname, filter_) :
""" Use this API to count the filtered set of dnspolicylabel_policybinding_binding resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = dnspolicylabel_policybinding_binding()
obj.labelname = labelname
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Labeltype:
policylabel = "policylabel"
class dnspolicylabel_policybinding_binding_response(base_response) :
def __init__(self, length=1) :
self.dnspolicylabel_policybinding_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.dnspolicylabel_policybinding_binding = [dnspolicylabel_policybinding_binding() for _ in range(length)]
|
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from collections import namedtuple
import re
from boto3.exceptions import DynamoDBOperationNotSupportedError
from boto3.exceptions import DynamoDBNeedsConditionError
from boto3.exceptions import DynamoDBNeedsKeyConditionError
ATTR_NAME_REGEX = re.compile(r'[^.\[\]]+(?![^\[]*\])')
class ConditionBase(object):
expression_format = ''
expression_operator = ''
has_grouped_values = False
def __init__(self, *values):
self._values = values
def __and__(self, other):
if not isinstance(other, ConditionBase):
raise DynamoDBOperationNotSupportedError('AND', other)
return And(self, other)
def __or__(self, other):
if not isinstance(other, ConditionBase):
raise DynamoDBOperationNotSupportedError('OR', other)
return Or(self, other)
def __invert__(self):
return Not(self)
def get_expression(self):
return {'format': self.expression_format,
'operator': self.expression_operator,
'values': self._values}
def __eq__(self, other):
if isinstance(other, type(self)):
if self._values == other._values:
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
class AttributeBase(object):
def __init__(self, name):
self.name = name
def __and__(self, value):
raise DynamoDBOperationNotSupportedError('AND', self)
def __or__(self, value):
raise DynamoDBOperationNotSupportedError('OR', self)
def __invert__(self):
raise DynamoDBOperationNotSupportedError('NOT', self)
def eq(self, value):
"""Creates a condition where the attribute is equal to the value.
:param value: The value that the attribute is equal to.
"""
return Equals(self, value)
def lt(self, value):
"""Creates a condition where the attribute is less than the value.
:param value: The value that the attribute is less than.
"""
return LessThan(self, value)
def lte(self, value):
"""Creates a condition where the attribute is less than or equal to the
value.
:param value: The value that the attribute is less than or equal to.
"""
return LessThanEquals(self, value)
def gt(self, value):
"""Creates a condition where the attribute is greater than the value.
:param value: The value that the attribute is greater than.
"""
return GreaterThan(self, value)
def gte(self, value):
"""Creates a condition where the attribute is greater than or equal to
the value.
:param value: The value that the attribute is greater than or equal to.
"""
return GreaterThanEquals(self, value)
def begins_with(self, value):
"""Creates a condition where the attribute begins with the value.
:param value: The value that the attribute begins with.
"""
return BeginsWith(self, value)
def between(self, low_value, high_value):
"""Creates a condition where the attribute is greater than or equal
to the low value and less than or equal to the high value.
:param low_value: The value that the attribute is greater than or equal to.
:param high_value: The value that the attribute is less than or equal to.
"""
return Between(self, low_value, high_value)
def __eq__(self, other):
return isinstance(other, type(self)) and self.name == other.name
def __ne__(self, other):
return not self.__eq__(other)
class ConditionAttributeBase(ConditionBase, AttributeBase):
"""This base class is for conditions that can have attribute methods.
One example is the Size condition. To complete a condition, you need
to apply another AttributeBase method like eq().
"""
def __init__(self, *values):
ConditionBase.__init__(self, *values)
# This is assuming the first value of the condition is the attribute,
# which can be used to generate its attribute base.
AttributeBase.__init__(self, values[0].name)
def __eq__(self, other):
return ConditionBase.__eq__(self, other) and \
AttributeBase.__eq__(self, other)
def __ne__(self, other):
return not self.__eq__(other)
class ComparisonCondition(ConditionBase):
expression_format = '{0} {operator} {1}'
class Equals(ComparisonCondition):
expression_operator = '='
class NotEquals(ComparisonCondition):
expression_operator = '<>'
class LessThan(ComparisonCondition):
expression_operator = '<'
class LessThanEquals(ComparisonCondition):
expression_operator = '<='
class GreaterThan(ComparisonCondition):
expression_operator = '>'
class GreaterThanEquals(ComparisonCondition):
expression_operator = '>='
class In(ComparisonCondition):
expression_operator = 'IN'
has_grouped_values = True
class Between(ConditionBase):
expression_operator = 'BETWEEN'
expression_format = '{0} {operator} {1} AND {2}'
class BeginsWith(ConditionBase):
expression_operator = 'begins_with'
expression_format = '{operator}({0}, {1})'
class Contains(ConditionBase):
expression_operator = 'contains'
expression_format = '{operator}({0}, {1})'
class Size(ConditionAttributeBase):
expression_operator = 'size'
expression_format = '{operator}({0})'
class AttributeType(ConditionBase):
expression_operator = 'attribute_type'
expression_format = '{operator}({0}, {1})'
class AttributeExists(ConditionBase):
expression_operator = 'attribute_exists'
expression_format = '{operator}({0})'
class AttributeNotExists(ConditionBase):
expression_operator = 'attribute_not_exists'
expression_format = '{operator}({0})'
class And(ConditionBase):
expression_operator = 'AND'
expression_format = '({0} {operator} {1})'
class Or(ConditionBase):
expression_operator = 'OR'
expression_format = '({0} {operator} {1})'
class Not(ConditionBase):
expression_operator = 'NOT'
expression_format = '({operator} {0})'
class Key(AttributeBase):
pass
class Attr(AttributeBase):
"""Represents an DynamoDB item's attribute."""
def ne(self, value):
"""Creates a condition where the attribute is not equal to the value
:param value: The value that the attribute is not equal to.
"""
return NotEquals(self, value)
def is_in(self, value):
"""Creates a condition where the attribute is in the value,
:type value: list
:param value: The value that the attribute is in.
"""
return In(self, value)
def exists(self):
"""Creates a condition where the attribute exists."""
return AttributeExists(self)
def not_exists(self):
"""Creates a condition where the attribute does not exist."""
return AttributeNotExists(self)
def contains(self, value):
"""Creates a condition where the attribute contains the value.
:param value: The value the attribute contains.
"""
return Contains(self, value)
def size(self):
"""Creates a condition for the attribute size.
Note that another AttributeBase method must be called on the returned
size condition for it to be a valid DynamoDB condition.
"""
return Size(self)
def attribute_type(self, value):
"""Creates a condition for the attribute type.
:param value: The type of the attribute.
"""
return AttributeType(self, value)
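# Illustrative sketch (assumes a boto3 DynamoDB Table resource, which accepts
# these condition objects via KeyConditionExpression / FilterExpression):
#     from boto3.dynamodb.conditions import Key, Attr
#     table.query(
#         KeyConditionExpression=Key('id').eq(42),
#         FilterExpression=Attr('name').begins_with('J') & Attr('age').gte(21))
# Key conditions must be built from Key objects; Attr supports the richer set
# of methods defined above (ne, contains, size, attribute_type, ...).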
BuiltConditionExpression = namedtuple(
'BuiltConditionExpression',
['condition_expression', 'attribute_name_placeholders',
'attribute_value_placeholders']
)
class ConditionExpressionBuilder(object):
"""This class is used to build condition expressions with placeholders"""
def __init__(self):
self._name_count = 0
self._value_count = 0
self._name_placeholder = 'n'
self._value_placeholder = 'v'
def _get_name_placeholder(self):
return '#' + self._name_placeholder + str(self._name_count)
def _get_value_placeholder(self):
return ':' + self._value_placeholder + str(self._value_count)
def reset(self):
"""Resets the placeholder name and values"""
self._name_count = 0
self._value_count = 0
def build_expression(self, condition, is_key_condition=False):
"""Builds the condition expression and the dictionary of placeholders.
:type condition: ConditionBase
:param condition: A condition to be built into a condition expression
string with any necessary placeholders.
:type is_key_condition: Boolean
:param is_key_condition: True if the expression is for a
KeyConditionExpression. False otherwise.
:rtype: (string, dict, dict)
:returns: Will return a string representing the condition with
placeholders inserted where necessary, a dictionary of
placeholders for attribute names, and a dictionary of
placeholders for attribute values. Here is a sample return value:
('#n0 = :v0', {'#n0': 'myattribute'}, {':v0': 'myvalue'})
"""
if not isinstance(condition, ConditionBase):
raise DynamoDBNeedsConditionError(condition)
attribute_name_placeholders = {}
attribute_value_placeholders = {}
condition_expression = self._build_expression(
condition, attribute_name_placeholders,
attribute_value_placeholders, is_key_condition=is_key_condition)
return BuiltConditionExpression(
condition_expression=condition_expression,
attribute_name_placeholders=attribute_name_placeholders,
attribute_value_placeholders=attribute_value_placeholders
)
def _build_expression(self, condition, attribute_name_placeholders,
attribute_value_placeholders, is_key_condition):
expression_dict = condition.get_expression()
replaced_values = []
for value in expression_dict['values']:
# Build the necessary placeholders for that value.
# Placeholders are built for both attribute names and values.
replaced_value = self._build_expression_component(
value, attribute_name_placeholders,
attribute_value_placeholders, condition.has_grouped_values,
is_key_condition)
replaced_values.append(replaced_value)
# Fill out the expression using the operator and the
# values that have been replaced with placeholders.
return expression_dict['format'].format(
*replaced_values, operator=expression_dict['operator'])
def _build_expression_component(self, value, attribute_name_placeholders,
attribute_value_placeholders,
has_grouped_values, is_key_condition):
# Continue to recurse if the value is a ConditionBase in order
# to extract out all parts of the expression.
if isinstance(value, ConditionBase):
return self._build_expression(
value, attribute_name_placeholders,
attribute_value_placeholders, is_key_condition)
# If it is not a ConditionBase, we can recurse no further.
# So we check if it is an attribute and add placeholders for
# its name
elif isinstance(value, AttributeBase):
if is_key_condition and not isinstance(value, Key):
raise DynamoDBNeedsKeyConditionError(
'Attribute object %s is of type %s. '
'KeyConditionExpression only supports Attribute objects '
'of type Key' % (value.name, type(value)))
return self._build_name_placeholder(
value, attribute_name_placeholders)
# If it is anything else, we treat it as a value and thus placeholders
# are needed for the value.
else:
return self._build_value_placeholder(
value, attribute_value_placeholders, has_grouped_values)
def _build_name_placeholder(self, value, attribute_name_placeholders):
attribute_name = value.name
# Figure out which parts of the attribute name need replacement.
attribute_name_parts = ATTR_NAME_REGEX.findall(attribute_name)
# Add a temporary placeholder for each of these parts.
placeholder_format = ATTR_NAME_REGEX.sub('%s', attribute_name)
str_format_args = []
for part in attribute_name_parts:
name_placeholder = self._get_name_placeholder()
self._name_count += 1
str_format_args.append(name_placeholder)
# Add the placeholder and value to dictionary of name placeholders.
attribute_name_placeholders[name_placeholder] = part
# Replace the temporary placeholders with the designated placeholders.
return placeholder_format % tuple(str_format_args)
def _build_value_placeholder(self, value, attribute_value_placeholders,
has_grouped_values=False):
# If the values are grouped, we need to add a placeholder for
# each element inside of the actual value.
if has_grouped_values:
placeholder_list = []
for v in value:
value_placeholder = self._get_value_placeholder()
self._value_count += 1
placeholder_list.append(value_placeholder)
attribute_value_placeholders[value_placeholder] = v
# Assuming the values are grouped by parenthesis.
# IN is currently the only one that uses this, so it may need
# to be changed in the future.
return '(' + ', '.join(placeholder_list) + ')'
# Otherwise, treat the value as a single value that needs only
# one placeholder.
else:
value_placeholder = self._get_value_placeholder()
self._value_count += 1
attribute_value_placeholders[value_placeholder] = value
return value_placeholder
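# Minimal self-contained sketch (illustrative, not part of the library): shows
# how the classes above combine into a rendered expression plus placeholder
# dictionaries for attribute names and values.
if __name__ == '__main__':
    builder = ConditionExpressionBuilder()
    # A key condition built from a Key object.
    key_expr = builder.build_expression(Key('id').eq(42), is_key_condition=True)
    print(key_expr.condition_expression)          # '#n0 = :v0'
    print(key_expr.attribute_name_placeholders)   # {'#n0': 'id'}
    print(key_expr.attribute_value_placeholders)  # {':v0': 42}
    # Reset the counters and build a composite filter condition.
    builder.reset()
    filter_expr = builder.build_expression(
        Attr('name').begins_with('J') & Attr('age').gte(21))
    print(filter_expr.condition_expression)       # '(begins_with(#n0, :v0) AND #n1 >= :v1)'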
|
|
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
'''
Command line argument parser for neon deep learning library
This is a wrapper around the configargparse ArgumentParser class.
It adds in the default neon command line arguments and allows
additional arguments to be added using the argparse library
methods. Lower priority defaults can also be read from a configuration file
(specified by the -c command line argument).
'''
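# Typical usage sketch (an illustrative, assumption-laden example, not part of
# the parser itself): a training script passes its docstring as the parser
# description and lets the parser configure logging and the backend.
#
#     parser = NeonArgparser(__doc__)
#     args = parser.parse_args()
#     # args now holds backend, batch_size, epochs, etc., and the backend has
#     # been generated via gen_backend() unless gen_be=False was passed.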
import configargparse
import logging
from logging.handlers import RotatingFileHandler
import numpy as np
import os
import inspect
from neon import __version__ as neon_version
from neon.backends import gen_backend
from neon.backends.util.check_gpu import get_compute_capability, get_device_count
from neon.callbacks.callbacks import Callbacks
logger = logging.getLogger(__name__)
def extract_valid_args(args, func, startidx=0):
"""
Given a namespace of argparser args, extract those applicable to func.
Arguments:
args (Namespace): a namespace of args from argparse
func (Function): a function to inspect, to determine valid args
Returns:
dict of (arg, value) pairs from args that are valid for func
"""
func_args = inspect.getargspec(func).args[startidx:]
return dict((k, v) for k, v in vars(args).items() if k in func_args)
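# Hedged example: given parsed neon args, this keeps only the keyword
# arguments that a callable actually accepts, so a common pattern (assuming
# gen_backend imported above) would be:
#     be = gen_backend(**extract_valid_args(args, gen_backend))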
class NeonArgparser(configargparse.ArgumentParser):
"""
Setup the command line arg parser and parse the
arguments in sys.arg (or from configuration file). Use the parsed
options to configure the logging module.
Arguments:
desc (String) : Docstring from the calling function.
This will be used for the description of the command receiving the arguments.
"""
def __init__(self, *args, **kwargs):
self._PARSED = False
self.work_dir = os.path.join(os.path.expanduser('~'), 'nervana')
if 'default_config_files' not in kwargs:
kwargs['default_config_files'] = [os.path.join(self.work_dir,
'neon.cfg')]
if 'add_config_file_help' not in kwargs:
# turn off the auto-generated config help for config files since it
# references unsettable config options like --version
kwargs['add_config_file_help'] = False
super(NeonArgparser, self).__init__(*args, **kwargs)
# ensure that default values are displayed via --help
self.formatter_class = configargparse.ArgumentDefaultsHelpFormatter
self.setup_default_args()
def setup_default_args(self):
"""
Setup the default arguments used by neon
"""
self.add_argument('--version', action='version', version=neon_version)
self.add_argument('-c', '--config', is_config_file=True,
help='Read values for these arguments from the '
'configuration file specified here first.')
self.add_argument('-v', '--verbose', action='count', default=1,
help="verbosity level. Add multiple v's to "
"further increase verbosity")
# we store the negation of no_progress_bar in args.progress_bar during
# parsing
self.add_argument('--no_progress_bar',
action="store_true",
help="suppress running display of progress bar and "
"training loss")
# runtime specific options
rt_grp = self.add_argument_group('runtime')
rt_grp.add_argument('-w', '--data_dir',
default=os.path.join(self.work_dir, 'data'),
help='working directory in which to cache '
'downloaded and preprocessed datasets')
rt_grp.add_argument('-e', '--epochs', type=int, default=10,
help='number of complete passes over the dataset to run')
rt_grp.add_argument('-s', '--save_path', type=str,
help='file path to save model snapshots')
rt_grp.add_argument('--serialize', nargs='?', type=int,
default=0, const=1, metavar='N',
help='serialize model every N epochs')
rt_grp.add_argument('--model_file', help='load model from pkl file')
rt_grp.add_argument('-l', '--log', dest='logfile', nargs='?',
const=os.path.join(self.work_dir, 'neon_log.txt'),
help='log file')
rt_grp.add_argument('-o', '--output_file', default=None,
help='hdf5 data file for metrics computed during '
'the run, optional. Can be used by nvis for '
'visualization.')
rt_grp.add_argument('-eval', '--eval_freq', type=int, default=None,
help='frequency (in epochs) to test the eval set.')
rt_grp.add_argument('-H', '--history', type=int, default=1,
help='number of checkpoint files to retain')
be_grp = self.add_argument_group('backend')
be_grp.add_argument('-b', '--backend', choices=['cpu', 'gpu', 'mgpu', 'argon'],
default='gpu' if get_compute_capability() >= 3.0
else 'cpu',
help='backend type. Multi-GPU support is a premium '
'feature available exclusively through the '
'Nervana cloud. Please contact '
'info@nervanasys.com for details.')
be_grp.add_argument('-i', '--device_id', type=int, default=0,
help='gpu device id (only used with GPU backend)')
be_grp.add_argument('-m', '--max_devices', type=int, default=get_device_count(),
help='max number of GPUs (only used with mgpu backend)')
be_grp.add_argument('-r', '--rng_seed', type=int,
default=None, metavar='SEED',
help='random number generator seed')
be_grp.add_argument('-u', '--rounding',
const=True,
type=int,
nargs='?',
metavar='BITS',
default=False,
help='use stochastic rounding [will round to BITS number '
'of bits if specified]')
be_grp.add_argument('-d', '--datatype', choices=['f16', 'f32', 'f64'],
default='f32', metavar='default datatype',
help='default floating point '
'precision for backend [f64 for cpu only]')
be_grp.add_argument('-z', '--batch_size', type=int, default=128,
help='batch size')
be_grp.add_argument('--caffe', action='store_true',
help='match caffe when computing conv and pool layer output '
'sizes and dropout implementation')
be_grp.add_argument('--deterministic', action='store_true',
help='Use deterministic kernels where applicable')
return
def add_yaml_arg(self):
'''
Add the yaml file argument, this is needed for scripts that
parse the model config from yaml files
'''
# yaml configuration file
self.add_argument('yaml_file',
type=configargparse.FileType('r'),
help='neon model specification file')
def add_argument(self, *args, **kwargs):
'''
Method by which command line arguments are added to the parser. Passed
straight through to parent add_argument method.
'''
if self._PARSED:
logger.warn('Adding arguments after arguments were parsed - '
'may need to rerun parse_args')
# reset so warning only comes once
self._PARSED = False
super(NeonArgparser, self).add_argument(*args, **kwargs)
return
# we never use this alias from ConfigArgParse, but defining this here
# prevents documentation indent warnings
def add(self):
pass
# we never use this alias from ConfigArgParse, but defining this here
# prevents documentation indent warnings
def add_arg(self):
pass
def parse_args(self, gen_be=True):
'''
Parse the command line arguments and setup neon
runtime environment accordingly
Arguments:
gen_be (bool): if False, the arg parser will not
generate the backend
Returns:
namespace: contains the parsed arguments as attributes
'''
args = super(NeonArgparser, self).parse_args()
err_msg = None # used for relaying exception to logger
# set up the logging
# max thresh is 50 (critical only), min is 10 (debug or higher)
try:
log_thresh = max(10, 40 - args.verbose*10)
except (AttributeError, TypeError):
# if defaults are not set or -v is not given;
# for the latter we will get a TypeError
log_thresh = 30
args.log_thresh = log_thresh
# logging formatter
fmtr = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# get the parent logger for neon
main_logger = logging.getLogger('neon')
main_logger.setLevel(log_thresh)
# setup a console stderr log handler
stderrlog = logging.StreamHandler()
stderrlog.setFormatter(fmtr)
# expand any user directories in paths
for path in ['data_dir', 'save_path', 'model_file', 'output_file',
'logfile']:
if getattr(args, path):
setattr(args, path, os.path.expanduser(getattr(args, path)))
if args.logfile:
# add log to file as well
filelog = RotatingFileHandler(filename=args.logfile, mode='w',
maxBytes=10000000, backupCount=5)
filelog.setFormatter(fmtr)
filelog.setLevel(log_thresh)
main_logger.addHandler(filelog)
# if a log file is specified and progress bar displayed,
# log only errors to console.
if args.no_progress_bar:
stderrlog.setLevel(log_thresh)
else:
stderrlog.setLevel(logging.ERROR)
else:
stderrlog.setLevel(log_thresh)
# add this handler instead
main_logger.propagate = False
main_logger.addHandler(stderrlog)
# need to write out 'float' otherwise numpy
# interprets the number as bytes, not bits (f16 == 128 bits)
args.datatype = 'float' + args.datatype[1:]
args.datatype = np.dtype(args.datatype).type
# invert no_progress_bar meaning and store in args.progress_bar
args.progress_bar = not args.no_progress_bar
if args.backend == 'cpu' and args.rounding > 0:
err_msg = 'CPU backend does not support stochastic rounding'
logger.exception(err_msg)
raise NotImplementedError(err_msg)
# done up front to avoid losing data due to incorrect path
if args.save_path:
savedir = os.path.dirname(os.path.abspath(args.save_path))
if not os.access(savedir, os.R_OK | os.W_OK):
try:
os.makedirs(savedir)
except OSError:
err_msg = 'Can not create save_path %s' % (savedir)
if os.path.exists(args.save_path):
logger.warning('save file %s exists, attempting to overwrite' % args.save_path)
if not os.access(args.save_path, os.R_OK | os.W_OK):
err_msg = 'Can not write to save_path file %s' % args.save_path
if err_msg:
logger.exception(err_msg)
raise IOError(err_msg)
if (args.serialize > 0) and (args.save_path is None):
args.save_path = "neon_model.pkl"
logger.warn('No path given for model serialization, using default "%s"',
args.save_path)
if (args.save_path is not None) and (args.serialize == 0):
args.serialize = 1
logger.warn('No schedule given for model serialization, using default %d',
args.serialize)
if args.model_file:
err_msg = None
if not os.path.exists(args.model_file):
err_msg = 'Model file %s not present' % args.model_file
if not os.access(args.model_file, os.R_OK):
err_msg = 'No read access for model file %s' % args.model_file
if err_msg:
logger.exception(err_msg)
raise IOError(err_msg)
if args.caffe:
args.compat_mode = 'caffe'
else:
args.compat_mode = None
if args.deterministic:
logger.warn('--deterministic flag is deprecated. Specify random seed for '
'deterministic behavior.')
# extended parsers may need to generate backend after argparsing
if gen_be:
# generate the backend
gen_backend(backend=args.backend,
rng_seed=args.rng_seed,
device_id=args.device_id,
batch_size=args.batch_size,
datatype=args.datatype,
max_devices=args.max_devices,
compat_mode=args.compat_mode)
# display what command line / config options were set (and from where)
logger.info(self.format_values())
self._PARSED = True
self.args = args
args.callback_args = extract_valid_args(args, Callbacks.__init__, startidx=1)
return args
|
|
#!/usr/bin/env python
# -*- mode: python -*-
# Re test suite and benchmark suite v1.5
# The 3 possible outcomes for each pattern
[SUCCEED, FAIL, SYNTAX_ERROR] = range(3)
# Benchmark suite (needs expansion)
#
# The benchmark suite does not test correctness, just speed. The
# first element of each tuple is the regex pattern; the second is a
# string to match it against. The benchmarking code will embed the
# second string inside several sizes of padding, to test how regex
# matching performs on large strings.
benchmarks = [
# test common prefix
('Python|Perl', 'Perl'), # Alternation
('(Python|Perl)', 'Perl'), # Grouped alternation
('Python|Perl|Tcl', 'Perl'), # Alternation
('(Python|Perl|Tcl)', 'Perl'), # Grouped alternation
('(Python)\\1', 'PythonPython'), # Backreference
('([0a-z][a-z0-9]*,)+', 'a5,b7,c9,'), # Disable the fastmap optimization
('([a-z][a-z0-9]*,)+', 'a5,b7,c9,'), # A few sets
('Python', 'Python'), # Simple text literal
('.*Python', 'Python'), # Bad text literal
('.*Python.*', 'Python'), # Worse text literal
('.*(Python)', 'Python'), # Bad text literal with grouping
]
# Test suite (for verifying correctness)
#
# The test suite is a list of 5- or 3-tuples. The 5 parts of a
# complete tuple are:
# element 0: a string containing the pattern
# 1: the string to match against the pattern
# 2: the expected result (SUCCEED, FAIL, SYNTAX_ERROR)
# 3: a string that will be eval()'ed to produce a test string.
# This is an arbitrary Python expression; the available
# variables are "found" (the whole match), and "g1", "g2", ...
# up to "g99" contain the contents of each group, or the
# string 'None' if the group wasn't given a value, or the
# string 'Error' if the group index was out of range;
# also "groups", the return value of m.group() (a tuple).
# 4: The expected result of evaluating the expression.
# If the two don't match, an error is reported.
#
# If the regex isn't expected to work, the latter two elements can be omitted.
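# An illustrative complete 5-tuple (not from the suite below):
#     ('(a)(b)', 'ab', SUCCEED, 'g1+"-"+g2', 'a-b')
# compiles '(a)(b)', matches it against 'ab', evaluates 'g1+"-"+g2' with the
# captured groups (g1 == 'a', g2 == 'b'), and expects the result 'a-b'.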
tests = [
# Test ?P< and ?P= extensions
('(?P<foo_123', '', SYNTAX_ERROR), # Unterminated group identifier
('(?P<1>a)', '', SYNTAX_ERROR), # Begins with a digit
('(?P<!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
('(?P<foo!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
# Same tests, for the ?P= form
('(?P<foo_123>a)(?P=foo_123', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=1)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=!)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=foo_124', 'aa', SYNTAX_ERROR), # Backref to undefined group
('(?P<foo_123>a)', 'a', SUCCEED, 'g1', 'a'),
('(?P<foo_123>a)(?P=foo_123)', 'aa', SUCCEED, 'g1', 'a'),
# Test octal escapes
('\\1', 'a', SYNTAX_ERROR), # Backreference
('[\\1]', '\1', SUCCEED, 'found', '\1'), # Character
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# Test \0 is handled everywhere
(r'\0', '\0', SUCCEED, 'found', '\0'),
(r'[\0a]', '\0', SUCCEED, 'found', '\0'),
(r'[a\0]', '\0', SUCCEED, 'found', '\0'),
(r'[^a\0]', '\0', FAIL),
# Test various letter escapes
(r'\a[\b]\f\n\r\t\v', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
(r'[\a][\b][\f][\n][\r][\t][\v]', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
# NOTE: not an error under PCRE/PRE:
# (r'\u', '', SYNTAX_ERROR), # A Perl escape
(r'\c\e\g\h\i\j\k\m\o\p\q\y\z', 'ceghijkmopqyz', SUCCEED, 'found', 'ceghijkmopqyz'),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
# new \x semantics
(r'\x00ffffffffffffff', '\377', FAIL, 'found', chr(255)),
(r'\x00f', '\017', FAIL, 'found', chr(15)),
(r'\x00fe', '\376', FAIL, 'found', chr(254)),
# (r'\x00ffffffffffffff', '\377', SUCCEED, 'found', chr(255)),
# (r'\x00f', '\017', SUCCEED, 'found', chr(15)),
# (r'\x00fe', '\376', SUCCEED, 'found', chr(254)),
(r"^\w+=(\\[\000-\277]|[^\n\\])*", "SRC=eval.c g.c blah blah blah \\\\\n\tapes.c",
SUCCEED, 'found', "SRC=eval.c g.c blah blah blah \\\\"),
# Test that . only matches \n in DOTALL mode
('a.b', 'acb', SUCCEED, 'found', 'acb'),
('a.b', 'a\nb', FAIL),
('a.*b', 'acc\nccb', FAIL),
('a.{4,5}b', 'acc\nccb', FAIL),
('a.b', 'a\rb', SUCCEED, 'found', 'a\rb'),
('a.b(?s)', 'a\nb', SUCCEED, 'found', 'a\nb'),
('a.*(?s)b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.{4,5}b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
(')', '', SYNTAX_ERROR), # Unmatched right bracket
('', '', SUCCEED, 'found', ''), # Empty pattern
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found+"-"', '-'),
('$', 'abc', SUCCEED, 'found+"-"', '-'),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[\\-b]', 'a-', SUCCEED, 'found', 'a-'),
# NOTE: not an error under PCRE/PRE:
# ('a[b-]', 'a-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a\\', '-', SYNTAX_ERROR),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[\]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('\\ba\\b', 'a-', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a-', SUCCEED, '"-"', '-'),
('\\by\\b', 'xy', FAIL),
('\\by\\b', 'yz', FAIL),
('\\by\\b', 'xyz', FAIL),
('x\\b', 'xyz', FAIL),
('x\\B', 'xyz', SUCCEED, '"-"', '-'),
('\\Bz', 'xyz', SUCCEED, '"-"', '-'),
('z\\B', 'xyz', FAIL),
('\\Bx', 'xyz', FAIL),
('\\Ba\\B', 'a-', FAIL, '"-"', '-'),
('\\Ba\\B', '-a', FAIL, '"-"', '-'),
('\\Ba\\B', '-a-', FAIL, '"-"', '-'),
('\\By\\B', 'xy', FAIL),
('\\By\\B', 'yz', FAIL),
('\\By\\b', 'xy', SUCCEED, '"-"', '-'),
('\\by\\B', 'yz', SUCCEED, '"-"', '-'),
('\\By\\B', 'xyz', SUCCEED, '"-"', '-'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('$b', 'b', FAIL),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL, 'xg1y', 'xy'),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
('(a+).\\1$', 'aaaaa', SUCCEED, 'found+"-"+g1', 'aaaaa-aa'),
('^(a+).\\1$', 'aaaa', FAIL),
('(abc)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('([a-c]+)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)+\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a).+\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(a)ba*\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(aa|a)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a|aa)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a+)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('([abc]*)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)(b)c|ab', 'ab', SUCCEED, 'found+"-"+g1+"-"+g2', 'ab-None-None'),
('(a)+x', 'aaax', SUCCEED, 'found+"-"+g1', 'aaax-a'),
('([ac])+x', 'aacx', SUCCEED, 'found+"-"+g1', 'aacx-c'),
('([^/]*/)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', SUCCEED, 'found+"-"+g1', 'd:msgs/tdir/sub1/-tdir/'),
('([^.]*)\\.([^:]*):[T ]+(.*)', 'track1.title:TBlah blah blah', SUCCEED, 'found+"-"+g1+"-"+g2+"-"+g3', 'track1.title:TBlah blah blah-track1-title-Blah blah blah'),
('([^N]*N)+', 'abNNxyzN', SUCCEED, 'found+"-"+g1', 'abNNxyzN-xyzN'),
('([^N]*N)+', 'abNNxyz', SUCCEED, 'found+"-"+g1', 'abNN-N'),
('([abc]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'abcx-abc'),
('([abc]*)x', 'abc', FAIL),
('([xyz]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'x-'),
('(a)+b|aac', 'aac', SUCCEED, 'found+"-"+g1', 'aac-None'),
# Test symbolic groups
('(?P<i d>aaa)a', 'aaaa', SYNTAX_ERROR),
('(?P<id>aaa)a', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aaa'),
('(?P<id>aa)(?P=id)', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aa'),
('(?P<id>aa)(?P=xd)', 'aaaa', SYNTAX_ERROR),
# Test octal escapes/memory references
('\\1', 'a', SYNTAX_ERROR),
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# All tests from Perl
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{0,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab{1,}bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,3}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{3,4}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{4,5}bc', 'abbbbc', FAIL),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found', ''),
('$', 'abc', SUCCEED, 'found', ''),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-a]', '-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('*a', '-', SYNTAX_ERROR),
('(*)b', '-', SYNTAX_ERROR),
('$b', 'b', FAIL),
('a\\', '-', SYNTAX_ERROR),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a{1,}b{1,}c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a**', '-', SYNTAX_ERROR),
('a.+?c', 'abcabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){0,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){1,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
('(a+|b){0,1}', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('([abc])*d', 'abbbcd', SUCCEED, 'found+"-"+g1', 'abbbcd-c'),
('([abc])*bcd', 'abcd', SUCCEED, 'found+"-"+g1', 'abcd-a'),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('((((((((((a))))))))))', 'a', SUCCEED, 'g10', 'a'),
('((((((((((a))))))))))\\10', 'aa', SUCCEED, 'found', 'aa'),
# Python does not have the same rules for \\41 so this is a syntax error
# ('((((((((((a))))))))))\\41', 'aa', FAIL),
# ('((((((((((a))))))))))\\41', 'a!', SUCCEED, 'found', 'a!'),
('((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(?i)((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('(?i)abc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'XBC', FAIL),
('(?i)abc', 'AXC', FAIL),
('(?i)abc', 'ABX', FAIL),
('(?i)abc', 'XABCY', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'ABABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab*?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{0,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab+?bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab+bc', 'ABC', FAIL),
('(?i)ab+bc', 'ABQ', FAIL),
('(?i)ab{1,}bc', 'ABQ', FAIL),
('(?i)ab+bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,3}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{3,4}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{4,5}?bc', 'ABBBBC', FAIL),
('(?i)ab??bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab??bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab??bc', 'ABBBBC', FAIL),
('(?i)ab??c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABCC', FAIL),
('(?i)^abc', 'ABCC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'AABC', FAIL),
('(?i)abc$', 'AABC', SUCCEED, 'found', 'ABC'),
('(?i)^', 'ABC', SUCCEED, 'found', ''),
('(?i)$', 'ABC', SUCCEED, 'found', ''),
('(?i)a.c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)a.c', 'AXC', SUCCEED, 'found', 'AXC'),
('(?i)a.*?c', 'AXYZC', SUCCEED, 'found', 'AXYZC'),
('(?i)a.*c', 'AXYZD', FAIL),
('(?i)a[bc]d', 'ABC', FAIL),
('(?i)a[bc]d', 'ABD', SUCCEED, 'found', 'ABD'),
('(?i)a[b-d]e', 'ABD', FAIL),
('(?i)a[b-d]e', 'ACE', SUCCEED, 'found', 'ACE'),
('(?i)a[b-d]', 'AAC', SUCCEED, 'found', 'AC'),
('(?i)a[-b]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-a]', '-', SYNTAX_ERROR),
('(?i)a[]b', '-', SYNTAX_ERROR),
('(?i)a[', '-', SYNTAX_ERROR),
('(?i)a]', 'A]', SUCCEED, 'found', 'A]'),
('(?i)a[]]b', 'A]B', SUCCEED, 'found', 'A]B'),
('(?i)a[^bc]d', 'AED', SUCCEED, 'found', 'AED'),
('(?i)a[^bc]d', 'ABD', FAIL),
('(?i)a[^-b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)a[^-b]c', 'A-C', FAIL),
('(?i)a[^]b]c', 'A]C', FAIL),
('(?i)a[^]b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)ab|cd', 'ABC', SUCCEED, 'found', 'AB'),
('(?i)ab|cd', 'ABCD', SUCCEED, 'found', 'AB'),
('(?i)()ef', 'DEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)*a', '-', SYNTAX_ERROR),
('(?i)(*)b', '-', SYNTAX_ERROR),
('(?i)$b', 'B', FAIL),
('(?i)a\\', '-', SYNTAX_ERROR),
('(?i)a\\(b', 'A(B', SUCCEED, 'found+"-"+g1', 'A(B-Error'),
('(?i)a\\(*b', 'AB', SUCCEED, 'found', 'AB'),
('(?i)a\\(*b', 'A((B', SUCCEED, 'found', 'A((B'),
('(?i)a\\\\b', 'A\\B', SUCCEED, 'found', 'A\\B'),
('(?i)abc)', '-', SYNTAX_ERROR),
('(?i)(abc', '-', SYNTAX_ERROR),
('(?i)((a))', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'A-A-A'),
('(?i)(a)b(c)', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABC-A-C'),
('(?i)a+b+c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a{1,}b{1,}c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a**', '-', SYNTAX_ERROR),
('(?i)a.+?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.*?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.{0,5}?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)(a+|b)*', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){0,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)+', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){1,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)?', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}?', 'AB', SUCCEED, 'found+"-"+g1', '-None'),
('(?i))(', '-', SYNTAX_ERROR),
('(?i)[^ab]*', 'CDE', SUCCEED, 'found', 'CDE'),
('(?i)abc', '', FAIL),
('(?i)a*', '', SUCCEED, 'found', ''),
('(?i)([abc])*d', 'ABBBCD', SUCCEED, 'found+"-"+g1', 'ABBBCD-C'),
('(?i)([abc])*bcd', 'ABCD', SUCCEED, 'found+"-"+g1', 'ABCD-A'),
('(?i)a|b|c|d|e', 'E', SUCCEED, 'found', 'E'),
('(?i)(a|b|c|d|e)f', 'EF', SUCCEED, 'found+"-"+g1', 'EF-E'),
('(?i)abcd*efg', 'ABCDEFG', SUCCEED, 'found', 'ABCDEFG'),
('(?i)ab*', 'XABYABBBZ', SUCCEED, 'found', 'AB'),
('(?i)ab*', 'XAYABBBZ', SUCCEED, 'found', 'A'),
('(?i)(ab|cd)e', 'ABCDE', SUCCEED, 'found+"-"+g1', 'CDE-CD'),
('(?i)[abhgefdc]ij', 'HIJ', SUCCEED, 'found', 'HIJ'),
('(?i)^(ab|cd)e', 'ABCDE', FAIL),
('(?i)(abc|)ef', 'ABCDEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)(a|b)c*d', 'ABCD', SUCCEED, 'found+"-"+g1', 'BCD-B'),
('(?i)(ab|ab*)bc', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-A'),
('(?i)a([bc]*)c*', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-BC'),
('(?i)a([bc]*)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]+)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]*)(c+d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-B-CD'),
('(?i)a[bcd]*dcdcde', 'ADCDCDE', SUCCEED, 'found', 'ADCDCDE'),
('(?i)a[bcd]+dcdcde', 'ADCDCDE', FAIL),
('(?i)(ab|a)b*c', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-AB'),
('(?i)((a)(b)c)(d)', 'ABCD', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'ABC-A-B-D'),
('(?i)[a-zA-Z_][a-zA-Z0-9_]*', 'ALPHA', SUCCEED, 'found', 'ALPHA'),
('(?i)^a(bc+|b[eh])g|.h$', 'ABH', SUCCEED, 'found+"-"+g1', 'BH-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'IJ', SUCCEED, 'found+"-"+g1+"-"+g2', 'IJ-IJ-J'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFG', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'BCDD', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'REFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)((((((((((a))))))))))', 'A', SUCCEED, 'g10', 'A'),
('(?i)((((((((((a))))))))))\\10', 'AA', SUCCEED, 'found', 'AA'),
#('(?i)((((((((((a))))))))))\\41', 'AA', FAIL),
#('(?i)((((((((((a))))))))))\\41', 'A!', SUCCEED, 'found', 'A!'),
('(?i)(((((((((a)))))))))', 'A', SUCCEED, 'found', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))', 'A', SUCCEED, 'g1', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))', 'C', SUCCEED, 'g1', 'C'),
('(?i)multiple words of text', 'UH-UH', FAIL),
('(?i)multiple words', 'MULTIPLE WORDS, YEAH', SUCCEED, 'found', 'MULTIPLE WORDS'),
('(?i)(.*)c(.*)', 'ABCDE', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCDE-AB-DE'),
('(?i)\\((.*), (.*)\\)', '(A, B)', SUCCEED, 'g2+"-"+g1', 'B-A'),
('(?i)[k]', 'AB', FAIL),
# ('(?i)abcd', 'ABCD', SUCCEED, 'found+"-"+\\found+"-"+\\\\found', 'ABCD-$&-\\ABCD'),
# ('(?i)a(bc)d', 'ABCD', SUCCEED, 'g1+"-"+\\g1+"-"+\\\\g1', 'BC-$1-\\BC'),
('(?i)a[-]?c', 'AC', SUCCEED, 'found', 'AC'),
('(?i)(abc)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('(?i)([a-c]*)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('a(?!b).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=c|d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?:b|c|d)(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)*(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)+?(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|(c|e){1,2}?|d)+?(.)', 'ace', SUCCEED, 'g1 + g2', 'ce'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
# Comments using the (?#...) syntax
('w(?# comment', 'w', SYNTAX_ERROR),
('w(?# comment 1)xy(?# comment 2)z', 'wxyz', SUCCEED, 'found', 'wxyz'),
# Check odd placement of embedded pattern modifiers
# not an error under PCRE/PRE:
('w(?i)', 'W', SUCCEED, 'found', 'W'),
# ('w(?i)', 'W', SYNTAX_ERROR),
# Comments using the x embedded pattern modifier
("""(?x)w# comment 1
x y
# comment 2
z""", 'wxyz', SUCCEED, 'found', 'wxyz'),
# using the m embedded pattern modifier
('^abc', """jkl
abc
xyz""", FAIL),
('(?m)^abc', """jkl
abc
xyz""", SUCCEED, 'found', 'abc'),
('(?m)abc$', """jkl
xyzabc
123""", SUCCEED, 'found', 'abc'),
# using the s embedded pattern modifier
('a.b', 'a\nb', FAIL),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
# test \w, etc. both inside and outside character classes
('\\w+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('[\\w]+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('\\D+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\D]+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\da-fA-F]+', '123abc', SUCCEED, 'found', '123abc'),
# not an error under PCRE/PRE:
# ('[\\d-x]', '-', SYNTAX_ERROR),
(r'([\s]*)([\S]*)([\s]*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'(\s*)(\S*)(\s*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
# new \x semantics
(r'\x00ff', '\377', FAIL),
# (r'\x00ff', '\377', SUCCEED, 'found', chr(255)),
(r'\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
('\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
(r'\t\n\v\r\f\a', '\t\n\v\r\f\a', SUCCEED, 'found', chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)),
(r'[\t][\n][\v][\r][\f][\b]', '\t\n\v\r\f\b', SUCCEED, 'found', '\t\n\v\r\f\b'),
#
# post-1.5.2 additions
# xmllib problem
(r'(([a-z]+):)?([a-z]+)$', 'smil', SUCCEED, 'g1+"-"+g2+"-"+g3', 'None-None-smil'),
# bug 110866: reference to undefined group
(r'((.)\1+)', '', SYNTAX_ERROR),
# bug 111869: search (PRE/PCRE fails on this one, SRE doesn't)
(r'.*d', 'abc\nabd', SUCCEED, 'found', 'abd'),
# bug 112468: various expected syntax errors
(r'(', '', SYNTAX_ERROR),
(r'[\41]', '!', SUCCEED, 'found', '!'),
# bug 114033: nothing to repeat
(r'(x?)?', 'x', SUCCEED, 'found', 'x'),
# bug 115040: rescan if flags are modified inside pattern
(r' (?x)foo ', 'foo', SUCCEED, 'found', 'foo'),
# bug 115618: negative lookahead
(r'(?<!abc)(d.f)', 'abcdefdof', SUCCEED, 'found', 'dof'),
# bug 116251: character class bug
(r'[\w-]+', 'laser_beam', SUCCEED, 'found', 'laser_beam'),
# bug 123769+127259: non-greedy backtracking bug
(r'.*?\S *:', 'xx:', SUCCEED, 'found', 'xx:'),
(r'a[ ]*?\ (\d+).*', 'a 10', SUCCEED, 'found', 'a 10'),
(r'a[ ]*?\ (\d+).*', 'a 10', SUCCEED, 'found', 'a 10'),
# bug 127259: \Z shouldn't depend on multiline mode
(r'(?ms).*?x\s*\Z(.*)','xx\nx\n', SUCCEED, 'g1', ''),
# bug 128899: uppercase literals under the ignorecase flag
(r'(?i)M+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)m+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)[M]+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)[m]+', 'MMM', SUCCEED, 'found', 'MMM'),
# bug 130748: ^* should be an error (nothing to repeat)
(r'^*', '', SYNTAX_ERROR),
# bug 133283: minimizing repeat problem
(r'"(?:\\"|[^"])*?"', r'"\""', SUCCEED, 'found', r'"\""'),
# bug 477728: minimizing repeat problem
(r'^.*?$', 'one\ntwo\nthree\n', FAIL),
# bug 483789: minimizing repeat problem
(r'a[^>]*?b', 'a>b', FAIL),
# bug 490573: minimizing repeat problem
(r'^a*?$', 'foo', FAIL),
]
try:
u = eval("u'\N{LATIN CAPITAL LETTER A WITH DIAERESIS}'")
except SyntaxError:
pass
else:
tests.extend([
# bug 410271: \b broken under locales
(r'\b.\b', 'a', SUCCEED, 'found', 'a'),
(r'(?u)\b.\b', u, SUCCEED, 'found', u),
])
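# Illustrative sketch (an assumption, not part of the original test data):
# one way the (pattern, string, outcome[, expr, expected]) tuples above could
# be driven through the `re` module. The name `run_re_tests` is hypothetical;
# the real harness maps unmatched groups to the string "None" and
# out-of-range groups to "Error", which is why those literals appear in the
# expected values above.
import re

def run_re_tests(test_cases):
    for t in test_cases:
        pattern, s, outcome = t[0], t[1], t[2]
        try:
            obj = re.compile(pattern)
        except re.error:
            if outcome != SYNTAX_ERROR:
                print('unexpected syntax error: %r' % pattern)
            continue
        result = obj.search(s)
        if result is None:
            if outcome != FAIL:
                print('unexpected failure: %r' % pattern)
            continue
        if outcome == SUCCEED and len(t) == 5:
            expr, expected = t[3], t[4]
            vardict = {'found': result.group(0)}
            for i in range(1, 11):
                try:
                    gi = result.group(i)
                    if gi is None:
                        gi = "None"
                except IndexError:
                    gi = "Error"
                vardict['g%d' % i] = gi
            if eval(expr, vardict) != expected:
                print('mismatch for %r: %r' % (pattern, expr))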
|
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
# WAVELET: Torrence and Compo, translated from Matlab to Python
# author: Mabel Calim Costa
# INPE
# 23/01/2013
# Based on: Torrence and Compo
# data from http://paos.colorado.edu/research/wavelets/software.html
import numpy as np
import pylab
from pylab import *
import matplotlib.pyplot as plt
import math
import os
import sys
import netCDF4
def load_nc(file, var, dt, date1):
"""
    Opens a netCDF file and reads one variable, returning the data and a time
    vector built from dt and date1.
"""
f = netCDF4.Dataset(file, 'r+')
data = f.variables[var][:]
n = len(data)
time = np.arange(n) * dt + date1
f.close()
return data, time
def load_txt(archive, dt, date1):
"""
    Opens and reads a .txt file with data only (without dates).
    :Parameters:
     | **archive:** filename of the .txt file
| **dt:** A number. Time-step of the vector. Example: Hourly, daily, monthly, etc.
| **date1:** A number. The initial time of the data. Example: 1985.
:Returns:
| **data:** array_like
| **time:** array_like
:Notes:
    | This function looks in the package's data/txt directory, so any .txt file you want to load should be placed in the following folder:
::
/lib/wavelet/data/txt
Example
::
>> dt = 0.25
>> date1 = 1871
# Test data = sst_nino3.dat is already in the package!
>> data, time = load_txt('sst_nino3.dat', dt, date1)
"""
filename = os.path.join(sys.prefix, 'lib', 'python' + sys.version[
:3], 'site-packages', 'wavelet', 'lib',
'wavelet', 'data', 'txt', archive)
    if not os.path.isfile(filename):
        if not os.path.isfile(archive):
            raise IOError(
                'File not found either here {0} or here {1}'.format(
                    filename, archive))
        filename = archive
data = np.loadtxt(filename)
n = len(data)
time = np.arange(n) * dt + date1
return data, time
def normalize(data):
"""
Normalize by Standard Score - mean = 0 ; Variance = 1.
:Parameters:
| **Data, the loaded data.**
:Returns:
| **normalized_data**
:Notes:
    | You can skip this function if the normalization is not necessary (e.g. EOF data).
"""
variance = np.var(data)
data = (data - np.mean(data)) / (np.sqrt(variance))
return data
def cwt(data, dt=1, pad=1, dj=0.25, s0=2, j1=7/0.25, lag1=0.72, param=6, mother='Morlet', name='Name of Time series'):
"""
Continuous wavelet transform from data. Wavelet params can be modified as you wish.
:Parameters:
    | **Data:** array_like. Raw or normalized data.
| **pad:** 0 or 1 Pad the time series with zeroes to next pow of two length (recommended).
| **dt:** Time-step of the vector. Example: Hourly, daily, monthly, etc...
| **dj:** Divide octave in sub-octaves. If dj = 0.25 this will do 4 sub-octaves per octave.
    | **s0:** The smallest scale of the wavelet. For quarterly data (dt = 0.25), s0 = 2*dt starts at a scale of 6 months.
    | **j1:** The number of scales minus one; scales range upward from s0 in powers of two, with dj sub-octaves each.
    | **lag1:** Lag-1 autocorrelation for the red-noise background.
    | **param:** The mother wavelet nondimensional time-parameter; depends on the wavelet. For wavelets other than Morlet, see Torrence and Compo 1998.
    | **mother:** string. The mother wavelet function. Can be 'Morlet', 'PAUL', 'DOG'.
| **name:** Name of Time Series or your Plot.
:Returns:
| **result:** as dict. Returns all parameters for plot
Example
::
>> dt = 0.25
>> date1 = 1871
# Test data = sst_nino3.dat is already in the package!
    >> data, time = load_txt('sst_nino3.dat', dt, date1)
    # Normalize to zero mean and unit variance
    >> data_norm = normalize(data)
    # Continuous wavelet transform
    >> result = cwt(data_norm, dt=0.25, pad=1, dj=0.25, s0=2*0.25, j1=7/0.25, lag1=0.72, param=6, mother='Morlet')
"""
import lib_wavelet
variance = np.var(data)
n = len(data)
# Wavelet transform
ondaleta, wave, period, scale, coi, f = lib_wavelet.wavelet(
data, dt, param, dj, s0, j1, mother)
# wave = np.array(wave)
power = (np.abs(wave) ** 2)
# Significance levels: (variance=1 for the normalized SST)
signif, fft_theor = lib_wavelet.wave_signif(
1.0, dt, scale, 0, lag1, 0.95, -1, mother, param)
ones = np.ones((len(signif), n)) # expand signif --> ones (J+1)x(n)
sig95 = [s * ones[1] for s in signif] # vstack signif concatenate
sig95 = power / sig95 # where ratio > 1, power is significant
# Global wavelet spectrum & significance levels:
global_ws = variance * (np.sum(power.conj().transpose(), axis=0) / n)
dof = [n - s for s in scale]
"""CAUTION - DEFAULT VALUES """
global_signif, fft_theor = lib_wavelet.wave_signif(
variance, dt, scale, 1, lag1, 0.95, dof, mother, param)
# Daughter wavelet
    half = int(np.ceil(n / 2.))  # integer index for slicing
    joint_wavelet = np.concatenate(
        (np.fft.ifft(ondaleta)[half:], np.fft.ifft(ondaleta)[half:][::-1]))
    imag_wavelet = np.concatenate(
        (np.fft.ifft(ondaleta).imag[half:], np.fft.ifft(ondaleta).imag[half:][::-1]))
nw = np.size(joint_wavelet) # daughter's number of points
# admissibility condition
mean_wavelet = mean(joint_wavelet.real)
mean_wavelet = np.ones(nw) * mean_wavelet
result = {'ondaleta': ondaleta, 'wave': wave, 'period': period,
'scale': scale, 'coi': coi, 'power': power, 'sig95': sig95,
'global_ws': global_ws, 'global_signif': global_signif,
'joint_wavelet': joint_wavelet, 'imag_wavelet': imag_wavelet,
'nw': nw, 'mean_wavelet': mean_wavelet, 'dj': dj, 'j1': j1,
'dt': dt, 'fft': f, 'mother': mother, 'data': data, 'name': name}
return result
# result = cwt(data_norm,0.25,1,0.25,2*0.25,7/0.25,0.72,6,'Morlet')
def fft(data):
"""FFT spectrum
"""
n = len(data)
X = np.fft.fft(data)
sxx = ((X * np.conj(X)) / (n))
    half = int(np.ceil(n / 2.))  # integer index for slicing
    f = -np.fft.fftfreq(n)[half:]
    sxx = np.abs(sxx)
    sxx = sxx[half:]
return f, sxx
# ---------------------------
# Ploting
# ---------------------------
def levels(result, dtmin):
"""
Power levels
"""
dtmax = result['power'].max()
lev = []
for i in range(int(log2(dtmax / dtmin))):
dtmin = dtmin * 2
lev.append(dtmin)
return lev
def wavelet_plot(var, time, data, dtmin, result):
"""
PLOT WAVELET TRANSFORM
    var = title of the data series
    time = time vector returned by the load function
    data = series returned by the normalize function
    dtmin = smallest power level for the contour plot (levels double from it)
    result = dict returned by the cwt function
"""
from numpy import log2
import numpy as np
import wavetest
import matplotlib
import matplotlib.gridspec as gridspec
# frequency limit
# print result['period']
# lim = np.where(result['period'] == result['period'][-1]/2)[0][0]
"""Plot time series """
fig = plt.figure(figsize=(15, 10), dpi=300)
gs1 = gridspec.GridSpec(4, 3)
gs1.update(left=0.05, right=0.7, wspace=0.5, hspace=0)
ax1 = plt.subplot(gs1[0, :])
ax1 = pylab.gca()
ax1.xaxis.set_visible(False)
plt.setp(ax1.get_xticklabels(), visible=False)
ax2 = plt.subplot(gs1[1:4, :])#, axisbg='#C0C0C0')
gs2 = gridspec.GridSpec(4, 1)
gs2.update(left=0.7, right=0.98, hspace=0)
ax5 = plt.subplot(gs2[1:4, 0], sharey=ax2)
plt.setp(ax5.get_yticklabels(), visible=False)
gs3 = gridspec.GridSpec(6, 1)
gs3.update(left=0.77, top=0.86, right=0.98, hspace=0.6, wspace=0.01)
ax3 = plt.subplot(gs3[0, 0])
# ----------------------------------------------------------------------------------------------------------------#
ax1.plot(time, data)
ax1.axis('tight')
ax1.set_ylabel('SP [mV]', fontsize=15)
ax1.set_title('%s' % var, fontsize=17)
ax1.yaxis.set_major_locator(MaxNLocator(prune='lower'))
ax1.grid(True)
ax1.xaxis.set_visible(False)
# ----------------------------------------------------------------------------------------------------------------#
    ax3.plot(range(-result['nw'] // 2, result['nw'] // 2),
             result['joint_wavelet'], 'k', label='Real part')
    ax3.plot(range(-result['nw'] // 2, result['nw'] // 2),
             result['imag_wavelet'], '--k', label='Imag part')
    ax3.plot(range(-result['nw'] // 2, result['nw'] // 2),
             result['mean_wavelet'], 'g', label='Mean')
# ax3.axis('tight')
ax3.set_xlim(-40, 40)
# ax3.set_ylim(-0.3,0.3)
# ax3.set_ylim([np.min(result['joint_wavelet']),np.max(result['joint_wavelet'])])
ax3.set_xlabel('Time', fontsize=10)
ax3.set_ylabel('Amplitude', fontsize=10)
ax3.set_title('$\psi$ (t/s) {0} in time domain'.format(result['mother']))
# ----------------------------------------------------------------------------------------------------------------#
# ax4.plot(result['ondaleta'],'k')
# ax4.set_xlabel('Frequency', fontsize=10)
# ax4.set_ylabel('Amplitude', fontsize=10)
# ax4.set_title('$\psi^-$ Frequency domain', fontsize=13)
# ----------------------------------------------------------------------------------------------------------------#
""" Contour plot wavelet power spectrum """
lev = wavetest.levels(result, dtmin)
pc = ax2.contourf(
time, np.log2(result['period']),
np.log2(result['power']), np.log2(lev))
# 95% significance contour, levels at -99 (fake) and 1 (95% signif)
pc2 = ax2.contour(
time, np.log2(result['period']), result['sig95'],
[-99, 1], linewidths=2)
ax2.plot(time, np.log2(result['coi']), 'k')
    # cone of influence: anything "below" it is dubious
ax2.fill_between(time, np.log2(result['coi']), int(
np.log2(result['period'][-1]) + 1), alpha=0.5, hatch='/')
position = fig.add_axes([0.07, 0.07, 0.6, 0.01])
cbar = plt.colorbar(pc, cax=position, orientation='horizontal')
cbar.set_label('Power')
yt = range(int(np.log2(result['period'][0])), int(
np.log2(result['period'][-1]) + 1)) # create the vector of periods
Yticks = [float(math.pow(2, p)) for p in yt] # make 2^periods
# Yticks = [int(i) for i in Yticks]
ax2.set_yticks(yt)
ax2.set_yticklabels(Yticks)
ax2.set_ylim(ymin=(np.log2(np.min(result['period']))), ymax=(
np.log2(np.max(result['period']))))
ax2.set_ylim(ax2.get_ylim()[::-1])
ax2.set_xlabel('Time', fontsize=12)
ax2.set_ylabel('Period', fontsize=12)
ax2.axhline(y=10.5, xmin=0, xmax=1, linewidth=2, color='k')
ax2.axhline(y=13.3, xmin=0, xmax=1, linewidth=2, color='k')
# ----------------------------------------------------------------------------------------------------------------#
""" Plot global wavelet spectrum """
f, sxx = wavetest.fft(data)
ax5.plot(
sxx, np.log2(1 / f * result['dt']), 'gray', label='Fourier spectrum')
ax5.plot(result['global_ws'], np.log2(
result['period']), 'b', label='Wavelet spectrum')
ax5.plot(result['global_signif'], np.log2(
result['period']), 'r--', label='95% confidence spectrum')
ax5.legend(loc=0)
ax5.set_xlim(0, 1.25 * np.max(result['global_ws']))
ax5.set_xlabel('Power', fontsize=10)
ax5.set_title('Global Wavelet Spectrum', fontsize=12)
# save fig
plt.savefig('%s.png' % var, dpi=300)
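# ---------------------------
# Illustrative end-to-end usage sketch (an assumption, not part of the
# original script): load the packaged NINO3 SST series, normalize it, run the
# continuous wavelet transform and plot it, using only the functions defined
# above. The data file name comes from the docstring examples; the dtmin
# contour floor is an illustrative value.
if __name__ == '__main__':
    dt = 0.25      # quarterly sampling (years)
    date1 = 1871   # first year of the series
    data, time = load_txt('sst_nino3.dat', dt, date1)
    data_norm = normalize(data)
    result = cwt(data_norm, dt=dt, pad=1, dj=0.25, s0=2 * dt, j1=7 / 0.25,
                 lag1=0.72, param=6, mother='Morlet', name='NINO3 SST')
    # dtmin = smallest contour level for the power plot (illustrative value)
    wavelet_plot('NINO3 SST', time, data_norm, 0.03125, result)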
|
|
"""Functions to construct sparse matrices
"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['spdiags', 'eye', 'identity', 'kron', 'kronsum',
'hstack', 'vstack', 'bmat', 'rand', 'random', 'diags', 'block_diag']
import numpy as np
from scipy._lib._numpy_compat import get_randint
from scipy._lib.six import xrange
from .sputils import upcast, get_index_dtype, isscalarlike
from .csr import csr_matrix
from .csc import csc_matrix
from .bsr import bsr_matrix
from .coo import coo_matrix
from .dia import dia_matrix
from .base import issparse
def spdiags(data, diags, m, n, format=None):
"""
Return a sparse matrix from diagonals.
Parameters
----------
data : array_like
matrix diagonals stored row-wise
diags : diagonals to set
- k = 0 the main diagonal
- k > 0 the k-th upper diagonal
- k < 0 the k-th lower diagonal
m, n : int
shape of the result
format : str, optional
Format of the result. By default (format=None) an appropriate sparse
matrix format is returned. This choice is subject to change.
See Also
--------
diags : more convenient form of this function
dia_matrix : the sparse DIAgonal format.
Examples
--------
>>> from scipy.sparse import spdiags
>>> data = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
>>> diags = np.array([0, -1, 2])
>>> spdiags(data, diags, 4, 4).toarray()
array([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
"""
return dia_matrix((data, diags), shape=(m,n)).asformat(format)
def diags(diagonals, offsets=0, shape=None, format=None, dtype=None):
"""
Construct a sparse matrix from diagonals.
Parameters
----------
diagonals : sequence of array_like
Sequence of arrays containing the matrix diagonals,
corresponding to `offsets`.
offsets : sequence of int or an int, optional
Diagonals to set:
- k = 0 the main diagonal (default)
- k > 0 the kth upper diagonal
- k < 0 the kth lower diagonal
shape : tuple of int, optional
Shape of the result. If omitted, a square matrix large enough
to contain the diagonals is returned.
format : {"dia", "csr", "csc", "lil", ...}, optional
Matrix format of the result. By default (format=None) an
appropriate sparse matrix format is returned. This choice is
subject to change.
dtype : dtype, optional
Data type of the matrix.
See Also
--------
spdiags : construct matrix from diagonals
Notes
-----
This function differs from `spdiags` in the way it handles
off-diagonals.
The result from `diags` is the sparse equivalent of::
np.diag(diagonals[0], offsets[0])
+ ...
+ np.diag(diagonals[k], offsets[k])
Repeated diagonal offsets are disallowed.
.. versionadded:: 0.11
Examples
--------
>>> from scipy.sparse import diags
>>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]]
>>> diags(diagonals, [0, -1, 2]).toarray()
array([[1, 0, 1, 0],
[1, 2, 0, 2],
[0, 2, 3, 0],
[0, 0, 3, 4]])
Broadcasting of scalars is supported (but shape needs to be
specified):
>>> diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)).toarray()
array([[-2., 1., 0., 0.],
[ 1., -2., 1., 0.],
[ 0., 1., -2., 1.],
[ 0., 0., 1., -2.]])
If only one diagonal is wanted (as in `numpy.diag`), the following
works as well:
>>> diags([1, 2, 3], 1).toarray()
array([[ 0., 1., 0., 0.],
[ 0., 0., 2., 0.],
[ 0., 0., 0., 3.],
[ 0., 0., 0., 0.]])
"""
# if offsets is not a sequence, assume that there's only one diagonal
if isscalarlike(offsets):
# now check that there's actually only one diagonal
if len(diagonals) == 0 or isscalarlike(diagonals[0]):
diagonals = [np.atleast_1d(diagonals)]
else:
raise ValueError("Different number of diagonals and offsets.")
else:
diagonals = list(map(np.atleast_1d, diagonals))
offsets = np.atleast_1d(offsets)
# Basic check
if len(diagonals) != len(offsets):
raise ValueError("Different number of diagonals and offsets.")
# Determine shape, if omitted
if shape is None:
m = len(diagonals[0]) + abs(int(offsets[0]))
shape = (m, m)
# Determine data type, if omitted
if dtype is None:
dtype = np.common_type(*diagonals)
# Construct data array
m, n = shape
M = max([min(m + offset, n - offset) + max(0, offset)
for offset in offsets])
M = max(0, M)
data_arr = np.zeros((len(offsets), M), dtype=dtype)
K = min(m, n)
for j, diagonal in enumerate(diagonals):
offset = offsets[j]
k = max(0, offset)
length = min(m + offset, n - offset, K)
if length < 0:
raise ValueError("Offset %d (index %d) out of bounds" % (offset, j))
try:
data_arr[j, k:k+length] = diagonal[...,:length]
except ValueError:
if len(diagonal) != length and len(diagonal) != 1:
raise ValueError(
"Diagonal length (index %d: %d at offset %d) does not "
"agree with matrix size (%d, %d)." % (
j, len(diagonal), offset, m, n))
raise
return dia_matrix((data_arr, offsets), shape=(m, n)).asformat(format)
def identity(n, dtype='d', format=None):
"""Identity matrix in sparse format
Returns an identity matrix with shape (n,n) using a given
sparse format and dtype.
Parameters
----------
n : int
Shape of the identity matrix.
dtype : dtype, optional
Data type of the matrix
format : str, optional
Sparse format of the result, e.g., format="csr", etc.
Examples
--------
>>> from scipy.sparse import identity
>>> identity(3).toarray()
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> identity(3, dtype='int8', format='dia')
<3x3 sparse matrix of type '<class 'numpy.int8'>'
with 3 stored elements (1 diagonals) in DIAgonal format>
"""
return eye(n, n, dtype=dtype, format=format)
def eye(m, n=None, k=0, dtype=float, format=None):
"""Sparse matrix with ones on diagonal
Returns a sparse (m x n) matrix where the kth diagonal
is all ones and everything else is zeros.
Parameters
----------
m : int
Number of rows in the matrix.
n : int, optional
Number of columns. Default: `m`.
k : int, optional
Diagonal to place ones on. Default: 0 (main diagonal).
dtype : dtype, optional
Data type of the matrix.
format : str, optional
Sparse format of the result, e.g., format="csr", etc.
Examples
--------
>>> from scipy import sparse
>>> sparse.eye(3).toarray()
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> sparse.eye(3, dtype=np.int8)
<3x3 sparse matrix of type '<class 'numpy.int8'>'
with 3 stored elements (1 diagonals) in DIAgonal format>
"""
if n is None:
n = m
m,n = int(m),int(n)
if m == n and k == 0:
# fast branch for special formats
if format in ['csr', 'csc']:
idx_dtype = get_index_dtype(maxval=n)
indptr = np.arange(n+1, dtype=idx_dtype)
indices = np.arange(n, dtype=idx_dtype)
data = np.ones(n, dtype=dtype)
cls = {'csr': csr_matrix, 'csc': csc_matrix}[format]
return cls((data,indices,indptr),(n,n))
elif format == 'coo':
idx_dtype = get_index_dtype(maxval=n)
row = np.arange(n, dtype=idx_dtype)
col = np.arange(n, dtype=idx_dtype)
data = np.ones(n, dtype=dtype)
return coo_matrix((data,(row,col)),(n,n))
diags = np.ones((1, max(0, min(m + k, n))), dtype=dtype)
return spdiags(diags, k, m, n).asformat(format)
def kron(A, B, format=None):
"""kronecker product of sparse matrices A and B
Parameters
----------
A : sparse or dense matrix
first matrix of the product
B : sparse or dense matrix
second matrix of the product
format : str, optional
format of the result (e.g. "csr")
Returns
-------
kronecker product in a sparse matrix format
Examples
--------
>>> from scipy import sparse
>>> A = sparse.csr_matrix(np.array([[0, 2], [5, 0]]))
>>> B = sparse.csr_matrix(np.array([[1, 2], [3, 4]]))
>>> sparse.kron(A, B).toarray()
array([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
>>> sparse.kron(A, [[1, 2], [3, 4]]).toarray()
array([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
"""
B = coo_matrix(B)
if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
# B is fairly dense, use BSR
A = csr_matrix(A,copy=True)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix(output_shape)
B = B.toarray()
data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
data = data * B
return bsr_matrix((data,A.indices,A.indptr), shape=output_shape)
else:
# use COO
A = coo_matrix(A)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix(output_shape)
# expand entries of a into blocks
row = A.row.repeat(B.nnz)
col = A.col.repeat(B.nnz)
data = A.data.repeat(B.nnz)
row *= B.shape[0]
col *= B.shape[1]
# increment block indices
row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)
row += B.row
col += B.col
row,col = row.reshape(-1),col.reshape(-1)
# compute block entries
data = data.reshape(-1,B.nnz) * B.data
data = data.reshape(-1)
return coo_matrix((data,(row,col)), shape=output_shape).asformat(format)
def kronsum(A, B, format=None):
"""kronecker sum of sparse matrices A and B
Kronecker sum of two sparse matrices is a sum of two Kronecker
products kron(I_n,A) + kron(B,I_m) where A has shape (m,m)
and B has shape (n,n) and I_m and I_n are identity matrices
of shape (m,m) and (n,n), respectively.
Parameters
----------
A
square matrix
B
square matrix
format : str
format of the result (e.g. "csr")
Returns
-------
kronecker sum in a sparse matrix format
Examples
--------
"""
A = coo_matrix(A)
B = coo_matrix(B)
if A.shape[0] != A.shape[1]:
raise ValueError('A is not square')
if B.shape[0] != B.shape[1]:
raise ValueError('B is not square')
dtype = upcast(A.dtype, B.dtype)
L = kron(eye(B.shape[0],dtype=dtype), A, format=format)
R = kron(B, eye(A.shape[0],dtype=dtype), format=format)
return (L+R).asformat(format) # since L + R is not always same format
def _compressed_sparse_stack(blocks, axis):
"""
Stacking fast path for CSR/CSC matrices
(i) vstack for CSR, (ii) hstack for CSC.
"""
other_axis = 1 if axis == 0 else 0
data = np.concatenate([b.data for b in blocks])
constant_dim = blocks[0].shape[other_axis]
idx_dtype = get_index_dtype(arrays=[b.indptr for b in blocks],
maxval=max(data.size, constant_dim))
indices = np.empty(data.size, dtype=idx_dtype)
indptr = np.empty(sum(b.shape[axis] for b in blocks) + 1, dtype=idx_dtype)
last_indptr = idx_dtype(0)
sum_dim = 0
sum_indices = 0
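    # Copy each block's indices verbatim and shift its indptr by the running
    # total of stored entries (last_indptr), so the merged indptr indexes into
    # the concatenated data array.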
for b in blocks:
if b.shape[other_axis] != constant_dim:
raise ValueError('incompatible dimensions for axis %d' % other_axis)
indices[sum_indices:sum_indices+b.indices.size] = b.indices
sum_indices += b.indices.size
idxs = slice(sum_dim, sum_dim + b.shape[axis])
indptr[idxs] = b.indptr[:-1]
indptr[idxs] += last_indptr
sum_dim += b.shape[axis]
last_indptr += b.indptr[-1]
indptr[-1] = last_indptr
if axis == 0:
return csr_matrix((data, indices, indptr),
shape=(sum_dim, constant_dim))
else:
return csc_matrix((data, indices, indptr),
shape=(constant_dim, sum_dim))
def hstack(blocks, format=None, dtype=None):
"""
Stack sparse matrices horizontally (column wise)
Parameters
----------
blocks
sequence of sparse matrices with compatible shapes
format : str
sparse format of the result (e.g., "csr")
by default an appropriate sparse matrix format is returned.
This choice is subject to change.
dtype : dtype, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
See Also
--------
vstack : stack sparse matrices vertically (row wise)
Examples
--------
>>> from scipy.sparse import coo_matrix, hstack
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5], [6]])
>>> hstack([A,B]).toarray()
array([[1, 2, 5],
[3, 4, 6]])
"""
return bmat([blocks], format=format, dtype=dtype)
def vstack(blocks, format=None, dtype=None):
"""
Stack sparse matrices vertically (row wise)
Parameters
----------
blocks
sequence of sparse matrices with compatible shapes
format : str, optional
sparse format of the result (e.g., "csr")
by default an appropriate sparse matrix format is returned.
This choice is subject to change.
dtype : dtype, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
See Also
--------
hstack : stack sparse matrices horizontally (column wise)
Examples
--------
>>> from scipy.sparse import coo_matrix, vstack
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5, 6]])
>>> vstack([A, B]).toarray()
array([[1, 2],
[3, 4],
[5, 6]])
"""
return bmat([[b] for b in blocks], format=format, dtype=dtype)
def bmat(blocks, format=None, dtype=None):
"""
Build a sparse matrix from sparse sub-blocks
Parameters
----------
blocks : array_like
Grid of sparse matrices with compatible shapes.
An entry of None implies an all-zero matrix.
format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional
The sparse format of the result (e.g. "csr"). By default an
appropriate sparse matrix format is returned.
This choice is subject to change.
dtype : dtype, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
Returns
-------
bmat : sparse matrix
See Also
--------
block_diag, diags
Examples
--------
>>> from scipy.sparse import coo_matrix, bmat
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5], [6]])
>>> C = coo_matrix([[7]])
>>> bmat([[A, B], [None, C]]).toarray()
array([[1, 2, 5],
[3, 4, 6],
[0, 0, 7]])
>>> bmat([[A, None], [None, C]]).toarray()
array([[1, 2, 0],
[3, 4, 0],
[0, 0, 7]])
"""
blocks = np.asarray(blocks, dtype='object')
if blocks.ndim != 2:
raise ValueError('blocks must be 2-D')
M,N = blocks.shape
# check for fast path cases
if (N == 1 and format in (None, 'csr') and all(isinstance(b, csr_matrix)
for b in blocks.flat)):
A = _compressed_sparse_stack(blocks[:,0], 0)
if dtype is not None:
A = A.astype(dtype)
return A
elif (M == 1 and format in (None, 'csc')
and all(isinstance(b, csc_matrix) for b in blocks.flat)):
A = _compressed_sparse_stack(blocks[0,:], 1)
if dtype is not None:
A = A.astype(dtype)
return A
block_mask = np.zeros(blocks.shape, dtype=bool)
brow_lengths = np.zeros(M, dtype=np.int64)
bcol_lengths = np.zeros(N, dtype=np.int64)
# convert everything to COO format
for i in range(M):
for j in range(N):
if blocks[i,j] is not None:
A = coo_matrix(blocks[i,j])
blocks[i,j] = A
block_mask[i,j] = True
if brow_lengths[i] == 0:
brow_lengths[i] = A.shape[0]
elif brow_lengths[i] != A.shape[0]:
msg = ('blocks[{i},:] has incompatible row dimensions. '
'Got blocks[{i},{j}].shape[0] == {got}, '
'expected {exp}.'.format(i=i, j=j,
exp=brow_lengths[i],
got=A.shape[0]))
raise ValueError(msg)
if bcol_lengths[j] == 0:
bcol_lengths[j] = A.shape[1]
elif bcol_lengths[j] != A.shape[1]:
                    msg = ('blocks[:,{j}] has incompatible column dimensions. '
'Got blocks[{i},{j}].shape[1] == {got}, '
'expected {exp}.'.format(i=i, j=j,
exp=bcol_lengths[j],
got=A.shape[1]))
raise ValueError(msg)
nnz = sum(block.nnz for block in blocks[block_mask])
if dtype is None:
all_dtypes = [blk.dtype for blk in blocks[block_mask]]
dtype = upcast(*all_dtypes) if all_dtypes else None
row_offsets = np.append(0, np.cumsum(brow_lengths))
col_offsets = np.append(0, np.cumsum(bcol_lengths))
shape = (row_offsets[-1], col_offsets[-1])
data = np.empty(nnz, dtype=dtype)
idx_dtype = get_index_dtype(maxval=max(shape))
row = np.empty(nnz, dtype=idx_dtype)
col = np.empty(nnz, dtype=idx_dtype)
nnz = 0
ii, jj = np.nonzero(block_mask)
for i, j in zip(ii, jj):
B = blocks[i, j]
idx = slice(nnz, nnz + B.nnz)
data[idx] = B.data
row[idx] = B.row + row_offsets[i]
col[idx] = B.col + col_offsets[j]
nnz += B.nnz
return coo_matrix((data, (row, col)), shape=shape).asformat(format)
def block_diag(mats, format=None, dtype=None):
"""
Build a block diagonal sparse matrix from provided matrices.
Parameters
----------
mats : sequence of matrices
Input matrices.
format : str, optional
The sparse format of the result (e.g., "csr"). If not given, the matrix
is returned in "coo" format.
dtype : dtype specifier, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
Returns
-------
res : sparse matrix
Notes
-----
.. versionadded:: 0.11.0
See Also
--------
bmat, diags
Examples
--------
>>> from scipy.sparse import coo_matrix, block_diag
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5], [6]])
>>> C = coo_matrix([[7]])
>>> block_diag((A, B, C)).toarray()
array([[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, 5, 0],
[0, 0, 6, 0],
[0, 0, 0, 7]])
"""
nmat = len(mats)
rows = []
for ia, a in enumerate(mats):
row = [None]*nmat
if issparse(a):
row[ia] = a
else:
row[ia] = coo_matrix(a)
rows.append(row)
return bmat(rows, format=format, dtype=dtype)
def random(m, n, density=0.01, format='coo', dtype=None,
random_state=None, data_rvs=None):
"""Generate a sparse matrix of the given shape and density with randomly
distributed values.
Parameters
----------
m, n : int
shape of the matrix
density : real, optional
density of the generated matrix: density equal to one means a full
matrix, density of 0 means a matrix with no non-zero items.
format : str, optional
sparse matrix format.
dtype : dtype, optional
type of the returned matrix values.
random_state : {numpy.random.RandomState, int}, optional
Random number generator or random seed. If not given, the singleton
numpy.random will be used. This random state will be used
for sampling the sparsity structure, but not necessarily for sampling
the values of the structurally nonzero entries of the matrix.
data_rvs : callable, optional
Samples a requested number of random values.
This function should take a single argument specifying the length
of the ndarray that it will return. The structurally nonzero entries
of the sparse random matrix will be taken from the array sampled
by this function. By default, uniform [0, 1) random values will be
sampled using the same random state as is used for sampling
the sparsity structure.
Returns
-------
res : sparse matrix
Notes
-----
Only float types are supported for now.
Examples
--------
>>> from scipy.sparse import random
>>> from scipy import stats
>>> class CustomRandomState(np.random.RandomState):
... def randint(self, k):
... i = np.random.randint(k)
... return i - i % 2
>>> np.random.seed(12345)
>>> rs = CustomRandomState()
>>> rvs = stats.poisson(25, loc=10).rvs
>>> S = random(3, 4, density=0.25, random_state=rs, data_rvs=rvs)
>>> S.A
array([[ 36., 0., 33., 0.], # random
[ 0., 0., 0., 0.],
[ 0., 0., 36., 0.]])
>>> from scipy.sparse import random
>>> from scipy.stats import rv_continuous
>>> class CustomDistribution(rv_continuous):
... def _rvs(self, *args, **kwargs):
... return self._random_state.randn(*self._size)
>>> X = CustomDistribution(seed=2906)
>>> Y = X() # get a frozen version of the distribution
>>> S = random(3, 4, density=0.25, random_state=2906, data_rvs=Y.rvs)
>>> S.A
array([[ 0. , 0. , 0. , 0. ],
[ 0.13569738, 1.9467163 , -0.81205367, 0. ],
[ 0. , 0. , 0. , 0. ]])
"""
if density < 0 or density > 1:
raise ValueError("density expected to be 0 <= density <= 1")
dtype = np.dtype(dtype)
mn = m * n
tp = np.intc
if mn > np.iinfo(tp).max:
tp = np.int64
if mn > np.iinfo(tp).max:
msg = """\
Trying to generate a random sparse matrix such as the product of dimensions is
greater than %d - this is not supported on this machine
"""
raise ValueError(msg % np.iinfo(tp).max)
# Number of non zero values
k = int(density * m * n)
if random_state is None:
random_state = np.random
elif isinstance(random_state, (int, np.integer)):
random_state = np.random.RandomState(random_state)
if data_rvs is None:
if np.issubdtype(dtype, np.integer):
randint = get_randint(random_state)
def data_rvs(n):
return randint(np.iinfo(dtype).min, np.iinfo(dtype).max,
n, dtype=dtype)
elif np.issubdtype(dtype, np.complexfloating):
def data_rvs(n):
return random_state.rand(n) + random_state.rand(n) * 1j
else:
data_rvs = random_state.rand
ind = random_state.choice(mn, size=k, replace=False)
j = np.floor(ind * 1. / m).astype(tp, copy=False)
i = (ind - j * m).astype(tp, copy=False)
vals = data_rvs(k).astype(dtype, copy=False)
return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format,
copy=False)
def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None):
"""Generate a sparse matrix of the given shape and density with uniformly
distributed values.
Parameters
----------
m, n : int
shape of the matrix
density : real, optional
density of the generated matrix: density equal to one means a full
matrix, density of 0 means a matrix with no non-zero items.
format : str, optional
sparse matrix format.
dtype : dtype, optional
type of the returned matrix values.
random_state : {numpy.random.RandomState, int}, optional
Random number generator or random seed. If not given, the singleton
numpy.random will be used.
Returns
-------
res : sparse matrix
Notes
-----
Only float types are supported for now.
See Also
--------
scipy.sparse.random : Similar function that allows a user-specified random
data source.
Examples
--------
>>> from scipy.sparse import rand
>>> matrix = rand(3, 4, density=0.25, format="csr", random_state=42)
>>> matrix
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> matrix.todense()
matrix([[0.05641158, 0. , 0. , 0.65088847],
[0. , 0. , 0. , 0.14286682],
[0. , 0. , 0. , 0. ]])
"""
return random(m, n, density, format, dtype, random_state)
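# Illustrative usage sketch (an assumption, not part of the original module):
# the constructors above compose with one another. A small block system is
# assembled here from diagonal and identity pieces; the values mirror the
# docstring examples.
if __name__ == '__main__':
    A = diags([[1, 2, 3, 4], [1, 2, 3], [1, 2]], [0, -1, 2])  # 4x4, three diagonals
    B = eye(4, 3, k=-1)                                       # 4x3, ones on first subdiagonal
    C = identity(3, dtype='int8', format='dia')               # 3x3 identity
    M = bmat([[A, B], [None, C]])                             # 7x7 block matrix
    D = block_diag((A, C))                                    # 7x7 block-diagonal matrix
    R = random(3, 4, density=0.25, format='csr', random_state=42)
    print(M.shape, D.shape, R.nnz)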
|
|
from django.db import models
from django import forms
import datetime
from django.template.defaultfilters import slugify
from django.conf import settings
CURRENT_SEASON = getattr(settings, 'CURRENT_SEASON', datetime.date.today().year)
STATUS_CHOICES = (
('FR', 'Freshman'),
('SO', 'Sophomore'),
('JR', 'Junior'),
('SR', 'Senior'),
)
POSITION_TYPE_CHOICES = (
('O', 'Offense'),
('D', 'Defense'),
('S', 'Special Teams'),
)
SIDE_CHOICES = (
('O', 'Own'),
('P', 'Opponents'),
)
RESULT_CHOICES = (
('W', 'Win'),
('L', 'Loss'),
('T', 'Tie'),
)
GAME_TYPE_CHOICES = (
('H', 'Home'),
('A', 'Away'),
('N', 'Neutral Site'),
)
PLAY_CHOICES = (
('R', 'Run'),
('P', 'Pass'),
('F', 'Field Goal'),
('X', 'Extra Point'),
('N', 'Penalty'),
('K', 'Kickoff'),
('U', 'Punt'),
('T', 'Turnover'),
)
DIVISION_CHOICES = (
('B', 'Bowl Subdivision'),
('C', 'Championship Subdivision'),
('D', 'Division II'),
('T', 'Division III'),
)
class State(models.Model):
id = models.CharField(max_length=2, editable=False, primary_key=True)
name = models.CharField(max_length=50)
def __unicode__(self):
return self.name
def get_absolute_url(self):
return "/states/%s/" % self.id.lower()
class StateForm(forms.Form):
name = forms.ModelChoiceField(queryset=State.objects.all().order_by('name'))
class City(models.Model):
name = models.CharField(max_length=75)
slug = models.SlugField(max_length=75)
state = models.ForeignKey(State, null=True, blank=True)
def __unicode__(self):
if self.state:
return "%s, %s" % (self.name, self.state.id)
else:
return self.name
def get_absolute_url(self):
return "/college/states/%s/%s/" % (self.state.id.lower(), self.slug)
class Meta:
verbose_name_plural = 'cities'
class Week(models.Model):
season = models.IntegerField()
week_num = models.IntegerField()
end_date = models.DateField()
def __unicode__(self):
return "Week %s, %s" % (self.week_num, self.season)
def week_games_url(self):
return "/college/seasons/%s/week/%s/" % (self.season, self.week_num)
class Conference(models.Model):
abbrev = models.CharField(max_length=10)
name = models.CharField(max_length=90)
def __unicode__(self):
return self.name
def get_absolute_url(self):
return '/college/conferences/%s/' % self.abbrev.lower()
class College(models.Model):
name = models.CharField(max_length=90)
slug = models.SlugField(max_length=90)
drive_slug = models.CharField(max_length=90)
# city = models.ForeignKey(City, blank=True) #
state = models.ForeignKey(State, blank=True)
official_url = models.CharField(max_length=120, blank=True)
official_rss = models.CharField(max_length=120, blank=True)
updated = models.BooleanField()
def __unicode__(self):
return self.name
def get_absolute_url(self):
return '/college/teams/%s/' % self.slug
    def current_record(self):
        current_season = self.collegeyear_set.get(season=datetime.date.today().year)
        return "(%d-%d)" % (current_season.wins, current_season.losses)
class Meta:
ordering = ['name', 'state']
class CollegeYear(models.Model):
college = models.ForeignKey(College)
season = models.IntegerField()
wins = models.IntegerField(default=0)
losses = models.IntegerField(default=0)
ties = models.IntegerField(default=0)
conference_wins = models.IntegerField(default=0)
conference_losses = models.IntegerField(default=0)
conference_ties = models.IntegerField(default=0)
freshmen = models.IntegerField(default=0)
sophomores = models.IntegerField(default=0)
juniors = models.IntegerField(default=0)
seniors = models.IntegerField(default=0)
conference = models.ForeignKey(Conference, null=True, blank=True)
division = models.CharField(max_length=1, choices=DIVISION_CHOICES)
def __unicode__(self):
return "%s - %s" % (self.college.name, str(self.season))
def game_count(self):
return self.wins+self.losses+self.ties
def get_ncaa_week_url(self):
return 'http://web1.ncaa.org/football/exec/rankingSummary?year=%d&org=%d&week=' % (self.season, self.college.id)
def get_absolute_url(self):
return "/college/teams/%s/%s/" % (self.college.slug, self.season)
def get_conference_url(self):
if self.conference:
return "/college/conferences/%s/%s/" % (self.conference.abbrev, self.season)
def coaching_staff_url(self):
return self.get_absolute_url()+'coaches/'
def record(self):
if self.ties:
return "%s-%s-%s" % (self.wins, self.losses, self.ties)
else:
return "%s-%s" % (self.wins, self.losses)
def conference_record(self):
if self.conference_ties:
return "%s-%s-%s" % (self.conference_wins, self.conference_losses, self.conference_ties)
else:
return "%s-%s" % (self.conference_wins, self.conference_losses)
def coach_total(self):
return len(self.collegecoach_set.filter(end_date__isnull=True))
class Meta:
ordering = ['college', '-season']
class Coach(models.Model):
ncaa_name = models.CharField(max_length=90)
first_name = models.CharField(max_length=75)
last_name = models.CharField(max_length=75)
slug = models.CharField(max_length=75, editable=False)
college = models.ForeignKey(College, null=True, blank=True, related_name='School')
grad_year = models.IntegerField(null=True, blank=True)
birth_date = models.DateField(null=True, blank=True)
years = models.IntegerField(default=0, blank=True)
wins = models.IntegerField(default=0, blank=True)
losses = models.IntegerField(default=0, blank=True)
ties = models.IntegerField(default=0, blank=True)
def __unicode__(self):
return self.first_name + " " + self.last_name
def save(self):
super(Coach, self).save()
self.slug = '%s-%s-%s' % (str(self.id), slugify(self.first_name), slugify(self.last_name))
super(Coach, self).save()
def get_absolute_url(self):
return '/coaches/detail/%s/' % self.slug
def full_name(self):
return self.first_name + " " + self.last_name
def current_school(self):
try:
current_school = self.collegecoach_set.get(collegeyear__season__exact = CURRENT_SEASON, end_date = None).collegeyear.college
except:
current_school = None
return current_school
def seasons_at_school(self,school):
return [sorted([cy.collegeyear.season for cy in self.collegecoach_set.all() if cy.collegeyear.college == school])]
def seasons_at_current_school(self):
return len([cy.collegeyear.college.id for cy in self.collegecoach_set.all() if cy.collegeyear.college.id == self.current_school().id])
def current_job(self):
if self.current_school():
            cy = self.collegecoach_set.filter(collegeyear__college=self.current_school()).order_by('start_date')[0].jobs_display()
return cy
else:
return None
def head_coach_experience(self):
if 1 in sum([[j.id for j in job.jobs.all() if j.id == 1] for job in self.collegecoach_set.all()],[]):
return "Yes"
else:
return "No"
def years_since_2000(self):
return self.collegecoach_set.all().count()
def years_at_alma_mater_since_2000(self):
return len([a for a in self.collegecoach_set.all() if self.college == a.collegeyear.college])
def states_coached_in(self):
states = {}
state_list = [s.collegeyear.college.state.id for s in self.collegecoach_set.all()]
[states.setdefault(e,500) for e in state_list if e not in states]
return states
def coaching_peers(self):
from django.db import connection
cursor = connection.cursor()
year_ids = [str(c.collegeyear.id) for c in self.collegecoach_set.all()]
cursor.execute("SELECT distinct college_coach.id FROM college_coach INNER JOIN college_collegecoach ON college_coach.id=college_collegecoach.coach_id WHERE college_collegecoach.collegeyear_id IN (%s)" % ','.join(year_ids))
results = cursor.fetchall()
ids = [c[0] for c in results]
return Coach.objects.filter(id__in=ids).exclude(id=self.id)
class Meta:
ordering = ['last_name', 'first_name']
verbose_name_plural = 'Coaches'
class CoachForm(forms.Form):
name = forms.CharField(max_length=50, initial='Last name')
class CoachDetailForm(forms.Form):
coaches = forms.ModelChoiceField(queryset=Coach.objects.none())
def __init__(self, coaches, *args, **kwargs):
super(CoachDetailForm, self).__init__(*args, **kwargs)
self.fields["coaches"].queryset = coaches
class CoachingJob(models.Model):
name = models.CharField(max_length=75)
slug = models.SlugField(max_length=75)
def __unicode__(self):
return self.name
class CollegeCoach(models.Model):
coach = models.ForeignKey(Coach)
collegeyear = models.ForeignKey(CollegeYear)
jobs = models.ManyToManyField(CoachingJob)
start_date = models.DateField(null=True, blank=True)
end_date = models.DateField(null=True, blank=True)
is_head_coach = models.BooleanField(default=False)
def __unicode__(self):
return "%s: %s" % (self.coach, self.collegeyear)
def get_absolute_url(self):
return self.coach.get_absolute_url()
def jobs_display(self):
return ", ".join([x.name for x in self.jobs.all()])
def is_current_job(self):
if self.collegeyear.season == CURRENT_SEASON and self.end_date == None:
return True
else:
return False
    def partial_season(self):
        if self.end_date:
            return True
        else:
            return False
def feed_date(self):
if self.start_date and self.end_date:
return self.end_date
elif self.start_date:
return self.start_date
elif self.end_date:
return self.end_date
def feed_action(self):
if self.start_date and self.end_date:
return "Departed"
elif self.start_date:
return "Hired"
elif self.end_date:
return "Departed"
class Meta:
ordering = ['coach__last_name','-collegeyear__season']
verbose_name_plural = 'College coaches'
class CollegeTotal(models.Model):
college = models.ForeignKey(College)
season = models.IntegerField()
third_down_attempts = models.IntegerField(default=0)
third_down_conversions = models.IntegerField(default=0)
fourth_down_attempts = models.IntegerField(default=0)
fourth_down_conversions = models.IntegerField(default=0)
first_downs_rushing = models.IntegerField(default=0)
first_downs_passing = models.IntegerField(default=0)
first_downs_penalty = models.IntegerField(default=0)
first_downs_total = models.IntegerField(default=0)
penalties = models.IntegerField(default=0)
penalty_yards = models.IntegerField(default=0)
fumbles = models.IntegerField(default=0)
fumbles_lost = models.IntegerField(default=0)
rushes = models.IntegerField(default=0)
rush_gain = models.IntegerField(default=0)
rush_loss = models.IntegerField(default=0)
rush_net = models.IntegerField(default=0)
rush_touchdowns = models.IntegerField(default=0)
total_plays = models.IntegerField(default=0)
total_yards = models.IntegerField(default=0)
pass_attempts = models.IntegerField(default=0)
pass_completions = models.IntegerField(default=0)
pass_interceptions = models.IntegerField(default=0)
pass_yards = models.IntegerField(default=0)
pass_touchdowns = models.IntegerField(default=0)
receptions = models.IntegerField(default=0)
receiving_yards = models.IntegerField(default=0)
receiving_touchdowns = models.IntegerField(default=0)
punts = models.IntegerField(default=0)
punt_yards = models.IntegerField(default=0)
punt_returns = models.IntegerField(default=0)
punt_return_yards = models.IntegerField(default=0)
punt_return_touchdowns = models.IntegerField(default=0)
kickoff_returns = models.IntegerField(default=0)
kickoff_return_yards = models.IntegerField(default=0)
kickoff_return_touchdowns = models.IntegerField(default=0)
touchdowns = models.IntegerField(default=0)
pat_attempts = models.IntegerField(default=0)
pat_made = models.IntegerField(default=0)
two_point_conversion_attempts = models.IntegerField(default=0)
two_point_conversions = models.IntegerField(default=0)
field_goal_attempts = models.IntegerField(default=0)
field_goals_made = models.IntegerField(default=0)
points = models.IntegerField(default=0)
class Position(models.Model):
abbrev = models.CharField(max_length=5)
name = models.CharField(max_length=25)
plural_name = models.CharField(max_length=25)
position_type = models.CharField(max_length=1, choices=POSITION_TYPE_CHOICES)
def __unicode__(self):
return self.abbrev
def get_absolute_url(self):
return '/recruits/positions/%s/' % self.abbrev.lower()
class BowlGame(models.Model):
name = models.CharField(max_length=75)
slug = models.CharField(max_length=75)
city = models.ForeignKey(City)
def __unicode__(self):
return self.name
def get_absolute_url(self):
return '/college/bowl-games/%s/' % self.slug
class Game(models.Model):
season = models.IntegerField()
team1 = models.ForeignKey(CollegeYear, related_name='team1')
coach1 = models.ForeignKey(Coach, null=True, related_name='first_coach')
team2 = models.ForeignKey(CollegeYear, related_name='team2')
coach2 = models.ForeignKey(Coach, null=True, related_name='second_coach')
date = models.DateField()
week = models.ForeignKey(Week)
t1_game_type = models.CharField(max_length=1, choices=GAME_TYPE_CHOICES)
t1_result = models.CharField(max_length=1, choices=RESULT_CHOICES, blank=True)
team1_score = models.IntegerField(null=True, blank=True)
team2_score = models.IntegerField(null=True, blank=True)
site = models.CharField(max_length=90, blank=True)
attendance = models.IntegerField(null=True, blank=True)
overtime = models.CharField(max_length=5, blank=True)
ncaa_xml = models.CharField(max_length=120, blank=True)
duration = models.TimeField(null=True, blank=True)
has_drives = models.BooleanField()
has_stats = models.BooleanField()
has_player_stats = models.BooleanField()
is_conference_game = models.BooleanField()
is_bowl_game = models.BooleanField()
bowl_game = models.ForeignKey(BowlGame, null=True, blank=True)
def __unicode__(self):
return '%s vs. %s, %s' % (self.team1, self.team2, self.date)
def get_absolute_url(self):
return '/college/teams/%s/vs/%s/%s/%s/%s/' % (self.team1.college.slug, self.team2.college.slug, self.date.year, self.date.month, self.date.day)
def get_matchup_url(self):
return '/college/teams/%s/vs/%s/' % (self.team1.college.slug, self.team2.college.slug)
def get_reverse_url(self):
return '/college/teams/%s/vs/%s/%s/%s/%s/' % (self.team2.college.slug, self.team1.college.slug, self.date.year, self.date.month, self.date.day)
def get_ncaa_xml_url(self):
return 'http://web1.ncaa.org/d1mfb/%s/Internet/worksheets/%s.xml' % (self.season, self.ncaa_xml.strip())
def get_ncaa_drive_url(self):
return "http://web1.ncaa.org/mfb/driveSummary.jsp?acadyr=%s&h=%s&v=%s&date=%s&game=%s" % (self.season, self.team1.college.id, self.team2.college.id, self.date.strftime("%d-%b-%y").upper(), self.ncaa_xml.strip())
def get_play_by_play_url(self):
return "http://web1.ncaa.org/mfb/driveSummary.jsp?expand=A&acadyr=%s&h=%s&v=%s&date=%s&game=%s" % (self.season, self.team1.college.id, self.team2.college.id, self.date.strftime("%d-%b-%y").upper(), self.ncaa_xml.strip())
def margin(self):
return self.team1_score-self.team2_score
def display(self):
if self.margin() > 0:
return "%s %s, %s %s" % (self.team1.college, self.team1_score, self.team2.college, self.team2_score)
else:
return "%s %s, %s %s" % (self.team2.college, self.team2_score, self.team1.college, self.team1_score)
class QuarterScore(models.Model):
    "Represents a team's scoring during a quarter of a game. OT periods begin with 5. Not implemented yet."
game = models.ForeignKey(Game)
team = models.ForeignKey(CollegeYear)
    season = models.IntegerField(default=CURRENT_SEASON)
    quarter = models.IntegerField()
points = models.PositiveIntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.team, self.quarter)
class DriveOutcome(models.Model):
abbrev = models.CharField(max_length=10)
name = models.CharField(max_length=50, null=True)
slug = models.SlugField(max_length=50, null=True)
def __unicode__(self):
return self.name
class GameDrive(models.Model):
season = models.IntegerField()
game = models.ForeignKey(Game)
team = models.ForeignKey(CollegeYear)
drive = models.IntegerField()
quarter = models.PositiveSmallIntegerField()
start_how = models.CharField(max_length=25)
start_time = models.TimeField()
start_position = models.IntegerField()
start_side = models.CharField(max_length=1, choices=SIDE_CHOICES)
end_result = models.ForeignKey(DriveOutcome)
end_time = models.TimeField()
end_position = models.IntegerField(null=True)
end_side = models.CharField(max_length=1, choices=SIDE_CHOICES)
plays = models.IntegerField()
yards = models.IntegerField()
time_of_possession = models.TimeField()
def __unicode__(self):
return "%s: %s drive %s" % (self.game, self.team, self.drive)
class GameOffense(models.Model):
game = models.ForeignKey(Game)
team = models.ForeignKey(CollegeYear)
season = models.IntegerField()
third_down_attempts = models.IntegerField(default=0)
third_down_conversions = models.IntegerField(default=0)
fourth_down_attempts = models.IntegerField(default=0)
fourth_down_conversions = models.IntegerField(default=0)
time_of_possession = models.TimeField(null=True)
first_downs_rushing = models.IntegerField(default=0)
first_downs_passing = models.IntegerField(default=0)
first_downs_penalty = models.IntegerField(default=0)
first_downs_total = models.IntegerField(default=0)
penalties = models.IntegerField(default=0)
penalty_yards = models.IntegerField(default=0)
fumbles = models.IntegerField(default=0)
fumbles_lost = models.IntegerField(default=0)
rushes = models.IntegerField(default=0)
rush_gain = models.IntegerField(default=0)
rush_loss = models.IntegerField(default=0)
rush_net = models.IntegerField(default=0)
rush_touchdowns = models.IntegerField(default=0)
total_plays = models.IntegerField(default=0)
total_yards = models.IntegerField(default=0)
pass_attempts = models.IntegerField(default=0)
pass_completions = models.IntegerField(default=0)
pass_interceptions = models.IntegerField(default=0)
pass_yards = models.IntegerField(default=0)
pass_touchdowns = models.IntegerField(default=0)
receptions = models.IntegerField(default=0)
receiving_yards = models.IntegerField(default=0)
receiving_touchdowns = models.IntegerField(default=0)
punts = models.IntegerField(default=0)
punt_yards = models.IntegerField(default=0)
punt_returns = models.IntegerField(default=0)
punt_return_yards = models.IntegerField(default=0)
punt_return_touchdowns = models.IntegerField(default=0)
kickoff_returns = models.IntegerField(default=0)
kickoff_return_yards = models.IntegerField(default=0)
kickoff_return_touchdowns = models.IntegerField(default=0)
touchdowns = models.IntegerField(default=0)
pat_attempts = models.IntegerField(default=0)
pat_made = models.IntegerField(default=0)
two_point_conversion_attempts = models.IntegerField(default=0)
two_point_conversions = models.IntegerField(default=0)
field_goal_attempts = models.IntegerField(default=0)
field_goals_made = models.IntegerField(default=0)
points = models.IntegerField(default=0)
def __unicode__(self):
return '%s - %s' % (self.game, self.team)
def third_down_rate(self):
return float(self.third_down_conversions)/float(self.third_down_attempts)
def field_goal_rate(self):
return float(self.field_goals_made)/float(self.field_goal_attempts)
def penalty_yard_ratio(self):
return float(self.penalty_yards)/float(self.total_yards)
def yards_per_reception(self):
return float(self.receiving_yards)/float(self.receptions)
def yards_per_pass_attempt(self):
return float(self.receiving_yards)/(self.pass_attempts)
def rushing_first_downs_pct(self):
return float(self.first_downs_rushing)/float(self.first_downs_total)*100
"""
Returns a floating-point number representing the number
of touchdowns per rushing attempt for a single game.
"""
def touchdowns_per_rushes(self):
return float(self.rush_touchdowns)/float(self.rushes)*100
"""
Returns the opponent for a team's given Game Offense record.
"""
def opponent(self):
if self.team == self.game.team2:
return self.game.team1
else:
return self.game.team2
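# --- Illustrative sketch, not part of the original models ---
# The rate helpers on GameOffense divide by attempt counts, so they raise
# ZeroDivisionError for games with no attempts. The hypothetical helper below
# (its name and behavior are assumptions for illustration) shows one way a
# caller might guard against that before using third_down_rate().
def example_third_down_rate(game_offense):
    """Return the third-down conversion rate, or None when there were no attempts."""
    if game_offense.third_down_attempts == 0:
        return None
    return game_offense.third_down_rate()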
class GameDefense(models.Model):
game = models.ForeignKey(Game)
team = models.ForeignKey(CollegeYear)
season = models.IntegerField()
safeties = models.IntegerField(default=0)
unassisted_tackles = models.IntegerField(default=0)
assisted_tackles = models.IntegerField(default=0)
unassisted_tackles_for_loss = models.IntegerField(default=0)
assisted_tackles_for_loss = models.IntegerField(default=0)
tackles_for_loss_yards = models.IntegerField(default=0)
unassisted_sacks = models.IntegerField(default=0)
assisted_sacks = models.IntegerField(default=0)
sack_yards = models.IntegerField(default=0)
defensive_interceptions = models.IntegerField(default=0)
defensive_interception_yards = models.IntegerField(default=0)
defensive_interception_touchdowns = models.IntegerField(default=0)
pass_breakups = models.IntegerField(default=0)
fumbles_forced = models.IntegerField(default=0)
fumbles_number = models.IntegerField(default=0)
fumbles_yards = models.IntegerField(default=0)
fumbles_touchdowns = models.IntegerField(default=0)
def __unicode__(self):
return '%s - %s' % (self.game, self.team)
class Player(models.Model):
name = models.CharField(max_length=120)
slug = models.SlugField(max_length=120)
team = models.ForeignKey(CollegeYear)
season = models.IntegerField()
position = models.ForeignKey(Position)
number = models.CharField(max_length=4)
games_played = models.PositiveIntegerField(default=0)
status = models.CharField(max_length=2, choices=STATUS_CHOICES)
def __unicode__(self):
return u"%s - %s" % (self.name, self.team)
def get_absolute_url(self):
return '/college/teams/%s/%s/players/%s/' % (self.team.college.slug, self.season, self.slug)
def get_team_position_url(self):
return '/college/teams/%s/%s/players/positions/%s/' % (self.team.college.slug, self.season, self.position.abbrev.lower())
def get_team_class_url(self):
return '/college/teams/%s/%s/players/class/%s/' % (self.team.college.slug, self.season, self.status.lower())
class Meta:
ordering = ['id']
class PlayerCollegeCareer(models.Model):
player = models.ForeignKey(Player)
first_season = models.ForeignKey(CollegeYear, related_name='first_season')
last_season = models.ForeignKey(CollegeYear, related_name='last_season')
total_games = models.IntegerField(null=True, blank=True)
def __unicode__(self):
        return self.player.name
class PlayerGame(models.Model):
player = models.ForeignKey(Player)
game = models.ForeignKey(Game)
played = models.BooleanField()
starter = models.BooleanField()
total_plays = models.IntegerField()
total_yards = models.IntegerField()
def __unicode__(self):
return self.player.name
class PlayerRush(models.Model):
player = models.ForeignKey(Player)
game = models.ForeignKey(Game)
rushes = models.IntegerField(default=0)
gain = models.IntegerField(default=0)
loss = models.IntegerField(default=0)
net = models.IntegerField(default=0)
td = models.IntegerField(default=0)
long_yards = models.IntegerField(default=0)
average = models.FloatField(default=0)
total_plays = models.IntegerField(default=0)
total_yards = models.IntegerField(default=0)
def __unicode__(self):
return "%s - %s" % (self.player.name, self.game)
class Meta:
verbose_name_plural = "player rushing"
class PlayerPass(models.Model):
player = models.ForeignKey(Player)
game = models.ForeignKey(Game)
attempts = models.IntegerField(default=0)
completions = models.IntegerField(default=0)
interceptions = models.IntegerField(default=0)
yards = models.IntegerField(default=0)
td = models.IntegerField(default=0)
conversions = models.IntegerField(default=0)
total_plays = models.IntegerField(default=0)
total_yards = models.IntegerField(default=0)
pass_efficiency = models.FloatField(default=0)
def __unicode__(self):
return "%s - %s" % (self.player.name, self.game)
def comp_att(self):
return "%d of %d" % (self.completions, self.attempts)
class Meta:
verbose_name_plural = 'player passing'
class PlayerReceiving(models.Model):
player = models.ForeignKey(Player)
game = models.ForeignKey(Game)
receptions = models.IntegerField(default=0)
yards = models.IntegerField(default=0)
td = models.IntegerField(default=0)
long_yards = models.IntegerField(default=0)
average = models.FloatField(default=0)
def __unicode__(self):
return "%s - %s" % (self.player.name, self.game)
class PlayerScoring(models.Model):
player = models.ForeignKey(Player)
game = models.ForeignKey(Game)
td = models.IntegerField(default=0)
fg_att = models.IntegerField(default=0)
fg_made = models.IntegerField(default=0)
pat_att = models.IntegerField(default=0)
pat_made = models.IntegerField(default=0)
two_pt_att = models.IntegerField(default=0)
two_pt_made = models.IntegerField(default=0)
def_pat_att = models.IntegerField(default=0)
def_pat_made = models.IntegerField(default=0)
def_two_pt_att = models.IntegerField(default=0)
def_two_pt_made = models.IntegerField(default=0)
safeties = models.IntegerField(default=0)
points = models.IntegerField(default=0)
def __unicode__(self):
return "%s - %s" % (self.player.name, self.game)
class PlayerTackle(models.Model):
player = models.ForeignKey(Player)
game = models.ForeignKey(Game)
unassisted_tackles = models.IntegerField(default=0)
assisted_tackles = models.IntegerField(default=0)
def __unicode__(self):
return "%s - %s" % (self.player.name, self.game)
def total_tackles(self):
return self.unassisted_tackles+self.assisted_tackles
class PlayerTacklesLoss(models.Model):
player = models.ForeignKey(Player)
game = models.ForeignKey(Game)
unassisted_tackles_for_loss = models.IntegerField(default=0)
assisted_tackles_for_loss = models.IntegerField(default=0)
tackles_for_loss_yards = models.IntegerField(default=0)
unassisted_sacks = models.IntegerField(default=0)
assisted_sacks = models.IntegerField(default=0)
sack_yards = models.IntegerField(default=0)
def __unicode__(self):
return "%s - %s" % (self.player.name, self.game)
def total_sacks(self):
return self.unassisted_sacks+self.assisted_sacks
def total_tackles_for_loss(self):
return self.unassisted_tackles_for_loss+self.assisted_tackles_for_loss
class Meta:
verbose_name_plural = 'player tackles for loss'
class PlayerPassDefense(models.Model):
player = models.ForeignKey(Player)
game = models.ForeignKey(Game)
interceptions = models.IntegerField(default=0)
interception_yards = models.IntegerField(default=0)
interception_td = models.IntegerField(default=0)
pass_breakups = models.IntegerField(default=0)
def __unicode__(self):
return "%s - %s" % (self.player.name, self.game)
class PlayerFumble(models.Model):
player = models.ForeignKey(Player)
game = models.ForeignKey(Game)
fumbles_forced = models.IntegerField(default=0)
fumbles_number = models.IntegerField(default=0)
fumbles_yards = models.IntegerField(default=0)
fumbles_td = models.IntegerField(default=0)
def __unicode__(self):
return "%s - %s" % (self.player.name, self.game)
class PlayerReturn(models.Model):
player = models.ForeignKey(Player)
game = models.ForeignKey(Game)
punt_returns = models.IntegerField(default=0)
punt_return_yards = models.IntegerField(default=0)
punt_return_td = models.IntegerField(default=0)
kickoff_returns = models.IntegerField(default=0)
kickoff_return_yards = models.IntegerField(default=0)
kickoff_return_td = models.IntegerField(default=0)
def __unicode__(self):
return "%s - %s" % (self.player.name, self.game)
class PlayerSummary(models.Model):
player = models.ForeignKey(Player)
rushes = models.IntegerField(null=True)
rush_gain = models.IntegerField(null=True)
rush_loss = models.IntegerField(null=True)
rush_net = models.IntegerField(null=True)
rush_td = models.IntegerField(null=True)
pass_attempts = models.IntegerField(null=True)
pass_complete = models.IntegerField(null=True)
pass_intercept = models.IntegerField(null=True)
pass_yards = models.IntegerField(null=True)
pass_td = models.IntegerField(null=True)
conversions = models.IntegerField(null=True)
offense_plays = models.IntegerField(null=True)
offense_yards = models.IntegerField(null=True)
receptions = models.IntegerField(null=True)
reception_yards = models.IntegerField(null=True)
reception_td = models.IntegerField(null=True)
def __unicode__(self):
return "%s - %s" % (self.player.name, self.player.season)
class Poll(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField(max_length=50)
def __unicode__(self):
return self.name
class PollResults(models.Model):
poll = models.ForeignKey(Poll)
week = models.ForeignKey(Week)
team = models.ForeignKey(College)
rank = models.IntegerField()
def __unicode__(self):
return "%s: %s %s" % (self.poll, self.week, self.team)
|
|
import os
import json
import argparse
from datetime import datetime
import heapq
#from customer_info import customer_info
#from time_frame import time_frame
class customer_info(object):
def __init__(self, customer_id):
# Customer ID
self.customer_id = customer_id
# Record all the orders
# HashMap<order_id, amount>
self.orders = dict()
# Number of site visits
self.num_visits = 0
# The function that ingest event to the corresponding customer
# O(1)
def ingest(self, event):
# Append the site visit
if event['type'] == 'SITE_VISIT':
self.num_visits += 1
# Append the expenditures
elif event['type'] == 'ORDER':
if event['verb'] == 'NEW':
#self.orders[event['key']] = event['total_amount']
self.orders[event['key']] = self.get_amount(event['total_amount'])
elif event['verb'] == 'UPDATE':
#self.orders[event['key']] = event['total_amount']
self.orders[event['key']] = self.get_amount(event['total_amount'])
# Inappropriate field content
#else:
# Extract money value
# O(1)
def get_amount(self, total_amount):
money_value = total_amount.split()[0]
try:
money_value = float(money_value)
return money_value
except:
return 0
class time_frame(object):
def __init__(self):
self.start_time = None
self.end_time = None
def ingest(self, event):
# Parse the time field
event_time = event['event_time']
event_date = self.parse_event_time(event_time)
        # Update the earliest time
self.start_time = self.get_start_time(event_date)
# Update the latest time
self.end_time = self.get_end_time(event_date)
# Parse the time field
# %Y-%m-%d
def parse_event_time(self, event_time):
# Only need the year-month-date part
return event_time.split('T')[0]
    # Select the earliest event time
def get_start_time(self, event_date):
# If self.start_time is None
if not self.start_time:
return event_date
date0 = datetime.strptime(self.start_time, '%Y-%m-%d')
date1 = datetime.strptime(event_date, '%Y-%m-%d')
if date0 <= date1:
return self.start_time
else:
return event_date
# Select the latest event time
def get_end_time(self, event_date):
# If self.end_time is None
if not self.end_time:
return event_date
date0 = datetime.strptime(self.end_time, '%Y-%m-%d')
date1 = datetime.strptime(event_date, '%Y-%m-%d')
if date0 <= date1:
return event_date
else:
return self.end_time
# Ingest the coming event to customer_info & time_frame
def ingest(event, D):
customer_map, tf = D
# Check the event type
if event['type'] == 'CUSTOMER':
customer_id = event['key']
else:
customer_id = event['customer_id']
# New customer
if customer_id not in customer_map:
ci = customer_info(customer_id)
customer_map[customer_id] = ci
# Existing customer
else:
ci = customer_map[customer_id]
# Ingest the incoming event
ci.ingest(event)
tf.ingest(event)
return customer_map, tf
def top_x_simple_ltv_customers(x, D):
customer_map, tf = D
# LTV = 52(a) X t
# a: customer expenditures per visit (USD) x number of site visits per week
time_delta = datetime.strptime(tf.end_time, '%Y-%m-%d') - datetime.strptime(tf.start_time, '%Y-%m-%d')
    week_delta = time_delta.days / 7
    # Guard against a zero-length data window (all events on the same day)
    if week_delta == 0:
        week_delta = 1
customer_list = list(customer_map.values())
# Use a heap to store all the ltv
customer_ltv_heap = []
# Iterate each customer:
for customer in customer_list:
order_list = list(customer.orders.values())
customer_exp = sum(order_list)
        # Get number of visits
num_visit = customer.num_visits
# Get customer expenditures per visit (USD)
customer_exp_per_visit = customer_exp / num_visit
# Get number of site visits per week
num_visit_per_week = num_visit / week_delta
# Get LTV
a = customer_exp_per_visit * num_visit_per_week
t = 10
ltv = 52 * a * t
# Add the LTV to heap
heapq.heappush(customer_ltv_heap, (-1*ltv, customer.customer_id))
res = []
for i in range(min(x, len(customer_ltv_heap))):
tmp = heapq.heappop(customer_ltv_heap)
x = (-1*tmp[0], tmp[1])
res.append(x)
return res
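# --- Illustrative sketch, not part of the original script ---
# A minimal, hypothetical example of exercising ingest() and
# top_x_simple_ltv_customers() with in-memory events instead of the JSON input
# file. The field names mirror what the functions above read; the sample
# values and customer/order ids are invented for illustration only.
def example_ltv_pipeline():
    customer_map, tf = dict(), time_frame()
    sample_events = [
        {'type': 'CUSTOMER', 'verb': 'NEW', 'key': 'c1',
         'event_time': '2017-01-01T12:00:00.000Z'},
        {'type': 'SITE_VISIT', 'verb': 'NEW', 'customer_id': 'c1',
         'event_time': '2017-01-02T12:00:00.000Z'},
        {'type': 'ORDER', 'verb': 'NEW', 'key': 'o1', 'customer_id': 'c1',
         'total_amount': '12.34 USD',
         'event_time': '2017-01-08T12:00:00.000Z'},
    ]
    for event in sample_events:
        customer_map, tf = ingest(event, [customer_map, tf])
    # One week of data, one visit and one 12.34 USD order:
    # a = 12.34 * 1, so LTV = 52 * a * 10.
    return top_x_simple_ltv_customers(1, [customer_map, tf])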
def argument_parse():
# Parse the given arguments
parser = argparse.ArgumentParser()
parser.add_argument("-i", help="input data")
parser.add_argument("-o", help="output data")
parser.add_argument("-x", help="top x")
args = parser.parse_args()
return args
def event_iteration(events_file, customer_map, tf):
with open(os.getcwd() + '/' + events_file) as input_file:
# Load the k-v pairs in input file as json objects
events = json.load(input_file)
for event in events:
customer_map, tf = ingest(event, [customer_map, tf])
return customer_map, tf
def main():
# Get the parsed arguments
args = argument_parse()
# Create a hashmap to direct customer_id to customer_info object
customer_map = dict()
# Initialize the time_frame
tf = time_frame()
# Parse the input events
events_file = args.i
# Iterate each incoming event
customer_map, tf = event_iteration(events_file, customer_map, tf)
#print(len(customer_map))
#print(tf.start_time)
#print(tf.end_time)
top_X_simple_ltv_customers = top_x_simple_ltv_customers(int(args.x), [customer_map, tf])
#print(top_X_simple_ltv_customers)
f_w = open(os.getcwd() + '/' + args.o, 'w')
for x in top_X_simple_ltv_customers:
f_w.write(x[1]+'\t'+str(x[0])+'\n')
f_w.close()
if __name__ == "__main__":
main()
|
|
#!/usr/bin/python
doc_name = 'CfA.SNIbc.BIGINFO.list'
#doc_name = 'test'
USR='fedhere'
PWD='*******'
import sys,os
sys.path.append('mypython')
import pylab as pl
import gdata.docs
import gdata.docs.service
import gdata.spreadsheet.service
import re
import optparse
from numpy import array, ma, mean, std, median, where,zeros
def getstats(vals,data, sntype=None):
mydic=makevalsintodictionary((vals,data))
if sntype== None:
sntype = ['all']
else:
sntype=sntype.split(',')
for key in mydic.iterkeys():
mydic[key]=array(mydic[key])
for t in sntype:
if t == 'all':
mydata=mydic[key]
else:
chindex=where(mydic['type']==t)[0]
mydata=mydic[key][chindex]
if len(mydata)==0:
continue
mymask=zeros(len(mydata),float)
for i in where(mydata == '--')[0]:
mymask[i]=1
mx =ma.masked_array(mydata, mask=mymask)
thislist=[]
failedcount = 0
for f in mx:
try:
thislist.append(float(f))
except:
failedcount +=1
pass
print "type:", t, "number of elements: ",len(thislist), "(",failedcount,"failed)"
if len(thislist)>0:
print "\t",key,": mean=",mean(thislist),"median=",median(thislist),"std=",std(thislist)
def putstuff(user,pwd,vals,sn, value,cohort_key=None ):
# Connect to Google
gd_client = gdata.spreadsheet.service.SpreadsheetsService()
gd_client.email = USR#user
gd_client.password = PWD#pwd
gd_client.source = 'payne.org-example-1'
gd_client.ProgrammaticLogin()
#Now that we're connected, we query the spreadsheet by name, and extract the unique spreadsheet and worksheet IDs.
q = gdata.spreadsheet.service.DocumentQuery()
q['title'] = doc_name
q['title-exact'] = 'true'
feed = gd_client.GetSpreadsheetsFeed(query=q)
spreadsheet_id = feed.entry[0].id.text.rsplit('/',1)[1]
feed = gd_client.GetWorksheetsFeed(spreadsheet_id)
worksheet_id = feed.entry[0].id.text.rsplit('/',1)[1]
rows = gd_client.GetCellsFeed(spreadsheet_id, worksheet_id).entry
cells=gd_client.GetCellsFeed(spreadsheet_id, worksheet_id)
batchRequest = gdata.spreadsheet.SpreadsheetsCellsFeed()
cohort = GetCohort(cohort_key)
    rowcursor = 0
    for myrow in cohort:
colcursor = 0
#print ("row ::"+str(myrow))
for mycell in myrow:
#print ("cell::"+str(mycell))
found = 0
#print ("try and place"+str(rowcursor)+','+str(colcursor))
for myentry in cells.entry:
if ((int(myentry.cell.row) == int(rowcursor+1)) and (int(myentry.cell.col) == int(colcursor+1))):
print "updating "+myentry.cell.text+" to "+str(mycell)
myentry.cell.inputValue = str(mycell)
batchRequest.AddUpdate(myentry)
found = 1
if not found:
print "inserting "+str(mycell)+" at Cell "+ str(rowcursor+1)+'_'+str(colcursor+1)
newCell = gdata.spreadsheet.SpreadsheetsCell()
newCell.cell = gdata.spreadsheet.Cell(inputValue=str(mycell), text=None, row=str(rowcursor+1), col=str(colcursor+1))
                print newCell.cell.inputValue
                batchRequest.AddInsert(newCell)
colcursor = colcursor + 1
rowcursor = rowcursor + 1
updated = gd_client.ExecuteBatch(batchRequest, cells.GetBatchLink().href)
if updated:
print "Sucessfull!"+str(updated)
else:
print "Failed!"
''' if len(sn) == 7:
sn=sn.upper().replace('SN','sn')
for i in rows:
help(cells.entry[0])
print cells.entry[0]#[i]#.cells.inputValue
print cells.entry[1]#[i]#.cells.inputValue
sys.exit()
if sn == cells.entry[1].cells.inputValue:
i.custom[v].cell =value
return value
return 0
'''
def makevalsintodictionary(toutput):
thisdic={}
for i,f in enumerate(toutput[0][1:]):
#toutput[0][1:] is the variables list
thisdic[f]=toutput[1][i+1]
# print f,thisdic[f]
# thisdic[f][p] =
return thisdic
def makeintodictionary(toutput):
thisdic={}
for i,f in enumerate(toutput[1][0]):
#toutput[1][0] is the sn name
f=f.lower()
thisdic[f]={}
for j,p in enumerate(toutput[0][1:]):
#toutput[0][1:] is the variables list
thisdic[f][p]={}
thisdic[f][p] = toutput[1][j+1][i]
return thisdic
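# --- Illustrative sketch, not part of the original script ---
# A hypothetical (vals, data) pair in the column-wise shape that
# makeintodictionary() indexes: vals lists the column names with 'snname'
# first, and data holds one list of values per column, sn names first.
# The supernova names and numbers below are invented for illustration only.
def example_makeintodictionary():
    toutput = (['snname', 'type', 'z'],
               [['sn1994I', 'sn2007gr'],   # sn names (toutput[1][0])
                ['Ic', 'Ic'],              # values for 'type'
                ['0.0015', '0.0017']])     # values for 'z'
    # -> {'sn1994i': {'type': 'Ic', 'z': '0.0015'},
    #     'sn2007gr': {'type': 'Ic', 'z': '0.0017'}}
    return makeintodictionary(toutput)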
def grepstuff(user,pwd,vals,sn, types='all',showvals=False,po=False):
# Connect to Google
gd_client = gdata.spreadsheet.service.SpreadsheetsService()
gd_client.email = USR#user
gd_client.password = PWD#pwd
gd_client.source = 'payne.org-example-1'
gd_client.ProgrammaticLogin()
#Now that we're connected, we query the spreadsheet by name, and extract the unique spreadsheet and worksheet IDs.
q = gdata.spreadsheet.service.DocumentQuery()
q['title'] = doc_name
q['title-exact'] = 'true'
feed = gd_client.GetSpreadsheetsFeed(query=q)
spreadsheet_id = feed.entry[0].id.text.rsplit('/',1)[1]
feed = gd_client.GetWorksheetsFeed(spreadsheet_id)
worksheet_id = feed.entry[0].id.text.rsplit('/',1)[1]
rows = gd_client.GetListFeed(spreadsheet_id, worksheet_id).entry
#At this point, you have a row iterator which will yield rows for the spreadsheet. This example will print everything out, keyed by column names:
if showvals:
for r in rows[0].custom.iterkeys():
if r.startswith("#"):
continue
else:
print r
return None, None
if vals == 'all':
vals=[]
for r in rows[0].custom.iterkeys():
if r.startswith("#"):
continue
vals.append(r)
else:
vals=vals.split(',')
types = types.split(',')
if not types == ['all']:
vals.append('type')
try:
vals.remove('comment')
except:
# print "no comment column to remove"
pass
data=[]
if 'snname' not in vals:
vals.insert(0,'snname')
else:
a=vals.index('snname')
vals[0],vals[a]=vals[a],vals[0]
    if po and 'lcvquality' not in vals:
vals.insert(1,'lcvquality')
# print "here", vals
if sn:
if ',' in sn:
sn=sn.split(',')
else:
sn=[sn]
for s in sn:
datai=[]
if len(s) == 7:
s=s.upper().replace('SN','sn')
if s not in [i.custom['snname'].text for i in rows if not i.custom['snname'].text.startswith('#')]:
print "cannot find supernova ",s#,". available objects are: ",", ".join([i.custom['snname'].text for i in rows if not i.custom['snname'].text.startswith('#')])
pass
for v in vals:
if po:
try:
datai.append(array([i.custom[v].text for i in rows if not i.custom[v].text == None and not i.custom[v].text.startswith('#') and s in i.custom['snname'].text and not i.custom['lcvquality'].text == '0' ]))
except:
print "cannot find column '"+v+"'. available keywords are: ",", ".join([r for r in rows[0].custom.iterkeys() if not r.startswith('#')])
pass
else:
try:
datai.append(array([i.custom[v].text for i in rows if not i.custom[v].text == None and not i.custom[v].text.startswith('#') and s in i.custom['snname'].text ]))
except:
print "cannot find column '"+v+"'. available keywords are: ",", ".join([r for r in rows[0].custom.iterkeys() if not r.startswith('#')])
pass
if len(datai)>0:
data.append(datai)
else:
if types == ['all']:
datai=[]
for v in vals:
if po:
try:
datai.append(array([i.custom[v].text for i in rows if not i.custom[v].text == None and not i.custom[v].text.startswith('#') and not i.custom['lcvquality'].text == '0' ]))
except:
print "cannot find column '"+v+"'. available keywords are: ",", ".join([r for r in rows[0].custom.iterkeys() if not r.startswith('#')])
pass
else:
try:
datai.append(array([i.custom[v].text for i in rows if not i.custom[v].text == None and not i.custom[v].text.startswith('#')]))
except:
print "cannot find column '"+v+"'. available keywords are: ",", ".join([r for r in rows[0].custom.iterkeys() if not r.startswith('#')])
pass
if len(datai)>0:
data.append(datai)
else:
datai=[]
for v in vals:
if po:
try:
datai.append(array([i.custom[v].text for i in rows if not i.custom[v].text == None and not i.custom[v].text.startswith('#') and not i.custom['lcvquality'].text == '0' and i.custom['type'].text in types]))
except:
print "cannot find column '"+v+"'. available keywords are: ",", ".join([r for r in rows[0].custom.iterkeys() if not r.startswith('#')])
pass
else:
try:
datai.append(array([i.custom[v].text for i in rows if not i.custom[v].text == None and not i.custom[v].text.startswith('#') and i.custom['type'].text in types]))
except:
print "cannot find column '"+v+"'. available keywords are: ",", ".join([r for r in rows[0].custom.iterkeys() if not r.startswith('#')])
pass
if len(datai)>0:
data.append(datai)
# print data
return vals,data
#################################################################
if __name__ == "__main__":
parser = optparse.OptionParser(usage="getfromtable -u username -p password -v <value1>,<value2>,<value3> -s <snname>", conflict_handler="resolve")
parser.add_option('-u','--user', default=None , type="string",
help='google user name')
parser.add_option('-p','--password', default=None , type="string",
help='google password')
parser.add_option('-s','--sn', default=None , type="string",
help='sn name')
parser.add_option('--showvals', default=False , action="store_true",
help="show only")
parser.add_option('--onlyphot', default=False , action="store_true",
help="only phot")
parser.add_option('--stats', default=False , action="store_true",
help='''get mean, std and median for the desired values.''')
parser.add_option('--type', default='all' , type="string",
help="only extracting given type (or types)")
parser.add_option('-v','--values', default='all' , type="string",
help='''comma separated list of values to extract.
acceptable values are:
snname : SN name (its printed either way) |
type: SN type |
ra : RA |
dec : Dec |
hostgal : host galaxy |
vz : redshift velocity |
z : redshift |
zref : redshift |
maxvdate : date of Vmax (julian date) from literature |
maxvjd : JD date of Vmax from literature |
maxvmag : V flux at maxVJD from literature) |
maxref : reference for literature Vmax |
cfavjdpolyfit : Vmax JD derived from CfA data - polynomial fitting |
cfavjdcovarerror : error on CfA data derived Vmad JD from polynomial fit covariance matrix |
dvjdwithcfapolyfit : difference b|w CfA and literature Vmax JD (days) - polynomial fit |
cfavjdbootstrap : Vmax JD from CfA data derived with bootstrap |
cfavjdbootstraperror : error on CfA data derived Vmad JD from bootstrapped polynimial fit |
dvjd : difference b/w CfA and literature Vmax JD (days) - bootstrap |
cfadm15 : CfA dm15 from polynomial fit |
cfadm15linear : dm15 from CfA data from linear interpolation |
''')
options, args = parser.parse_args()
if len(args)>0:
sys.argv.append('--help')
options, args = parser.parse_args()
sys.exit(0)
if options.user == None:
print "no username"
sys.argv.append('--help')
options, args = parser.parse_args()
sys.exit(0)
if options.password == None:
print "no username"
sys.argv.append('--help')
options, args = parser.parse_args()
sys.exit(0)
if options.showvals == True:
print "available values to extract"
vals,alldata=grepstuff(options.user,options.password,options.values, options.sn, showvals=True)
sys.exit()
vals,alldata=grepstuff(options.user,options.password,options.values, options.sn, options.type, options.showvals,options.onlyphot)
if options.stats:
getstats(vals,alldata, options.type)
else:
maxlen=0
for data in alldata:
maxlen=max(maxlen,len(data))
for data in alldata:
l=len(data)
print '#',
for i in vals:
print i,"\t\t",
print ""
for alld in alldata:
for i in range(len(alld[0])):
for data in alld:
try:
print data[i],"\t",
except IndexError:
print "N/A",
print ""
|
|
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gtk
import os.path
from debugger import *
from tab_interface import *
ICON_COLUMN=0
ID_COLUMN=1
FE_ID_COLUMN=2
PID_COLUMN=3
NAME_COLUMN=4
STATUS_COLUMN=5
ARGS_COLUMN=6
BGCOLOR_COLUMN=7
class ProcessTab(gtk.VBox):
def __init__(self,mc):
TabInterface.validate_implementation(self)
gtk.VBox.__init__(self)
self._id = None
self._mc = mc
self._ls = gtk.ListStore(gtk.gdk.Pixbuf,object,str,str,str,str,str,str)
tv = gtk.TreeView(self._ls)
self._tv = tv
pixbufCell = gtk.CellRendererPixbuf()
pixbufCell.set_property("mode", gtk.CELL_RENDERER_MODE_INERT)
pixbufColumn = gtk.TreeViewColumn("", pixbufCell, pixbuf=ICON_COLUMN)
pixbufColumn.unset_flags(gtk.CAN_FOCUS)
tv.append_column(pixbufColumn)
plainCell = gtk.CellRendererText()
tv.append_column(gtk.TreeViewColumn("ID", plainCell, text=FE_ID_COLUMN, background=BGCOLOR_COLUMN))
tv.append_column(gtk.TreeViewColumn("PID", plainCell, text=PID_COLUMN, background=BGCOLOR_COLUMN))
tv.append_column(gtk.TreeViewColumn("Name", plainCell, text=NAME_COLUMN, background=BGCOLOR_COLUMN))
tv.append_column(gtk.TreeViewColumn("Status", plainCell, text=STATUS_COLUMN, background=BGCOLOR_COLUMN))
tv.append_column(gtk.TreeViewColumn("Arguments", plainCell, text=ARGS_COLUMN, background=BGCOLOR_COLUMN))
tvs = tv.get_selection()
tvs.set_mode(gtk.SELECTION_SINGLE)
tv.connect("row-activated", self._on_row_activated)
tv.connect("button-press-event", self._on_button_press)
sw = gtk.ScrolledWindow()
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
sw.add(tv)
self.pack_start(sw,True,True,0)
self.show_all()
popup = gtk.Menu()
popup.show()
self._popup_kill = add_to_menu(popup, "_Kill", self._on_kill)
self._popup = popup
mc.debugger.processes.item_added.add_listener(self._on_process_added)
mc.debugger.processes.item_deleted.add_listener(self._on_process_deleted)
mc.debugger.passive_processes.changed.add_listener(self._on_passive_processes_changed)
mc.debugger.active_frame_changed.add_listener(self._on_active_frame_changed)
@property
def id(self):
return self._id
@id.setter
def id(self,id):
self._id = id
def get_selected(self):
tvs = self._tv.get_selection()
m, s = tvs.get_selected()
if s != None:
return m.get_value(s,ID_COLUMN)
return None
def _on_kill(self,*args):
if self._popup_path:
iter = self._ls.get_iter(self._popup_path)
proc = self._ls.get(iter,ID_COLUMN)[0]
was_running = self._mc.debugger.status == STATUS_RUNNING
if was_running:
status_dlg.status = "Stopping other processes..."
self._mc.debugger.begin_interrupt().wait()
proc.kill()
if was_running:
assert self._mc.debugger.status == STATUS_BREAK
self._mc.debugger.active_thread.begin_resume()
def _on_row_activated(self,tv,path,view_column):
iter = self._ls.get_iter(path)
p = self._ls.get(iter,ID_COLUMN)[0]
if p:
if isinstance(p, DPassiveProcess):
p.attach()
else:
p.make_active()
return True
def _on_button_press(self,tv,evt):
if evt.button == 3:
pathinfo = self._tv.get_path_at_pos(int(evt.x), int(evt.y))
self._tv.grab_focus()
if pathinfo:
path,col,cellx,celly = pathinfo
self._popup_path = path
self._tv.set_cursor(path,col, 0)
self._popup_kill.set_sensitive(True)
else:
self._popup_path = None
self._popup_kill.set_sensitive(False)
self._popup.popup(None, None, None, evt.button, evt.time)
def _on_process_added(self,proc):
proc.target_executable_changed.add_listener(self._on_process_executable_changed)
self._update_liststore()
self._update_colors_and_status()
def _on_process_executable_changed(self,proc):
self._update_liststore()
self._update_colors_and_status()
def _on_process_deleted(self,proc):
proc.target_executable_changed.remove_listener(self._on_process_executable_changed)
self._update_liststore()
self._update_colors_and_status()
def _on_passive_processes_changed(self):
self._update_liststore()
self._update_colors_and_status()
def _on_active_frame_changed(self):
self._update_colors_and_status()
def _update_liststore(self):
self._ls.clear()
for p in self._mc.debugger.processes:
row = self._ls.append()
self._ls.set(row,ICON_COLUMN,None) # todo make new pixmap for stopped vs running thread
self._ls.set(row,ID_COLUMN,p)
if p.backend_info:
self._ls.set(row,PID_COLUMN, str(p.backend_info.pid))
else:
self._ls.set(row,PID_COLUMN, "<no pid>")
bn = os.path.basename(p.target_exe)
self._ls.set(row,NAME_COLUMN, bn)
for p in self._mc.debugger.passive_processes:
row = self._ls.append()
self._ls.set(row,ICON_COLUMN,None) # todo make new pixmap for stopped vs running thread
self._ls.set(row,ID_COLUMN,p)
self._ls.set(row,PID_COLUMN,p.backend_info.pid)
bn = os.path.basename(p.target_exe)
self._ls.set(row,NAME_COLUMN, bn)
def _compute_status(self,proc):
if isinstance(proc, DPassiveProcess):
return "<Availble for debugging...>"
return proc.status
def _update_colors_and_status(self):
show_where = self._mc.debugger.status == STATUS_BREAK
for row in liststore_get_children(self._ls):
p = self._ls.get(row,ID_COLUMN)[0]
            # The original if/else set the same value on both branches; a single call is equivalent.
            self._ls.set(row,FE_ID_COLUMN, p.frontend_id)
if isinstance(p, DPassiveProcess):
self._ls.set(row,BGCOLOR_COLUMN, "#D0D0FF")
elif self._mc.debugger.active_thread in p.threads:
self._ls.set(row,BGCOLOR_COLUMN, self._mc.resources.COLOR_CURRENT_LINE)
else:
self._ls.set(row,BGCOLOR_COLUMN, "white")
pstat = self._compute_status(p)
self._ls.set(row,STATUS_COLUMN, str(pstat))
try:
cmdline = " ".join(p.target_full_cmdline[1:])
except:
cmdline = "<unknown>"
self._ls.set(row,ARGS_COLUMN, cmdline)
|
|
# coding: utf-8
#
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for recommendations_services."""
__author__ = 'Xinyu Wu'
from core.domain import recommendations_services
from core.domain import rights_manager
from core.domain import user_services
from core.platform import models
(recommendations_models, exp_models,) = models.Registry.import_models([
models.NAMES.recommendations, models.NAMES.exploration])
from core.tests import test_utils
import feconf
class TopicSimilarityUnitTests(test_utils.GenericTestBase):
"""Tests of the recommendation services module."""
TOPIC_SIMILARITIES_DEFAULT = (
"""Architecture,Art,Biology,Business,Chemistry,Computing,Economics,Education,Engineering,Environment,Geography,Government,Hobbies,Languages,Law,Life Skills,Mathematics,Medicine,Music,Philosophy,Physics,Programming,Psychology,Puzzles,Reading,Religion,Sport,Statistics,Welcome
1.0,0.9,0.2,0.4,0.1,0.2,0.3,0.3,0.6,0.6,0.4,0.2,0.5,0.5,0.5,0.3,0.5,0.3,0.3,0.5,0.4,0.1,0.6,0.1,0.1,0.1,0.1,0.1,0.3
0.9,1.0,0.1,0.6,0.1,0.1,0.6,0.6,0.2,0.3,0.3,0.2,0.5,0.7,0.6,0.2,0.3,0.2,0.9,0.7,0.3,0.1,0.6,0.1,0.1,0.1,0.1,0.1,0.3
0.2,0.1,1.0,0.2,0.8,0.3,0.2,0.3,0.3,0.7,0.4,0.2,0.2,0.1,0.1,0.9,0.4,0.8,0.1,0.1,0.4,0.1,0.6,0.1,0.1,0.1,0.1,0.6,0.3
0.4,0.6,0.2,1.0,0.1,0.5,0.9,0.6,0.4,0.6,0.2,0.7,0.2,0.5,0.7,0.1,0.3,0.1,0.1,0.6,0.1,0.2,0.3,0.1,0.1,0.1,0.1,0.5,0.3
0.1,0.1,0.8,0.1,1.0,0.2,0.2,0.3,0.2,0.6,0.6,0.1,0.2,0.2,0.2,0.7,0.3,0.7,0.1,0.1,0.2,0.1,0.3,0.1,0.1,0.1,0.1,0.3,0.3
0.2,0.1,0.3,0.5,0.2,1.0,0.6,0.3,0.6,0.1,0.1,0.1,0.2,0.2,0.1,0.3,0.9,0.2,0.2,0.3,0.4,0.95,0.3,0.5,0.1,0.1,0.1,0.6,0.3
0.3,0.6,0.2,0.9,0.2,0.6,1.0,0.3,0.3,0.5,0.4,0.7,0.2,0.4,0.8,0.2,0.6,0.2,0.1,0.3,0.1,0.3,0.6,0.3,0.2,0.1,0.1,0.7,0.3
0.3,0.6,0.3,0.6,0.3,0.3,0.3,1.0,0.3,0.5,0.3,0.5,0.2,0.2,0.6,0.1,0.2,0.1,0.1,0.5,0.1,0.1,0.6,0.1,0.3,0.2,0.2,0.2,0.3
0.6,0.2,0.3,0.4,0.2,0.6,0.3,0.3,1.0,0.4,0.2,0.2,0.2,0.2,0.3,0.1,0.7,0.1,0.1,0.3,0.6,0.6,0.2,0.3,0.1,0.1,0.1,0.5,0.3
0.6,0.3,0.7,0.6,0.6,0.1,0.5,0.5,0.4,1.0,0.8,0.6,0.2,0.2,0.3,0.8,0.2,0.3,0.1,0.2,0.1,0.1,0.3,0.1,0.1,0.1,0.1,0.3,0.3
0.4,0.3,0.4,0.2,0.6,0.1,0.4,0.3,0.2,0.8,1.0,0.2,0.2,0.4,0.3,0.6,0.3,0.3,0.1,0.1,0.1,0.1,0.3,0.1,0.1,0.1,0.1,0.2,0.3
0.2,0.2,0.2,0.7,0.1,0.1,0.7,0.5,0.2,0.6,0.2,1.0,0.2,0.3,0.8,0.1,0.1,0.1,0.1,0.4,0.1,0.1,0.4,0.1,0.5,0.5,0.2,0.4,0.3
0.5,0.5,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,1.0,0.5,0.2,0.2,0.3,0.2,0.4,0.2,0.3,0.5,0.2,0.6,0.5,0.3,0.6,0.2,0.3
0.5,0.7,0.1,0.5,0.2,0.2,0.4,0.2,0.2,0.2,0.4,0.3,0.5,1.0,0.3,0.1,0.1,0.1,0.3,0.4,0.1,0.1,0.3,0.1,0.8,0.4,0.1,0.1,0.3
0.5,0.6,0.1,0.7,0.2,0.1,0.8,0.6,0.3,0.3,0.3,0.8,0.2,0.3,1.0,0.1,0.1,0.1,0.1,0.6,0.1,0.1,0.6,0.1,0.4,0.6,0.1,0.2,0.3
0.3,0.2,0.9,0.1,0.7,0.3,0.2,0.1,0.1,0.8,0.6,0.1,0.2,0.1,0.1,1.0,0.4,0.8,0.1,0.2,0.2,0.2,0.3,0.1,0.2,0.1,0.3,0.4,0.3
0.5,0.3,0.4,0.3,0.3,0.9,0.6,0.2,0.7,0.2,0.3,0.1,0.3,0.1,0.1,0.4,1.0,0.2,0.3,0.4,0.7,0.8,0.2,0.6,0.1,0.1,0.1,0.8,0.3
0.3,0.2,0.8,0.1,0.7,0.2,0.2,0.1,0.1,0.3,0.3,0.1,0.2,0.1,0.1,0.8,0.2,1.0,0.2,0.3,0.1,0.2,0.3,0.1,0.1,0.1,0.1,0.1,0.3
0.3,0.9,0.1,0.1,0.1,0.2,0.1,0.1,0.1,0.1,0.1,0.1,0.4,0.3,0.1,0.1,0.3,0.2,1.0,0.6,0.3,0.2,0.4,0.1,0.3,0.1,0.1,0.1,0.3
0.5,0.7,0.1,0.6,0.1,0.3,0.3,0.5,0.3,0.2,0.1,0.4,0.2,0.4,0.6,0.2,0.4,0.3,0.6,1.0,0.3,0.6,0.4,0.5,0.2,0.1,0.1,0.3,0.3
0.4,0.3,0.4,0.1,0.2,0.4,0.1,0.1,0.6,0.1,0.1,0.1,0.3,0.1,0.1,0.2,0.7,0.1,0.3,0.3,1.0,0.6,0.1,0.5,0.2,0.1,0.3,0.4,0.3
0.1,0.1,0.1,0.2,0.1,0.95,0.3,0.1,0.6,0.1,0.1,0.1,0.5,0.1,0.1,0.2,0.8,0.2,0.2,0.6,0.6,1.0,0.3,0.6,0.1,0.1,0.1,0.6,0.3
0.6,0.6,0.6,0.3,0.3,0.3,0.6,0.6,0.2,0.3,0.3,0.4,0.2,0.3,0.6,0.3,0.2,0.3,0.4,0.4,0.1,0.3,1.0,0.4,0.3,0.3,0.2,0.4,0.3
0.1,0.1,0.1,0.1,0.1,0.5,0.3,0.1,0.3,0.1,0.1,0.1,0.6,0.1,0.1,0.1,0.6,0.1,0.1,0.5,0.5,0.6,0.4,1.0,0.1,0.1,0.1,0.5,0.3
0.1,0.1,0.1,0.1,0.1,0.1,0.2,0.3,0.1,0.1,0.1,0.5,0.5,0.8,0.4,0.2,0.1,0.1,0.3,0.2,0.2,0.1,0.3,0.1,1.0,0.4,0.1,0.1,0.3
0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.2,0.1,0.1,0.1,0.5,0.3,0.4,0.6,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.3,0.1,0.4,1.0,0.2,0.1,0.3
0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.2,0.1,0.1,0.1,0.2,0.6,0.1,0.1,0.3,0.1,0.1,0.1,0.1,0.3,0.1,0.2,0.1,0.1,0.2,1.0,0.3,0.3
0.1,0.1,0.6,0.5,0.3,0.6,0.7,0.2,0.5,0.3,0.2,0.4,0.2,0.1,0.2,0.4,0.8,0.1,0.1,0.3,0.4,0.6,0.4,0.5,0.1,0.1,0.3,1.0,0.3
0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,1.0""")
TOPIC_SIMILARITIES_UPDATED = (
"""Architecture,Art,Biology,Business,Chemistry,Computing,Economics,Education,Engineering,Environment,Geography,Government,Hobbies,Languages,Law,Life Skills,Mathematics,Medicine,Music,Philosophy,Physics,Programming,Psychology,Puzzles,Reading,Religion,Sport,Statistics,Welcome
1.0,0.9,0.2,0.4,0.1,0.2,0.3,0.3,0.6,0.6,0.4,0.2,0.5,0.5,0.5,0.3,0.5,0.3,0.3,0.5,0.4,0.1,0.6,0.1,0.1,0.1,0.1,0.1,0.3
0.9,1.0,0.2,0.6,0.1,0.1,0.6,0.6,0.2,0.3,0.3,0.2,0.5,0.7,0.6,0.2,0.3,0.2,0.9,0.7,0.3,0.1,0.6,0.1,0.1,0.1,0.1,0.1,0.3
0.2,0.2,1.0,0.2,0.8,0.3,0.2,0.3,0.3,0.7,0.4,0.2,0.2,0.1,0.1,0.9,0.4,0.8,0.1,0.1,0.4,0.1,0.6,0.1,0.1,0.1,0.1,0.6,0.3
0.4,0.6,0.2,1.0,0.1,0.5,0.9,0.6,0.4,0.6,0.2,0.7,0.2,0.5,0.7,0.1,0.3,0.1,0.1,0.6,0.1,0.2,0.3,0.1,0.1,0.1,0.1,0.5,0.3
0.1,0.1,0.8,0.1,1.0,0.2,0.2,0.3,0.2,0.6,0.6,0.1,0.2,0.2,0.2,0.7,0.3,0.7,0.1,0.1,0.2,0.1,0.3,0.1,0.1,0.1,0.1,0.3,0.3
0.2,0.1,0.3,0.5,0.2,1.0,0.6,0.3,0.6,0.1,0.1,0.1,0.2,0.2,0.1,0.3,0.9,0.2,0.2,0.3,0.4,0.95,0.3,0.5,0.1,0.1,0.1,0.6,0.3
0.3,0.6,0.2,0.9,0.2,0.6,1.0,0.3,0.3,0.5,0.4,0.7,0.2,0.4,0.8,0.2,0.6,0.2,0.1,0.3,0.1,0.3,0.6,0.3,0.2,0.1,0.1,0.7,0.3
0.3,0.6,0.3,0.6,0.3,0.3,0.3,1.0,0.3,0.5,0.3,0.5,0.2,0.2,0.6,0.1,0.2,0.1,0.1,0.5,0.1,0.1,0.6,0.1,0.3,0.2,0.2,0.2,0.3
0.6,0.2,0.3,0.4,0.2,0.6,0.3,0.3,1.0,0.4,0.2,0.2,0.2,0.2,0.3,0.1,0.7,0.1,0.1,0.3,0.6,0.6,0.2,0.3,0.1,0.1,0.1,0.5,0.3
0.6,0.3,0.7,0.6,0.6,0.1,0.5,0.5,0.4,1.0,0.8,0.6,0.2,0.2,0.3,0.8,0.2,0.3,0.1,0.2,0.1,0.1,0.3,0.1,0.1,0.1,0.1,0.3,0.3
0.4,0.3,0.4,0.2,0.6,0.1,0.4,0.3,0.2,0.8,1.0,0.2,0.2,0.4,0.3,0.6,0.3,0.3,0.1,0.1,0.1,0.1,0.3,0.1,0.1,0.1,0.1,0.2,0.3
0.2,0.2,0.2,0.7,0.1,0.1,0.7,0.5,0.2,0.6,0.2,1.0,0.2,0.3,0.8,0.1,0.1,0.1,0.1,0.4,0.1,0.1,0.4,0.1,0.5,0.5,0.2,0.4,0.3
0.5,0.5,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,1.0,0.5,0.2,0.2,0.3,0.2,0.4,0.2,0.3,0.5,0.2,0.6,0.5,0.3,0.6,0.2,0.3
0.5,0.7,0.1,0.5,0.2,0.2,0.4,0.2,0.2,0.2,0.4,0.3,0.5,1.0,0.3,0.1,0.1,0.1,0.3,0.4,0.1,0.1,0.3,0.1,0.8,0.4,0.1,0.1,0.3
0.5,0.6,0.1,0.7,0.2,0.1,0.8,0.6,0.3,0.3,0.3,0.8,0.2,0.3,1.0,0.1,0.1,0.1,0.1,0.6,0.1,0.1,0.6,0.1,0.4,0.6,0.1,0.2,0.3
0.3,0.2,0.9,0.1,0.7,0.3,0.2,0.1,0.1,0.8,0.6,0.1,0.2,0.1,0.1,1.0,0.4,0.8,0.1,0.2,0.2,0.2,0.3,0.1,0.2,0.1,0.3,0.4,0.3
0.5,0.3,0.4,0.3,0.3,0.9,0.6,0.2,0.7,0.2,0.3,0.1,0.3,0.1,0.1,0.4,1.0,0.2,0.3,0.4,0.7,0.8,0.2,0.6,0.1,0.1,0.1,0.8,0.3
0.3,0.2,0.8,0.1,0.7,0.2,0.2,0.1,0.1,0.3,0.3,0.1,0.2,0.1,0.1,0.8,0.2,1.0,0.2,0.3,0.1,0.2,0.3,0.1,0.1,0.1,0.1,0.1,0.3
0.3,0.9,0.1,0.1,0.1,0.2,0.1,0.1,0.1,0.1,0.1,0.1,0.4,0.3,0.1,0.1,0.3,0.2,1.0,0.6,0.3,0.2,0.4,0.1,0.3,0.1,0.1,0.1,0.3
0.5,0.7,0.1,0.6,0.1,0.3,0.3,0.5,0.3,0.2,0.1,0.4,0.2,0.4,0.6,0.2,0.4,0.3,0.6,1.0,0.3,0.6,0.4,0.5,0.2,0.1,0.1,0.3,0.3
0.4,0.3,0.4,0.1,0.2,0.4,0.1,0.1,0.6,0.1,0.1,0.1,0.3,0.1,0.1,0.2,0.7,0.1,0.3,0.3,1.0,0.6,0.1,0.5,0.2,0.1,0.3,0.4,0.3
0.1,0.1,0.1,0.2,0.1,0.95,0.3,0.1,0.6,0.1,0.1,0.1,0.5,0.1,0.1,0.2,0.8,0.2,0.2,0.6,0.6,1.0,0.3,0.6,0.1,0.1,0.1,0.6,0.3
0.6,0.6,0.6,0.3,0.3,0.3,0.6,0.6,0.2,0.3,0.3,0.4,0.2,0.3,0.6,0.3,0.2,0.3,0.4,0.4,0.1,0.3,1.0,0.4,0.3,0.3,0.2,0.4,0.3
0.1,0.1,0.1,0.1,0.1,0.5,0.3,0.1,0.3,0.1,0.1,0.1,0.6,0.1,0.1,0.1,0.6,0.1,0.1,0.5,0.5,0.6,0.4,1.0,0.1,0.1,0.1,0.5,0.3
0.1,0.1,0.1,0.1,0.1,0.1,0.2,0.3,0.1,0.1,0.1,0.5,0.5,0.8,0.4,0.2,0.1,0.1,0.3,0.2,0.2,0.1,0.3,0.1,1.0,0.4,0.1,0.1,0.3
0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.2,0.1,0.1,0.1,0.5,0.3,0.4,0.6,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.3,0.1,0.4,1.0,0.2,0.1,0.3
0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.2,0.1,0.1,0.1,0.2,0.6,0.1,0.1,0.3,0.1,0.1,0.1,0.1,0.3,0.1,0.2,0.1,0.1,0.2,1.0,0.3,0.3
0.1,0.1,0.6,0.5,0.3,0.6,0.7,0.2,0.5,0.3,0.2,0.4,0.2,0.1,0.2,0.4,0.8,0.1,0.1,0.3,0.4,0.6,0.4,0.5,0.1,0.1,0.3,1.0,0.3
0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,1.0""")
def test_validate_default_similarities(self):
recommendations_services._validate_topic_similarities(
recommendations_services.DEFAULT_TOPIC_SIMILARITIES_STRING)
def test_update_topic_similarities(self):
recommendations_services.update_topic_similarities(
'Art,Biology,Chemistry\n'
'1.0,0.2,0.1\n'
'0.2,1.0,0.8\n'
'0.1,0.8,1.0')
with self.assertRaisesRegexp(
Exception, 'Length of topic similarities columns does not'
' match topic list.'):
recommendations_services.update_topic_similarities(
'Art,Biology,Chemistry\n'
'1.0,0.2,0.1\n'
'0.2,1.0,0.8')
with self.assertRaisesRegexp(
Exception, 'Length of topic similarities rows does not match'
' topic list.'):
recommendations_services.update_topic_similarities(
'Art,Biology,Chemistry\n'
'1.0,0.2,0.1\n'
'0.2,1.0\n'
'0.1,0.8,1.0')
with self.assertRaisesRegexp(
ValueError, 'Expected similarity to be between 0.0 and 1.0,'
' received 800'):
recommendations_services.update_topic_similarities(
'Art,Biology,Chemistry\n'
'1.0,0.2,0.1\n'
'0.2,1.0,800\n'
'0.1,0.8,1.0')
with self.assertRaisesRegexp(
ValueError, 'Expected similarity to be a float, received'
' string'):
recommendations_services.update_topic_similarities(
'Art,Biology,Chemistry\n'
'string,0.2,0.1\n'
'0.2,1.0,0.8\n'
'0.1,0.8,1.0')
with self.assertRaisesRegexp(
Exception, 'Topic Fake Topic not in list of known topics.'):
recommendations_services.update_topic_similarities(
'Fake Topic,Biology,Chemistry\n'
'string,0.2,0.1\n'
'0.2,1.0,0.8\n'
'0.1,0.8,1.0')
with self.assertRaisesRegexp(
Exception, 'Expected topic similarities to be symmetric.'):
recommendations_services.update_topic_similarities(
'Art,Biology,Chemistry\n'
'1.0,0.2,0.1\n'
'0.3,1.0,0.8\n'
'0.8,0.1,1.0')
def test_get_topic_similarity(self):
self.assertEqual(recommendations_services.get_topic_similarity(
'Art', 'Biology'), 0.1)
self.assertEqual(recommendations_services.get_topic_similarity(
'Art', 'Art'), feconf.SAME_TOPIC_SIMILARITY)
self.assertEqual(recommendations_services.get_topic_similarity(
'Topic 1', 'Topic 2'), feconf.DEFAULT_TOPIC_SIMILARITY)
self.assertEqual(recommendations_services.get_topic_similarity(
'Topic', 'Topic'), feconf.SAME_TOPIC_SIMILARITY)
recommendations_services.update_topic_similarities(
'Art,Biology,Chemistry\n'
'1.0,0.2,0.1\n'
'0.2,1.0,0.8\n'
'0.1,0.8,1.0')
self.assertEqual(recommendations_services.get_topic_similarity(
'Art', 'Biology'), 0.2)
def test_get_topic_similarities_as_csv(self):
# The splitlines() is needed because a carriage return is added in
# the returned string
self.assertEqual(
recommendations_services.get_topic_similarities_as_csv().splitlines(),
self.TOPIC_SIMILARITIES_DEFAULT.splitlines())
recommendations_services.update_topic_similarities(
'Art,Biology,Chemistry\n'
'1.0,0.2,0.1\n'
'0.2,1.0,0.8\n'
'0.1,0.8,1.0')
self.assertEqual(
recommendations_services.get_topic_similarities_as_csv().splitlines(),
self.TOPIC_SIMILARITIES_UPDATED.splitlines())
class RecommendationsServicesUnitTests(test_utils.GenericTestBase):
"""Test recommendations services."""
EXP_DATA = {
'exp_id_1': {
'category': 'Art',
},
'exp_id_2': {
'category': 'Biology',
},
'exp_id_3': {
'category': 'Chemistry',
},
'exp_id_4': {
'category': 'Art',
}
}
USER_DATA = {
'alice': {
'email': 'alice@example.com'
},
'bob': {
'email': 'bob@example.com'
},
'charlie': {
'email': 'charlie@example.com'
},
}
def setUp(self):
"""Before each individual test, set up dummy explorations, users
and admin."""
super(RecommendationsServicesUnitTests, self).setUp()
for name, user in self.USER_DATA.iteritems():
user['id'] = self.get_user_id_from_email(
user['email'])
user_services.get_or_create_user(user['id'], user['email'])
self.signup(user['email'], name)
self.USER_DATA[name]['id'] = user['id']
self.EXP_DATA['exp_id_1']['owner_id'] = self.USER_DATA['alice']['id']
self.EXP_DATA['exp_id_2']['owner_id'] = self.USER_DATA['alice']['id']
self.EXP_DATA['exp_id_3']['owner_id'] = self.USER_DATA['bob']['id']
self.EXP_DATA['exp_id_4']['owner_id'] = self.USER_DATA['charlie']['id']
for exp_id, exp in self.EXP_DATA.iteritems():
self.save_new_valid_exploration(
exp_id, exp['owner_id'], category=exp['category'])
rights_manager.publish_exploration(exp['owner_id'], exp_id)
self.ADMIN_ID = self.get_user_id_from_email(self.ADMIN_EMAIL)
user_services.get_or_create_user(
self.ADMIN_ID, self.ADMIN_EMAIL)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_admins([self.ADMIN_EMAIL])
class ExplorationRecommendationsUnitTests(RecommendationsServicesUnitTests):
"""Test recommendations services relating to exploration comparison."""
def test_get_item_similarity(self):
with self.assertRaisesRegexp(
Exception, 'Invalid reference_exp_id fake_exp_id'):
recommendations_services.get_item_similarity(
'fake_exp_id', 'fake_exp_id_2')
self.assertEqual(recommendations_services.get_item_similarity(
'exp_id_1', 'exp_id_2'), 4.5)
self.assertEqual(recommendations_services.get_item_similarity(
'exp_id_4', 'exp_id_4'), 9.0)
rights_manager.publicize_exploration(self.ADMIN_ID, 'exp_id_4')
self.assertEqual(recommendations_services.get_item_similarity(
'exp_id_4', 'exp_id_4'), 10.0)
rights_manager.unpublish_exploration(self.ADMIN_ID, 'exp_id_2')
self.assertEqual(recommendations_services.get_item_similarity(
'exp_id_1', 'exp_id_2'), 0.0)
def test_get_and_set_exploration_recommendations(self):
recommended_exp_ids = ['exp_id_2', 'exp_id_3']
recommendations_services.set_recommendations(
'exp_id_1', recommended_exp_ids)
saved_recommendation_ids = (
recommendations_services.get_exploration_recommendations(
'exp_id_1'))
self.assertEqual(recommended_exp_ids, saved_recommendation_ids)
recommended_exp_ids = ['exp_id_3']
recommendations_services.set_recommendations(
'exp_id_1', recommended_exp_ids)
saved_recommendation_ids = (
recommendations_services.get_exploration_recommendations(
'exp_id_1'))
self.assertEqual(recommended_exp_ids, saved_recommendation_ids)
|
|
import os
from django.contrib.auth.models import Group
from django.test import TransactionTestCase
from django.core.files.uploadedfile import UploadedFile
from hs_core.hydroshare.resource import add_resource_files
from hs_core import hydroshare
from hs_core.testing import MockIRODSTestCaseMixin
from hs_core.views.utils import create_folder
class TestReadmeResourceFile(MockIRODSTestCaseMixin, TransactionTestCase):
def setUp(self):
super(TestReadmeResourceFile, self).setUp()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.user = hydroshare.create_account(
'user1@nowhere.com',
username='user1',
first_name='Creator_FirstName',
last_name='Creator_LastName',
superuser=False,
groups=[self.group]
)
# create readme files
self.readme_txt = "readme.txt"
self.README_TXT = "README.TXT"
self.readme_md = "readme.md"
self.README_MD = "README.MD"
self.some_txt = "some.txt"
self.some_md = "some.md"
self.readme_txt_file = open(self.readme_txt, 'w')
self.readme_txt_file.write("This is a readme text file")
self.readme_txt_file.close()
self.README_TXT_file = open(self.README_TXT, 'w')
self.README_TXT_file.write("This is a readme text file with file name in uppercase")
self.README_TXT_file.close()
self.README_MD_file = open(self.README_MD, 'w')
self.README_MD_file.write("##This is a readme markdown file file name in uppercase")
self.README_MD_file.close()
self.readme_md_file = open(self.readme_md, 'w')
self.readme_md_file.write("##This is a readme markdown file")
self.readme_md_file.close()
self.some_txt_file = open(self.some_txt, 'w')
self.some_txt_file.write("This is NOT a readme text file")
self.some_txt_file.close()
self.some_md_file = open(self.some_md, 'w')
self.some_md_file.write("##This is NOT a readme markdown file")
self.some_md_file.close()
def tearDown(self):
super(TestReadmeResourceFile, self).tearDown()
if self.composite_resource:
self.composite_resource.delete()
self.readme_txt_file.close()
os.remove(self.readme_txt_file.name)
self.readme_md_file.close()
os.remove(self.readme_md_file.name)
self.some_txt_file.close()
os.remove(self.some_txt_file.name)
self.some_md_file.close()
os.remove(self.some_md_file.name)
def test_readme_file_1(self):
"""Test that when we upload a readme.txt file to the root,
this file is considered as the readme file of the resource"""
self._create_composite_resource()
# resource should not have any file at this point
self.assertEqual(self.composite_resource.files.count(), 0)
# resource has no readme file
self.assertEqual(self.composite_resource.readme_file, None)
# add the readme.txt file to the resource at the root level
files_to_add = [self.readme_txt]
self._add_files_to_resource(files_to_add)
# resource should have one file at this point
self.assertEqual(self.composite_resource.files.count(), 1)
# resource has a readme file
self.assertNotEqual(self.composite_resource.readme_file, None)
self.assertNotEqual(self.composite_resource.get_readme_file_content(), None)
def test_readme_file_2(self):
"""Test that when we upload a readme.md file to the root,
this file is considered as the readme file of the resource"""
self._create_composite_resource()
# resource should not have any file at this point
self.assertEqual(self.composite_resource.files.count(), 0)
# resource has no readme file
self.assertEqual(self.composite_resource.readme_file, None)
# add the readme.md file to the resource at the root level
files_to_add = [self.readme_md]
self._add_files_to_resource(files_to_add)
# resource should have one file at this point
self.assertEqual(self.composite_resource.files.count(), 1)
# resource has a readme file
self.assertNotEqual(self.composite_resource.readme_file, None)
self.assertNotEqual(self.composite_resource.get_readme_file_content(), None)
def test_readme_file_3(self):
"""Test that when we upload a readme.txt file and readme.md file to the root,
the readme.md file is considered as the readme file for the resource"""
self._create_composite_resource()
# resource should not have any file at this point
self.assertEqual(self.composite_resource.files.count(), 0)
# resource has no readme file
self.assertEqual(self.composite_resource.readme_file, None)
# add the readme.txt file to the resource at the root level
files_to_add = [self.readme_txt]
self._add_files_to_resource(files_to_add)
# add the readme.md file to the resource at the root level
files_to_add = [self.readme_md]
self._add_files_to_resource(files_to_add)
# resource should have two files at this point
self.assertEqual(self.composite_resource.files.count(), 2)
# resource has a readme file
self.assertNotEqual(self.composite_resource.readme_file, None)
self.assertNotEqual(self.composite_resource.get_readme_file_content(), None)
# check that the readme.md file is the readme file for the resource
self.assertEqual(self.composite_resource.readme_file.file_name, 'readme.md')
def test_readme_file_4(self):
"""Test that when we upload a readme.txt or a readme.md file to a folder,
such a file is NOT considered as the readme file of the resource"""
self._create_composite_resource()
# resource should not have any file at this point
self.assertEqual(self.composite_resource.files.count(), 0)
# create the folder
new_folder_path = os.path.join("data", "contents", "my-new-folder")
create_folder(self.composite_resource.short_id, new_folder_path)
# add the readme.txt file to the resource at the folder 'my-new-folder'
files_to_add = [self.readme_txt]
self._add_files_to_resource(files_to_add, upload_folder=new_folder_path)
# resource should have one file at this point
self.assertEqual(self.composite_resource.files.count(), 1)
# resource has no readme file
self.assertEqual(self.composite_resource.readme_file, None)
# add the readme.md file to the resource at the folder 'my-new-folder'
files_to_add = [self.readme_md]
self._add_files_to_resource(files_to_add, upload_folder=new_folder_path)
# resource should have two files at this point
self.assertEqual(self.composite_resource.files.count(), 2)
# resource has no readme file
self.assertEqual(self.composite_resource.readme_file, None)
def test_readme_file_5(self):
"""Test that when we upload a txt file or md file that does not have file name as
'readme.txt' or 'readme.md' to the root folder,
such a file is NOT considered as the readme file of the resource"""
self._create_composite_resource()
# resource should not have any file at this point
self.assertEqual(self.composite_resource.files.count(), 0)
# resource has no readme file
self.assertEqual(self.composite_resource.readme_file, None)
# add the some.txt file to the resource at the root level
files_to_add = [self.some_txt]
self._add_files_to_resource(files_to_add)
# resource should have one file at this point
self.assertEqual(self.composite_resource.files.count(), 1)
# resource has no readme file
self.assertEqual(self.composite_resource.readme_file, None)
# add the some.md file to the resource at the root level
files_to_add = [self.some_md]
self._add_files_to_resource(files_to_add)
# resource should have two files at this point
self.assertEqual(self.composite_resource.files.count(), 2)
# resource has no readme file
self.assertEqual(self.composite_resource.readme_file, None)
def test_readme_file_6(self):
"""Test that when we upload a README.TXT (file name all in uppercase) to the root,
this file is considered as the readme file of the resource"""
self._create_composite_resource()
# resource should not have any file at this point
self.assertEqual(self.composite_resource.files.count(), 0)
# resource has no readme file
self.assertEqual(self.composite_resource.readme_file, None)
# add the README.TXT file to the resource at the root level
files_to_add = [self.README_TXT]
self._add_files_to_resource(files_to_add)
# resource should have one file at this point
self.assertEqual(self.composite_resource.files.count(), 1)
# resource has a readme file
self.assertNotEqual(self.composite_resource.readme_file, None)
self.assertNotEqual(self.composite_resource.get_readme_file_content(), None)
def test_readme_file_7(self):
"""Test that when we upload a README.MD (file name all in uppercase) to the root,
this file is considered as the readme file of the resource"""
self._create_composite_resource()
# resource should not have any file at this point
self.assertEqual(self.composite_resource.files.count(), 0)
# resource has no readme file
self.assertIsNone(self.composite_resource.readme_file)
# add the README.MD file to the resource at the root level
files_to_add = [self.README_MD]
self._add_files_to_resource(files_to_add)
# resource should have one file at this point
self.assertEqual(self.composite_resource.files.count(), 1)
# resource has a readme file
self.assertIsNotNone(self.composite_resource.readme_file)
self.assertIsNotNone(self.composite_resource.get_readme_file_content())
def test_readme_file_8(self):
"""Test that when a README.md file with file_folder as '' instead of None,
this file is considered as the readme file of the resource"""
self._create_composite_resource()
# resource should not have any file at this point
self.assertEqual(self.composite_resource.files.count(), 0)
# resource has no readme file
self.assertEqual(self.composite_resource.readme_file, None)
# add the README.MD file to the resource at the root level
files_to_add = [self.README_MD]
self._add_files_to_resource(files_to_add)
# resource should have one file at this point
self.assertEqual(self.composite_resource.files.count(), 1)
# Update the readme file_folder to an empty string
file = self.composite_resource.files.first()
file.file_folder = ''
file.save()
# resource has a readme file
self.assertNotEqual(self.composite_resource.readme_file, None)
self.assertNotEqual(self.composite_resource.get_readme_file_content(), None)
def _create_composite_resource(self):
self.composite_resource = hydroshare.create_resource(
resource_type='CompositeResource',
owner=self.user,
title='Test Readme File'
)
def _add_files_to_resource(self, files_to_add, upload_folder=''):
files_to_upload = []
for fl in files_to_add:
file_to_upload = UploadedFile(file=open(fl, 'rb'), name=os.path.basename(fl))
files_to_upload.append(file_to_upload)
added_resource_files = add_resource_files(self.composite_resource.short_id,
*files_to_upload, folder=upload_folder)
return added_resource_files
|
|
#!/usr/bin/env python
import os
import sys
from PyQt4 import QtCore, QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4 import NavigationToolbar2QT as NavigationToolbar
#from matplotlib.backend_bases import NavigationToolbar2
from matplotlib.figure import Figure
from matplotlib.widgets import SpanSelector
#from matplotlib.pyplot import savefig
import numpy as N
class MyMplCanvas(FigureCanvas):
def __init__(self, parent=None, width = 5, height = 5, dpi = 100, sharex = None, sharey = None):
self.fig = Figure(figsize = (width, height), dpi=dpi, facecolor = '#FFFFFF')
self.axDict = {}
self.figInit = False
self.sharey = sharey
self.sharey = sharey
# self.ax1.hold(True)
FigureCanvas.__init__(self, self.fig)
#self.fc = FigureCanvas(self.fig)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
self.setupSub(1)
def setupSub(self, numSubRows, numSubCols = 1, sharex = False, sharey = False):
self.fig.clf()
for m in range(1,numSubRows+1):
for n in range(1,numSubCols+1):
axName = 'ax%s'%m
axLoc = 100*numSubRows+10*n+m
#print axLoc
if sharex:
if m>1:
self.axDict[axName] = self.fig.add_subplot(axLoc, sharex = self.axDict['ax%s'%(m-1)])
else:
self.axDict[axName] = self.fig.add_subplot(axLoc)#, sharex = self.sharex, sharey = self.sharey)
else:
self.axDict[axName] = self.fig.add_subplot(axLoc)#, sharex = self.sharex, sharey = self.sharey)
self.figInit = True
self.fig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9)
self.xtitle=""
self.ytitle=""
#self.PlotTitle = "Plot"
self.grid_status = True
self.xaxis_style = 'linear'
self.yaxis_style = 'linear'
self.format_labels()
def format_labels(self):
if self.figInit:
for ax in self.axDict.itervalues():
ax.title.set_fontsize(10)
ax.set_xlabel(self.xtitle, fontsize = 9)
ax.set_ylabel(self.ytitle, fontsize = 9)
labels_x = ax.get_xticklabels()
labels_y = ax.get_yticklabels()
for xlabel in labels_x:
xlabel.set_fontsize(8)
for ylabel in labels_y:
ylabel.set_fontsize(8)
ylabel.set_color('b')
if ax.get_legend() != None:
texts = ax.get_legend().get_texts()
for text in texts:
text.set_fontsize(8)
else:
print "please initiate the number of subplots. Call *.canvas.setupSub(numofSubs)"
def sizeHint(self):
w, h = self.get_width_height()
return QtCore.QSize(w, h)
def minimumSizeHint(self):
return QtCore.QSize(10, 10)
class MyNavigationToolbar(NavigationToolbar) : # currently not used
def __init__(self , parent , canvas , direction = 'h' ) :
#NavigationToolbar.__init__(self,parent,canvas)
#self.layout = QVBoxLayout( self )
self.canvas = canvas
QtGui.QWidget.__init__( self, parent )
if direction=='h' :
self.layout = QtGui.QHBoxLayout( self )
else :
self.layout = QtGui.QVBoxLayout( self )
self.layout.setMargin( 2 )
self.layout.setSpacing( 0 )
NavigationToolbar2.__init__( self, canvas )
def set_message( self, s ):
pass
#------------------------------------------------------------------------------
class MPL_Widget(QtGui.QWidget):
def __init__(self, parent = None):
QtGui.QWidget.__init__(self, parent)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.canvas = MyMplCanvas()
self.toolbar = NavigationToolbar(self.canvas, self.canvas)
#self.toolbar.hide()
self.vbox = QtGui.QVBoxLayout()
self.vbox.addWidget(self.canvas)
self.vbox.addWidget(self.toolbar)
self.setLayout(self.vbox)
self.ax1 = self.canvas.axDict['ax1']
###############ZOOM CONTROLS ################
self.ZoomChrom = QtGui.QAction("Zoom Chrom", self)
self.ZoomChrom.setShortcut("Ctrl+Z")
self.addAction(self.ZoomChrom)
QtCore.QObject.connect(self.ZoomChrom,QtCore.SIGNAL("triggered()"), self.ZoomToggle)
self.actionAutoScaleChrom = QtGui.QAction("AutoScale", self)#self.MainWindow)
self.actionAutoScaleChrom.setShortcut("Ctrl+A")
self.addAction(self.actionAutoScaleChrom)
QtCore.QObject.connect(self.actionAutoScaleChrom,QtCore.SIGNAL("triggered()"), self.autoscale_plot)
self.span = SpanSelector(self.ax1, self.onselect, 'horizontal', minspan =0.01,
useblit=True, rectprops=dict(alpha=0.5, facecolor='#C6DEFF') )
self.hZoom = False
self.span.visible = False
self.localYMax = 0
self.canvas.mpl_connect('button_press_event', self.onclick)
###########SAVING FIGURE TO CLIPBOARD##########
self.cb = None #will be used for the clipboard
self.tempPath = getHomeDir()
self.tempPath = os.path.join(self.tempPath,'tempMPL.png')
self.mpl2ClipAction = QtGui.QAction("Save to Clipboard", self)
self.mpl2ClipAction.setShortcut("Ctrl+C")
self.addAction(self.mpl2ClipAction)
QtCore.QObject.connect(self.mpl2ClipAction,QtCore.SIGNAL("triggered()"), self.mpl2Clip)
#######SAVING FIGURE DATA############################
# self.saveCSVAction = QtGui.QAction("Save to CSV", self)
# self.saveCSVAction.setShortcut("Ctrl+Alt+S")
# self.addAction(self.saveCSVAction)
# QtCore.QObject.connect(self.saveCSVAction,QtCore.SIGNAL("triggered()"), self.save2CSV)
########### HELPER FUNCTIONS #########################
def ZoomToggle(self):
#self.toolbar.zoom() #this implements the classic zoom
if self.hZoom:
self.hZoom = False
self.span.visible = False
else:
self.hZoom = True
self.span.visible = True
def autoscale_plot(self):
# print "autoscale"
#self.toolbar.home() #implements the classic return to home
self.ax1.autoscale_view(tight = False, scalex=True, scaley=True)
self.canvas.draw()
def onclick(self, event):
#sets the maximum Y level to display after a zoom;
#if not set, the y-axis scales to the largest point in the whole data set,
#not necessarily the local maximum
if event.ydata != None:
self.localYMax = int(event.ydata)
def onselect(self, xmin, xmax):
#print xmin, xmax
if self.hZoom:
self.ax1.set_ylim(ymax = self.localYMax)
self.ax1.set_xlim(xmin, xmax)
def save2CSV(self):
path = self.SFDialog()
if path != None:
try:
lines = self.ax1.get_lines()
data2write = []
for line in lines:
data2write.append(line.get_data()[0])
data2write.append(line.get_data()[1])
print data2write
data2write = N.array(data2write)
data2write.dtype = N.float32
N.savetxt(str(path), N.transpose(data2write), delimiter = ',', fmt='%.4f')
except:
try:
#this is for the case where the data may not be in float format?
N.savetxt(str(path), N.transpose(data2write), delimiter = ',')
except:
print 'Error saving figure data'
errorMsg = "Sorry: %s\n\n:%s\n"%(sys.exc_type, sys.exc_value)
print errorMsg
def SFDialog(self):
fileName = QtGui.QFileDialog.getSaveFileName(self,
"Select File to Save",
"",
"csv Files (*.csv)")
if not fileName.isEmpty():
print fileName
return fileName
else:
return None
def mpl2Clip(self):
try:
self.canvas.fig.savefig(self.tempPath)
tempImg = QtGui.QImage(self.tempPath)
self.cb = QtGui.QApplication.clipboard()
self.cb.setImage(tempImg)
except:
print 'Error copying figure to clipboard'
errorMsg = "Sorry: %s\n\n:%s\n"%(sys.exc_type, sys.exc_value)
print errorMsg
# savefig(fname, dpi=None, facecolor='w', edgecolor='w',
# orientation='portrait', papertype=None, format=None,
# transparent=False):
####USED TO GET THE USER'S HOME DIRECTORY FOR USE OF A TEMP FILE
def valid(path):
if path and os.path.isdir(path):
return True
return False
def env(name):
return os.environ.get( name, '' )
def getHomeDir():
if sys.platform != 'win32':
return os.path.expanduser( '~' )
homeDir = env( 'USERPROFILE' )
if not valid(homeDir):
homeDir = env( 'HOME' )
if not valid(homeDir) :
homeDir = '%s%s' % (env('HOMEDRIVE'),env('HOMEPATH'))
if not valid(homeDir) :
homeDir = env( 'SYSTEMDRIVE' )
if homeDir and (not homeDir.endswith('\\')) :
homeDir += '\\'
if not valid(homeDir) :
homeDir = 'C:\\'
return homeDir
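# --- Illustrative sketch (editor's addition, not part of the original widget) ---
# The helpers above probe Windows environment variables to locate a usable home
# directory. On modern Python, os.path.expanduser('~') resolves the home
# directory on Windows as well, so a minimal cross-platform equivalent (without
# the trailing-backslash normalisation performed above) could look like this:
def getHomeDirSimple():
    """Return the user's home directory on any platform (hedged alternative)."""
    return os.path.expanduser('~')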
def main():
import sys
app = QtGui.QApplication(sys.argv)
w = MPL_Widget()
# w.canvas.setupSub(1)
ax1 = w.canvas.axDict['ax1']
x = N.arange(0, 20)
y = N.sin(x)
y2 = N.cos(x)
ax1.plot(x, y)
ax1.plot(x, y2)
w.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
import datetime
try:
from cdecimal import Decimal
except ImportError: #pragma: no cover
from decimal import Decimal
try:
import unittest2 as unittest
except ImportError:
import unittest
from agate import Table
from agate.column_types import *
from agate.columns import *
from agate.exceptions import *
class TestColumnTypes(unittest.TestCase):
def test_text(self):
self.assertIsInstance(TextType()._create_column(None, 1), TextColumn)
def test_text_cast(self):
values = ('a', 1, None, Decimal('2.7'), 'n/a')
casted = tuple(TextType().cast(v) for v in values)
self.assertSequenceEqual(casted, ('a', '1', None, '2.7', None))
def test_boolean(self):
self.assertIsInstance(BooleanType()._create_column(None, 1), BooleanColumn)
def test_boolean_cast(self):
values = (True, 'yes', None, False, 'no', 'n/a')
casted = tuple(BooleanType().cast(v) for v in values)
self.assertSequenceEqual(casted, (True, True, None, False, False, None))
def test_boolean_cast_custom_strings(self):
values = ('a', 'b', 'c', 'd', 'e', 'f')
boolean_type = BooleanType(
true_values=('a', 'b'),
false_values=('d', 'e'),
null_values=('c', 'f')
)
casted = tuple(boolean_type.cast(v) for v in values)
self.assertSequenceEqual(casted, (True, True, None, False, False, None))
def test_number(self):
self.assertIsInstance(NumberType()._create_column(None, 1), NumberColumn)
def test_number_cast(self):
values = (2, 1, None, Decimal('2.7'), 'n/a')
casted = tuple(NumberType().cast(v) for v in values)
self.assertSequenceEqual(casted, (Decimal('2'), Decimal('1'), None, Decimal('2.7'), None))
def test_number_cast_text(self):
with self.assertRaises(CastError):
NumberType().cast('a')
def test_number_cast_float(self):
with self.assertRaises(CastError):
NumberType().cast(1.1)
def test_date(self):
self.assertIsInstance(DateType()._create_column(None, 1), DateColumn)
def test_date_cast_format(self):
date_type = DateType(date_format='%m-%d-%Y')
values = ('03-01-1994', '02-17-1011', None, '01-05-1984', 'n/a')
casted = tuple(date_type.cast(v) for v in values)
self.assertSequenceEqual(casted, (
datetime.date(1994, 3, 1),
datetime.date(1011, 2, 17),
None,
datetime.date(1984, 1, 5),
None
))
def test_date_cast_parser(self):
values = ('3-1-1994', '2/17/1011', None, 'January 5th, 1984', 'n/a')
casted = tuple(DateType().cast(v) for v in values)
self.assertSequenceEqual(casted, (
datetime.date(1994, 3, 1),
datetime.date(1011, 2, 17),
None,
datetime.date(1984, 1, 5),
None
))
def test_datetime(self):
self.assertIsInstance(DateTimeType()._create_column(None, 1), DateTimeColumn)
def test_datetime_cast_format(self):
datetime_type = DateTimeType(datetime_format='%m-%d-%Y %I:%M %p')
values = ('03-01-1994 12:30 PM', '02-17-1011 06:30 AM', None, '01-05-1984 06:30 PM', 'n/a')
casted = tuple(datetime_type.cast(v) for v in values)
self.assertSequenceEqual(casted, (
datetime.datetime(1994, 3, 1, 12, 30, 0),
datetime.datetime(1011, 2, 17, 6, 30, 0),
None,
datetime.datetime(1984, 1, 5, 18, 30, 0),
None
))
def test_datetime_cast_parser(self):
values = ('3-1-1994 12:30 PM', '2/17/1011 06:30', None, 'January 5th, 1984 22:37', 'n/a')
casted = tuple(DateTimeType().cast(v) for v in values)
self.assertSequenceEqual(casted, (
datetime.datetime(1994, 3, 1, 12, 30, 0),
datetime.datetime(1011, 2, 17, 6, 30, 0),
None,
datetime.datetime(1984, 1, 5, 22, 37, 0),
None
))
def test_timedelta(self):
self.assertIsInstance(TimeDeltaType()._create_column(None, 1), TimeDeltaColumn)
def test_timedelta_cast_parser(self):
values = ('4:10', '1.2m', '172 hours', '5 weeks, 2 days', 'n/a')
casted = tuple(TimeDeltaType().cast(v) for v in values)
self.assertSequenceEqual(casted, (
datetime.timedelta(minutes=4, seconds=10),
datetime.timedelta(minutes=1, seconds=12),
datetime.timedelta(hours=172),
datetime.timedelta(weeks=5, days=2),
None
))
class TestColumns(unittest.TestCase):
def setUp(self):
self.rows = (
(1, 2, 'a'),
(2, 3, 'b'),
(None, 4, 'c')
)
self.number_type = NumberType()
self.text_type = TextType()
self.columns = (
('one', self.number_type),
('two', self.number_type),
('three', self.text_type)
)
self.table = Table(self.rows, self.columns)
def test_stringify(self):
self.assertEqual(str(self.table.columns['one']), "<agate.columns.NumberColumn: (1, 2, None)>")
def test_stringify_long(self):
rows = (
(1, 2, 'a'),
(2, 3, 'b'),
(None, 4, 'c'),
(1, 2, 'a'),
(2, 3, 'b'),
(None, 4, 'c')
)
self.table = Table(rows, self.columns)
self.assertEqual(str(self.table.columns['one']), "<agate.columns.NumberColumn: (1, 2, None, 1, 2, ...)>")
def test_length(self):
self.assertEqual(len(self.table.columns), 3)
def test_get_column_data(self):
self.assertSequenceEqual(self.table.columns['one'].get_data(), (1, 2, None))
def test_get_column(self):
self.assertSequenceEqual(self.table.columns['one'], (1, 2, None))
def test_get_column_cached(self):
c = self.table.columns['one']
c2 = self.table.columns['one']
c3 = self.table.columns['two']
self.assertIs(c, c2)
self.assertIsNot(c2, c3)
def test_get_invalid_column(self):
with self.assertRaises(ColumnDoesNotExistError):
self.table.columns['four']
def test_column_length(self):
self.assertEqual(len(self.table.columns['one']), 3)
def test_get_column_item(self):
self.assertEqual(self.table.columns['one'][1], 2)
def test_column_contains(self):
self.assertEqual(1 in self.table.columns['one'], True)
self.assertEqual(3 in self.table.columns['one'], False)
def test_iterate_columns(self):
it = iter(self.table.columns)
self.assertSequenceEqual(next(it), (1, 2, None))
self.assertSequenceEqual(next(it), (2, 3, 4))
self.assertSequenceEqual(next(it), ('a', 'b', 'c'))
with self.assertRaises(StopIteration):
next(it)
def test_immutable(self):
with self.assertRaises(TypeError):
self.table.columns['one'] = 'foo'
with self.assertRaises(TypeError):
self.table.columns['one'][0] = 100
def test_percentiles(self):
rows = [(n,) for n in range(1, 1001)]
table = Table(rows, (('ints', self.number_type),))
percentiles = table.columns['ints'].percentiles()
self.assertEqual(percentiles[0], Decimal('1'))
self.assertEqual(percentiles[25], Decimal('250.5'))
self.assertEqual(percentiles[50], Decimal('500.5'))
self.assertEqual(percentiles[75], Decimal('750.5'))
self.assertEqual(percentiles[99], Decimal('990.5'))
self.assertEqual(percentiles[100], Decimal('1000'))
def test_percentiles_locate(self):
rows = [(n,) for n in range(1, 1001)]
table = Table(rows, (('ints', self.number_type),))
percentiles = table.columns['ints'].percentiles()
self.assertEqual(percentiles.locate(251), Decimal('25'))
self.assertEqual(percentiles.locate(260), Decimal('25'))
self.assertEqual(percentiles.locate(261), Decimal('26'))
with self.assertRaises(ValueError):
percentiles.locate(0)
with self.assertRaises(ValueError):
percentiles.locate(1012)
def test_quartiles(self):
"""
CDF quartile tests from:
http://www.amstat.org/publications/jse/v14n3/langford.html#Parzen1979
"""
# N = 4
rows = [(n,) for n in [1, 2, 3, 4]]
table = Table(rows, (('ints', self.number_type),))
quartiles = table.columns['ints'].quartiles()
for i, v in enumerate(['1', '1.5', '2.5', '3.5', '4']):
self.assertEqual(quartiles[i], Decimal(v))
# N = 5
rows = [(n,) for n in [1, 2, 3, 4, 5]]
table = Table(rows, (('ints', self.number_type),))
quartiles = table.columns['ints'].quartiles()
for i, v in enumerate(['1', '2', '3', '4', '5']):
self.assertEqual(quartiles[i], Decimal(v))
# N = 6
rows = [(n,) for n in [1, 2, 3, 4, 5, 6]]
table = Table(rows, (('ints', self.number_type),))
quartiles = table.columns['ints'].quartiles()
for i, v in enumerate(['1', '2', '3.5', '5', '6']):
self.assertEqual(quartiles[i], Decimal(v))
# N = 7
rows = [(n,) for n in [1, 2, 3, 4, 5, 6, 7]]
table = Table(rows, (('ints', self.number_type),))
quartiles = table.columns['ints'].quartiles()
for i, v in enumerate(['1', '2', '4', '6', '7']):
self.assertEqual(quartiles[i], Decimal(v))
# N = 8 (doubled)
rows = [(n,) for n in [1, 1, 2, 2, 3, 3, 4, 4]]
table = Table(rows, (('ints', self.number_type),))
quartiles = table.columns['ints'].quartiles()
for i, v in enumerate(['1', '1.5', '2.5', '3.5', '4']):
self.assertEqual(quartiles[i], Decimal(v))
# N = 10 (doubled)
rows = [(n,) for n in [1, 1, 2, 2, 3, 3, 4, 4, 5, 5]]
table = Table(rows, (('ints', self.number_type),))
quartiles = table.columns['ints'].quartiles()
for i, v in enumerate(['1', '2', '3', '4', '5']):
self.assertEqual(quartiles[i], Decimal(v))
# N = 12 (doubled)
rows = [(n,) for n in [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]]
table = Table(rows, (('ints', self.number_type),))
quartiles = table.columns['ints'].quartiles()
for i, v in enumerate(['1', '2', '3.5', '5', '6']):
self.assertEqual(quartiles[i], Decimal(v))
# N = 14 (doubled)
rows = [(n,) for n in [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7]]
table = Table(rows, (('ints', self.number_type),))
quartiles = table.columns['ints'].quartiles()
for i, v in enumerate(['1', '2', '4', '6', '7']):
self.assertEqual(quartiles[i], Decimal(v))
def test_quartiles_locate(self):
"""
CDF quartile tests from:
http://www.amstat.org/publications/jse/v14n3/langford.html#Parzen1979
"""
# N = 10
rows = [(n,) for n in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
table = Table(rows, (('ints', self.number_type),))
quartiles = table.columns['ints'].quartiles()
self.assertEqual(quartiles.locate(2), Decimal('0'))
self.assertEqual(quartiles.locate(4), Decimal('1'))
self.assertEqual(quartiles.locate(6), Decimal('2'))
self.assertEqual(quartiles.locate(8), Decimal('3'))
with self.assertRaises(ValueError):
quartiles.locate(0)
with self.assertRaises(ValueError):
quartiles.locate(11)
def test_percentile_no_data(self):
rows = (())
table = Table(rows, (('ints', self.number_type),))
with self.assertRaises(ValueError):
table.columns['ints'].quartiles()
|
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import threading
import unittest
import logging
import grpc
from tests.unit import test_common
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
_SERIALIZE_REQUEST = lambda bytestring: bytestring * 2
_DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) // 2:]
_SERIALIZE_RESPONSE = lambda bytestring: bytestring * 3
_DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3]
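# The four lambdas above form a deliberately asymmetric "codec" for the tests
# below: a request is serialized by doubling the bytestring and deserialized by
# keeping its second half, while a response is serialized by tripling it and
# deserialized by keeping the first third. Serializing and then deserializing
# therefore returns the original payload, which lets the tests exercise the
# custom (de)serialization hooks without real message classes.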
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
_DEFECTIVE_GENERIC_RPC_HANDLER = '/test/DefectiveGenericRpcHandler'
class _Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._value = None
self._called = False
def __call__(self, value):
with self._condition:
self._value = value
self._called = True
self._condition.notify_all()
def value(self):
with self._condition:
while not self._called:
self._condition.wait()
return self._value
class _Handler(object):
def __init__(self, control):
self._control = control
def handle_unary_unary(self, request, servicer_context):
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((
'testkey',
'testvalue',
),))
return request
def handle_unary_stream(self, request, servicer_context):
for _ in range(test_constants.STREAM_LENGTH):
self._control.control()
yield request
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((
'testkey',
'testvalue',
),))
def handle_stream_unary(self, request_iterator, servicer_context):
if servicer_context is not None:
servicer_context.invocation_metadata()
self._control.control()
response_elements = []
for request in request_iterator:
self._control.control()
response_elements.append(request)
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((
'testkey',
'testvalue',
),))
return b''.join(response_elements)
def handle_stream_stream(self, request_iterator, servicer_context):
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((
'testkey',
'testvalue',
),))
for request in request_iterator:
self._control.control()
yield request
self._control.control()
def defective_generic_rpc_handler(self):
raise test_control.Defect()
class _MethodHandler(grpc.RpcMethodHandler):
def __init__(self, request_streaming, response_streaming,
request_deserializer, response_serializer, unary_unary,
unary_stream, stream_unary, stream_stream):
self.request_streaming = request_streaming
self.response_streaming = response_streaming
self.request_deserializer = request_deserializer
self.response_serializer = response_serializer
self.unary_unary = unary_unary
self.unary_stream = unary_stream
self.stream_unary = stream_unary
self.stream_stream = stream_stream
class _GenericHandler(grpc.GenericRpcHandler):
def __init__(self, handler):
self._handler = handler
def service(self, handler_call_details):
if handler_call_details.method == _UNARY_UNARY:
return _MethodHandler(False, False, None, None,
self._handler.handle_unary_unary, None, None,
None)
elif handler_call_details.method == _UNARY_STREAM:
return _MethodHandler(False, True, _DESERIALIZE_REQUEST,
_SERIALIZE_RESPONSE, None,
self._handler.handle_unary_stream, None, None)
elif handler_call_details.method == _STREAM_UNARY:
return _MethodHandler(True, False, _DESERIALIZE_REQUEST,
_SERIALIZE_RESPONSE, None, None,
self._handler.handle_stream_unary, None)
elif handler_call_details.method == _STREAM_STREAM:
return _MethodHandler(True, True, None, None, None, None, None,
self._handler.handle_stream_stream)
elif handler_call_details.method == _DEFECTIVE_GENERIC_RPC_HANDLER:
return self._handler.defective_generic_rpc_handler()
else:
return None
class FailAfterFewIterationsCounter(object):
def __init__(self, high, bytestring):
self._current = 0
self._high = high
self._bytestring = bytestring
def __iter__(self):
return self
def __next__(self):
if self._current >= self._high:
raise test_control.Defect()
else:
self._current += 1
return self._bytestring
next = __next__
def _unary_unary_multi_callable(channel):
return channel.unary_unary(_UNARY_UNARY)
def _unary_stream_multi_callable(channel):
return channel.unary_stream(_UNARY_STREAM,
request_serializer=_SERIALIZE_REQUEST,
response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_unary_multi_callable(channel):
return channel.stream_unary(_STREAM_UNARY,
request_serializer=_SERIALIZE_REQUEST,
response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_stream_multi_callable(channel):
return channel.stream_stream(_STREAM_STREAM)
def _defective_handler_multi_callable(channel):
return channel.unary_unary(_DEFECTIVE_GENERIC_RPC_HANDLER)
class InvocationDefectsTest(unittest.TestCase):
def setUp(self):
self._control = test_control.PauseFailControl()
self._handler = _Handler(self._control)
self._server = test_common.test_server()
port = self._server.add_insecure_port('[::]:0')
self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
self._server.start()
self._channel = grpc.insecure_channel('localhost:%d' % port)
def tearDown(self):
self._server.stop(0)
self._channel.close()
def testIterableStreamRequestBlockingUnaryResponse(self):
requests = [b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)]
multi_callable = _stream_unary_multi_callable(self._channel)
with self.assertRaises(grpc.RpcError):
response = multi_callable(
requests,
metadata=(('test',
'IterableStreamRequestBlockingUnaryResponse'),))
def testIterableStreamRequestFutureUnaryResponse(self):
requests = [b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)]
multi_callable = _stream_unary_multi_callable(self._channel)
response_future = multi_callable.future(
requests,
metadata=(('test', 'IterableStreamRequestFutureUnaryResponse'),))
with self.assertRaises(grpc.RpcError):
response = response_future.result()
def testIterableStreamRequestStreamResponse(self):
requests = [b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH)]
multi_callable = _stream_stream_multi_callable(self._channel)
response_iterator = multi_callable(
requests,
metadata=(('test', 'IterableStreamRequestStreamResponse'),))
with self.assertRaises(grpc.RpcError):
next(response_iterator)
def testIteratorStreamRequestStreamResponse(self):
requests_iterator = FailAfterFewIterationsCounter(
test_constants.STREAM_LENGTH // 2, b'\x07\x08')
multi_callable = _stream_stream_multi_callable(self._channel)
response_iterator = multi_callable(
requests_iterator,
metadata=(('test', 'IteratorStreamRequestStreamResponse'),))
with self.assertRaises(grpc.RpcError):
for _ in range(test_constants.STREAM_LENGTH // 2 + 1):
next(response_iterator)
def testDefectiveGenericRpcHandlerUnaryResponse(self):
request = b'\x07\x08'
multi_callable = _defective_handler_multi_callable(self._channel)
with self.assertRaises(grpc.RpcError) as exception_context:
response = multi_callable(
request,
metadata=(('test', 'DefectiveGenericRpcHandlerUnary'),))
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
if __name__ == '__main__':
logging.basicConfig()
unittest.main(verbosity=2)
|
|
from __future__ import division
try:
from BytesIO import BytesIO
except ImportError:
from io import BytesIO
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import pandas as pd
from mrjob.job import MRJob
from mrjob.protocol import JSONValueProtocol
from mrjob.step import MRStep
import datetime
import json
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.cross_validation import KFold
from sklearn.cluster import KMeans
import numpy as np
def dt(X):
return datetime.datetime.fromtimestamp(float(X / 1000))
def to_date(X):
return X.day  # datetime.day is an attribute, not a method
class MRJobPopularityRaw(MRJob):
INPUT_PROTOCOL = JSONValueProtocol
OUTPUT_PROTOCOL = JSONValueProtocol
#TODO get this so that it is read in from a file
combinations = {
"time":["time_step_mean","time_step_cv"],
"basic":["surface","number_activated_users","number_activations"],
"community":["inffected_communities_normalised","activation_entorpy","activation_entorpy","usage_dominace","user_usage_dominance"],
"exposure":["user_exposure_mean", "activateion_exposure_mean"],
"all":["time_step_mean","time_step_cv","surface","number_activated_users","number_activations","inffected_communities_normalised","activation_entorpy","activation_entorpy","usage_dominace","user_usage_dominance","user_exposure_mean", "activateion_exposure_mean"],
"time-cluster": ["time_step_mean", "time_step_cv","cluster"],
"basic-cluster": ["surface", "number_activated_users", "number_activations","cluster"],
"community-cluster": ["inffected_communities_normalised", "activation_entorpy", "activation_entorpy", "usage_dominace",
"user_usage_dominance","cluster"],
"exposure-cluster": ["user_exposure_mean", "activateion_exposure_mean","cluster"],
"all-cluster": ["time_step_mean", "time_step_cv", "surface", "number_activated_users", "number_activations",
"inffected_communities_normalised", "activation_entorpy", "activation_entorpy", "usage_dominace",
"user_usage_dominance", "user_exposure_mean", "activateion_exposure_mean","cluster"]
}
# combinations_no_c = {
# "time":["time_step_mean","time_step_cv","early_spread_time"],
# "basic":["surface","number_activated_users","number_activations"],
# "community":["inffected_communities_normalised","activation_entorpy","user_usage_entorpy","usage_dominace","user_usage_dominance"],
# "exposure":["user_exposure_mean", "activateion_exposure_mean"],
# "cascades":["wiener_index_avrage","wiener_index_std","number_of_trees","cascade_edges","cascade_nodes"],
# "distance":["diamiter"],
# "broker":["gatekeeper","liaison","representative","coordinator","consultant"],
# "all":["time_step_mean","time_step_cv","early_spread_time","surface","number_activated_users","number_activations","inffected_communities_normalised","activation_entorpy","user_usage_entorpy","usage_dominace","user_usage_dominance","user_exposure_mean", "activateion_exposure_mean","wiener_index_avrage","wiener_index_std","number_of_trees","cascade_edges","cascade_nodes","diamiter","gatekeeper","liaison","representative","coordinator","consultant"]
# # "all":["time_step_mean","time_step_cv","surface","number_activated_users","number_activations","inffected_communities_normalised","activation_entorpy","activation_entorpy","usage_dominace","user_usage_dominance","user_exposure_mean", "activateion_exposure_mean","wiener_index_avrage","number_of_trees"]
# # "all":["time_step_mean","time_step_cv","surface","number_activated_users","number_activations","inffected_communities_normalised","activation_entorpy","activation_entorpy","usage_dominace","user_usage_dominance","user_exposure_mean", "activateion_exposure_mean"]
# }
combinations_no_c = {
"time": ["time_step_mean", "time_step_cv"],
"basic": ["surface", "number_activated_users", "number_activations"],
"community": ["inffected_communities", "activation_entorpy", "user_usage_entorpy", "usage_dominace",
"user_usage_dominance"],
"exposure": ["user_exposure_mean", "user_exposure_cv",
"activateion_exposure_mean", "activateion_exposure_cv"],
"distance": ["diamiter", "step_distance_mean", "step_distance_cv"],
"topology": ["degree_mean", "degree_cv",
"constraint_mean", "constraint_cv",
"pagerank_mean", "pagerank_cv"]
}
combinations_no_c["all"] = combinations_no_c["time"] + combinations_no_c["basic"] + combinations_no_c["community"] + combinations_no_c["exposure"] + combinations_no_c["distance"] +combinations_no_c["topology"]
target = ["user_target","activation_target"]
def configure_options(self):
super(MRJobPopularityRaw, self).configure_options()
self.add_passthrough_option('--avrage', type='int', default=0, help='...')
self.add_passthrough_option('--cluster', type='int', default=2, help='...')
self.add_passthrough_option('--folds', type='int', default=10, help='...')
self.add_passthrough_option('--day_from', type='int', default=15, help='...')
self.add_passthrough_option('--day_to', type='int', default=45, help='...')
def mapper(self, _, line):
df = pd.read_json(line["raw"])
dfu, df = self.generate_tables(df)
df['time'] = df['time'].apply(dt)
df = df.set_index(pd.DatetimeIndex(df['time']))
df = df.resample('d').mean()
idx = pd.date_range(df.index[0], df.index[0] + datetime.timedelta(days=self.options.day_to))
dfi = df.reindex(idx, fill_value=0, method='ffill').fillna(method='ffill')
dfi["user_pop"] = dfi["number_activated_users"].expanding(min_periods=1).apply(self.apply_pop)
dfi["activation_pop"] = dfi["number_activations"].expanding(min_periods=1).apply(self.apply_pop)
for kt in range(self.options.day_from, self.options.day_to):
dft = dfi[:kt]
dft["user_target"] = dfi["number_activated_users"].values[-1]
dft["activation_target"] = dfi["number_activations"].values[-1]
for k, v in dft.reset_index().iterrows():
if k > 1:
# pop = self.compute_popularity(dft, k)
yield {"observations":k, "target":kt}, {"df": v.to_json(),
"word": line["file"].split("/")[-1],
"period": kt,
"popularity": dfi[:k]["activation_pop"].tolist(),
"user_popularity": dfi[:k]["user_pop"].tolist()}
def compute_popularity(self, df, days, resample_granularity = 'd'):
#TODO could this be changed into an expanding apply
up = []
p = []
dft = df[["number_activations", "number_activated_users"]]
# dft = dft.resample(resample_granularity).max()
idx = pd.date_range(dft.index[0], dft.index[0] + datetime.timedelta(days=days))
dft = dft[:days]
for x in range(1, days+1):
up.append((dft[:x]["number_activated_users"] / dft[:x]["number_activated_users"][-1]).mean())
p.append((dft[:x]["number_activations"] / dft[:x]["number_activations"][-1]).mean())
return up, p
def apply_pop(self, X):
return np.divide(X, X[-1]).mean()
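# Editor's note (illustration only): apply_pop is meant for pandas'
# expanding().apply(); for each prefix of the cumulative count series it
# returns mean(prefix / prefix[-1]), i.e. how front-loaded the growth has been
# so far. A tiny worked example with hypothetical counts:
#   X = np.array([1., 2., 4.])
#   np.divide(X, X[-1]).mean() == mean([0.25, 0.5, 1.0]) == 0.5833...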
def reducer(self, key, values):
df = {}
df_kmean = {}
for v in values:
df[v["word"]] = json.loads(v["df"])
df_kmean[v["word"]] = v["popularity"]
df = pd.DataFrame(df).T
df_kmean = pd.DataFrame(df_kmean).T
# Learn the cluster membership up until this time
x_cols = df_kmean.columns
cluster = KMeans(n_clusters=2)
df_kmean['cluster'] = cluster.fit_predict(df_kmean[x_cols])
#join the cluster membership to the other metrics
df = df.join(df_kmean)
# print df
if len(df) > 1:
for k, v in self.combinations.iteritems():
for t in self.target:
r = self.liniar_regression(df.fillna(0), features=v, target=t)
yield None, {"observation_level": key["observations"], "result_mean": r[0], "result_var": r[1], "combination":k, "target":t, "target-day":key["target"]}
def reducer_kmean(self, key, values):
# TODO compute the popularity K-Means class; this will be a categorical variable in the linear regression
df = {}
df_kmean = {}
df_kmean_user = {}
for v in values:
df[v["word"]] = json.loads(v["df"])
df_kmean[v["word"]] = v["popularity"]
df_kmean_user[v["word"]] = v["user_popularity"]
df = pd.DataFrame(df).T.fillna(0)
df_kmean = pd.DataFrame(df_kmean).T.fillna(0)
df_kmean_user = pd.DataFrame(df_kmean_user).T.fillna(0)
popdict = {"frequency":df_kmean,"user":df_kmean_user}
# Learn the cluster membership up until this time
x_cols = df_kmean.columns
#join the cluster membership to the other metrics
# print df
if len(df) > 1:
#Generate the kfolds
kf = KFold(len(df), n_folds=self.options.folds, shuffle=True)
# which popularity measure to use for the k-means clustering
for popk, popv in popdict.iteritems():
# iterate through the indices to test and train
for train_index, test_index in kf:
# Get the K-means test and train data
train_kmean = popv.ix[train_index, x_cols]
test_kmean = popv.ix[test_index, x_cols]
for cnum in range(2, self.options.cluster+1):
cluster = KMeans(n_clusters=cnum)
train_kmean['cluster'] = cluster.fit_predict(train_kmean)
test_kmean['cluster'] = cluster.predict(test_kmean)
test_kmean.fillna(0)
train_kmean.fillna(0)
for t in self.target:
for k, v in self.combinations_no_c.iteritems():
# Generate the test and train datasets
X_train, X_test = df.ix[train_index, v], df.ix[test_index, v]
Y_train, Y_test = df.ix[train_index, t], df.ix[test_index, t]
for num in set(test_kmean['cluster'].values):
wor_train = train_kmean[(train_kmean["cluster"] == num)]
wor_test = test_kmean[(test_kmean["cluster"] == num)]
lm = LinearRegression(normalize=True)
if len(Y_train[(Y_train.index.isin(wor_train.index.values))]) > 0 and len(X_train[(X_train.index.isin(wor_train.index.values))]) > 0:
lm.fit(X_train[(X_train.index.isin(wor_train.index.values))], Y_train[(Y_train.index.isin(wor_train.index.values))])
r = mean_squared_error(Y_test[(Y_test.index.isin(wor_test.index.values))], lm.predict(X_test[(X_test.index.isin(wor_test.index.values))]))
yield None, {"observation_level": key["observations"], "result": r, "combination":k, "target":t, "target_level": key["target"],"clusters":cnum, "cluster_num":int(num), "popmessure":popk, "conf":lm.coef_.tolist()}
def generate_tables(self, df):
result_user = df.drop_duplicates(subset='number_activated_users', keep='first').set_index(
['number_activated_users'], verify_integrity=True, drop=False).sort_index()
result_user["surface_mean"] = result_user["surface"].expanding(min_periods=1).mean()
result_user["surface_cv"] = result_user["surface"].expanding(min_periods=1).std()
result_user["surface_var"] = result_user["surface"].expanding(min_periods=1).var()
result_user["degree_mean"] = result_user["degree"].expanding(min_periods=1).mean()
result_user["degree_median"] = result_user["degree"].expanding(min_periods=1).median()
result_user["degree_cv"] = result_user["degree"].expanding(min_periods=1).std()
result_user["degree_var"] = result_user["degree"].expanding(min_periods=1).var()
result_user["degree_max"] = result_user["degree"].expanding(min_periods=1).max()
result_user["degree_min"] = result_user["degree"].expanding(min_periods=1).min()
result_user["step_distance_mean"] = result_user["step_distance"].expanding(min_periods=1).mean()
result_user["step_distance_median"] = result_user["step_distance"].expanding(min_periods=1).median()
result_user["step_distance_cv"] = result_user["step_distance"].expanding(min_periods=1).std()
result_user["step_distance_var"] = result_user["step_distance"].expanding(min_periods=1).var()
result_user["step_distance_max"] = result_user["step_distance"].expanding(min_periods=1).max()
result_user["step_distance_min"] = result_user["step_distance"].expanding(min_periods=1).min()
result_user["user_exposure_mean"] = result_user["user_exposure"].expanding(min_periods=1).mean()
result_user["user_exposure_cv"] = result_user["user_exposure"].expanding(min_periods=1).std()
result_user["user_exposure_var"] = result_user["user_exposure"].expanding(min_periods=1).var()
result_user["user_exposure_median"] = result_user["user_exposure"].expanding(min_periods=1).median()
result_user["user_exposure_max"] = result_user["user_exposure"].expanding(min_periods=1).max()
result_user["user_exposure_min"] = result_user["user_exposure"].expanding(min_periods=1).min()
result_user["activateion_exposure_mean"] = result_user["activateion_exposure"].expanding(
min_periods=1).mean()
result_user["activateion_exposure_cv"] = result_user["activateion_exposure"].expanding(
min_periods=1).std()
result_user["activateion_exposure_var"] = result_user["activateion_exposure"].expanding(
min_periods=1).var()
result_user["activateion_exposure_median"] = result_user["activateion_exposure"].expanding(
min_periods=1).median()
result_user["activateion_exposure_max"] = result_user["activateion_exposure"].expanding(
min_periods=1).max()
result_user["activateion_exposure_min"] = result_user["activateion_exposure"].expanding(
min_periods=1).min()
result_user["pagerank_mean"] = result_user["pagerank"].expanding(min_periods=1).mean()
result_user["pagerank_cv"] = result_user["pagerank"].expanding(min_periods=1).std()
result_user["pagerank_var"] = result_user["pagerank"].expanding(min_periods=1).var()
result_user["pagerank_median"] = result_user["pagerank"].expanding(min_periods=1).median()
result_user["pagerank_max"] = result_user["pagerank"].expanding(min_periods=1).max()
result_user["pagerank_min"] = result_user["pagerank"].expanding(min_periods=1).min()
result_user["constraint_mean"] = result_user["constraint"].expanding(min_periods=1).mean()
result_user["constraint_cv"] = result_user["constraint"].expanding(min_periods=1).std()
result_user["constraint_var"] = result_user["constraint"].expanding(min_periods=1).var()
result_user["constraint_median"] = result_user["constraint"].expanding(min_periods=1).median()
result_user["constraint_max"] = result_user["constraint"].expanding(min_periods=1).max()
result_user["constraint_min"] = result_user["constraint"].expanding(min_periods=1).min()
result_user["time_step"] = result_user["time"].diff()
result_user["time_step_mean"] = (result_user["time_step"]).expanding(
min_periods=1).mean()
result_user["time_step_cv"] = (result_user["time_step"]).expanding(
min_periods=1).std()
result_user["time_step_median"] = (result_user["time_step"]).expanding(
min_periods=1).median()
result_user["time_step_min"] = (result_user["time_step"]).expanding(
min_periods=1).min()
result_user["time_step_max"] = (result_user["time_step"]).expanding(
min_periods=1).max()
result_user["time_step_var"] = (result_user["time_step"]).expanding(
min_periods=1).var()
#index on the number of activations
result_act = df.drop_duplicates(subset='number_activations', keep='first').set_index(
['number_activations'], verify_integrity=True, drop=False).sort_index()
#Surface setup
result_act["surface_mean"] = result_act["surface"].expanding(min_periods=1).mean()
result_act["surface_cv"] = result_act["surface"].expanding(min_periods=1).std()
result_act["surface_var"] = result_act["surface"].expanding(min_periods=1).var()
#Degree setup
result_act["degree_mean"] = result_act["degree"].expanding(min_periods=1).mean()
result_act["degree_median"] = result_act["degree"].expanding(min_periods=1).median()
result_act["degree_cv"] = result_act["degree"].expanding(min_periods=1).std()
result_act["degree_var"] = result_act["degree"].expanding(min_periods=1).var()
result_act["degree_max"] = result_act["degree"].expanding(min_periods=1).max()
result_act["degree_min"] = result_act["degree"].expanding(min_periods=1).min()
result_act["step_distance_mean"] = result_act["step_distance"].expanding(min_periods=1).mean()
result_act["step_distance_median"] = result_act["step_distance"].expanding(min_periods=1).median()
result_act["step_distance_cv"] = result_act["step_distance"].expanding(min_periods=1).std()
result_act["step_distance_var"] = result_act["step_distance"].expanding(min_periods=1).var()
result_act["step_distance_max"] = result_act["step_distance"].expanding(min_periods=1).max()
result_act["step_distance_min"] = result_act["step_distance"].expanding(min_periods=1).min()
#Activation exposure setup
result_act["activateion_exposure_mean"] = result_act["activateion_exposure"].expanding(
min_periods=1).mean()
result_act["activateion_exposure_cv"] = result_act["activateion_exposure"].expanding(
min_periods=1).std()
result_act["activateion_exposure_var"] = result_act["activateion_exposure"].expanding(
min_periods=1).var()
result_act["activateion_exposure_median"] = result_act["activateion_exposure"].expanding(
min_periods=1).median()
result_act["activateion_exposure_max"] = result_act["activateion_exposure"].expanding(
min_periods=1).max()
result_act["activateion_exposure_min"] = result_act["activateion_exposure"].expanding(
min_periods=1).min()
#User exposure setup
result_act["user_exposure_mean"] = result_act["user_exposure"].expanding(min_periods=1).mean()
result_act["user_exposure_cv"] = result_act["user_exposure"].expanding(min_periods=1).std()
result_act["user_exposure_var"] = result_act["user_exposure"].expanding(min_periods=1).var()
result_act["user_exposure_median"] = result_act["user_exposure"].expanding(min_periods=1).median()
result_act["user_exposure_max"] = result_act["user_exposure"].expanding(min_periods=1).max()
result_act["user_exposure_min"] = result_act["user_exposure"].expanding(min_periods=1).min()
#Pagerank setup
result_act["pagerank_mean"] = result_act["pagerank"].expanding(min_periods=1).mean()
result_act["pagerank_cv"] = result_act["pagerank"].expanding(min_periods=1).std()
result_act["pagerank_var"] = result_act["pagerank"].expanding(min_periods=1).var()
result_act["pagerank_median"] = result_act["pagerank"].expanding(min_periods=1).median()
result_act["pagerank_max"] = result_act["pagerank"].expanding(min_periods=1).max()
result_act["pagerank_min"] = result_act["pagerank"].expanding(min_periods=1).min()
#constraint setup
result_act["constraint_mean"] = result_act["constraint"].expanding(min_periods=1).mean()
result_act["constraint_cv"] = result_act["constraint"].expanding(min_periods=1).std()
result_act["constraint_var"] = result_act["constraint"].expanding(min_periods=1).var()
result_act["constraint_median"] = result_act["constraint"].expanding(min_periods=1).median()
result_act["constraint_max"] = result_act["constraint"].expanding(min_periods=1).max()
result_act["constraint_min"] = result_act["constraint"].expanding(min_periods=1).min()
#Time step setup
result_act["time_step"] = result_act["time"].diff()
result_act["time_step_mean"] = (result_act["time_step"]).expanding(
min_periods=1).mean()
result_act["time_step_cv"] = (result_act["time_step"]).expanding(
min_periods=1).std()
result_act["time_step_median"] = (result_act["time_step"]).expanding(
min_periods=1).median()
result_act["time_step_min"] = (result_act["time_step"]).expanding(
min_periods=1).min()
result_act["time_step_max"] = (result_act["time_step"]).expanding(
min_periods=1).max()
result_act["time_step_var"] = (result_act["time_step"]).expanding(
min_periods=1).var()
return result_act, result_user
def liniar_regression(self, df, features = [], target = "" , nfolds = 15, scoring="mean_squared_error"):
kf = KFold(len(df), n_folds=nfolds, shuffle=True)
lm = LinearRegression(normalize=True)
scores = sklearn.cross_validation.cross_val_score(lm, df[features], df[target], scoring=scoring, cv=kf )
return scores.mean(), scores.var()
def steps(self):
return [MRStep(
mapper=self.mapper,
reducer=self.reducer_kmean
)]
if __name__ == '__main__':
MRJobPopularityRaw.run()
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fuzzing strategy selection cron job.
Runs multi-armed bandit experiments for fuzzing strategy selection.
In particular, this is a Boltzman Exploration (softmax) implementation
of multi-armed bandit experiments. Queries from bigquery to update
multi-armed bandit probability values based on the new edges for various
combined strategies. In the upload_bandit_weights function, we can change
metric to be for edges, crash, features, or units. Currently based on new
edges."""
from collections import namedtuple
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.fuzzing import strategy
from clusterfuzz._internal.google_cloud_utils import big_query
from clusterfuzz._internal.metrics import logs
from handlers import base_handler
from libs import handler
# After experimentation with high, low, and medium temperature parameters, we
# decided on .15.
TEMPERATURE_PARAMETER = .15
# Maintain a list of strategies to include in query for each fuzzing engine.
# Keep this strategy order for strategy combination tracking as strategy
# combinations are tracked as strings.
libfuzzer_query_strategy_list = [
strategy_tuple for strategy_tuple in strategy.LIBFUZZER_STRATEGY_LIST
if not strategy_tuple.manually_enable
]
afl_query_strategy_list = [
strategy_tuple for strategy_tuple in strategy.AFL_STRATEGY_LIST
if not strategy_tuple.manually_enable
]
# A tuple of engine name and corresponding strategies to include in multi-armed
# bandit query.
Engine = namedtuple('Engine', 'name query_strategy_list performance_metric')
LIBFUZZER_ENGINE = Engine(
name='libFuzzer',
query_strategy_list=libfuzzer_query_strategy_list,
performance_metric='new_edges')
AFL_ENGINE = Engine(
name='afl',
query_strategy_list=afl_query_strategy_list,
performance_metric='new_units_generated')
ENGINE_LIST = [LIBFUZZER_ENGINE, AFL_ENGINE]
# BigQuery query for calculating multi-armed bandit probabilities for
# various strategies using a Boltzmann Exploration (softmax) model.
# Averages standardized new_edges feature over each strategy for expected
# new_edges metric for each strategy.
# See https://www.cs.mcgill.ca/~vkules/bandits.pdf for formula.
# TODO(mukundv): Change query once we decide on a temperature parameter and
# final implementation.
BANDIT_PROBABILITY_QUERY_FORMAT = """
(SELECT
/* Calculate bandit weights from calculated exponential values. */
strategy,
strategy_exp / exp_sum AS bandit_weight
FROM (
SELECT
EXP(strategy_avg_{performance_metric} / temperature) AS strategy_exp,
SUM(EXP(strategy_avg_{performance_metric} / temperature)) OVER() AS exp_sum,
strategy
FROM (
SELECT
/* Standardize the new edges data and take averages per strategy. */
AVG(({performance_metric} - overall_avg_{performance_metric}) / overall_stddev_{performance_metric}) AS strategy_avg_{performance_metric},
strategy,
/* Change temperature parameter here. */
{temperature_value} AS temperature
FROM (
SELECT
fuzzer,
CONCAT({strategies}) AS strategy,
fuzzer_stddev,
AVG({performance_metric}) OVER() AS overall_avg_{performance_metric},
STDDEV({performance_metric}) OVER() AS overall_stddev_{performance_metric},
{performance_metric},
strategy_selection_method
FROM (
SELECT
fuzzer,
{strategies_subquery}
STDDEV({performance_metric}) OVER(PARTITION BY fuzzer) AS fuzzer_stddev,
{performance_metric},
strategy_selection_method
FROM
{engine}_stats.TestcaseRun
WHERE
/* Query results from the past 5 days. Change as needed. */
DATE_DIFF(CAST(CURRENT_TIMESTAMP() AS DATE), CAST(_PARTITIONTIME AS DATE), DAY) < 6 )
WHERE
/* Filter for unstable targets. */
fuzzer_stddev < 50)
GROUP BY
strategy)))
"""
STRATEGY_SUBQUERY_FORMAT = """
IF
(strategy_{strategy_name} > 0,
"{strategy_name},",
"") AS strategy_{strategy_name},
"""
def _query_multi_armed_bandit_probabilities(engine):
"""Get query results.
Queries above BANDIT_PROBABILITY_QUERY and yields results
from bigquery. This query is sorted by strategies implemented."""
strategy_names_list = [
strategy_entry.name for strategy_entry in engine.query_strategy_list
]
strategies_subquery = '\n'.join([
STRATEGY_SUBQUERY_FORMAT.format(strategy_name=strategy_name)
for strategy_name in strategy_names_list
])
client = big_query.Client()
strategies = ','.join(
['strategy_' + strategy_name for strategy_name in strategy_names_list])
formatted_query = BANDIT_PROBABILITY_QUERY_FORMAT.format(
performance_metric=engine.performance_metric,
temperature_value=TEMPERATURE_PARAMETER,
strategies=strategies,
strategies_subquery=strategies_subquery,
engine=engine.name)
return client.query(query=formatted_query).rows
def _store_probabilities_in_bigquery(engine, data):
"""Update a bigquery table containing the daily updated
probability distribution over strategies."""
bigquery_data = []
# TODO(mukundv): Update once we choose a temperature parameter for final
# implementation.
for row in data:
bigquery_row = {
'strategy_name': row['strategy'],
'probability': row['bandit_weight'],
'engine': engine.name
}
bigquery_data.append(big_query.Insert(row=bigquery_row, insert_id=None))
if bigquery_data:
client = big_query.Client(
dataset_id='main', table_id='fuzz_strategy_probability')
client.insert(bigquery_data)
else:
logs.log('No fuzz strategy distribution data was found to upload to '
'BigQuery.')
def _query_and_upload_strategy_probabilities(engine):
"""Uploads queried data into datastore.
Calls query functions and uploads query results
to datastore to use as new probabilities. Probabilities
are based on new_edges feature."""
strategy_data = []
data = _query_multi_armed_bandit_probabilities(engine)
logs.log('Queried distribution for {}.'.format(engine.name))
# TODO(mukundv): Update once we choose a temperature parameter for final
# implementation.
for row in data:
curr_strategy = data_types.FuzzStrategyProbability()
curr_strategy.strategy_name = str(row['strategy'])
curr_strategy.probability = float(row['bandit_weight'])
curr_strategy.engine = engine.name
strategy_data.append(curr_strategy)
query = data_types.FuzzStrategyProbability.query(
data_types.FuzzStrategyProbability.engine == engine.name)
ndb_utils.delete_multi(
[entity.key for entity in ndb_utils.get_all_from_query(query)])
ndb_utils.put_multi(strategy_data)
logs.log('Uploaded queried distribution to ndb for {}'.format(engine.name))
_store_probabilities_in_bigquery(engine, data)
logs.log('Uploaded queried distribution to BigQuery for {}'.format(
engine.name))
class Handler(base_handler.Handler):
"""Cron job handler for fuzz strategy selection.
Handler to periodically update fuzz strategy bandit probabilities
based on a performance metric (currently based on new_edges)."""
@handler.cron()
def get(self):
"""Process all fuzz targets and update FuzzStrategy weights."""
for engine in ENGINE_LIST:
_query_and_upload_strategy_probabilities(engine)
|
|
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_array_equal, assert_equal, assert_raises
def test_packbits():
# Copied from the docstring.
a = [[[1, 0, 1], [0, 1, 0]],
[[1, 1, 0], [0, 0, 1]]]
for dt in '?bBhHiIlLqQ':
arr = np.array(a, dtype=dt)
b = np.packbits(arr, axis=-1)
assert_equal(b.dtype, np.uint8)
assert_array_equal(b, np.array([[[160], [64]], [[192], [32]]]))
assert_raises(TypeError, np.packbits, np.array(a, dtype=float))
def test_packbits_empty():
shapes = [
(0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0),
(0, 0, 20), (0, 0, 0),
]
for dt in '?bBhHiIlLqQ':
for shape in shapes:
a = np.empty(shape, dtype=dt)
b = np.packbits(a)
assert_equal(b.dtype, np.uint8)
assert_equal(b.shape, (0,))
def test_packbits_empty_with_axis():
# Original shapes and lists of packed shapes for different axes.
shapes = [
((0,), [(0,)]),
((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]),
((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]),
((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]),
((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]),
((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]),
((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]),
((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]),
]
for dt in '?bBhHiIlLqQ':
for in_shape, out_shapes in shapes:
for ax, out_shape in enumerate(out_shapes):
a = np.empty(in_shape, dtype=dt)
b = np.packbits(a, axis=ax)
assert_equal(b.dtype, np.uint8)
assert_equal(b.shape, out_shape)
def test_packbits_large():
# test data large enough for 16 byte vectorization
a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0,
0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1,
1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0,
1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1,
1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1,
1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1,
1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1,
0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1,
1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0,
1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1,
1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0,
0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1,
1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0,
1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0,
1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0])
a = a.repeat(3)
for dtype in '?bBhHiIlLqQ':
arr = np.array(a, dtype=dtype)
b = np.packbits(arr, axis=None)
assert_equal(b.dtype, np.uint8)
r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252,
113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255,
227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227, 128, 63,
224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56, 224, 112,
63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255, 199, 0, 1,
255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15,
199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227,
129, 248, 227, 129, 199, 31, 128]
assert_array_equal(b, r)
# equal for size being multiple of 8
assert_array_equal(np.unpackbits(b)[:-4], a)
# check last byte of different remainders (16 byte vectorization)
b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)]
assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0, 199,
198, 196, 192])
arr = arr.reshape(36, 25)
b = np.packbits(arr, axis=0)
assert_equal(b.dtype, np.uint8)
assert_array_equal(b, [[190, 186, 178, 178, 150, 215, 87, 83, 83, 195,
199, 206, 204, 204, 140, 140, 136, 136, 8, 40, 105,
107, 75, 74, 88],
[72, 216, 248, 241, 227, 195, 202, 90, 90, 83,
83, 119, 127, 109, 73, 64, 208, 244, 189, 45,
41, 104, 122, 90, 18],
[113, 120, 248, 216, 152, 24, 60, 52, 182, 150,
150, 150, 146, 210, 210, 246, 255, 255, 223,
151, 21, 17, 17, 131, 163],
[214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92,
92, 78, 110, 39, 181, 149, 220, 222, 218, 218,
202, 234, 170, 168],
[0, 128, 128, 192, 80, 112, 48, 160, 160, 224,
240, 208, 144, 128, 160, 224, 240, 208, 144,
144, 176, 240, 224, 192, 128]])
b = np.packbits(arr, axis=1)
assert_equal(b.dtype, np.uint8)
assert_array_equal(b, [[252, 127, 192, 0],
[ 7, 252, 15, 128],
[240, 0, 28, 0],
[255, 128, 0, 128],
[192, 31, 255, 128],
[142, 63, 0, 0],
[255, 240, 7, 0],
[ 7, 224, 14, 0],
[126, 0, 224, 0],
[255, 255, 199, 0],
[ 56, 28, 126, 0],
[113, 248, 227, 128],
[227, 142, 63, 0],
[ 0, 28, 112, 0],
[ 15, 248, 3, 128],
[ 28, 126, 56, 0],
[ 56, 255, 241, 128],
[240, 7, 224, 0],
[227, 129, 192, 128],
[255, 255, 254, 0],
[126, 0, 224, 0],
[ 3, 241, 248, 0],
[ 0, 255, 241, 128],
[128, 0, 255, 128],
[224, 1, 255, 128],
[248, 252, 126, 0],
[ 0, 7, 3, 128],
[224, 113, 248, 0],
[ 0, 252, 127, 128],
[142, 63, 224, 0],
[224, 14, 63, 0],
[ 7, 3, 128, 0],
[113, 255, 255, 128],
[ 28, 113, 199, 0],
[ 7, 227, 142, 0],
[ 14, 56, 252, 0]])
arr = arr.T.copy()
b = np.packbits(arr, axis=0)
assert_equal(b.dtype, np.uint8)
assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255,
56, 113, 227, 0, 15, 28, 56, 240, 227, 255,
126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224,
7, 113, 28, 7, 14],
[127, 252, 0, 128, 31, 63, 240, 224, 0, 255,
28, 248, 142, 28, 248, 126, 255, 7, 129, 255,
0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14,
3, 255, 113, 227, 56],
[192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126,
227, 63, 112, 3, 56, 241, 224, 192, 254, 224,
248, 241, 255, 255, 126, 3, 248, 127, 224, 63,
128, 255, 199, 142, 252],
[0, 128, 0, 128, 128, 0, 0, 0, 0, 0, 0, 128, 0,
0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128,
128, 0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]])
b = np.packbits(arr, axis=1)
assert_equal(b.dtype, np.uint8)
assert_array_equal(b, [[190, 72, 113, 214, 0],
[186, 216, 120, 210, 128],
[178, 248, 248, 210, 128],
[178, 241, 216, 64, 192],
[150, 227, 152, 68, 80],
[215, 195, 24, 5, 112],
[ 87, 202, 60, 5, 48],
[ 83, 90, 52, 1, 160],
[ 83, 90, 182, 72, 160],
[195, 83, 150, 88, 224],
[199, 83, 150, 92, 240],
[206, 119, 150, 92, 208],
[204, 127, 146, 78, 144],
[204, 109, 210, 110, 128],
[140, 73, 210, 39, 160],
[140, 64, 246, 181, 224],
[136, 208, 255, 149, 240],
[136, 244, 255, 220, 208],
[ 8, 189, 223, 222, 144],
[ 40, 45, 151, 218, 144],
[105, 41, 21, 218, 176],
[107, 104, 17, 202, 240],
[ 75, 122, 17, 234, 224],
[ 74, 90, 131, 170, 192],
[ 88, 18, 163, 168, 128]])
# result is the same if input is multiplied with a nonzero value
for dtype in 'bBhHiIlLqQ':
arr = np.array(a, dtype=dtype)
rnd = np.random.randint(low=np.iinfo(dtype).min,
high=np.iinfo(dtype).max, size=arr.size,
dtype=dtype)
rnd[rnd == 0] = 1
arr *= rnd.astype(dtype)
b = np.packbits(arr, axis=-1)
assert_array_equal(np.unpackbits(b)[:-4], a)
assert_raises(TypeError, np.packbits, np.array(a, dtype=float))
def test_packbits_very_large():
# test with some larger arrays, see gh-8637
# the code path is covered earlier, but a larger array makes a crash on the bug more likely
for s in range(950, 1050):
for dt in '?bBhHiIlLqQ':
x = np.ones((200, s), dtype=bool)
np.packbits(x, axis=1)
def test_unpackbits():
# Copied from the docstring.
a = np.array([[2], [7], [23]], dtype=np.uint8)
b = np.unpackbits(a, axis=1)
assert_equal(b.dtype, np.uint8)
assert_array_equal(b, np.array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]]))
def test_unpackbits_empty():
a = np.empty((0,), dtype=np.uint8)
b = np.unpackbits(a)
assert_equal(b.dtype, np.uint8)
assert_array_equal(b, np.empty((0,)))
def test_unpackbits_empty_with_axis():
# Lists of packed shapes for different axes and unpacked shapes.
shapes = [
([(0,)], (0,)),
([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)),
([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)),
([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)),
([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)),
([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)),
([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)),
([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)),
]
for in_shapes, out_shape in shapes:
for ax, in_shape in enumerate(in_shapes):
a = np.empty(in_shape, dtype=np.uint8)
b = np.unpackbits(a, axis=ax)
assert_equal(b.dtype, np.uint8)
assert_equal(b.shape, out_shape)
def test_unpackbits_large():
# test all possible byte values by comparing against the already-tested packbits
d = np.arange(277, dtype=np.uint8)
assert_array_equal(np.packbits(np.unpackbits(d)), d)
assert_array_equal(np.packbits(np.unpackbits(d[::2])), d[::2])
d = np.tile(d, (3, 1))
assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d)
d = d.T.copy()
assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d)
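# Illustrative sketch (added for exposition, not part of the original test
# suite): a minimal packbits/unpackbits round trip. The helper name below is
# hypothetical and deliberately not prefixed with "test_" so test collectors
# will not pick it up.
def _example_packbits_roundtrip():
    bits = np.array([1, 0, 1, 1, 0, 0, 0, 1], dtype=np.uint8)
    packed = np.packbits(bits)  # 0b10110001 -> array([177], dtype=uint8)
    assert_array_equal(np.unpackbits(packed), bits)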
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A component for running distributed TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import os
import threading
import time
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.distribute import distribute_coordinator_context
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
class _TaskType(object):
PS = "ps"
WORKER = "worker"
CHIEF = "chief"
EVALUATOR = "evaluator"
CLIENT = "client"
# TODO(yuefengz): support another mode where the client colocates with one
# worker.
class CoordinatorMode(object):
"""Specify how distribute coordinator runs."""
# The default mode where distribute coordinator will run as a standalone
# client and connects to remote servers for training. Each remote server can
# use the distribute coordinator binary with task_type set correctly which
# will then turn into standard servers.
STANDALONE_CLIENT = "standalone_client"
# The distribute coordinator runs on each worker. It will run a standard
# server on each worker and optionally run the `worker_fn` that is configured
# to talk to its standard server.
INDEPENDENT_WORKER = "independent_worker"
class _Barrier(object):
"""A reusable barrier class for worker synchronization."""
def __init__(self, num_participants):
"""Initializes the barrier object.
Args:
num_participants: an integer giving the expected number of calls to
`wait` that must pass through this barrier.
"""
self._num_participants = num_participants
self._counter = 0
self._flag = False
self._local_sense = threading.local()
self._lock = threading.Lock()
self._condition = threading.Condition()
def wait(self):
"""Waits until all other callers reach the same wait call."""
if not hasattr(self._local_sense, "value"):
self._local_sense.value = False
self._local_sense.value = not self._flag
with self._lock:
self._counter += 1
if self._counter == self._num_participants:
self._counter = 0
self._flag = self._local_sense.value
with self._condition:
while self._flag != self._local_sense.value:
self._condition.wait()
self._condition.notify_all()
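# Illustrative sketch (not part of the original module): how `_Barrier` could
# be exercised. Kept entirely in comments so nothing executes at import time;
# the name `demo_participant` is hypothetical.
#
#   barrier = _Barrier(num_participants=2)
#
#   def demo_participant():
#     ...              # per-worker work before the synchronization point
#     barrier.wait()   # blocks until both participants have called wait()
#
#   workers = [threading.Thread(target=demo_participant) for _ in range(2)]
#   for t in workers:
#     t.start()
#   for t in workers:
#     t.join()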
def _get_num_workers(cluster_spec):
"""Gets number of workers including chief."""
if not cluster_spec:
return 0
return len(cluster_spec.as_dict().get(_TaskType.WORKER, [])) + len(
cluster_spec.as_dict().get(_TaskType.CHIEF, []))
class _WorkerContext(object):
"""The worker context class.
This context object provides configuration information for each task. One
context manager with a worker context object is created per invocation of
`worker_fn`, within which `get_current_worker_context` can be called
to access the worker context object.
"""
def __init__(self,
strategy,
cluster_spec,
task_type,
task_id,
session_config=None,
rpc_layer="grpc",
worker_barrier=None):
"""Initialize the worker context object.
Args:
strategy: a `DistributionStrategy` object.
cluster_spec: a ClusterSpec object. It can be empty or None in the local
training case.
task_type: a string indicating the role of the corresponding task, such as
"worker" or "ps". It can be None if it is local training or in-graph
replicated training.
task_id: an integer indicating id of the corresponding task. It can be
None if it is local training or in-graph replicated training.
session_config: an optional `tf.ConfigProto` object.
rpc_layer: optional string specifying the RPC protocol for communication
with worker masters. If None or empty, hosts in the `cluster_spec` will
be used directly.
worker_barrier: optional, the barrier object for worker synchronization.
"""
self._strategy = strategy
self._cluster_spec = cluster_spec
self._task_type = task_type
self._task_id = task_id
self._session_config = session_config
self._worker_barrier = worker_barrier
self._rpc_layer = rpc_layer
self._master_target = self._get_master_target()
self._num_workers = _get_num_workers(cluster_spec)
self._is_chief_node = self._is_chief()
def _debug_message(self):
if self._cluster_spec:
return "[cluster_spec: %r, task_type: %r, task_id: %r]" % (
self._cluster_spec, self.task_type, self.task_id)
else:
return "[local]"
def __enter__(self):
old_context = distribute_coordinator_context.get_current_worker_context()
if old_context:
raise ValueError(
"You cannot run distribute coordinator in a `worker_fn`.\t" +
self._debug_message())
# pylint: disable=protected-access
distribute_coordinator_context._worker_context.current = self
def __exit__(self, unused_exception_type, unused_exception_value,
unused_traceback):
# pylint: disable=protected-access
distribute_coordinator_context._worker_context.current = None
def _get_master_target(self):
"""Return the master target for a task."""
# If cluster_spec is None or empty, we use local master.
if not self._cluster_spec:
return ""
# If task_type is None, then it is in-graph replicated training. In this
# case we use the chief or first worker's master target.
if not self._task_type:
if _TaskType.CHIEF in self._cluster_spec.jobs:
task_type = _TaskType.CHIEF
task_id = 0
else:
assert _TaskType.WORKER in self._cluster_spec.jobs
task_type = _TaskType.WORKER
task_id = 0
else:
task_type = self._task_type
task_id = self._task_id
prefix = ""
if self._rpc_layer:
prefix = self._rpc_layer + "://"
return prefix + self._cluster_spec.job_tasks(task_type)[task_id or 0]
def _is_chief(self):
"""Return whether the task is the chief worker."""
if (not self._cluster_spec or
self._task_type in [_TaskType.CHIEF, _TaskType.EVALUATOR, None]):
return True
# If not local and chief not in the cluster_spec, use the first worker as
# chief.
if (_TaskType.CHIEF not in self._cluster_spec.jobs and
self._task_type == _TaskType.WORKER and self._task_id == 0):
return True
return False
def wait_for_other_workers(self):
"""Waits for other workers to reach the same call to this method.
Raises:
ValueError: if `worker_barrier` is not passed to the __init__ method.
"""
if not self._worker_barrier:
raise ValueError("`worker_barrier is not set in the worker context.` \t" +
self._debug_message())
self._worker_barrier.wait()
def session_creator(self,
scaffold=None,
config=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
max_wait_secs=7200):
"""Returns a session creator.
The returned session creator will be configured with the correct master
target and session configs. It will also run either init ops or ready ops
by querying the `strategy` object when `create_session` is called on it.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified, a default one is created. It's used to finalize the graph.
config: `ConfigProto` proto used to configure the session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
Only one of `checkpoint_dir` or `checkpoint_filename_with_path` can be
specified.
max_wait_secs: Maximum time to wait for the session to become available.
Returns:
a descendant of SessionCreator.
"""
if config:
session_config = copy.deepcopy(config)
session_config.MergeFrom(self._session_config)
else:
session_config = self._session_config
if not self._strategy or self._strategy.should_init:
logging.info("Creating chief session creator with config: %r", config)
return monitored_session.ChiefSessionCreator(
scaffold,
master=self.master_target,
config=session_config,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path)
else:
logging.info("Creating worker session creator with config: %r", config)
return monitored_session.WorkerSessionCreator(
scaffold,
master=self.master_target,
config=session_config,
max_wait_secs=max_wait_secs)
@property
def has_barrier(self):
"""Whether the barrier is set or not."""
return self._worker_barrier is not None
@property
def distributed_mode(self):
"""Whether it is distributed training or not."""
return bool(self._cluster_spec) and self._task_type != _TaskType.EVALUATOR
@property
def cluster_spec(self):
"""Returns a copy of the cluster_spec object."""
return copy.deepcopy(self._cluster_spec)
@property
def task_type(self):
"""Returns the role of the corresponing task."""
return self._task_type
@property
def task_id(self):
"""Returns the id or index of the corresponing task."""
return self._task_id
@property
def master_target(self):
"""Returns the session master for the corresponding task to connect to."""
return self._master_target
@property
def is_chief(self):
"""Returns whether the task is a chief node."""
return self._is_chief_node
@property
def num_workers(self):
"""Returns number of workers in the cluster, including chief."""
return self._num_workers
@property
def should_checkpoint(self):
"""Whether to save checkpoint."""
return self._strategy.should_checkpoint
@property
def should_save_summary(self):
"""Whether to save summaries."""
return self._strategy.should_save_summary
def _run_single_worker(worker_fn,
strategy,
cluster_spec,
task_type,
task_id,
session_config,
rpc_layer="",
worker_barrier=None):
"""Runs a single worker by calling `worker_fn` under context."""
session_config = copy.deepcopy(session_config)
strategy = copy.deepcopy(strategy)
# If there is an EVALUATOR task, we run single-machine eval on that task.
if task_type == _TaskType.EVALUATOR:
# It is possible to not have a strategy object for EVALUATOR task.
if strategy:
strategy.configure(session_config)
else:
assert strategy
strategy.configure(session_config, cluster_spec, task_type, task_id)
context = _WorkerContext(
strategy,
cluster_spec,
task_type,
task_id,
session_config=session_config,
rpc_layer=rpc_layer,
worker_barrier=worker_barrier)
with context:
return worker_fn(strategy)
def _split_cluster_for_evaluator(cluster_spec, task_type):
"""Split the cluster for evaluator since it needn't talk to other tasks."""
# Splitting the cluster is important to prevent the evaluator from talking to
# other tasks in the cluster, since we allow the evaluator not to use
# distribution strategies, in which case ops in the evaluator task may have
# unspecified devices. Those ops may end up on other tasks if we don't split
# the cluster.
new_cluster_spec = multi_worker_util.normalize_cluster_spec(
cluster_spec).as_dict()
if task_type == _TaskType.EVALUATOR:
assert _TaskType.EVALUATOR in new_cluster_spec
new_cluster_spec = {
_TaskType.EVALUATOR: new_cluster_spec[_TaskType.EVALUATOR]
}
else:
new_cluster_spec.pop(_TaskType.EVALUATOR, None)
return multi_worker_util.normalize_cluster_spec(new_cluster_spec)
def _run_std_server(cluster_spec=None,
task_type=None,
task_id=None,
session_config=None,
rpc_layer=None,
environment=None):
"""Runs a standard server."""
assert cluster_spec
target = cluster_spec.task_address(task_type, task_id)
if rpc_layer:
target = rpc_layer + "://" + target
class _FakeServer(object):
"""A fake server that runs a master session."""
def start(self):
# A tensorflow server starts when a remote session is created.
logging.info(
"Creating a remote session to start a TensorFlow server, "
"target = %r, session_config=%r", target, session_config)
session.Session(target=target, config=session_config)
def join(self):
while True:
time.sleep(5)
if environment == "google":
server = _FakeServer()
server.start()
return server
else:
if session_config:
logging.info(
"Starting standard TensorFlow server, target = %r, session_config= "
"%r", target, session_config)
else:
logging.info("Starting standard TensorFlow server, target = %r", target)
cluster_spec = _split_cluster_for_evaluator(cluster_spec, task_type)
server = server_lib.Server(
cluster_spec,
job_name=task_type,
task_index=task_id,
config=session_config,
protocol=rpc_layer)
server.start()
return server
def _run_between_graph_client(worker_fn, strategy, eval_fn, eval_strategy,
cluster_spec, session_config, rpc_layer):
"""Runs a standalone client for between-graph replication."""
eval_thread = None
if _TaskType.EVALUATOR in cluster_spec.jobs:
eval_thread = threading.Thread(
target=_run_single_worker,
args=(eval_fn, eval_strategy, cluster_spec, _TaskType.EVALUATOR, 0,
session_config),
kwargs={
"rpc_layer": rpc_layer,
})
eval_thread.start()
threads = []
worker_barrier = _Barrier(_get_num_workers(cluster_spec))
for task_type in [_TaskType.CHIEF, _TaskType.WORKER]:
for task_id in range(len(cluster_spec.as_dict().get(task_type, []))):
t = threading.Thread(
target=_run_single_worker,
args=(worker_fn, strategy, cluster_spec, task_type, task_id,
session_config),
kwargs={
"rpc_layer": rpc_layer,
"worker_barrier": worker_barrier
})
t.start()
threads.append(t)
# TODO(yuefengz): wrap threads into thread coordinator?
for t in threads:
t.join()
# TODO(yuefengz): is it necessary to join eval thread?
if eval_thread:
eval_thread.join()
# TODO(yuefengz): we probably want to return results from all workers?
return None
def _run_in_graph_client(worker_fn, strategy, eval_fn, eval_strategy,
cluster_spec, session_config, rpc_layer):
"""Runs a standalone client for in-graph replication."""
eval_thread = None
if _TaskType.EVALUATOR in cluster_spec.jobs:
eval_thread = threading.Thread(
target=_run_single_worker,
args=(eval_fn, eval_strategy, cluster_spec, _TaskType.EVALUATOR, 0,
session_config),
kwargs={
"rpc_layer": rpc_layer,
})
eval_thread.start()
worker_result = _run_single_worker(
worker_fn,
strategy,
cluster_spec,
None,
None,
session_config,
rpc_layer=rpc_layer)
if eval_thread:
eval_thread.join()
return worker_result
def _configure_session_config_for_std_servers(
strategy, eval_strategy, session_config, cluster_spec, task_type, task_id):
# pylint: disable=g-doc-args
"""Call strategy's `configure` to mutate the session_config.
The session_config is currently needed as default config for a TensorFlow
server. In the future, we should be able to remove this method and only pass
the session config to a client session.
"""
if task_type == _TaskType.EVALUATOR:
if eval_strategy:
eval_strategy.configure(session_config=session_config)
else:
# The strategy may be shared in standalone client mode.
strategy = copy.deepcopy(strategy)
strategy.configure(
session_config=session_config,
cluster_spec=cluster_spec,
task_type=task_type,
task_id=task_id)
# Remove the device filters specific to the strategy, so that the
# TensorFlow server brought up with one strategy can be used by other
# strategies. The device filters can be set in the client side as well.
del session_config.device_filters[:]
def run_standard_tensorflow_server(session_config=None):
"""Starts a standard TensorFlow server.
This method parses configurations from "TF_CONFIG" environment variable and
starts a TensorFlow server. The "TF_CONFIG" is typically a json string and
must have information of the cluster and the role of the server in the
cluster. One example is:
TF_CONFIG='{
"cluster": {
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "worker", "index": 1}
}'
This "TF_CONFIG" specifies there are 3 workers and 2 ps tasks in the cluster
and the current role is worker 1.
Valid task types are "chief", "worker", "ps" and "evaluator" and you can have
at most one "chief" and at most one "evaluator".
An optional key that can be specified is "rpc_layer". The default value is
"grpc".
Args:
session_config: an optional `tf.ConfigProto` object. Users can pass in
the session config object to configure server-local devices.
Returns:
a `tf.train.Server` object which has already been started.
Raises:
ValueError: if the "TF_CONFIG" environment is not complete.
"""
tf_config = json.loads(os.environ.get("TF_CONFIG", "{}"))
if "cluster" not in tf_config:
raise ValueError("\"cluster\" is not found in TF_CONFIG.")
cluster_spec = multi_worker_util.normalize_cluster_spec(tf_config["cluster"])
if "task" not in tf_config:
raise ValueError("\"task\" is not found in TF_CONFIG.")
task_env = tf_config["task"]
if "type" not in task_env:
raise ValueError(
"\"task_type\" is not found in the `task` part of TF_CONFIG.")
task_type = task_env["type"]
task_id = int(task_env.get("index", 0))
rpc_layer = tf_config.get("rpc_layer", "grpc")
session_config = session_config or config_pb2.ConfigProto()
# Set the collective group leader for collective ops to initialize collective
# ops when server starts.
if "chief" in cluster_spec.jobs:
session_config.experimental.collective_group_leader = (
"/job:chief/replica:0/task:0")
else:
if "worker" not in cluster_spec.jobs:
raise ValueError(
"You must have `chief` or `worker` jobs in the `cluster_spec`.")
session_config.experimental.collective_group_leader = (
"/job:worker/replica:0/task:0")
server = _run_std_server(
cluster_spec=cluster_spec,
task_type=task_type,
task_id=task_id,
session_config=session_config,
rpc_layer=rpc_layer)
server.start()
return server
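# Illustrative sketch (not part of the original module): a task process might
# export TF_CONFIG and then call `run_standard_tensorflow_server`. Kept in
# comments so nothing executes at import time; host names are placeholders.
#
#   os.environ["TF_CONFIG"] = json.dumps({
#       "cluster": {"worker": ["host1:2222", "host2:2222"]},
#       "task": {"type": "worker", "index": 0},
#   })
#   server = run_standard_tensorflow_server()
#   server.join()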
# TODO(yuefengz): propagate cluster_spec in the STANDALONE_CLIENT mode.
# TODO(yuefengz): we may need a smart way to figure out whether the current task
# is the special task when we support cluster_spec propagation.
def run_distribute_coordinator(worker_fn,
strategy,
eval_fn=None,
eval_strategy=None,
mode=CoordinatorMode.STANDALONE_CLIENT,
cluster_spec=None,
task_type=None,
task_id=None,
session_config=None,
rpc_layer="grpc"):
"""Runs the coordinator for distributed TensorFlow.
This function runs a split coordinator for distributed TensorFlow in its
default mode, i.e. the STANDALONE_CLIENT mode. Given a `cluster_spec`
specifying server addresses and their roles in a cluster, this coordinator
will figure out how to set them up, give the underlying function the right
targets for master sessions via a scope object and coordinate their training.
The cluster consisting of standard servers needs to be brought up either with
the standard server binary or with a binary running distribute coordinator
with `task_type` set to non-client type which will then turn into standard
servers.
In addition to being the distribute coordinator, this is also the source of
configurations for each job in the distributed training. As there are multiple
ways to configure a distributed TensorFlow cluster, its context object
provides these configurations so that users or higher-level APIs don't have to
figure out the configuration for each job by themselves.
In the between-graph replicated training, this coordinator will create
multiple threads and each calls the `worker_fn` which is supposed to create
its own graph and connect to one worker master given by its context object. In
the in-graph replicated training, it has only one thread calling this
`worker_fn`.
Another mode is the INDEPENDENT_WORKER mode where each server runs a
distribute coordinator which will start a standard server and optionally runs
`worker_fn` depending on whether it is between-graph training or in-graph
replicated training.
The `strategy` object is expected to be a DistributionStrategy object which
has implemented methods needed by distributed coordinator such as
`configure(session_config, cluster_spec, task_type, task_id)` which configures
the strategy object for a specific task and `should_init` property which
instructs the distribute coordinator whether to run init ops for a task. The
distribute coordinator will make a copy of the `strategy` object, call its
`configure` method and pass it to `worker_fn` as an argument.
The `worker_fn` defines the training logic and is called under its own
worker context which can be accessed via `get_current_worker_context`. A
worker context provides access to configurations for each task, e.g. the
task_type, task_id, master target and so on. Since `worker_fn` will be called
in a thread and possibly multiple times, the caller should be careful when it
accesses global data. For example, it is unsafe to define flags in a
`worker_fn` or to define different environment variables for different
`worker_fn`s.
The `worker_fn` for the between-graph replication is defined as if there is
only one worker corresponding to the `worker_fn` and possibly ps jobs. For
example, when training with parameter servers, it assigns variables to
parameter servers and all other operations to that worker. In the in-graph
replication case, the `worker_fn` has to define operations for all worker
jobs. Using a distribution strategy can simplify the `worker_fn` by not having
to worry about the replication and device assignment of variables and
operations.
This method is intended to be invoked by high-level APIs so that users don't
have to explicitly call it to run this coordinator. For those who don't use
high-level APIs, to change a program to use this coordinator, wrap everything
in the program after global data definitions, such as command-line flag
definitions, into the `worker_fn` and get task-specific configurations from
the worker context.
The `cluster_spec` can be either passed by the argument or parsed from the
"TF_CONFIG" envrionment variable. Example of a TF_CONFIG:
```
cluster = {'chief': ['host0:2222'],
'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
os.environ['TF_CONFIG'] = json.dumps({'cluster': cluster})
```
If `cluster_spec` is not given in any format, it becomes local training and
this coordinator will connect to a local session.
For evaluation, if "evaluator" exists in the cluster_spec, a separate thread
will be created to call `eval_fn` with its `task_type` set to "evaluator". If
`eval_fn` is not defined, fall back to `worker_fn`. This implies that
evaluation will be done on a single machine if there is an "evaluator" task.
If "evaluator" doesn't exit in the cluster_spec, it entirely depends on the
`worker_fn` for how to do evaluation.
Args:
worker_fn: the function to be called. The function should accept a
`strategy` object and will be given access to a context object via a
context manager scope.
strategy: a DistributionStrategy object specifying whether it should
run between-graph replicated training or not, whether to run init ops,
etc. This object will also be configured given `session_config`,
`cluster_spec`, `task_type` and `task_id`.
eval_fn: optional function for "evaluator" task. If `eval_fn` is not passed
in but a "evaluator" task found in the `cluster_spec`, the `worker_fn`
will be used for this task.
eval_strategy: optional DistributionStrategy object for "evaluator" task.
mode: in which mode this distribute coordinator runs.
cluster_spec: a dict, ClusterDef or ClusterSpec specifying servers and roles
in a cluster. If not set or empty, fall back to local training.
task_type: the current task type, optional if this is a client.
task_id: the current task id, optional if this is a client.
session_config: an optional `tf.ConfigProto` object which will be passed
to `strategy`'s `configure` method and used to create a session.
rpc_layer: optional string, the protocol for RPC, e.g. "grpc".
Raises:
ValueError: if `cluster_spec` is supplied but not a dict or a ClusterDef or
a ClusterSpec.
Returns:
In the client job, return the value returned by `worker_fn` if
it is in-graph replication; return None otherwise.
"""
tf_config = json.loads(os.environ.get("TF_CONFIG", "{}"))
if not cluster_spec:
cluster_spec = tf_config.get("cluster", {})
task_env = tf_config.get("task", {})
if task_env:
task_type = task_env.get("type", task_type)
task_id = int(task_env.get("index", task_id))
if cluster_spec:
cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
# TODO(yuefengz): validate cluster_spec.
rpc_layer = tf_config.get("rpc_layer", rpc_layer)
environment = tf_config.get("environment", None)
# Setting the session config is necessary for some strategies such as
# CollectiveAllReduceStrategy.
session_config = session_config or config_pb2.ConfigProto(
allow_soft_placement=True)
if cluster_spec:
logging.info(
"Running Distribute Coordinator with mode = %r, cluster_spec = %r, "
"task_type = %r, task_id = %r, environment = %r, rpc_layer = %r", mode,
cluster_spec.as_dict(), task_type, task_id, environment, rpc_layer)
if not cluster_spec:
# `mode` is ignored in the local case.
logging.info("Running local Distribute Coordinator.")
_run_single_worker(worker_fn, strategy, None, None, None, session_config,
rpc_layer)
if eval_fn:
_run_single_worker(eval_fn, eval_strategy, None, None, None,
session_config, rpc_layer)
else:
logging.warning("Skipped evaluation since `eval_fn` is not passed in.")
elif mode == CoordinatorMode.STANDALONE_CLIENT:
if not eval_fn:
logging.warning("`eval_fn` is not passed in. The `worker_fn` will be "
"used if an \"evaluator\" task exists in the cluster.")
eval_fn = eval_fn or worker_fn
if not eval_strategy:
logging.warning("`eval_strategy` is not passed in. No distribution "
"strategy will be used for evaluation.")
# The client must know the cluster but servers in the cluster don't have to
# know the client.
if task_type in [_TaskType.CLIENT, None]:
if strategy.between_graph:
return _run_between_graph_client(worker_fn, strategy, eval_fn,
eval_strategy, cluster_spec,
session_config, rpc_layer)
else:
return _run_in_graph_client(worker_fn, strategy, eval_fn, eval_strategy,
cluster_spec, session_config, rpc_layer)
else:
# If not a client job, run the standard server.
_configure_session_config_for_std_servers(strategy, eval_strategy,
session_config, cluster_spec,
task_type, task_id)
server = _run_std_server(
cluster_spec=cluster_spec,
task_type=task_type,
task_id=task_id,
session_config=session_config,
rpc_layer=rpc_layer,
environment=environment)
server.join()
else:
if mode != CoordinatorMode.INDEPENDENT_WORKER:
raise ValueError("Unexpected coordinator mode: %r" % mode)
if not eval_fn:
logging.warning("`eval_fn` is not passed in. The `worker_fn` will be "
"used if an \"evaluator\" task exists in the cluster.")
eval_fn = eval_fn or worker_fn
if not eval_strategy:
logging.warning("`eval_strategy` is not passed in. No distribution "
"strategy will be used for evaluation.")
# Everyone starts a standard server; get the session config from the
# `configure` method.
_configure_session_config_for_std_servers(strategy, eval_strategy,
session_config, cluster_spec,
task_type, task_id)
server = _run_std_server(
cluster_spec=cluster_spec,
task_type=task_type,
task_id=task_id,
session_config=session_config,
rpc_layer=rpc_layer,
environment=environment)
if task_type in [_TaskType.CHIEF, _TaskType.WORKER]:
if strategy.between_graph:
# All jobs run `worker_fn` if between-graph.
_run_single_worker(worker_fn, strategy, cluster_spec, task_type,
task_id, session_config, rpc_layer)
else:
# Only one node runs `worker_fn` if in-graph.
context = _WorkerContext(strategy, cluster_spec, task_type, task_id)
if context.is_chief:
_run_single_worker(worker_fn, strategy, cluster_spec, None, None,
session_config, rpc_layer)
else:
server.join()
elif task_type == _TaskType.EVALUATOR:
_run_single_worker(eval_fn, eval_strategy, cluster_spec, task_type,
task_id, session_config, rpc_layer)
else:
if task_type != _TaskType.PS:
raise ValueError("Unexpected task_type: %r" % task_type)
server.join()
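# Illustrative sketch (not part of the original module): a minimal `worker_fn`
# passed to `run_distribute_coordinator`. Kept in comments so nothing executes
# at import time; `my_strategy` stands in for a configured DistributionStrategy.
#
#   def my_worker_fn(strategy):
#     context = distribute_coordinator_context.get_current_worker_context()
#     with session.Session(target=context.master_target) as sess:
#       ...  # build the per-worker graph and run training steps
#
#   run_distribute_coordinator(my_worker_fn, my_strategy,
#                              mode=CoordinatorMode.INDEPENDENT_WORKER)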
|
|
# -*- coding: utf-8 -*-
import traceback
import pandas as pd
import numpy as np
import dcs
import re
def fillDown(df, columnFrom, columnTo, method):
"""Replaces invalid values in specified columns with the last/next valid value, in-place
Multiple columns can be specified by giving a range of column indices. Therefore the operation can only be performed on a series of adjacent columns.
The function makes use of the :meth:`pandas.Series.fillna` method.
Args:
df (pandas.DataFrame): data frame
columnFrom (int): starting index for range of columns
columnTo (int): ending index for range of columns (inclusive)
method (str): 'bfill' for backwards fill (next valid value) and 'pad' for forward fill (last valid value)
"""
for columnIndex in range(columnFrom, columnTo + 1):
print("filling down ", df.columns[columnIndex], " using ", method)
if method == 'pad':
df[df.columns[columnIndex]].fillna(method='pad', inplace=True)
else:
df[df.columns[columnIndex]].fillna(method='bfill', inplace=True)
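# Illustrative sketch (not part of the original module): forward-filling two
# adjacent columns with `fillDown`. Kept in comments so nothing runs on import;
# the frame below is a made-up example.
#
#   df = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [np.nan, 5.0, np.nan]})
#   fillDown(df, columnFrom=0, columnTo=1, method='pad')
#   # 'a' -> [1.0, 1.0, 3.0]; 'b' -> [NaN, 5.0, 5.0] (row 0 has no prior value)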
def fillByInterpolation(df, columnIndex, method, order):
"""Fills in invalid values in the specified column by performing interpolation, in-place
.. warning::
The function only works on numeric columns and will raise an exception in any other case.
The function makes use of the :meth:`pandas.Series.interpolate` method.
Args:
df (pandas.DataFrame): data frame
columnIndex (int): index of numeric column
method (str): passed directly to ``method`` kwarg for :meth:`pandas.Series.interpolate`, options include 'linear', 'spline' and 'polynomial'
order (int): passed directly to ``order`` kwarg for :meth:`pandas.Series.interpolate`, required for certain methods such as 'polynomial'
"""
method = method.lower()
if method == 'polynomial' or method == 'spline':
df[df.columns[columnIndex]].interpolate(method=method, order=order, inplace=True)
else:
df[df.columns[columnIndex]].interpolate(method=method, inplace=True)
def fillWithCustomValue(df, columnIndex, newValue):
"""Fills in all invalid values in the specified column with a custom specified value, in-place
Args:
df (pandas.DataFrame): data frame
columnIndex (int): index of column
newValue: value to fill with
"""
if (df[df.columns[columnIndex]].dtype == np.float64):
try:
newValue = float(newValue)
except ValueError:
pass
elif (df[df.columns[columnIndex]].dtype == np.int64):
try:
newValue = int(float(newValue))
except ValueError:
pass
df[df.columns[columnIndex]].fillna(value=newValue, inplace=True)
def fillWithAverage(df, columnIndex, metric):
"""Fills in invalid values in the specified column with an average metric, in-place
Average metrics that can be used to fill with are: mean, median and mode.
.. warning::
Using mean or median metric on a non numeric column will raise an exception.
Args:
df (pandas.DataFrame): data frame
columnIndex (int): index of column
metric (str): average metric to use, options are: 'mean', 'median' and 'mode'
Returns:
bool: True on success, False on failure
"""
if metric == "mean":
average = df[df.columns[columnIndex]].mean()
elif metric == "median":
average = df[df.columns[columnIndex]].median()
elif metric == "mode":
analysis = dcs.analyze.genericAnalysis(df[df.columns[columnIndex]])
if "mode" in analysis:
average = analysis["mode"][0]
else:
return False
else:
return False
df[df.columns[columnIndex]].fillna(value=average, inplace=True)
return True
def normalize(df, columnIndex, rangeFrom=0, rangeTo=1):
"""Performs normalization on a numeric column, in-place
Uniformly scales the values in a numeric data set to fit in the specified range.
.. warning::
Calling the function on a non numeric column will raise an exception.
Args:
df (pandas.DataFrame): data frame
columnIndex (int): index of column
rangeFrom (int/float, optional): range start
rangeTo (int/float, optional): range end
"""
if (df[df.columns[columnIndex]].max() - df[df.columns[columnIndex]].min()) != 0:
df[df.columns[columnIndex]] = rangeFrom + ((df[df.columns[columnIndex]] - df[df.columns[columnIndex]].min()) * (rangeTo - rangeFrom)) / (df[df.columns[columnIndex]].max() - df[df.columns[columnIndex]].min())
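# Illustrative sketch (not part of the original module): the min-max scaling
# performed above on a made-up column. Kept in comments so nothing runs on
# import.
#
#   df = pd.DataFrame({'x': [2.0, 4.0, 6.0]})
#   normalize(df, columnIndex=0, rangeFrom=0, rangeTo=1)
#   # x' = rangeFrom + (x - min) * (rangeTo - rangeFrom) / (max - min)
#   # so 'x' becomes [0.0, 0.5, 1.0]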
def standardize(df, columnIndex):
"""Performs standardization on a numeric column, in-place
Uniformly scales the values in a numeric data set so that the mean is 0 and the standard deviation is 1.
.. warning::
Calling the function on a non numeric column will raise an exception.
Args:
df (pandas.DataFrame): data frame
columnIndex (int): index of column
"""
if df[df.columns[columnIndex]].std() != 0:
df[df.columns[columnIndex]] = (df[df.columns[columnIndex]] - df[df.columns[columnIndex]].mean()) / df[df.columns[columnIndex]].std()
def deleteRowsWithNA(df, columnIndex):
"""Drops all rows with missing values in the specified column
The function uses the :meth:`pandas.DataFrame.dropna` function,
before resetting the index of the dataframe with :meth:`pandas.DataFrame.reset_index`
Args:
df (pandas.DataFrame): data frame
columnIndex (int): index of column
"""
df.dropna(subset=[df.columns[columnIndex]], inplace=True)
df.reset_index(drop=True, inplace=True)
def findReplace(df, columnIndex, toReplace, replaceWith, matchRegex):
"""Finds all values matching the given patterns in the specified column and replaces them with a value
The function supports searching for multiple patterns, and uses the :meth:`pandas:pandas.Series.replace` method
Patterns can be strings which will be matched as a whole, or regular expressions (if *matchRegex* boolean flag is set to ``True``).
Standard Pythonic :func:`regex substitutions <python:re.sub>` are also possible.
Args:
df (pandas.DataFrame): data frame
columnIndex (int): index of column
toReplace (list<str>): list of search strings or regular expressions
replaceWith (list<str>): list of replacement strings or regular expressions
matchRegex (bool): must be set to True if supplying list of regular expressions
"""
for i in range(0, len(toReplace)):
df[df.columns[columnIndex]].replace(to_replace=str(toReplace[i]), value=str(replaceWith[i]), regex=matchRegex, inplace=True)
try:
df[df.columns[columnIndex]].replace(to_replace=float(toReplace[i]), value=replaceWith[i], regex=matchRegex, inplace=True)
except ValueError:
pass
def generateDummies(df, columnIndex, inplace):
"""Generates dummies/indicator variable columns from a specified column (containing categorical data)
The function uses the :func:`pandas.get_dummies` function.
Args:
df (pandas.DataFrame): data frame
columnIndex (int): index of column
inplace (bool): removes original column if ``True``
"""
dummies = pd.get_dummies(df[df.columns[columnIndex]])
dummiesCount = len(dummies.columns)
for i in range(0, dummiesCount):
df.insert(columnIndex+i+1, str(df.columns[columnIndex])+"_"+str(dummies.columns[i]), dummies[dummies.columns[i]], allow_duplicates=True)
'''
df = pd.concat([df, dummies], axis=1)
cols = df.columns.tolist()
cols = cols[:columnIndex+1] + cols[-dummiesCount:] + cols[columnIndex+1:-dummiesCount]
df = df[cols]
'''
if inplace:
df.drop(df.columns[columnIndex], axis=1, inplace=True)
def insertDuplicateColumn(df, columnIndex):
"""Duplicates a column, inserting the new column to the right of the original column
Args:
df (pandas.DataFrame): data frame
columnIndex (int): index of column to duplicate
"""
df.insert(columnIndex + 1, str(df.columns[columnIndex]) + "_copy", df.iloc[:, columnIndex], allow_duplicates=True)
def splitColumn(df, columnIndex, delimiter, regex=False):
"""Splits a string column according to a specified delimiter or regular expression.
The split values are put in new columns inserted to the right of the original column
Args:
df (pandas.DataFrame): data frame
columnIndex (int): index of column to split
delimiter (str): delimiting character, string or regular expression for splitting each row
regex (bool, optional): must be set to ``True`` if delimiter is a regular expression
"""
tempDF = df.copy()
tempDF[tempDF.columns[columnIndex]].replace(to_replace=np.nan, value="", inplace=True)
if regex:
newColumns = tempDF[tempDF.columns[columnIndex]].apply(lambda x: pd.Series(re.split(delimiter, x)))
else:
newColumns = tempDF[tempDF.columns[columnIndex]].apply(lambda x: pd.Series(x.split(delimiter)))
newColumnsCount = len(newColumns.columns)
for i in range(0, newColumnsCount):
newColumns[newColumns.columns[i]].replace(to_replace="", value=np.nan, inplace=True)
df.insert(columnIndex+i+1, str(df.columns[columnIndex])+"_"+str(newColumns.columns[i]), newColumns[newColumns.columns[i]], allow_duplicates=True)
def combineColumns(df, columnHeadings, seperator="", newName="merged_column", insertIndex=0):
"""Combines multiple columns into a new column, concatenating each value using a specified separator
Args:
df (pandas.DataFrame): data frame
columnHeadings (list<str>): list of columns to combine
seperator (str, optional): separator character or string
newName (str, optional): name for column containing combined values
insertIndex (int, optional): index to insert new column at
Raises:
ValueError: if *columnHeadings* parameter doesn't contain at least two columns
"""
if len(columnHeadings) < 2:
raise ValueError('dcs.clean.combineColumns must be provided at least two columns to combine')
newColumn = pd.Series(index=df.index, dtype=str)
for index, row in df.iterrows():
strings = []
for column in columnHeadings:
if pd.notnull(row[column]):
strings.append(str(row[column]))
newColumn[index] = seperator.join(strings)
df.insert(insertIndex, newName, newColumn, allow_duplicates=True)
def discretize(df, columnIndex, cutMode, numberOfBins):
"""Performs in-place discretization on a numeric column
The function has two modes of operation: discretization and quantiling, using the :func:`pandas.cut`
and :func:`pandas.qcut` functions respectively.
Args:
df (pandas.DataFrame): data frame
columnIndex (int): index of column to discretize
cutMode (str): 'quantiling' or 'discretization'
numberOfBins (int or str): number of bins, or a comma-separated string of bin edges, passed directly into the pandas.cut() and pandas.qcut() functions
"""
if (cutMode == "discretization"):
if type(numberOfBins) is not int:
numberOfBins = numberOfBins.split(',')
numberOfBins = map(float, numberOfBins)
df[df.columns[columnIndex]] = pd.cut(df[df.columns[columnIndex]], numberOfBins).astype(str)
elif (cutMode == "quantiling"):
if type(numberOfBins) is not int:
numberOfBins = numberOfBins.split(',')
numberOfBins = map(float, numberOfBins)
df[df.columns[columnIndex]] = pd.qcut(df[df.columns[columnIndex]], numberOfBins).astype(str)
else:
return False
# Replace 'nan' strings with np.nan
df[df.columns[columnIndex]].replace(to_replace="nan", value=np.nan, inplace=True)
# HIGHWAY TO THE DANGER ZONE
def executeCommand(df, command):
"""Executes a Python statement in a pre-configured environment
.. danger::
Using this function carries direct risk, as any arbitrary command can be executed
The *command* parameter can be a string containing multiple lines of Python statements. The command is executed
in a pre-configured environment with ``df`` holding a reference to the data frame, and multiple modules loaded,
including ``pandas`` and ``numpy``.
Args:
df (pandas.DataFrame): data frame
command (str): string containing a single Python command, or multiple Python commands delimited by newline
"""
exec(command)
|
|
#! /usr/bin/env python2.7
# -*- coding: utf-8 -*-
""" Announcements Plugin for Indigo Home Control Server
The Announcements Plugin is used to construct complex announcements for use
with text-to-speech tools in Indigo. The plugin provides a simple call to the
indigo.server.speak() hook for simple audio announcements; however, the plugin
is more geared towards creating announcements to be used with more advanced
speech tools.
"""
# =================================== TO DO ===================================
# TODO: add instructions on how to implement embedded speech synthesizer codes (i.e., [[rate 160]], etc.)
# ================================== IMPORTS ==================================
# Built-in modules
import ast
import datetime as dt
from dateutil import parser
import logging
import os
import re
import shutil
import string
import traceback
# Third-party modules
try:
import indigo
except ImportError, error:
indigo.server.log(unicode(error), isError=True)
try:
import pydevd_pycharm
except ImportError:
pass
# My modules
from Constants import *
import DLFramework.DLFramework as Dave
# =================================== HEADER ==================================
__author__ = Dave.__author__
__copyright__ = Dave.__copyright__
__license__ = Dave.__license__
__build__ = Dave.__build__
__title__ = u'Announcements Plugin for Indigo Home Control'
__version__ = u'1.0.21'
# =============================================================================
kDefaultPluginPrefs = {
u'pluginRefresh': "15",
u'showDebugLevel': "30",
}
class Plugin(indigo.PluginBase):
def __init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs):
indigo.PluginBase.__init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs)
self.pluginIsInitializing = True
self.pluginIsShuttingDown = False
log_format = '%(asctime)s.%(msecs)03d\t%(levelname)-10s\t%(name)s.%(funcName)-28s %(msg)s'
self.plugin_file_handler.setFormatter(logging.Formatter(fmt=log_format, datefmt='%Y-%m-%d %H:%M:%S'))
self.debug = True
self.debugLevel = int(self.pluginPrefs.get('showDebugLevel', "30"))
self.indigo_log_handler.setLevel(self.debugLevel)
self.update_frequency = int(self.pluginPrefs.get('pluginRefresh', 15))
self.logger.debug(u"Plugin refresh interval: {0}".format(self.update_frequency))
# ====================== Initialize DLFramework =======================
self.Fogbert = Dave.Fogbert(self)
# Log pluginEnvironment information when plugin is first started
self.Fogbert.pluginEnvironment()
# =====================================================================
# Establish the default announcements file.
working_directory = u"{0}/Announcements Plugin/".format(os.path.expanduser('~'))
old_file = u"{0}announcements.txt".format(working_directory)
self.announcements_file = u"{0}/Preferences/Plugins/com.fogbert.indigoplugin.announcements.txt".format(
indigo.server.getInstallFolderPath())
# If it exists under the old location, let's move it over.
if os.path.isfile(old_file):
os.rename(old_file, self.announcements_file)
self.sleep(1)
shutil.rmtree(path=working_directory, ignore_errors=True)
# If a new install, let's establish a new empty dict.
if not os.path.isfile(self.announcements_file):
with open(self.announcements_file, 'w+') as outfile:
outfile.write("{}")
self.sleep(1) # Wait a moment to let the system catch up.
try:
pydevd_pycharm.settrace('localhost', port=5678, stdoutToServer=True, stderrToServer=True, suspend=False)
except:
pass
self.pluginIsInitializing = False
def __del__(self):
indigo.PluginBase.__del__(self)
# =============================================================================
# ============================== Indigo Methods ===============================
# =============================================================================
# =============================================================================
def closedPrefsConfigUi(self, values_dict, user_cancelled):
if not user_cancelled:
debug_label = {10: u"Debugging Messages",
20: u"Informational Messages",
30: u"Warning Messages",
40: u"Error Messages",
50: u"Critical Errors Only"
}
self.debugLevel = int(values_dict['showDebugLevel'])
self.update_frequency = int(values_dict['pluginRefresh'])
self.indigo_log_handler.setLevel(self.debugLevel)
indigo.server.log(u"Debugging set to: {0}".format(debug_label[self.debugLevel]))
# Update the devices to reflect any changes
self.announcement_update_states()
# Ensure that self.pluginPrefs includes any recent changes.
for k in values_dict:
self.pluginPrefs[k] = values_dict[k]
# =============================================================================
def deviceStartComm(self, dev):
dev.stateListOrDisplayStateIdChanged()
dev.updateStateOnServer('onOffState', value=False, uiValue=u" ")
# =============================================================================
def deviceStopComm(self, dev):
dev.updateStateOnServer('onOffState', value=False, uiValue=u" ")
# =============================================================================
def getDeviceConfigUiValues(self, values_dict, type_id, dev_id):
# Set the device to disabled while it's being edited.
indigo.device.enable(dev_id, value=False)
# Ensure that the dialog opens with fresh fields.
if type_id == 'announcementsDevice':
for key in ('announcementName',
'announcementList',
'announcementRefresh',
'announcementText',
'subGeneratorResult'
):
values_dict[key] = ''
return values_dict
# =============================================================================
def getDeviceStateList(self, dev):
dev_id = dev.id
type_id = dev.deviceTypeId
if type_id not in self.devicesTypeDict:
return None
default_states_list = self.devicesTypeDict[type_id][u'States']
# Open the announcements file and load the contents
with open(self.announcements_file) as outfile:
infile = outfile.read()
# Convert the string implementation of the dict to an actual dict, and get the sub dict for the device.
infile = ast.literal_eval(node_or_string=infile)
# Sort the dict and create a list of tuples.
try:
announcement_list = [(key, infile[dev_id][key]['Name']) for key in infile[dev_id].keys()]
except KeyError:
announcement_list = []
# Iterate through the list of tuples and save each announcement name as a device key. Keys (state id's) can't
# contain Unicode.
for thing in announcement_list:
thing_name = thing[1].replace(' ', '_')
announcement_state = self.getDeviceStateDictForStringType(thing_name, thing_name, thing_name)
default_states_list.append(announcement_state)
return default_states_list
# =============================================================================
def runConcurrentThread(self):
try:
while True:
self.sleep(1)
self.update_frequency = int(self.pluginPrefs.get('pluginRefresh', 15))
self.announcement_update_states()
self.sleep(self.update_frequency)
except self.StopThread:
pass
# =============================================================================
def sendDevicePing(self, dev_id=0, suppress_logging=False):
indigo.server.log(u"Announcements Plugin devices do not support the ping function.")
return {'result': 'Failure'}
# =============================================================================
def startup(self):
# =========================== Audit Indigo Version ============================
self.Fogbert.audit_server_version(min_ver=7)
# ============= Delete Out of Date Announcements ===============
# Open the announcements file and load the contents
with open(name=self.announcements_file) as outfile:
infile = outfile.read()
# Convert the string implementation of the dict to an actual dict.
infile = ast.literal_eval(infile)
# Look at each plugin device id and delete any announcements if there is no longer an associated device.
for key in infile.keys():
if key not in indigo.devices.keys('self'):
del infile[key]
# Look at each plugin device and construct a placeholder if not already present.
for dev in indigo.devices.iter('self'):
if dev.id not in infile.keys():
infile[dev.id] = {}
# Open the announcements file and save the new dict.
with open(self.announcements_file, 'w') as outfile:
outfile.write("{0}".format(infile))
# =============================================================================
def validateDeviceConfigUi(self, values_dict, type_id, dev_id):
error_msg_dict = indigo.Dict()
# try:
# Announcements device
if type_id == 'announcementsDevice':
return True, values_dict
# Salutations device
try:
if type_id == 'salutationsDevice':
morning = int(values_dict['morningStart'])
afternoon = int(values_dict['afternoonStart'])
evening = int(values_dict['eveningStart'])
night = int(values_dict['nightStart'])
if not (morning < afternoon < evening < night):
for key in ('morningStart', 'afternoonStart', 'eveningStart', 'nightStart'):
error_msg_dict[key] = u"Each start time must be greater than the prior one."
except ValueError:
for key in ('morningStart', 'afternoonStart', 'eveningStart', 'nightStart'):
error_msg_dict[key] = u"You must set *all* the time controls to proceed. Otherwise, select cancel."
if len(error_msg_dict) > 0:
return False, values_dict, error_msg_dict
self.announcement_update_states()
return True, values_dict
# =============================================================================
# ============================== Plugin Methods ===============================
# =============================================================================
def announcement_clear(self, values_dict, type_id="", target_id=0):
"""
Clear announcement data from input field
Clears whatever is in the Announcement textfield.
-----
:param indigo.dict values_dict:
:param str type_id:
:param int target_id:
:return indigo.dict values_dict:
"""
for key in ('announcementIndex',
'announcementName',
'announcementRefresh',
'announcementList',
'announcementText'
):
values_dict[key] = ''
values_dict['editFlag'] = False
return values_dict
# =============================================================================
def announcement_create_id(self, temp_dict):
"""
Create a unique ID number for the announcement
In order to properly track the various announcement strings, we must assign
each one a unique ID number. We check to see if the number has already been
assigned to another announcement and, if not, the new ID is assigned.
-----
:param dict temp_dict:
"""
# Create a new index number.
index = id('dummy object')
# If the new index happens to exist, repeat until unique.
while index in temp_dict.keys():
index += 1
return index
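# Illustrative sketch (not part of the original plugin): the loop above just
# bumps the candidate id until it is unused, e.g.
#
#   existing = {1001: {'Name': u'one'}, 1002: {'Name': u'two'}}
#   new_id = self.announcement_create_id(existing)
#   assert new_id not in existing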
# =============================================================================
def announcement_delete(self, values_dict, type_id, dev_id):
"""
Delete the highlighted announcement
Called when user clicks the Delete Announcement button
-----
:param indigo.dict values_dict:
:param str type_id:
:param int dev_id:
:return indigo.dict values_dict:
"""
# Open the announcements file and load the contents
with open(name=self.announcements_file) as outfile:
infile = outfile.read()
# Convert the string implementation of the dict to an actual dict, and delete the key.
infile = ast.literal_eval(node_or_string=infile)
index = int(values_dict['announcementList'])
del infile[dev_id][index]
# Open the announcements file and save the new dict.
with open(name=self.announcements_file, mode='w') as outfile:
outfile.write("{0}".format(infile))
for key in ('announcementIndex',
'announcementName',
'announcementRefresh',
'announcementList',
'announcementText'
):
values_dict[key] = ''
values_dict['editFlag'] = False
return values_dict
# =============================================================================
def announcement_duplicate(self, values_dict, type_id, dev_id):
"""
Create a duplicate of the selected announcement
Called when user clicks the Duplicate Announcement button.
-----
:param indigo.dict values_dict:
:param str type_id:
:param int dev_id:
:return indigo.dict values_dict:
"""
index = int(values_dict['announcementList'])
self.logger.info(u"Announcement to be duplicated: {0}".format(index))
# Open the announcements file and load the contents
with open(name=self.announcements_file) as outfile:
infile = outfile.read()
# Convert the string implementation of the dict to an actual dict, and delete the key.
infile = ast.literal_eval(node_or_string=infile)
# Create a new announcement.
temp_dict = infile[dev_id]
new_index = self.announcement_create_id(temp_dict)
temp_dict[new_index] = {}
temp_dict[new_index]['Name'] = infile[dev_id][index]['Name'] + u" copy"
temp_dict[new_index]['Announcement'] = infile[dev_id][index]['Announcement']
temp_dict[new_index]['Refresh'] = infile[dev_id][index]['Refresh']
temp_dict[new_index]['nextRefresh'] = infile[dev_id][index]['nextRefresh']
# Set the dict element equal to the new list
infile[dev_id] = temp_dict
# Open the announcements file and save the new dict.
with open(name=self.announcements_file, mode='w') as outfile:
outfile.write("{0}".format(infile))
return values_dict
# =============================================================================
def announcement_edit(self, values_dict, type_id, dev_id):
"""
Load the selected announcement for editing
Called when user clicks the Edit Announcement button.
-----
:param indigo.dict values_dict:
:param str type_id:
:param int dev_id:
:return indigo.dict values_dict:
"""
self.logger.debug(u"Editing the {0} announcement".format(values_dict['announcementName']))
# Open the announcements file and load the contents
with open(name=self.announcements_file) as outfile:
infile = outfile.read()
# Convert the string implementation of the dict to an actual dict, and get the data for this device.
infile = ast.literal_eval(node_or_string=infile)
temp_dict = infile[dev_id]
# Get the selected announcement index and populate the UI elements.
index = int(values_dict['announcementList'])
values_dict['announcementIndex'] = index
values_dict['announcementName'] = temp_dict[index]['Name']
values_dict['announcementRefresh'] = temp_dict[index]['Refresh']
values_dict['announcementText'] = temp_dict[index]['Announcement']
values_dict['editFlag'] = True
return values_dict
# =============================================================================
def announcementRefreshAction(self, plugin_action):
"""
Refresh an announcement in response to Indigo Action call
The announcementRefreshAction() method is used to force an
announcement to be refreshed by using an Indigo Action Item.
-----
:param indigo.action plugin_action:
"""
announcement_name = plugin_action.props['announcementToRefresh']
device_id = int(plugin_action.props['announcementDeviceToRefresh'])
dev = indigo.devices[device_id]
# Open the announcements file and load the contents
with open(name=self.announcements_file) as outfile:
infile = outfile.read()
# Convert the string implementation of the dict to an actual dict, and get the sub dict for the device.
infile = ast.literal_eval(node_or_string=infile)
# Iterate through the keys to find the right announcement to update.
announcement_dict = infile[int(device_id)]
for key in announcement_dict.keys():
if announcement_dict[key]['Name'] == announcement_name.replace('_', ' '):
announcement = self.substitute(infile[device_id][key]['Announcement'])
result = self.substitution_regex(announcement=announcement)
dev.updateStateOnServer(announcement_name, value=result)
# =============================================================================
def announcement_save(self, values_dict, type_id, dev_id):
"""
Save the current announcement
Called when user clicks the Save Announcement button.
-----
:param indigo.dict values_dict:
:param str type_id:
:param int dev_id:
:return indigo.dict values_dict:
"""
error_msg_dict = indigo.Dict()
# ===================== Validation Methods =====================
# Strip leading and trailing whitespace if there is any.
values_dict['announcementName'] = values_dict['announcementName'].strip()
# Announcement Name empty or 'REQUIRED'
if values_dict['announcementName'].isspace() or values_dict['announcementName'] in ('', 'REQUIRED',):
values_dict['announcementName'] = 'REQUIRED'
error_msg_dict['announcementName'] = u"A announcement name is required."
return values_dict, error_msg_dict
# Announcement Name starts with digit
if values_dict['announcementName'][0].isdigit():
error_msg_dict['announcementName'] = u"An announcement name can not start with a number."
return values_dict, error_msg_dict
# Announcement Name starts with punctuation
exclude = set(string.punctuation)
if values_dict['announcementName'][0] in exclude:
error_msg_dict['announcementName'] = u"A announcement name can not start with punctuation."
return values_dict, error_msg_dict
# Announcement Name starts with XML.
if values_dict['announcementName'][0:3].lower() == 'xml':
error_msg_dict['announcementName'] = u"A announcement name can not start with the letters 'xml'."
return values_dict, error_msg_dict
if not all(ord(char) < 128 for char in values_dict['announcementName']):
error_msg_dict['announcementName'] = u"A announcement name can not contain Unicode characters."
return values_dict, error_msg_dict
# Announcement Text is empty or 'REQUIRED'
if values_dict['announcementText'].isspace() or values_dict['announcementText'] in ('', 'REQUIRED',):
values_dict['announcementText'] = 'REQUIRED'
error_msg_dict['announcementText'] = u"An announcement is required."
return values_dict, error_msg_dict
# Announcement Refresh is not a digit or is less than 1
if not values_dict['announcementRefresh'].isdigit() or int(values_dict['announcementRefresh']) < 1:
error_msg_dict['announcementRefresh'] = u"A positive integer greater than zero is required."
return values_dict, error_msg_dict
# Open the announcements file and load the contents
with open(name=self.announcements_file) as outfile:
infile = outfile.read()
# Convert the string implementation of the dict to an actual dict.
infile = ast.literal_eval(node_or_string=infile)
try:
temp_dict = infile[dev_id]
except KeyError:
temp_dict = {}
# Generate a list of announcement names in use for this device.
announcement_name_list = [temp_dict[key]['Name'] for key in temp_dict.keys()]
# If new announcement, create unique id, then save to dict.
if not values_dict['editFlag'] and values_dict['announcementName'] not in announcement_name_list:
index = self.announcement_create_id(temp_dict=temp_dict)
temp_dict[index] = {}
temp_dict[index]['Name'] = values_dict['announcementName']
temp_dict[index]['Announcement'] = values_dict['announcementText']
temp_dict[index]['Refresh'] = values_dict['announcementRefresh']
temp_dict[index]['nextRefresh'] = unicode(dt.datetime.now())
# If key exists, save to dict.
elif values_dict['editFlag']:
index = int(values_dict['announcementIndex'])
temp_dict[index]['Name'] = values_dict['announcementName']
temp_dict[index]['Announcement'] = values_dict['announcementText']
temp_dict[index]['Refresh'] = values_dict['announcementRefresh']
# User has created a new announcement with a name already in use
else:
index = self.announcement_create_id(temp_dict=temp_dict)
temp_dict[index] = {}
temp_dict[index]['Name'] = values_dict['announcementName'] + u'*'
temp_dict[index]['Announcement'] = values_dict['announcementText']
temp_dict[index]['Refresh'] = values_dict['announcementRefresh']
temp_dict[index]['nextRefresh'] = unicode(dt.datetime.now())
self.logger.error(u"Duplicate announcement name found.")
# Set the dict element equal to the new list
infile[dev_id] = temp_dict
# Open the announcements file and save the new dict.
with open(name=self.announcements_file, mode='w') as outfile:
outfile.write("{0}".format(infile))
# Clear the fields.
for key in ('announcementIndex',
'announcementName',
'announcementRefresh',
'announcementList',
'announcementText'
):
values_dict[key] = ''
values_dict['editFlag'] = False
return values_dict
# =============================================================================
def announcementSpeak(self, values_dict, type_id, dev_id):
"""
Speak the selected announcement
Called when user clicks the Speak Announcement button. If there is
announcement data in the text field, that is what will be spoken;
otherwise, the announcement selected in the list is spoken.
-----
:param indigo.dict values_dict:
:param str type_id:
:param int dev_id:
:return indigo.dict values_dict:
"""
default_string = u"Please select or enter an item to speak."
# The user has entered a value in the announcement field. Speak that.
if len(values_dict['announcementText']) > 0:
result = self.substitution_regex(announcement=self.substitute(values_dict['announcementText']))
indigo.server.speak(result, waitUntilDone=False)
self.logger.info(u"{0}".format(result))
# If the announcement field is blank, and the user has selected an announcement in the list.
elif values_dict['announcementList'] != "":
# Open the announcements file and load the contents
with open(name=self.announcements_file) as outfile:
infile = outfile.read()
# Convert the string implementation of the dict to an actual dict, and get the sub dict for the device.
infile = ast.literal_eval(node_or_string=infile)
announcement = self.substitute(infile[dev_id][int(values_dict['announcementList'])]['Announcement'])
result = self.substitution_regex(announcement=announcement)
indigo.server.speak(result, waitUntilDone=False)
self.logger.info(u"{0}".format(result))
# Otherwise, let the user know that there is nothing to speak.
else:
self.logger.error(default_string)
indigo.server.speak(default_string, waitUntilDone=False)
return values_dict
# =============================================================================
def announcementSpeakAction(self, plugin_action):
"""
Speak an announcement in response to an Indigo action item
Indigo action for speaking any device state or variable value.
-----
:param indigo.action plugin_action:
"""
item_source = int(plugin_action.props['announcementDeviceToRefresh'])
item_to_speak = plugin_action.props['announcementToSpeak']
try:
if item_source in indigo.devices.keys():
announcement = unicode(indigo.devices[item_source].states[item_to_speak])
indigo.server.speak(announcement, waitUntilDone=False)
else:
announcement = indigo.variables[item_source].value
indigo.server.speak(announcement, waitUntilDone=False)
except ValueError:
self.Fogbert.pluginErrorHandler(sub_error=traceback.format_exc())
self.logger.warning(u"Unable to speak {0} value.".format(item_to_speak))
except KeyError:
self.Fogbert.pluginErrorHandler(sub_error=traceback.format_exc())
self.logger.warning(u"No announcements to speak for this device.".format(item_to_speak))
# =============================================================================
def announcement_update_states(self, force=False):
"""
Update the state values of each announcement
Refresh the custom state values of select announcements. The user sets a
preference for how often the plugin will cycle, and a per-announcement refresh
cycle. For example, the plugin will check every X seconds to see if any
announcements require a refresh. The determination is based on the setting for
each announcement and the amount of time that has transpired since it was last
refreshed.
-----
"""
now = indigo.server.getTime()
# Open the announcements file and load the contents
with open(name=self.announcements_file) as outfile:
infile = outfile.read()
# Convert the string implementation of the dict to an actual dict, and get the sub dict for the device.
infile = ast.literal_eval(node_or_string=infile)
for dev in indigo.devices.iter('self'):
states_list = []
if dev.enabled:
if dev.deviceTypeId == 'salutationsDevice':
now = dt.datetime.now()
today = dt.datetime.today().date()
morning_start = int(dev.pluginProps.get('morningStart', '5'))
afternoon_start = int(dev.pluginProps.get('afternoonStart', '12'))
evening_start = int(dev.pluginProps.get('eveningStart', '17'))
night_start = int(dev.pluginProps.get('nightStart', '21'))
morning = dt.datetime.combine(today, dt.time(morning_start, 0))
afternoon = dt.datetime.combine(today, dt.time(afternoon_start, 0))
evening = dt.datetime.combine(today, dt.time(evening_start, 0))
night = dt.datetime.combine(today, dt.time(night_start, 0))
# Determine proper salutation based on the current time.
if morning <= now < afternoon:
intro_value = (dev.pluginProps.get('morningMessageIn', 'Good morning.'))
outro_value = (dev.pluginProps.get('morningMessageOut', 'Have a great morning.'))
elif afternoon <= now < evening:
intro_value = (dev.pluginProps.get('afternoonMessageIn', 'Good afternoon.'))
outro_value = (dev.pluginProps.get('afternoonMessageOut', 'Have a great afternoon.'))
elif evening <= now < night:
intro_value = (dev.pluginProps.get('eveningMessageIn', 'Good evening.'))
outro_value = (dev.pluginProps.get('eveningMessageOut', 'Have a great evening.'))
else:
intro_value = (dev.pluginProps.get('nightMessageIn', 'Good night.'))
outro_value = (dev.pluginProps.get('nightMessageOut', 'Have a great night.'))
# Don't update the device state unless the value has changed.
if intro_value != dev.states['intro']:
self.logger.debug(u"Updating intro to: {0}".format(intro_value))
states_list.append({'key': 'intro', 'value': intro_value})
if outro_value != dev.states['outro']:
self.logger.debug(u"Updating outro to: {0}".format(outro_value))
states_list.append({'key': 'outro', 'value': outro_value})
states_list.append({'key': 'onOffState', 'value': True, 'uiValue': u" "})
dev.updateStatesOnServer(states_list)
elif dev.deviceTypeId == 'announcementsDevice':
# Cycle through the announcements and update as needed
try:
# Look at each plugin device and construct a placeholder if not already present. This is a
# placeholder and doesn't actually write the key back to the file.
if dev.id not in infile.keys():
infile[dev.id] = {}
for key in infile[dev.id].keys():
state_name = u"{0}".format(infile[dev.id][key]['Name'].replace(' ', '_'))
try:
refresh_time = infile[dev.id][key].get('nextRefresh', '1970-01-01 00:00:00')
update_time = parser.parse(refresh_time)
except ValueError as sub_error:
self.Fogbert.pluginErrorHandler(sub_error=traceback.format_exc())
self.logger.warning(u"Error coercing announcement update time.")
update_time = now - dt.timedelta(minutes=1)
# If it's time for an announcement to be refreshed.
if now >= update_time:
# Update the announcement text.
announcement = self.substitute(infile[dev.id][key]['Announcement'])
result = self.substitution_regex(announcement)
states_list.append({'key': state_name, 'value': result})
# Set the next refresh time
next_update = now + dt.timedelta(minutes=int(infile[dev.id][key]['Refresh']))
infile[dev.id][key]['nextRefresh'] = next_update.strftime('%Y-%m-%d %H:%M:%S')
self.logger.debug(u"{0} updated.".format(infile[dev.id][key]['Name']))
elif force:
# Force update the announcement text.
announcement = self.substitute(infile[dev.id][key]['Announcement'])
result = self.substitution_regex(announcement)
states_list.append({'key': state_name, 'value': result})
states_list.append({'key': 'onOffState', 'value': True, 'uiValue': u" "})
dev.updateStatesOnServer(states_list)
except KeyError as sub_error:
self.Fogbert.pluginErrorHandler(sub_error=traceback.format_exc())
# Open the announcements file and save the updated dict.
with open(self.announcements_file, 'w') as outfile:
outfile.write("{0}".format(infile))
# =============================================================================
def announcement_update_states_now(self):
self.announcement_update_states(force=True)
# =============================================================================
def commsKillAll(self):
"""
Disable communication for all plugin-defined devices
commsKillAll() sets the enabled status of all plugin devices to
false.
-----
"""
for dev in indigo.devices.itervalues("self"):
try:
indigo.device.enable(dev, value=False)
except ValueError:
self.Fogbert.pluginErrorHandler(sub_error=traceback.format_exc())
self.logger.critical(u"Exception when trying to kill all comms.")
# =============================================================================
def commsUnkillAll(self):
"""
Enable communication for all plugin-defined devices
commsUnkillAll() sets the enabled status of all plugin devices to
true.
-----
"""
for dev in indigo.devices.itervalues("self"):
try:
indigo.device.enable(dev, value=True)
except ValueError:
self.Fogbert.pluginErrorHandler(sub_error=traceback.format_exc())
self.logger.critical(u"Exception when trying to unkill all comms.")
# =============================================================================
def format_digits(self, match):
"""
Format announcement digits based on announcement criteria
The format_digits function determines the proper formatting routine to
use when converting target values to the specified format. It sends the
target value to the proper function for formatting.
-----
:param regex match:
:return re.match result:
"""
match1 = match.group(1) # the string to be formatted
match2 = match.group(2) # the format specification
match1 = match1.replace('<<', '')
match2 = match2.replace('>>', '')
# Current time conversions specified with ct: ...
if match2.startswith('ct:'):
result = self.format_current_time(match1, match2)
# Datetime conversions specified with dt: ...
elif match2.startswith('dt:'):
result = self.format_datetime(match1, match2)
# Number conversions specified with n: ...
elif match2.startswith('n:'):
result = self.format_number(match1, match2)
else:
result = u"{0} {1}".format(match1, match2)
return result
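# Illustrative sketch of how format_digits() routes the <<value, format>>
# markup (hypothetical announcement snippets, not from the original source):
#   "<<72.4183, n:1>>"             -> format_number()       -> "72.4"
#   "<<2021-06-01 13:05, dt:%A>>"  -> format_datetime()     -> "Tuesday"
#   "<<anything, ct:%H:%M>>"       -> format_current_time() -> e.g. "13:05"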
# =============================================================================
def format_current_time(self, match1, match2):
"""
Format announcement times based on announcement criteria
The format_current_time function is used to create a formatted version
of the current time.
-----
:param str match1:
:param str match2:
:return str result:
"""
match2 = match2.replace('ct:', '')
try:
for char in match2:
if char not in '.,%:-aAwdbBmyYHIpMSfzZjUWcxX ': # allowable datetime specifiers
raise ValueError
match1 = dt.datetime.now()
return "{0:{1}}".format(match1, match2)
except ValueError:
return "Unallowable datetime specifiers: {0} {1}".format(match1, match2)
# =============================================================================
def format_datetime(self, match1, match2):
"""
Format announcement datetime based on announcement criteria
The format_datetime function is used to format the string based on common
Python datetime format specifiers.
-----
:param str match1:
:param str match2:
:return str result:
"""
match2 = match2.replace('dt:', '')
try:
for char in match2:
if char not in '.,%:-aAwdbBmyYHIpMSfzZjUWcxX ': # allowable datetime specifiers
raise ValueError
match1 = parser.parse(match1)
return "{0:{1}}".format(match1, match2)
except ValueError:
return "Unallowable datetime specifiers: {0} {1}".format(match1, match2)
# =============================================================================
def format_number(self, match1, match2):
"""
Format announcement number based on announcement criteria
The format_number function is used to format the string based on common
Python numeric format specifiers
-----
:param str match1:
:param str match2:
:return str result:
"""
match2 = match2.replace('n:', '')
try:
for char in match2:
if char not in '%+-0123456789eEfFgGn': # allowable numeric specifiers
raise ValueError
return u"{0:0.{1}f}".format(float(match1), int(match2))
except ValueError:
return "Unallowable datetime specifiers: {0} {1}".format(match1, match2)
# =============================================================================
def generatorAnnouncementList(self, filter="", values_dict=None, type_id="", target_id=0):
"""
Generate a list of states for Indigo controls
Returns a list of states for selected plugin device.
-----
:param str filter:
:param indigo.dict values_dict:
:param str type_id:
:param int target_id:
:return list result:
"""
try:
announcement_id = int(values_dict['announcementDeviceToRefresh'])
if announcement_id in indigo.devices.keys():
return [(state, state) for state in indigo.devices[announcement_id].states if 'onOffState' not in state]
else:
return [('value', 'Value')]
except KeyError:
return [('None', 'None')]
# =============================================================================
def generatorDeviceList(self, filter="", values_dict=None, type_id="", target_id=0):
"""
Generate a list of plugin-owned devices.
Returns a list of plugin devices. Returns a list of tuples in the form:
[(ID, "Name"), (ID, "Name")].
-----
:param str filter:
:param indigo.dict values_dict:
:param str type_id:
:param int target_id:
:return list result:
"""
return self.Fogbert.deviceList(dev_filter='self')
# =============================================================================
def generatorDevVar(self, filter="", values_dict=None, type_id="", target_id=0):
"""
Generate a list of Indigo devices and variables.
This method collects IDs and names for all Indigo devices and
variables. It creates a list of the form:
[(dev.id, dev.name), (var.id, var.name)].
-----
:param str filter:
:param indigo.dict values_dict:
:param str type_id:
:param int target_id:
:return list result:
"""
return self.Fogbert.deviceAndVariableList()
# =============================================================================
def generatorList(self, filter="", values_dict=None, type_id="", target_id=0):
"""
Generate a list of configured announcements
Populates the list of announcements based on the device's states. Returns a
list based on a dict (infile) of the form:
{announcement ID:
{'Announcement': u"announcement string",
'nextRefresh': 'YYYY-MM-DD HH:MM:SS',
'Name': u"announcement name",
'Refresh': u"minutes"
}
}
The returned list is of the form:
[(announcement ID, announcement name),]
-----
:param str filter:
:param indigo.dict values_dict:
:param str type_id:
:param int target_id:
:return list result:
"""
# Open the announcements file and load the contents
with open(name=self.announcements_file) as input_file:
infile = input_file.read()
# Convert the string implementation of the dict to an actual dict, and get the sub dict for the device.
infile = ast.literal_eval(node_or_string=infile)
# Sort the dict and create a list of tuples for the device config list control.
try:
announcement_list = [(key, infile[target_id][key]['Name']) for key in infile[target_id].keys()]
except KeyError:
announcement_list = []
return sorted(announcement_list, key=lambda (k, val): unicode.lower(val))
# =============================================================================
def generatorStateOrValue(self, filter="", values_dict=None, type_id="", target_id=0):
"""
Return a list of device states or variable value for selected device
The generatorStateOrValue() method returns a list to populate the relevant
device states or variable value to populate a menu control.
-----
:param str filter:
:param indigo.dict values_dict:
:param str type_id:
:param int target_id:
:return list result:
"""
id_number = values_dict.get('devVarMenu', 'None')
return self.Fogbert.generatorStateOrValue(dev_id=id_number)
# =============================================================================
def generator_substitutions(self, values_dict, type_id="", target_id=0):
"""
Generate an Indigo substitution string
The generator_substitutions function is used with the Substitution Generator.
It is the callback that's used to create the Indigo substitution
construct.
-----
:param indigo.dict values_dict:
:param str type_id:
:param int target_id:
:return indigo.dict values_dict:
"""
dev_var_id = values_dict['devVarMenu']
dev_var_value = values_dict['generatorStateOrValue']
try:
if int(values_dict['devVarMenu']) in indigo.devices.keys():
values_dict['subGeneratorResult'] = u"%%d:{0}:{1}%%".format(dev_var_id, dev_var_value)
else:
values_dict['subGeneratorResult'] = u"%%v:{0}%%".format(dev_var_id)
values_dict['devVarMenu'] = ''
values_dict['generatorStateOrValue'] = ''
return values_dict
except ValueError:
announcement = self.substitute(values_dict['textfield1'])
result = self.substitution_regex(announcement=announcement)
self.logger.info(u"Substitution Generator announcement: \"{0}\"".format(result))
return values_dict
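# Illustrative sketch of the substitution strings built above (hypothetical
# device and variable IDs): selecting a device state produces
# u"%%d:12345678:temperature%%", while selecting a variable produces
# u"%%v:23456789%%". The plugin's substitute() call later resolves these
# markers to the current state or variable value.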
# =============================================================================
def generator_time(self, filter="", values_dict=None, type_id="", target_id=0):
"""
Generate a list of times for plugin control menus
Creates a list of times for use in setting salutation settings of the form:
[(0, "00:00"), (1, "01:00"), ...]
-----
:param str filter:
:param indigo.dict values_dict:
:param str type_id:
:param int target_id:
:return list result:
"""
return [(hour, u"{0:02.0f}:00".format(hour)) for hour in range(0, 24)]
# =============================================================================
def refreshFields(self, filter="", type_id="", target_id=0):
"""
Dummy callback to force dynamic control refreshes
The refreshFields() method is a dummy callback used solely to fire
other actions that require a callback be run. It performs no other
function.
-----
:param str filter:
:param str type_id:
:param int target_id:
"""
pass
# =============================================================================
def substitution_regex(self, announcement):
"""
Regex method for formatting substitutions
This is the main regex used for formatting substitutions.
-----
:param str announcement:
:return str result:
"""
return re.sub(r'(<<.*?), *([ct|dt|n:].*?>>)', self.format_digits, announcement)
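# Illustrative end-to-end sketch (assumed announcement text, not from the
# original source): a saved announcement passes through two stages before
# being written to a device state or spoken.
#   raw text : u"The temperature is <<%%d:12345678:temperature%%, n:1>> degrees."
#   after self.substitute()        : u"The temperature is <<72.4183, n:1>> degrees."
#   after self.substitution_regex(): u"The temperature is 72.4 degrees."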
|
|
"""Standard retry behavior.
This contains the default standard retry behavior.
It provides consistent behavior with other AWS SDKs.
The key base classes used for retries:
* ``BaseRetryableChecker`` - Use to check a specific condition that
indicates a retry should happen. This can include things like
max attempts, HTTP status code checks, error code checks etc.
* ``BaseRetryBackoff`` - Use to determine how long we should backoff until
we retry a request. This is the class that will implement delay such
as exponential backoff.
* ``RetryPolicy`` - Main class that determines if a retry should
happen. It can combine data from various BaseRetryableCheckers
to make a final call as to whether or not a retry should happen.
It then uses a ``BaseRetryBackoff`` to determine how long to delay.
* ``RetryHandler`` - The bridge between botocore's event system
used by endpoint.py to manage retries and the interfaces defined
in this module.
This allows us to define an API that has minimal coupling to the event
based API used by botocore.
"""
import random
import logging
from botocore.exceptions import ConnectionError, HTTPClientError
from botocore.exceptions import ReadTimeoutError, ConnectTimeoutError
from botocore.retries import quota
from botocore.retries import special
from botocore.retries.base import BaseRetryBackoff, BaseRetryableChecker
DEFAULT_MAX_ATTEMPTS = 3
logger = logging.getLogger(__name__)
def register_retry_handler(client, max_attempts=DEFAULT_MAX_ATTEMPTS):
retry_quota = RetryQuotaChecker(quota.RetryQuota())
service_id = client.meta.service_model.service_id
service_event_name = service_id.hyphenize()
client.meta.events.register('after-call.%s' % service_event_name,
retry_quota.release_retry_quota)
handler = RetryHandler(
retry_policy=RetryPolicy(
retry_checker=StandardRetryConditions(max_attempts=max_attempts),
retry_backoff=ExponentialBackoff(),
),
retry_event_adapter=RetryEventAdapter(),
retry_quota=retry_quota,
)
unique_id = 'retry-config-%s' % service_event_name
client.meta.events.register(
'needs-retry.%s' % service_event_name, handler.needs_retry,
unique_id=unique_id
)
return handler
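# Illustrative sketch only -- botocore normally selects and wires up the
# retry mode through the client creator based on configuration, so this is
# not the usual entry point. It simply shows the shape of a manual call to
# register_retry_handler() against a hypothetical low-level client:
#
#   import botocore.session
#
#   session = botocore.session.get_session()
#   client = session.create_client('s3')
#   handler = register_retry_handler(client, max_attempts=5)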
class RetryHandler(object):
"""Bridge between botocore's event system and this module.
This class is intended to be hooked to botocore's event system
as an event handler.
"""
def __init__(self, retry_policy, retry_event_adapter, retry_quota):
self._retry_policy = retry_policy
self._retry_event_adapter = retry_event_adapter
self._retry_quota = retry_quota
def needs_retry(self, **kwargs):
"""Connect as a handler to the needs-retry event."""
retry_delay = None
context = self._retry_event_adapter.create_retry_context(**kwargs)
if self._retry_policy.should_retry(context):
# Before we can retry we need to ensure we have sufficient
# capacity in our retry quota.
if self._retry_quota.acquire_retry_quota(context):
retry_delay = self._retry_policy.compute_retry_delay(context)
logger.debug("Retry needed, retrying request after "
"delay of: %s", retry_delay)
else:
logger.debug("Retry needed but retry quota reached, "
"not retrying request.")
else:
logger.debug("Not retrying request.")
self._retry_event_adapter.adapt_retry_response_from_context(
context)
return retry_delay
class RetryEventAdapter(object):
"""Adapter to existing retry interface used in the endpoints layer.
This existing interface for determining if a retry needs to happen
is event based and used in ``botocore.endpoint``. The interface has
grown organically over the years and could use some cleanup. This
adapter converts that interface into the interface used by the
new retry strategies.
"""
def create_retry_context(self, **kwargs):
"""Create context based on needs-retry kwargs."""
response = kwargs['response']
if response is None:
# If response is None it means that an exception was raised
# because we never received a response from the service. This
# could be something like a ConnectionError we get from our
# http layer.
http_response = None
parsed_response = None
else:
http_response, parsed_response = response
# This provides isolation between the kwargs emitted in the
# needs-retry event, and what this module uses to check for
# retries.
context = RetryContext(
attempt_number=kwargs['attempts'],
operation_model=kwargs['operation'],
http_response=http_response,
parsed_response=parsed_response,
caught_exception=kwargs['caught_exception'],
request_context=kwargs['request_dict']['context'],
)
return context
def adapt_retry_response_from_context(self, context):
"""Modify response back to user back from context."""
# This will mutate attributes that are returned back to the end
# user. We do it this way so that all the various retry classes
# don't mutate any input parameters from the needs-retry event.
metadata = context.get_retry_metadata()
if context.parsed_response is not None:
context.parsed_response.setdefault(
'ResponseMetadata', {}).update(metadata)
# Implementation note: this is meant to encapsulate all the misc stuff
# that gets sent in the needs-retry event. This is mapped so that params
# are more clear and explicit.
class RetryContext(object):
"""Normalize a response that we use to check if a retry should occur.
This class smoothes over the different types of responses we may get
from a service including:
* A modeled error response from the service that contains a service
code and error message.
* A raw HTTP response that doesn't contain service protocol specific
error keys.
* An exception received while attempting to retrieve a response.
This could be a ConnectionError we receive from our HTTP layer which
could represent that we weren't able to receive a response from
the service.
This class guarantees that at least one of the above attributes will be
non None.
This class is meant to provide a read-only view into the properties
associated with a possible retryable response. None of the properties
are meant to be modified directly.
"""
def __init__(self, attempt_number, operation_model=None,
parsed_response=None, http_response=None,
caught_exception=None, request_context=None):
# 1-based attempt number.
self.attempt_number = attempt_number
self.operation_model = operation_model
# This is the parsed response dictionary we get from parsing
# the HTTP response from the service.
self.parsed_response = parsed_response
# This is an instance of botocore.awsrequest.AWSResponse.
self.http_response = http_response
# This is a subclass of Exception that will be non None if
# an exception was raised when trying to retrieve a response.
self.caught_exception = caught_exception
# This is the request context dictionary that's added to the
# request dict. This is used to store any additional state
# about the request. We use this for storing retry quota
# capacity.
if request_context is None:
request_context = {}
self.request_context = request_context
self._retry_metadata = {}
# These are misc helper methods to avoid duplication in the various
# checkers.
def get_error_code(self):
"""Check if there was a parsed response with an error code.
If we could not find any error codes, ``None`` is returned.
"""
if self.parsed_response is None:
return
error = self.parsed_response.get('Error', {})
if not isinstance(error, dict):
return
return error.get('Code')
def add_retry_metadata(self, **kwargs):
"""Add key/value pairs to the retry metadata.
This allows any objects during the retry process to add
metadata about any checks/validations that happened.
This gets added to the response metadata in the retry handler.
"""
self._retry_metadata.update(**kwargs)
def get_retry_metadata(self):
return self._retry_metadata.copy()
class RetryPolicy(object):
def __init__(self, retry_checker, retry_backoff):
self._retry_checker = retry_checker
self._retry_backoff = retry_backoff
def should_retry(self, context):
return self._retry_checker.is_retryable(context)
def compute_retry_delay(self, context):
return self._retry_backoff.delay_amount(context)
class ExponentialBackoff(BaseRetryBackoff):
_BASE = 2
_MAX_BACKOFF = 20
def __init__(self, max_backoff=20, random=random.random):
self._base = self._BASE
self._max_backoff = max_backoff
self._random = random
def delay_amount(self, context):
"""Calculates delay based on exponential backoff.
This class implements truncated binary exponential backoff
with jitter::
t_i = min(rand(0, 1) * 2 ** attempt, MAX_BACKOFF)
where ``i`` is the request attempt (0 based).
"""
# The context.attempt_number is a 1-based value, but we have
# to calculate the delay based on a 0-based value. We
# want the first delay to just be ``rand(0, 1)``.
return min(
self._random() * (self._base ** (context.attempt_number - 1)),
self._max_backoff
)
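# Worked example of delay_amount() (hypothetical rand(0, 1) draws): with
# _BASE = 2 and max_backoff = 20, attempt_number 1 yields rand * 2**0
# (at most 1s), attempt_number 2 yields rand * 2**1 (at most 2s),
# attempt_number 5 yields rand * 2**4 (at most 16s), and later attempts
# are capped at 20s.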
class MaxAttemptsChecker(BaseRetryableChecker):
def __init__(self, max_attempts):
self._max_attempts = max_attempts
def is_retryable(self, context):
under_max_attempts = context.attempt_number < self._max_attempts
retries_context = context.request_context.get('retries')
if retries_context:
retries_context['max'] = max(
retries_context.get('max', 0), self._max_attempts
)
if not under_max_attempts:
logger.debug("Max attempts of %s reached.", self._max_attempts)
context.add_retry_metadata(MaxAttemptsReached=True)
return under_max_attempts
class TransientRetryableChecker(BaseRetryableChecker):
_TRANSIENT_ERROR_CODES = [
'RequestTimeout',
'RequestTimeoutException',
'PriorRequestNotComplete',
]
_TRANSIENT_STATUS_CODES = [500, 502, 503, 504]
_TRANSIENT_EXCEPTION_CLS = (
ConnectionError,
HTTPClientError,
)
def __init__(self, transient_error_codes=None,
transient_status_codes=None,
transient_exception_cls=None):
if transient_error_codes is None:
transient_error_codes = self._TRANSIENT_ERROR_CODES[:]
if transient_status_codes is None:
transient_status_codes = self._TRANSIENT_STATUS_CODES[:]
if transient_exception_cls is None:
transient_exception_cls = self._TRANSIENT_EXCEPTION_CLS
self._transient_error_codes = transient_error_codes
self._transient_status_codes = transient_status_codes
self._transient_exception_cls = transient_exception_cls
def is_retryable(self, context):
if context.get_error_code() in self._transient_error_codes:
return True
if context.http_response is not None:
if context.http_response.status_code in \
self._transient_status_codes:
return True
if context.caught_exception is not None:
return isinstance(context.caught_exception,
self._transient_exception_cls)
return False
class ThrottledRetryableChecker(BaseRetryableChecker):
# This is the union of all error codes we've seen that represent
# a throttled error.
_THROTTLED_ERROR_CODES = [
'Throttling',
'ThrottlingException',
'ThrottledException',
'RequestThrottledException',
'TooManyRequestsException',
'ProvisionedThroughputExceededException',
'TransactionInProgressException',
'RequestLimitExceeded',
'BandwidthLimitExceeded',
'LimitExceededException',
'RequestThrottled',
'SlowDown',
'PriorRequestNotComplete',
'EC2ThrottledException',
]
def __init__(self, throttled_error_codes=None):
if throttled_error_codes is None:
throttled_error_codes = self._THROTTLED_ERROR_CODES[:]
self._throttled_error_codes = throttled_error_codes
def is_retryable(self, context):
# Only the error code from a parsed service response is used
# to determine if the response is a throttled response.
return context.get_error_code() in self._throttled_error_codes
class ModeledRetryableChecker(BaseRetryableChecker):
"""Check if an error has been modeled as retryable."""
def __init__(self):
self._error_detector = ModeledRetryErrorDetector()
def is_retryable(self, context):
error_code = context.get_error_code()
if error_code is None:
return False
return self._error_detector.detect_error_type(context) is not None
class ModeledRetryErrorDetector(object):
"""Checks whether or not an error is a modeled retryable error."""
# These are the return values from the detect_error_type() method.
TRANSIENT_ERROR = 'TRANSIENT_ERROR'
THROTTLING_ERROR = 'THROTTLING_ERROR'
# This class is lower level than ModeledRetryableChecker, which
# implements BaseRetryableChecker. This object allows you to distinguish
# between the various types of retryable errors.
def detect_error_type(self, context):
"""Detect the error type associated with an error code and model.
This will either return:
* ``self.TRANSIENT_ERROR`` - If the error is a transient error
* ``self.THROTTLING_ERROR`` - If the error is a throttling error
* ``None`` - If the error is neither type of error.
"""
error_code = context.get_error_code()
op_model = context.operation_model
if op_model is None or not op_model.error_shapes:
return
for shape in op_model.error_shapes:
if shape.metadata.get('retryable') is not None:
# Check if this error code matches the shape. This can
# be either by name or by a modeled error code.
error_code_to_check = (
shape.metadata.get('error', {}).get('code') or shape.name
)
if error_code == error_code_to_check:
if shape.metadata['retryable'].get('throttling'):
return self.THROTTLING_ERROR
return self.TRANSIENT_ERROR
class ThrottlingErrorDetector(object):
def __init__(self, retry_event_adapter):
self._modeled_error_detector = ModeledRetryErrorDetector()
self._fixed_error_code_detector = ThrottledRetryableChecker()
self._retry_event_adapter = retry_event_adapter
# This expects the kwargs from needs-retry to be passed through.
def is_throttling_error(self, **kwargs):
context = self._retry_event_adapter.create_retry_context(**kwargs)
if self._fixed_error_code_detector.is_retryable(context):
return True
error_type = self._modeled_error_detector.detect_error_type(context)
return error_type == self._modeled_error_detector.THROTTLING_ERROR
class StandardRetryConditions(BaseRetryableChecker):
"""Concrete class that implements the standard retry policy checks.
Specifically:
not max_attempts and (transient or throttled or modeled_retry)
"""
def __init__(self, max_attempts=DEFAULT_MAX_ATTEMPTS):
# Note: This class is for convenience so you can have the
# standard retry condition in a single class.
self._max_attempts_checker = MaxAttemptsChecker(max_attempts)
self._additional_checkers = OrRetryChecker([
TransientRetryableChecker(),
ThrottledRetryableChecker(),
ModeledRetryableChecker(),
OrRetryChecker([
special.RetryIDPCommunicationError(),
special.RetryDDBChecksumError(),
])
])
def is_retryable(self, context):
return (
self._max_attempts_checker.is_retryable(context)
and self._additional_checkers.is_retryable(context)
)
class OrRetryChecker(BaseRetryableChecker):
def __init__(self, checkers):
self._checkers = checkers
def is_retryable(self, context):
return any(checker.is_retryable(context) for checker in self._checkers)
class RetryQuotaChecker(object):
_RETRY_COST = 5
_NO_RETRY_INCREMENT = 1
_TIMEOUT_RETRY_REQUEST = 10
_TIMEOUT_EXCEPTIONS = (ConnectTimeoutError, ReadTimeoutError)
# Implementation note: We're not making this a BaseRetryableChecker
# because this isn't just a check if we can retry. This also changes
# state so we have to be careful when/how we call this. Making it
# a BaseRetryableChecker implies you can call .is_retryable(context)
# as many times as you want and not affect anything.
def __init__(self, quota):
self._quota = quota
# This tracks the last amount of retry quota capacity acquired.
self._last_amount_acquired = None
def acquire_retry_quota(self, context):
if self._is_timeout_error(context):
capacity_amount = self._TIMEOUT_RETRY_REQUEST
else:
capacity_amount = self._RETRY_COST
success = self._quota.acquire(capacity_amount)
if success:
# We add the capacity amount to the request context so we know
# how much to release later. The capacity amount can vary based
# on the error.
context.request_context['retry_quota_capacity'] = capacity_amount
return True
context.add_retry_metadata(RetryQuotaReached=True)
return False
def _is_timeout_error(self, context):
return isinstance(context.caught_exception, self._TIMEOUT_EXCEPTIONS)
# This is intended to be hooked up to ``after-call``.
def release_retry_quota(self, context, http_response, **kwargs):
# There are three possible options.
# 1. The HTTP response did not have a 2xx response. In that case we
# give no quota back.
# 2. The HTTP request was successful and was never retried. In
# that case we give _NO_RETRY_INCREMENT back.
# 3. The API call had retries, and we eventually receive an HTTP
# response with a 2xx status code. In that case we give back
# whatever quota was associated with the last acquisition.
if http_response is None:
return
status_code = http_response.status_code
if 200 <= status_code < 300:
if 'retry_quota_capacity' not in context:
self._quota.release(self._NO_RETRY_INCREMENT)
else:
capacity_amount = context['retry_quota_capacity']
self._quota.release(capacity_amount)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal (Gaussian) distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.distributions.python.ops import distribution # pylint: disable=line-too-long
from tensorflow.contrib.distributions.python.ops import kullback_leibler # pylint: disable=line-too-long
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util # pylint: disable=line-too-long
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class Normal(distribution.Distribution):
"""The scalar Normal distribution with mean and stddev parameters mu, sigma.
#### Mathematical details
The PDF of this distribution is:
```f(x) = sqrt(1/(2*pi*sigma^2)) exp(-(x-mu)^2/(2*sigma^2))```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
# Define a single scalar Normal distribution.
dist = tf.contrib.distributions.Normal(mu=0, sigma=3)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1)
# Define a batch of two scalar valued Normals.
# The first has mean 1 and standard deviation 11, the second 2 and 22.
dist = tf.contrib.distributions.Normal(mu=[1, 2.], sigma=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.pdf([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample(3)
```
Arguments are broadcast when possible.
```python
# Define a batch of two scalar valued Normals.
# Both have mean 1, but different standard deviations.
dist = tf.contrib.distributions.Normal(mu=1, sigma=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.pdf(3.0)
```
"""
def __init__(self,
mu,
sigma,
validate_args=True,
allow_nan_stats=False,
name="Normal"):
"""Construct Normal distributions with mean and stddev `mu` and `sigma`.
The parameters `mu` and `sigma` must be shaped in a way that supports
broadcasting (e.g. `mu + sigma` is a valid operation).
Args:
mu: Floating point tensor, the means of the distribution(s).
sigma: Floating point tensor, the stddevs of the distribution(s).
sigma must contain only positive values.
validate_args: Whether to assert that `sigma > 0`. If `validate_args` is
`False`, correct output is not guaranteed when input is invalid.
allow_nan_stats: Boolean, default `False`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: if mu and sigma are different dtypes.
"""
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
with ops.op_scope([mu, sigma], name):
mu = ops.convert_to_tensor(mu)
sigma = ops.convert_to_tensor(sigma)
with ops.control_dependencies([check_ops.assert_positive(sigma)] if
validate_args else []):
self._name = name
self._mu = array_ops.identity(mu, name="mu")
self._sigma = array_ops.identity(sigma, name="sigma")
self._batch_shape = self._ones().get_shape()
self._event_shape = tensor_shape.TensorShape([])
contrib_tensor_util.assert_same_float_dtype((mu, sigma))
@property
def allow_nan_stats(self):
"""Boolean describing behavior when a stat is undefined for batch member."""
return self._allow_nan_stats
@property
def validate_args(self):
"""Boolean describing behavior on invalid input."""
return self._validate_args
@property
def name(self):
return self._name
@property
def dtype(self):
return self._mu.dtype
def batch_shape(self, name="batch_shape"):
"""Batch dimensions of this instance as a 1-D int32 `Tensor`.
The product of the dimensions of the `batch_shape` is the number of
independent distributions of this kind the instance represents.
Args:
name: name to give to the op.
Returns:
`Tensor` `batch_shape`
"""
with ops.name_scope(self.name):
with ops.op_scope([], name):
return array_ops.shape(self._ones())
def get_batch_shape(self):
"""`TensorShape` available at graph construction time.
Same meaning as `batch_shape`. May be only partially defined.
Returns:
batch shape
"""
return self._batch_shape
def event_shape(self, name="event_shape"):
"""Shape of a sample from a single distribution as a 1-D int32 `Tensor`.
Args:
name: name to give to the op.
Returns:
`Tensor` `event_shape`
"""
with ops.name_scope(self.name):
with ops.op_scope([], name):
return constant_op.constant([], dtype=dtypes.int32)
def get_event_shape(self):
"""`TensorShape` available at graph construction time.
Same meaning as `event_shape`. May be only partially defined.
Returns:
event shape
"""
return self._event_shape
@property
def mu(self):
"""Distribution parameter for the mean."""
return self._mu
@property
def sigma(self):
"""Distribution parameter for standard deviation."""
return self._sigma
def mean(self, name="mean"):
"""Mean of this distribution."""
with ops.name_scope(self.name):
with ops.op_scope([self._sigma, self._mu], name):
return self._mu * array_ops.ones_like(self._sigma)
def mode(self, name="mode"):
"""Mode of this distribution."""
return self.mean(name="mode")
def std(self, name="std"):
"""Standard deviation of this distribution."""
with ops.name_scope(self.name):
with ops.op_scope([self._sigma, self._mu], name):
return self._sigma * array_ops.ones_like(self._mu)
def variance(self, name="variance"):
"""Variance of this distribution."""
with ops.name_scope(self.name):
with ops.op_scope([], name):
return math_ops.square(self.std())
def log_prob(self, x, name="log_prob"):
"""Log prob of observations in `x` under these Normal distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
name: The name to give this op.
Returns:
log_prob: tensor of dtype `dtype`, the log-PDFs of `x`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu, self._sigma, x], name):
x = ops.convert_to_tensor(x)
if x.dtype != self.dtype:
raise TypeError("Input x dtype does not match dtype: %s vs. %s"
% (x.dtype, self.dtype))
log_2_pi = constant_op.constant(math.log(2 * math.pi), dtype=self.dtype)
return (-0.5*log_2_pi - math_ops.log(self._sigma)
-0.5*math_ops.square((x - self._mu) / self._sigma))
def cdf(self, x, name="cdf"):
"""CDF of observations in `x` under these Normal distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
name: The name to give this op.
Returns:
cdf: tensor of dtype `dtype`, the CDFs of `x`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu, self._sigma, x], name):
x = ops.convert_to_tensor(x)
if x.dtype != self.dtype:
raise TypeError("Input x dtype does not match dtype: %s vs. %s"
% (x.dtype, self.dtype))
# TODO(ebrevdo): wrap this in a Defun with a custom Defun
# gradient because the analytic gradient may be faster than
# automatic differentiation.
return (0.5 + 0.5*math_ops.erf(
1.0/(math.sqrt(2.0) * self._sigma)*(x - self._mu)))
def log_cdf(self, x, name="log_cdf"):
"""Log CDF of observations `x` under these Normal distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
name: The name to give this op.
Returns:
log_cdf: tensor of dtype `dtype`, the log-CDFs of `x`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu, self._sigma, x], name):
return math_ops.log(self.cdf(x))
def prob(self, x, name="prob"):
"""The PDF of observations in `x` under these Normal distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
name: The name to give this op.
Returns:
prob: tensor of dtype `dtype`, the prob values of `x`.
"""
return super(Normal, self).prob(x, name=name)
def entropy(self, name="entropy"):
"""The entropy of Normal distribution(s).
Args:
name: The name to give this op.
Returns:
entropy: tensor of dtype `dtype`, the entropy.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu, self._sigma], name):
two_pi_e1 = constant_op.constant(
2 * math.pi * math.exp(1), dtype=self.dtype)
# Use broadcasting rules to calculate the full broadcast sigma.
sigma = self._sigma * array_ops.ones_like(self._mu)
return 0.5 * math_ops.log(two_pi_e1 * math_ops.square(sigma))
def sample_n(self, n, seed=None, name="sample_n"):
"""Sample `n` observations from the Normal Distributions.
Args:
n: `Scalar`, type int32, the number of observations to sample.
seed: Python integer, the random seed.
name: The name to give this op.
Returns:
samples: `[n, ...]`, a `Tensor` of `n` samples for each
of the distributions determined by broadcasting the hyperparameters.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu, self._sigma, n], name):
broadcast_shape = (self._mu + self._sigma).get_shape()
n = ops.convert_to_tensor(n)
shape = array_ops.concat(0, ([n], array_ops.shape(self.mean())))
sampled = random_ops.random_normal(
shape=shape, mean=0, stddev=1, dtype=self._mu.dtype, seed=seed)
# Provide some hints to shape inference
n_val = tensor_util.constant_value(n)
final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
sampled.set_shape(final_shape)
return sampled * self._sigma + self._mu
@property
def is_reparameterized(self):
return True
def _ones(self):
return array_ops.ones_like(self._mu + self._sigma)
def _zeros(self):
return array_ops.zeros_like(self._mu + self._sigma)
@property
def is_continuous(self):
return True
@kullback_leibler.RegisterKL(Normal, Normal)
def _kl_normal_normal(n_a, n_b, name=None):
"""Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.
Args:
n_a: instance of a Normal distribution object.
n_b: instance of a Normal distribution object.
name: (optional) Name to use for created operations.
default is "kl_normal_normal".
Returns:
Batchwise KL(n_a || n_b)
"""
with ops.op_scope([n_a.mu, n_b.mu], name, "kl_normal_normal"):
one = constant_op.constant(1, dtype=n_a.dtype)
two = constant_op.constant(2, dtype=n_a.dtype)
half = constant_op.constant(0.5, dtype=n_a.dtype)
s_a_squared = math_ops.square(n_a.sigma)
s_b_squared = math_ops.square(n_b.sigma)
ratio = s_a_squared / s_b_squared
return (math_ops.square(n_a.mu - n_b.mu) / (two * s_b_squared)
+ half * (ratio - one - math_ops.log(ratio)))
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for module django_cloud_deploy.cloudlib."""
from django_cloud_deploy.cloudlib import cloudbuild
from django_cloud_deploy.cloudlib import cloudkms
from django_cloud_deploy.cloudlib import cloud_source
from django_cloud_deploy.cloudlib import container
from django_cloud_deploy.cloudlib import database
from django_cloud_deploy.cloudlib import service_account
from django_cloud_deploy.cloudlib import storage
from django_cloud_deploy.tests.lib import test_base
from django_cloud_deploy.tests.lib import utils
from googleapiclient import discovery
class StorageClientIntegrationTest(test_base.DjangoFileGeneratorTest,
test_base.ResourceCleanUp):
"""Integration test for django_gke.cloudlib.storage."""
def setUp(self):
super().setUp()
self._storage_client = (storage.StorageClient.from_credentials(
self.credentials))
def test_reuse_bucket(self):
bucket_name = utils.get_resource_name('bucket')
with self.clean_up_bucket(bucket_name):
for _ in range(3):
self._storage_client.create_bucket(self.project_id, bucket_name)
def test_set_cors_policy(self):
bucket_name = utils.get_resource_name('bucket')
with self.clean_up_bucket(bucket_name):
self._storage_client.create_bucket(self.project_id, bucket_name)
url = 'http://www.example.com'
self._storage_client.set_cors_policy(bucket_name, url)
client = discovery.build('storage',
'v1',
credentials=self.credentials,
cache_discovery=False)
request = client.buckets().get(bucket=bucket_name)
bucket_body = request.execute(num_retries=5)
cors_policy = bucket_body.get('cors')
self.assertNotEmpty(cors_policy)
self.assertIn(url, cors_policy[0].get('origin'))
class ServiceAccountClientIntegrationTest(test_base.ResourceCleanUp):
"""Integration test for cloudlib.service_account."""
_ROLES = ('roles/cloudsql.client', 'roles/cloudsql.editor',
'roles/cloudsql.admin')
def setUp(self):
super().setUp()
self._service_account_client = (
service_account.ServiceAccountClient.from_credentials(
self.credentials))
def test_create_duplicate_service_account(self):
service_account_id = utils.get_resource_name(resource_type='sa')
# Assert no exceptions are raised when creating the same
# service account twice
for _ in range(2):
self._service_account_client.create_service_account(
self.project_id, service_account_id, 'Test Service Account',
self._ROLES)
class ContainerClientIntegrationTest(test_base.ResourceCleanUp):
"""Integration test for django_cloud_deploy.cloudlib.container."""
def setUp(self):
super().setUp()
self._container_client = container.ContainerClient.from_credentials(
self.credentials)
def test_reuse_cluster(self):
cluster_name = utils.get_resource_name(resource_type='cluster')
with self.clean_up_cluster(cluster_name):
for _ in range(2):
self._container_client.create_cluster_sync(
self.project_id, cluster_name)
class CloudBuildClientIntegrationTest(test_base.ResourceCleanUp,
test_base.ResourceList):
"""Integration test for django_cloud_deploy.cloudlib.cloudbuild."""
def setUp(self):
super().setUp()
self._cloudbuild_client = cloudbuild.CloudBuildClient.from_credentials(
self.credentials)
def test_create_trigger(self):
fake_repo_name = utils.get_resource_name(resource_type='repo')
branch_regexp = 'fake-branch'
env_vars = {
'MY_ENV_VAR1': utils.get_resource_name(resource_type='envvar'),
'MY_ENV_VAR2': utils.get_resource_name(resource_type='envvar')
}
with self.clean_up_cloudbuild_trigger(fake_repo_name):
self._cloudbuild_client.create_trigger(self.project_id,
fake_repo_name,
branch_regexp, env_vars)
service = discovery.build('cloudbuild',
'v1',
credentials=self.credentials,
cache_discovery=False)
request = service.projects().triggers().list(
projectId=self.project_id)
triggers = []
while request:
response = request.execute()
triggers += response.get('triggers', [])
request = service.projects().triggers().list_next(
previous_request=request, previous_response=response)
trigger_repo_names = [
trigger.get('triggerTemplate').get('repoName')
for trigger in triggers
]
self.assertIn(fake_repo_name, trigger_repo_names)
for trigger in triggers:
repo_name = trigger.get('triggerTemplate').get('repoName')
if repo_name == fake_repo_name:
self.assertDictEqual(env_vars, trigger.get('substitutions'))
class CloudSourceRepositoryClientIntegrationTest(test_base.ResourceCleanUp):
def setUp(self):
super().setUp()
self._cloudsource_client = \
cloud_source.CloudSourceRepositoryClient.from_credentials(
self.credentials)
self._cloudsource_service = \
self._cloudsource_client._cloudsource_service
def _create_repo(self, project_id, repo_name):
parent = 'projects/{}'.format(project_id)
resource_name = 'projects/{}/repos/{}'.format(project_id, repo_name)
body = {
'name': resource_name,
}
request = self._cloudsource_service.projects().repos().create(
parent=parent, body=body)
request.execute(num_retries=5)
def test_list_repos(self):
repo_name = utils.get_resource_name(resource_type='repo')
full_repo_name = 'projects/{}/repos/{}'.format(self.project_id,
repo_name)
prev_repos = self._cloudsource_client.list_repos(self.project_id)
prev_repo_names = [repo.get('name') for repo in prev_repos]
self.assertNotIn(full_repo_name, prev_repo_names)
with self.clean_up_repo(repo_name):
self._create_repo(self.project_id, repo_name)
cur_repos = self._cloudsource_client.list_repos(self.project_id)
repo_names = [repo.get('name') for repo in cur_repos]
self.assertIn(full_repo_name, repo_names)
class CloudKmsClientIntegrationTest(test_base.ResourceCleanUp):
def setUp(self):
super().setUp()
self._cloudkms_client = cloudkms.CloudKmsClient.from_credentials(
self.credentials)
def test_create_keyring_and_key(self):
# Keyrings and keys cannot be deleted. And they do not have billable
# costs or quota limitations. So no resource cleanup is here. See
# https://cloud.google.com/kms/docs/object-hierarchy#lifetime
keyring_name = utils.get_resource_name(resource_type='keyring')
key_name = utils.get_resource_name(resource_type='key')
self._cloudkms_client.create_keyring(self.project_id, keyring_name)
keyrings = self._cloudkms_client.list_keyrings(self.project_id)
self.assertIn(keyring_name, keyrings)
self._cloudkms_client.create_key(self.project_id, keyring_name,
key_name)
keys = self._cloudkms_client.list_keys(self.project_id, keyring_name)
self.assertIn(key_name, keys)
def test_encrypt_and_decrypt(self):
keyring_name = 'integration-test'
key_name = 'integration-test-key'
expected_plaintext = 'This is plain text'
ciphertext = self._cloudkms_client.encrypt(expected_plaintext,
self.project_id, key_name,
keyring_name)
plaintext = self._cloudkms_client.decrypt(ciphertext, self.project_id,
key_name, keyring_name)
self.assertEqual(expected_plaintext, plaintext)
|
|
#!/usr/bin/env python
"""
runtests.py [OPTIONS] [-- ARGS]
Run tests, building the project first.
Examples::
$ python runtests.py
$ python runtests.py -s {SAMPLE_SUBMODULE}
$ python runtests.py -t {SAMPLE_TEST}
$ python runtests.py --ipython
$ python runtests.py --python somescript.py
$ python runtests.py --bench
Run a debugger:
$ gdb --args python runtests.py [...other args...]
Generate C code coverage listing under build/lcov/:
(requires http://ltp.sourceforge.net/coverage/lcov.php)
$ python runtests.py --gcov [...other args...]
$ python runtests.py --lcov-html
"""
#
# This is a generic test runner script for projects using NumPy's test
# framework. Change the following values to adapt to your project:
#
PROJECT_MODULE = "scipy"
PROJECT_ROOT_FILES = ['scipy', 'LICENSE.txt', 'setup.py']
SAMPLE_TEST = "scipy.fftpack.tests.test_real_transforms::TestIDSTIIIInt"
SAMPLE_SUBMODULE = "optimize"
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
'/usr/local/lib/ccache', '/usr/local/lib/f90cache']
# ---------------------------------------------------------------------
if __doc__ is None:
__doc__ = "Run without -OO if you want usage info"
else:
__doc__ = __doc__.format(**globals())
import sys
import os
# In case we are run from the source directory, we don't want to import the
# project from there:
sys.path.pop(0)
from argparse import ArgumentParser, REMAINDER
import shutil
import subprocess
import time
import datetime
try:
from types import ModuleType as new_module
except ImportError: # old Python
from imp import new_module
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("--verbose", "-v", action="count", default=1,
help="more verbosity")
parser.add_argument("--no-build", "-n", action="store_true", default=False,
help="do not build the project (use system installed version)")
parser.add_argument("--build-only", "-b", action="store_true", default=False,
help="just build, do not run any tests")
parser.add_argument("--doctests", action="store_true", default=False,
help="Run doctests in module")
parser.add_argument("--refguide-check", action="store_true", default=False,
help="Run refguide check (do not run regular tests.)")
parser.add_argument("--coverage", action="store_true", default=False,
help=("report coverage of project code. HTML output"
" goes under build/coverage"))
parser.add_argument("--gcov", action="store_true", default=False,
help=("enable C code coverage via gcov (requires GCC)."
" gcov output goes to build/**/*.gc*"))
parser.add_argument("--lcov-html", action="store_true", default=False,
help=("produce HTML for C code coverage information "
"from a previous run with --gcov. "
"HTML output goes to build/lcov/"))
parser.add_argument("--mode", "-m", default="fast",
help="'fast', 'full', or something that could be "
"passed to nosetests -A [default: fast]")
parser.add_argument("--submodule", "-s", default=None,
help="Submodule whose tests to run (cluster,"
" constants, ...)")
parser.add_argument("--pythonpath", "-p", default=None,
help="Paths to prepend to PYTHONPATH")
parser.add_argument("--tests", "-t", action='append',
help="Specify tests to run")
parser.add_argument("--python", action="store_true",
help="Start a Python shell with PYTHONPATH set")
parser.add_argument("--ipython", "-i", action="store_true",
help="Start IPython shell with PYTHONPATH set")
parser.add_argument("--shell", action="store_true",
help="Start Unix shell with PYTHONPATH set")
parser.add_argument("--debug", "-g", action="store_true",
help="Debug build")
parser.add_argument("--parallel", "-j", type=int, default=1,
help="Number of parallel jobs for build and testing")
parser.add_argument("--show-build-log", action="store_true",
help="Show build output rather than using a log file")
parser.add_argument("--bench", action="store_true",
help="Run benchmark suite instead of test suite")
parser.add_argument("--bench-compare", action="append", metavar="BEFORE",
help=("Compare benchmark results of current HEAD to"
" BEFORE. Use an additional "
"--bench-compare=COMMIT to override HEAD with"
" COMMIT. Note that you need to commit your "
"changes first!"
))
parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
help="Arguments to pass to Nose, Python or shell")
parser.add_argument("--pep8", action="store_true", default=False,
help="Perform pep8 check with pycodestyle.")
parser.add_argument("--doc", action="append", nargs="?",
const="html-scipyorg", help="Build documentation")
args = parser.parse_args(argv)
if args.pep8:
# os.system("flake8 scipy --ignore=F403,F841,F401,F811,F405,E121,E122,"
# "E123,E125,E126,E127,E128,E226,E231,E251,E265,E266,E302,"
# "E402,E501,E712,E721,E731,E741,W291,W293,W391,W503,W504"
# "--exclude=scipy/_lib/six.py")
os.system("pycodestyle scipy benchmarks/benchmarks")
sys.exit(0)
if args.bench_compare:
args.bench = True
args.no_build = True # ASV does the building
if args.lcov_html:
# generate C code coverage output
lcov_generate()
sys.exit(0)
if args.pythonpath:
for p in reversed(args.pythonpath.split(os.pathsep)):
sys.path.insert(0, p)
if args.gcov:
gcov_reset_counters()
if args.debug and args.bench:
print("*** Benchmarks should not be run against debug version; "
"remove -g flag ***")
if not args.no_build:
site_dir = build_project(args)
sys.path.insert(0, site_dir)
os.environ['PYTHONPATH'] = site_dir
extra_argv = args.args[:]
if extra_argv and extra_argv[0] == '--':
extra_argv = extra_argv[1:]
if args.python:
if extra_argv:
# Don't use subprocess, since we don't want to include the
# current path in PYTHONPATH.
sys.argv = extra_argv
with open(extra_argv[0], 'r') as f:
script = f.read()
sys.modules['__main__'] = new_module('__main__')
ns = dict(__name__='__main__',
__file__=extra_argv[0])
exec(script, ns)
sys.exit(0)
else:
import code
code.interact()
sys.exit(0)
if args.ipython:
import IPython
IPython.embed(user_ns={})
sys.exit(0)
if args.shell:
shell = os.environ.get('SHELL', 'sh')
print("Spawning a Unix shell...")
os.execv(shell, [shell] + extra_argv)
sys.exit(1)
if args.doc:
cmd = ["make", "-Cdoc", 'PYTHON="{}"'.format(sys.executable)]
cmd += args.doc
if args.parallel:
cmd.append('SPHINXOPTS="-j{}"'.format(args.parallel))
subprocess.run(cmd, check=True)
sys.exit(0)
if args.coverage:
dst_dir = os.path.join(ROOT_DIR, 'build', 'coverage')
fn = os.path.join(dst_dir, 'coverage_html.js')
if os.path.isdir(dst_dir) and os.path.isfile(fn):
shutil.rmtree(dst_dir)
extra_argv += ['--cov-report=html:' + dst_dir]
if args.refguide_check:
cmd = [os.path.join(ROOT_DIR, 'tools', 'refguide_check.py'),
'--doctests']
if args.submodule:
cmd += [args.submodule]
os.execv(sys.executable, [sys.executable] + cmd)
sys.exit(0)
if args.bench:
# Run ASV
items = extra_argv
if args.tests:
items += args.tests
if args.submodule:
items += [args.submodule]
bench_args = []
for a in items:
bench_args.extend(['--bench', a])
if not args.bench_compare:
cmd = [os.path.join(ROOT_DIR, 'benchmarks', 'run.py'),
'run', '-n', '-e', '--python=same'] + bench_args
os.execv(sys.executable, [sys.executable] + cmd)
sys.exit(1)
else:
if len(args.bench_compare) == 1:
commit_a = args.bench_compare[0]
commit_b = 'HEAD'
elif len(args.bench_compare) == 2:
commit_a, commit_b = args.bench_compare
else:
parser.error("Too many commits to compare benchmarks for")
# Check for uncommitted files
if commit_b == 'HEAD':
r1 = subprocess.call(['git', 'diff-index', '--quiet',
'--cached', 'HEAD'])
r2 = subprocess.call(['git', 'diff-files', '--quiet'])
if r1 != 0 or r2 != 0:
print("*"*80)
print("WARNING: you have uncommitted changes --- "
"these will NOT be benchmarked!")
print("*"*80)
# Fix commit ids (HEAD is local to current repo)
p = subprocess.Popen(['git', 'rev-parse', commit_b],
stdout=subprocess.PIPE)
out, err = p.communicate()
commit_b = out.strip()
p = subprocess.Popen(['git', 'rev-parse', commit_a],
stdout=subprocess.PIPE)
out, err = p.communicate()
commit_a = out.strip()
cmd = [os.path.join(ROOT_DIR, 'benchmarks', 'run.py'),
'continuous', '-e', '-f', '1.05',
commit_a, commit_b] + bench_args
os.execv(sys.executable, [sys.executable] + cmd)
sys.exit(1)
if args.build_only:
sys.exit(0)
else:
__import__(PROJECT_MODULE)
test = sys.modules[PROJECT_MODULE].test
if args.submodule:
tests = [PROJECT_MODULE + "." + args.submodule]
elif args.tests:
tests = args.tests
else:
tests = None
# Run the tests
if not args.no_build:
test_dir = site_dir
else:
test_dir = os.path.join(ROOT_DIR, 'build', 'test')
if not os.path.isdir(test_dir):
os.makedirs(test_dir)
shutil.copyfile(os.path.join(ROOT_DIR, '.coveragerc'),
os.path.join(test_dir, '.coveragerc'))
cwd = os.getcwd()
try:
os.chdir(test_dir)
result = test(args.mode,
verbose=args.verbose,
extra_argv=extra_argv,
doctests=args.doctests,
coverage=args.coverage,
tests=tests,
parallel=args.parallel)
finally:
os.chdir(cwd)
if isinstance(result, bool):
sys.exit(0 if result else 1)
elif result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
def build_project(args):
"""
Build a dev version of the project.
Returns
-------
site_dir
site-packages directory where it was installed
"""
root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
for fn in PROJECT_ROOT_FILES]
if not all(root_ok):
print("To build the project, run runtests.py in "
"git checkout or unpacked source")
sys.exit(1)
dst_dir = os.path.join(ROOT_DIR, 'build', 'testenv')
env = dict(os.environ)
cmd = [sys.executable, 'setup.py']
# Always use ccache, if installed
env['PATH'] = os.pathsep.join(EXTRA_PATH +
env.get('PATH', '').split(os.pathsep))
if args.debug or args.gcov:
# assume everyone uses gcc/gfortran
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
if args.gcov:
import distutils.sysconfig
cvars = distutils.sysconfig.get_config_vars()
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
env['CC'] = cvars['CC'] + ' --coverage'
env['CXX'] = cvars['CXX'] + ' --coverage'
env['F77'] = 'gfortran --coverage '
env['F90'] = 'gfortran --coverage '
env['LDSHARED'] = cvars['LDSHARED'] + ' --coverage'
env['LDFLAGS'] = " ".join(cvars['LDSHARED'].split()[1:]) +\
' --coverage'
cmd += ['build']
if args.parallel > 1:
cmd += ['-j', str(args.parallel)]
# Install; avoid producing eggs so SciPy can be imported from dst_dir.
cmd += ['install', '--prefix=' + dst_dir,
'--single-version-externally-managed',
'--record=' + os.path.join(dst_dir, 'tmp_install_log.txt')]
from distutils.sysconfig import get_python_lib
site_dir = get_python_lib(prefix=dst_dir, plat_specific=True)
# easy_install refuses to install to a path that Python cannot see by
# default and that isn't on the PYTHONPATH. Plus, the directory has to exist.
if not os.path.exists(site_dir):
os.makedirs(site_dir)
env['PYTHONPATH'] = site_dir
log_filename = os.path.join(ROOT_DIR, 'build.log')
start_time = datetime.datetime.now()
if args.show_build_log:
ret = subprocess.call(cmd, env=env, cwd=ROOT_DIR)
else:
log_filename = os.path.join(ROOT_DIR, 'build.log')
print("Building, see build.log...")
with open(log_filename, 'w') as log:
p = subprocess.Popen(cmd, env=env, stdout=log, stderr=log,
cwd=ROOT_DIR)
try:
# Wait for it to finish, and print something to indicate the
# process is alive, but only if the log file has grown (to
# allow continuous integration environments to kill a hanging
# process accurately if it produces no output)
last_blip = time.time()
last_log_size = os.stat(log_filename).st_size
while p.poll() is None:
time.sleep(0.5)
if time.time() - last_blip > 60:
log_size = os.stat(log_filename).st_size
if log_size > last_log_size:
elapsed = datetime.datetime.now() - start_time
print(" ... build in progress ({0} "
"elapsed)".format(elapsed))
last_blip = time.time()
last_log_size = log_size
ret = p.wait()
except: # noqa: E722
p.terminate()
raise
elapsed = datetime.datetime.now() - start_time
if ret == 0:
print("Build OK ({0} elapsed)".format(elapsed))
else:
if not args.show_build_log:
with open(log_filename, 'r') as f:
print(f.read())
print("Build failed! ({0} elapsed)".format(elapsed))
sys.exit(1)
return site_dir
#
# GCOV support
#
def gcov_reset_counters():
print("Removing previous GCOV .gcda files...")
build_dir = os.path.join(ROOT_DIR, 'build')
for dirpath, dirnames, filenames in os.walk(build_dir):
for fn in filenames:
if fn.endswith('.gcda') or fn.endswith('.da'):
pth = os.path.join(dirpath, fn)
os.unlink(pth)
#
# LCOV support
#
LCOV_OUTPUT_FILE = os.path.join(ROOT_DIR, 'build', 'lcov.out')
LCOV_HTML_DIR = os.path.join(ROOT_DIR, 'build', 'lcov')
def lcov_generate():
try:
os.unlink(LCOV_OUTPUT_FILE)
except OSError:
pass
try:
shutil.rmtree(LCOV_HTML_DIR)
except OSError:
pass
print("Capturing lcov info...")
subprocess.call(['lcov', '-q', '-c',
'-d', os.path.join(ROOT_DIR, 'build'),
'-b', ROOT_DIR,
'--output-file', LCOV_OUTPUT_FILE])
print("Generating lcov HTML output...")
ret = subprocess.call(['genhtml', '-q', LCOV_OUTPUT_FILE,
'--output-directory', LCOV_HTML_DIR,
'--legend', '--highlight'])
if ret != 0:
print("genhtml failed!")
else:
print("HTML output generated under build/lcov/")
if __name__ == "__main__":
main(argv=sys.argv[1:])
|
|
# -*- coding: utf-8 -*-
"""
sale
"""
from trytond.model import fields
from trytond.transaction import Transaction
from trytond.pool import Pool, PoolMeta
from trytond.pyson import Eval, Or, Bool
__all__ = ['Sale', 'SaleLine']
__metaclass__ = PoolMeta
class Sale:
__name__ = 'sale.sale'
channel = fields.Many2One(
'sale.channel', 'Channel', required=True, select=True, domain=[
('id', 'in', Eval('context', {}).get('allowed_read_channels', [])),
],
states={
'readonly': Or(
(Eval('id', 0) > 0),
Bool(Eval('lines', [])),
)
}, depends=['id']
)
channel_type = fields.Function(
fields.Char('Channel Type'), 'on_change_with_channel_type'
)
has_channel_exception = fields.Function(
fields.Boolean('Has Channel Exception ?'), 'get_has_channel_exception',
searcher='search_has_channel_exception'
)
exceptions = fields.One2Many(
"channel.exception", "origin", "Exceptions"
)
# XXX: to identify sale order in external channel
channel_identifier = fields.Char('Channel Identifier', readonly=True)
@classmethod
def view_attributes(cls):
return super(Sale, cls).view_attributes() + [
('//page[@id="exceptions"]', 'states', {
'invisible': Eval('source') == 'manual'
})]
@classmethod
def validate(cls, sales):
super(Sale, cls).validate(sales)
for sale in sales:
sale.check_channel_identifier()
def check_channel_identifier(self):
"""
Make sure sale has no duplicate channel identifier
"""
if self.channel_identifier and self.search([
('channel_identifier', '=', self.channel_identifier),
('id', '!=', self.id),
]):
self.raise_user_error('duplicate_order', (self.channel_identifier,))
@classmethod
def search_has_channel_exception(cls, name, clause):
"""
Returns domain for sale with exceptions
"""
if clause[2]:
return [('exceptions.is_resolved', '=', False)]
else:
return [
'OR',
[('exceptions', '=', None)],
[('exceptions.is_resolved', '=', True)],
]
def get_channel_exceptions(self, name=None):
ChannelException = Pool().get('channel.exception')
return map(
int, ChannelException.search([
('origin', '=', '%s,%s' % (self.__name__, self.id)),
('channel', '=', self.channel.id),
], order=[('is_resolved', 'desc')])
)
@classmethod
def set_channel_exceptions(cls, exceptions, name, value):
pass
def get_has_channel_exception(self, name):
"""
Returns True if the sale has an unresolved channel exception
"""
ChannelException = Pool().get('channel.exception')
return bool(
ChannelException.search([
('origin', '=', '%s,%s' % (self.__name__, self.id)),
('channel', '=', self.channel.id),
('is_resolved', '=', False)
])
)
@classmethod
def __setup__(cls):
super(Sale, cls).__setup__()
cls._error_messages.update({
'channel_missing': (
'Go to user preferences and select a current_channel ("%s")'
),
'channel_change_not_allowed': (
'Cannot change channel'
),
'not_create_channel': (
'You cannot create orders under this channel because you do not '
'have the required permissions'
),
"duplicate_order": 'Sale with Order ID "%s" already exists',
})
@classmethod
def default_channel(cls):
User = Pool().get('res.user')
user = User(Transaction().user)
channel_id = Transaction().context.get('current_channel')
if channel_id:
return channel_id
return user.current_channel and \
user.current_channel.id # pragma: nocover
@staticmethod
def default_company():
Sale = Pool().get('sale.sale')
Channel = Pool().get('sale.channel')
channel_id = Sale.default_channel()
if channel_id:
return Channel(channel_id).company.id
return Transaction().context.get('company') # pragma: nocover
@staticmethod
def default_invoice_method():
Sale = Pool().get('sale.sale')
Channel = Pool().get('sale.channel')
Config = Pool().get('sale.configuration')
channel_id = Sale.default_channel()
if not channel_id: # pragma: nocover
config = Config(1)
return config.sale_invoice_method
return Channel(channel_id).invoice_method
@staticmethod
def default_shipment_method():
Sale = Pool().get('sale.sale')
Channel = Pool().get('sale.channel')
Config = Pool().get('sale.configuration')
channel_id = Sale.default_channel()
if not channel_id: # pragma: nocover
config = Config(1)
return config.sale_shipment_method
return Channel(channel_id).shipment_method
@staticmethod
def default_warehouse():
Sale = Pool().get('sale.sale')
Channel = Pool().get('sale.channel')
Location = Pool().get('stock.location')
channel_id = Sale.default_channel()
if not channel_id: # pragma: nocover
return Location.search([('type', '=', 'warehouse')], limit=1)[0].id
else:
return Channel(channel_id).warehouse.id
@staticmethod
def default_price_list():
Sale = Pool().get('sale.sale')
Channel = Pool().get('sale.channel')
channel_id = Sale.default_channel()
if channel_id:
return Channel(channel_id).price_list.id
return None # pragma: nocover
@staticmethod
def default_payment_term():
Sale = Pool().get('sale.sale')
Channel = Pool().get('sale.channel')
channel_id = Sale.default_channel()
if channel_id:
return Channel(channel_id).payment_term.id
return None # pragma: nocover
@fields.depends('channel', 'party')
def on_change_channel(self):
if not self.channel:
return # pragma: nocover
for fname in ('company', 'warehouse', 'currency', 'payment_term'):
fvalue = getattr(self.channel, fname)
if fvalue:
setattr(self, fname, fvalue.id)
if (not self.party or not self.party.sale_price_list):
self.price_list = self.channel.price_list.id # pragma: nocover
if self.channel.invoice_method:
self.invoice_method = self.channel.invoice_method
if self.channel.shipment_method:
self.shipment_method = self.channel.shipment_method
@fields.depends('channel', 'price_list', 'invoice_address', 'payment_term')
def on_change_party(self): # pragma: nocover
super(Sale, self).on_change_party()
channel = self.channel
if channel:
if not self.price_list and self.invoice_address:
self.price_list = channel.price_list.id
self.price_list.rec_name = channel.price_list.rec_name
if not self.payment_term and self.invoice_address:
self.payment_term = channel.payment_term.id
self.payment_term.rec_name = self.channel.payment_term.rec_name
@fields.depends('channel')
def on_change_with_channel_type(self, name=None):
"""
Returns the source of the channel
"""
if self.channel:
return self.channel.source
def check_create_access(self, silent=False):
"""
Check sale creation in channel
"""
User = Pool().get('res.user')
user = User(Transaction().user)
if user.id == 0:
return # pragma: nocover
if self.channel not in user.allowed_create_channels:
if silent:
return False
self.raise_user_error('not_create_channel')
return True
@classmethod
def write(cls, sales, values, *args):
"""
Check if channel in sale is is user's create_channel
"""
if 'channel' in values:
# Channel cannot be changed at any cost.
cls.raise_user_error('channel_change_not_allowed')
super(Sale, cls).write(sales, values, *args)
@classmethod
def create(cls, vlist):
"""
Check if user is allowed to create sale in channel
"""
User = Pool().get('res.user')
user = User(Transaction().user)
for values in vlist:
if 'channel' not in values and not cls.default_channel():
cls.raise_user_error(
'channel_missing', (user.rec_name,)
) # pragma: nocover
sales = super(Sale, cls).create(vlist)
for sale in sales:
sale.check_create_access()
return sales
@classmethod
def copy(cls, sales, default=None):
"""
Duplicating records
"""
if default is None:
default = {}
for sale in sales:
if not sale.check_create_access(True):
default['channel'] = cls.default_channel()
default['channel_identifier'] = None
return super(Sale, cls).copy(sales, default=default)
def process_to_channel_state(self, channel_state):
"""
Process the sale in tryton based on the state of the order
when it is imported from the channel
:param channel_state: State of the order on the external channel it
was imported from
"""
Sale = Pool().get('sale.sale')
Shipment = Pool().get('stock.shipment.out')
data = self.channel.get_tryton_action(channel_state)
if data['action'] in ['process_manually', 'process_automatically']:
Sale.quote([self])
Sale.confirm([self])
if data['action'] == 'process_automatically':
Sale.process([self])
for shipment in self.shipments:
if shipment.state == 'draft':
Shipment.wait([shipment])
if shipment.state == 'waiting':
Shipment.assign_try([shipment])
if data['action'] == 'import_as_past':
# XXX: mark past orders as completed
self.state = 'done'
self.save()
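# Illustrative note (the dict shape below is inferred from the code above,
# not from the channel modules themselves): get_tryton_action(channel_state)
# is expected to return something like
#
#     {'action': 'process_automatically'}
#
# where 'action' is one of 'process_manually', 'process_automatically' or
# 'import_as_past'.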
class SaleLine:
"Sale Line"
__name__ = 'sale.line'
# XXX: to identify sale order item in external channel
channel_identifier = fields.Char('Channel Identifier', readonly=True)
@classmethod
def __setup__(cls):
"""
Setup the class before adding to pool
"""
super(SaleLine, cls).__setup__()
cls._error_messages.update({
"duplicate_order_line":
'Sale Line with Order Item ID "%s" already exists',
})
@classmethod
def copy(cls, lines, default=None):
"""
Duplicating records
"""
if default is None:
default = {}
default['channel_identifier'] = None
return super(SaleLine, cls).copy(lines, default=default)
@classmethod
def validate(cls, lines):
super(SaleLine, cls).validate(lines)
for line in lines:
line.check_channel_identifier()
def check_channel_identifier(self):
"""
Make sure sale line has no duplicate channel identifier
"""
if self.channel_identifier and self.search([
('channel_identifier', '=', self.channel_identifier),
('id', '!=', self.id),
]):
self.raise_user_error(
'duplicate_order_line', (self.channel_identifier,)
)
|
|
"""
Utility Routines for Working with Matplotlib Objects
====================================================
"""
import itertools
import io
import base64
import numpy as np
import warnings
import matplotlib
from matplotlib.colors import colorConverter
from matplotlib.path import Path
from matplotlib.markers import MarkerStyle
from matplotlib.transforms import Affine2D
from matplotlib import ticker
def color_to_hex(color):
"""Convert matplotlib color code to hex color code"""
if color is None or colorConverter.to_rgba(color)[3] == 0:
return 'none'
else:
rgb = colorConverter.to_rgb(color)
return '#{0:02X}{1:02X}{2:02X}'.format(*(int(255 * c) for c in rgb))
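# Illustrative examples (hypothetical values, not part of the original
# module):
#
#     >>> color_to_hex('red')
#     '#FF0000'
#     >>> color_to_hex(None)
#     'none'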
def many_to_one(input_dict):
"""Convert a many-to-one mapping to a one-to-one mapping"""
return dict((key, val)
for keys, val in input_dict.items()
for key in keys)
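# Minimal example of the expansion performed above, which is how the
# LINESTYLES table below is built (hypothetical values):
#
#     >>> many_to_one({('a', 'b'): 1, ('c',): 2}) == {'a': 1, 'b': 1, 'c': 2}
#     True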
LINESTYLES = many_to_one({('solid', '-', (None, None)): "10,0",
('dashed', '--'): "6,6",
('dotted', ':'): "2,2",
('dashdot', '-.'): "4,4,2,4",
('', ' ', 'None', 'none'): "none"})
def get_dasharray(obj, i=None):
"""Get an SVG dash array for the given matplotlib linestyle
Parameters
----------
obj : matplotlib object
The matplotlib line or path object, which must have a get_linestyle()
method which returns a valid matplotlib line code
i : integer (optional)
Index used to select a single style when get_linestyle() returns a
list of styles (e.g. for collections).
Returns
-------
dasharray : string
The HTML/SVG dasharray code associated with the object.
"""
if obj.__dict__.get('_dashSeq', None) is not None:
return ','.join(map(str, obj._dashSeq))
else:
ls = obj.get_linestyle()
if i is not None:
ls = ls[i]
dasharray = LINESTYLES.get(ls, None)
if dasharray is None:
warnings.warn("dash style '{0}' not understood: "
"defaulting to solid.".format(ls))
dasharray = LINESTYLES['-']
return dasharray
PATH_DICT = {Path.LINETO: 'L',
Path.MOVETO: 'M',
Path.CURVE3: 'S',
Path.CURVE4: 'C',
Path.CLOSEPOLY: 'Z'}
def SVG_path(path, transform=None, simplify=False):
"""Construct the vertices and SVG codes for the path
Parameters
----------
path : matplotlib.Path object
transform : matplotlib transform (optional)
if specified, the path will be transformed before computing the output.
Returns
-------
vertices : array
The shape (M, 2) array of vertices of the Path. Note that some Path
codes require multiple vertices, so the length of these vertices may
be longer than the list of path codes.
path_codes : list
A length N list of single-character path codes, N <= M. Each code is
a single character, in ['L','M','S','C','Z']. See the standard SVG
path specification for a description of these.
"""
if transform is not None:
path = path.transformed(transform)
vc_tuples = [(vertices if path_code != Path.CLOSEPOLY else [],
PATH_DICT[path_code])
for (vertices, path_code)
in path.iter_segments(simplify=simplify)]
if not vc_tuples:
# empty path is a special case
return np.zeros((0, 2)), []
else:
vertices, codes = zip(*vc_tuples)
vertices = np.array(list(itertools.chain(*vertices))).reshape(-1, 2)
return vertices, list(codes)
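# Minimal illustrative example (hypothetical, not from the original module):
# SVG_path on a closed unit square yields one 'M', three 'L' codes and a
# final 'Z'; the CLOSEPOLY vertex is dropped, so four vertices remain.
#
#     >>> square = Path([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)],
#     ...               [Path.MOVETO, Path.LINETO, Path.LINETO,
#     ...                Path.LINETO, Path.CLOSEPOLY])
#     >>> vertices, codes = SVG_path(square)
#     >>> codes
#     ['M', 'L', 'L', 'L', 'Z']
#     >>> vertices.shape
#     (4, 2)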
def get_path_style(path, fill=True):
"""Get the style dictionary for matplotlib path objects"""
style = {}
style['alpha'] = path.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['edgecolor'] = color_to_hex(path.get_edgecolor())
if fill:
style['facecolor'] = color_to_hex(path.get_facecolor())
else:
style['facecolor'] = 'none'
style['edgewidth'] = path.get_linewidth()
style['dasharray'] = get_dasharray(path)
style['zorder'] = path.get_zorder()
return style
def get_line_style(line):
"""Get the style dictionary for matplotlib line objects"""
style = {}
style['alpha'] = line.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['color'] = color_to_hex(line.get_color())
style['linewidth'] = line.get_linewidth()
style['dasharray'] = get_dasharray(line)
style['zorder'] = line.get_zorder()
return style
def get_marker_style(line):
"""Get the style dictionary for matplotlib marker objects"""
style = {}
style['alpha'] = line.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['facecolor'] = color_to_hex(line.get_markerfacecolor())
style['edgecolor'] = color_to_hex(line.get_markeredgecolor())
style['edgewidth'] = line.get_markeredgewidth()
style['marker'] = line.get_marker()
markerstyle = MarkerStyle(line.get_marker())
markersize = line.get_markersize()
markertransform = (markerstyle.get_transform()
+ Affine2D().scale(markersize, -markersize))
style['markerpath'] = SVG_path(markerstyle.get_path(),
markertransform)
style['markersize'] = markersize
style['zorder'] = line.get_zorder()
return style
def get_text_style(text):
"""Return the text style dict for a text instance"""
style = {}
style['alpha'] = text.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['fontsize'] = text.get_size()
style['color'] = color_to_hex(text.get_color())
style['halign'] = text.get_horizontalalignment() # left, center, right
style['valign'] = text.get_verticalalignment() # baseline, center, top
style['rotation'] = text.get_rotation()
style['zorder'] = text.get_zorder()
return style
def get_axis_properties(axis):
"""Return the property dictionary for a matplotlib.Axis instance"""
props = {}
label1On = axis._major_tick_kw.get('label1On', True)
if isinstance(axis, matplotlib.axis.XAxis):
if label1On:
props['position'] = "bottom"
else:
props['position'] = "top"
elif isinstance(axis, matplotlib.axis.YAxis):
if label1On:
props['position'] = "left"
else:
props['position'] = "right"
else:
raise ValueError("{0} should be an Axis instance".format(axis))
# Use tick values if appropriate
locator = axis.get_major_locator()
props['nticks'] = len(locator())
if isinstance(locator, ticker.FixedLocator):
props['tickvalues'] = list(locator())
else:
props['tickvalues'] = None
# Find tick formats
formatter = axis.get_major_formatter()
if isinstance(formatter, ticker.NullFormatter):
props['tickformat'] = ""
elif not any(label.get_visible() for label in axis.get_ticklabels()):
props['tickformat'] = ""
else:
props['tickformat'] = None
# Get axis scale
props['scale'] = axis.get_scale()
# Get major tick label size (assumes that's all we really care about!)
labels = axis.get_ticklabels()
if labels:
props['fontsize'] = labels[0].get_fontsize()
else:
props['fontsize'] = None
# Get associated grid
props['grid'] = get_grid_style(axis)
return props
def get_grid_style(axis):
gridlines = axis.get_gridlines()
if axis._gridOnMajor and len(gridlines) > 0:
color = color_to_hex(gridlines[0].get_color())
alpha = gridlines[0].get_alpha()
dasharray = get_dasharray(gridlines[0])
return dict(gridOn=True,
color=color,
dasharray=dasharray,
alpha=alpha)
else:
return {"gridOn":False}
def get_figure_properties(fig):
return {'figwidth': fig.get_figwidth(),
'figheight': fig.get_figheight(),
'dpi': fig.dpi}
def get_axes_properties(ax):
props = {'axesbg': color_to_hex(ax.patch.get_facecolor()),
'axesbgalpha': ax.patch.get_alpha(),
'bounds': ax.get_position().bounds,
'dynamic': ax.get_navigate(),
'axes': [get_axis_properties(ax.xaxis),
get_axis_properties(ax.yaxis)]}
for axname in ['x', 'y']:
axis = getattr(ax, axname + 'axis')
domain = getattr(ax, 'get_{0}lim'.format(axname))()
lim = domain
if isinstance(axis.converter, matplotlib.dates.DateConverter):
scale = 'date'
try:
import pandas as pd
from pandas.tseries.converter import PeriodConverter
except ImportError:
pd = None
if (pd is not None and isinstance(axis.converter,
PeriodConverter)):
_dates = [pd.Period(ordinal=int(d), freq=axis.freq)
for d in domain]
domain = [(d.year, d.month - 1, d.day,
d.hour, d.minute, d.second, 0)
for d in _dates]
else:
domain = [(d.year, d.month - 1, d.day,
d.hour, d.minute, d.second,
d.microsecond * 1E-3)
for d in matplotlib.dates.num2date(domain)]
else:
scale = axis.get_scale()
if scale not in ['date', 'linear', 'log']:
raise ValueError("Unknown axis scale: "
"{0}".format(axis[axname].get_scale()))
props[axname + 'scale'] = scale
props[axname + 'lim'] = lim
props[axname + 'domain'] = domain
return props
def iter_all_children(obj, skipContainers=False):
"""
Returns an iterator over all children and nested children using
obj's get_children() method.
If skipContainers is true, only childless objects are returned.
"""
if hasattr(obj, 'get_children') and len(obj.get_children()) > 0:
for child in obj.get_children():
if not skipContainers:
yield child
# could use `yield from` in python 3...
for grandchild in iter_all_children(child, skipContainers):
yield grandchild
else:
yield obj
def get_legend_properties(ax, legend):
handles, labels = ax.get_legend_handles_labels()
visible = legend.get_visible()
return {'handles': handles, 'labels': labels, 'visible': visible}
def image_to_base64(image):
"""
Convert a matplotlib image to a base64 png representation
Parameters
----------
image : matplotlib image object
The image to be converted.
Returns
-------
image_base64 : string
The UTF8-encoded base64 string representation of the png image.
"""
ax = image.axes
binary_buffer = io.BytesIO()
# image is saved in axes coordinates: we need to temporarily
# set the correct limits to get the correct image
lim = ax.axis()
ax.axis(image.get_extent())
image.write_png(binary_buffer)
ax.axis(lim)
binary_buffer.seek(0)
return base64.b64encode(binary_buffer.read()).decode('utf-8')
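# Illustrative usage sketch (assumes an AxesImage created elsewhere, e.g. via
# imshow; matplotlib.pyplot is not imported by this module):
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     im = ax.imshow(np.random.rand(4, 4))
#     png_b64 = image_to_base64(im)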
|
|
import argparse # parsing command line arguments
import importlib # dynamically importing modules
import random # randint
import time # delay & timing
from math import sqrt
from operator import add, attrgetter
import copy
import sys # to exit and append to path
sys.path.append('../utils')
sys.path.append('../functions')
import oa_utils # optimization algorithm utils
from timer import Timer
from plot_utils import PlotUtils # plotting each iteration if plot is True
"""http://www.swarmintelligence.org/tutorials.php"""
class Particle:
"""One particle to be used with particle swarm optimization. Keeps
track of the following attributes:
Attributes:
id: A number that specifies an id
pos: An array of floats defining the organism's position in space.
func: A function to call to calculate this organism's fitness
"""
def __init__(self, id, pos, func):
self.id = id
self.pos = pos
self.func = func
self.velocity = [0 for b in pos]
self.fval = self.get_fval()
self.pbest = pos
def __str__(self):
x_str = "["
for x in self.pos:
x_str += "%6.3f " % x
x_str += "]"
return "(id: %d, fval: %7.4f, X: %s)" % \
(self.id, self.fval, x_str)
def __repr__(self):
return "<Particle(%d)>" % self.id
def __lt__(self, other):
# __cmp__/cmp no longer exist in Python 3; order particles by fitness
return self.fval < other.get_fval()
# TODO to make this a class function with a pos parameter??
def get_fval(self):
return self.func(self.pos)
def get_velocity(self):
return self.velocity
class PSO(Timer, object):
"""A particle swarm class that contains methods for handling
the population over iterations
Attributes:
There are no attributes for this class. All settings/attributes
are read in from pso_settings.py which should be located in the same
directory as this file
"""
def __init__(self, settings, function): # TODO add settings parameter
super(self.__class__, self).__init__()
# read in settings
num_dims = settings['number_of_dimensions']
population_size = settings['population_size']
bounds = settings['bounds']
if settings['velocity_type'] == 'constriction':
phi = max(settings['cp'] + settings['cg'], 4.0)
self.k = 2.0/abs(2.0 - phi - sqrt(phi*phi - 4.0*phi))
else:
self.k = 1
# check to make sure num_dims and number of bounds provided match
if len(bounds) != num_dims:
raise ValueError("Number of dimensions doesn't match number of bounds provided")
# set instance variables
self.settings = settings
self.function = function
# initialize population
self.population = PSO.__gen_population(bounds, population_size, function)
self.total_population = population_size
self.best_x = PSO.__get_best_particle(self.population)
self.num_iterations = 1
if settings['plot']:
try:
self.plotutils = PlotUtils(num_dims, bounds, function)
self.__plot_state()
except ValueError:
print("Can not plot more than 2 dimensions")
settings['plot'] = False
if settings['print_iterations']:
self.__display_state()
if settings['step_through']:
oa_utils.pause()
@staticmethod
def __gen_particle(id, bounds, function):
# use gen_random_numbers to get a list of positions within the bounds
return Particle(id, oa_utils.gen_random_numbers(bounds), function)
@staticmethod
def __gen_population(bounds, size, function):
b = bounds
f = function
# generate a list of organisms
p = [PSO.__gen_particle(i+1, b, f) for i in range(0, size)]
return p
###########################
### PSO steps and loop ###
###########################
@staticmethod
def __update_velocity(population, velocity_type, print_actions, gbest, cp, cg, k, w):
for p in population:
if (velocity_type == 'normal'):
p.velocity = PSO.__get_velocity(1, cp, cg, gbest, p, 1)
elif (velocity_type == 'inertia'):
p.velocity = PSO.__get_velocity(k, cp, cg, gbest, p, w)
elif (velocity_type == 'constriction'):
p.velocity = PSO.__get_velocity(k, cp, cg, gbest, p, 1)
return population
@staticmethod
def __get_velocity(k, c1, c2, gbest, p, w):
velocity_array = []
for i, v in enumerate(p.velocity):
velocity_array.append(k*(w*v + c1*random.random()*(p.pbest[i] - p.pos[i]) + c2*random.random()*(gbest[i] - p.pos[i])))
return velocity_array
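# Note on the update rule above (standard PSO formulation assumed):
#   v_new = k * (w*v + c1*r1*(pbest - x) + c2*r2*(gbest - x))
# With the 'constriction' settings cp = cg = 2.05, phi = 4.1 and
#   k = 2 / |2 - phi - sqrt(phi**2 - 4*phi)| ~= 0.7298,
# which damps velocities so the swarm converges without an explicit clamp.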
@staticmethod
def __update_position(population): # TODO put bounds on what position can be updated to
for p in population:
p.pos = list(map(add, p.pos, p.velocity))
p.fval = p.get_fval()
return population
@staticmethod
def __get_best_particle(population):
return copy.deepcopy( min(population, key=attrgetter('fval')) )
def __display_state(self):
print("The best organism in generation %d is %s" \
% (self.num_generations, str(self.get_best_x())))
def __plot_state(self):
pts = [(organism.pos[0], organism.pos[1]) for organism in self.population]
self.plotutils.plot(pts)
def __str__(self):
return "Best Fitness: %8.4f by particle %s" % \
(self.get_best_f(), str(self.get_best_x()))
####################################
# These are the only methods that #
# should be called outside of this #
# class #
####################################
def get_best_x(self):
return self.best_x
def get_best_f(self):
return self.best_x.fval
def do_loop(self):
population = self.population
population = PSO.__update_velocity(population, \
self.settings['velocity_type'], \
self.settings['print_actions'], \
self.get_best_x().pos, \
self.settings['cp'], \
self.settings['cg'], \
self.k, \
self.settings['weight'])
if self.settings['cg_plus']:
self.settings['cg'] += 0.1
phi = max(self.settings['cp'] + self.settings['cg'], 4.0)
self.k = 2.0/abs(2.0 - phi - sqrt(phi*phi - 4.0*phi))
population = PSO.__update_position(population)
self.num_iterations += 1
self.population = population
current_best = PSO.__get_best_particle(self.population)
if current_best.get_fval() < self.best_x.get_fval():
self.best_x = current_best
if self.settings['plot']:
self.__plot_state()
if self.settings['print_iterations']:
self.__display_state()
if self.settings['step_through']:
oa_utils.pause()
def run(self):
# iterate over generations
while self.settings['num_iterations'] > self.num_iterations:
self.do_loop()
time.sleep(self.settings['time_delay'])
@staticmethod
def get_name():
return "Particle Swarm"
########################################################################################
# MAIN #
########################################################################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Accept an optional settings file')
parser.add_argument('--settings', '-s', nargs=1, type=str, \
metavar='<file>', help='specify settings file to use')
parser.add_argument('--function', '-f', nargs=1, type=str, \
metavar='<file>', help='specify objective function file to use')
parser.add_argument('-v', action='store_true', help='print info when method is doing an action')
parser.add_argument('--time', '-t', action='store_true', help='turn timing on for the algorithm')
parser.add_argument('--plot', '-p', action='store_true', help='plot each iteration')
args = parser.parse_args()
function_module = None
settings_module = None
# get objective function
if args.function:
function_module = importlib.import_module(args.function[0])
else:
function_module = importlib.import_module('ackley_function')
function = function_module.objective_function
# get settings
if args.settings:
settings_module = importlib.import_module(args.settings[0])
else:
settings_module = importlib.import_module('pso_settings')
settings = settings_module.settings
# if -v is set change the setting
if args.v:
settings['print_actions'] = True
settings['print_iterations'] = True
# check for a couple more command line arguments
if args.time: settings['time'] = True
if args.plot: settings['plot'] = True
# --- END OF ARG PARSING --- #
# print an empty line
print("")
# time initialization
if settings['time']:
start_time = time.time()
# create algorithm instance
pso = PSO(settings, function)
if settings['time']:
print(" --- Initialized in %s seconds --- " % (time.time() - start_time))
if settings['time_delay'] > 0.0 or settings['plot'] \
or settings['print_actions'] or settings['print_iterations'] or settings['step_through']:
print("\n --- WARNING: You are timing with either time_delay, plot, print_actions,")
print(" print_iterations, or step_through enabled. --- \n")
oa_utils.pause()
pso.start_timer()
# iterate over generations
pso.run()
if settings['time']:
pso.stop_timer()
print(" --- Ran for %s seconds --- " % (pso.get_time()))
# print out some data
print("")
print(str(pso))
sys.exit()
|
|
# coding=utf-8
"""
Copyright 2013 Load Impact
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
__all__ = ['ApiTokenClient']
try:
import httplib
except ImportError:
from http import client as httplib
import os
import platform
import requests
from .exceptions import (
ApiError, BadRequestError, ConflictError, ConnectionError, ClientError,
ForbiddenError, HTTPError, GoneError, MethodNotAllowedError,
MissingApiTokenError, NotFoundError, RateLimitError, ServerError,
TimeoutError, UnauthorizedError)
from .resources import (
DataStore, Test, TestConfig, UserScenario, UserScenarioValidation)
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from .version import __version__
def requests_exceptions_handling(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except requests.exceptions.ConnectionError as e:
raise ConnectionError(str(e))
except requests.exceptions.HTTPError as e:
raise HTTPError(str(e))
except requests.exceptions.Timeout as e:
raise TimeoutError(str(e))
except requests.exceptions.RequestException as e:
raise ApiError(str(e))
return wrapper
class Client(object):
"""Base client class handling all communication with the Load Impact REST
API, using simple API token based authentication."""
api_base_url = 'https://api.loadimpact.com/v2/'
default_timeout = 30
error_classes = {
400: BadRequestError,
401: UnauthorizedError,
403: ForbiddenError,
404: NotFoundError,
405: MethodNotAllowedError,
409: ConflictError,
410: GoneError,
427: RateLimitError
}
library_versions = "python %s; requests %s" % (platform.python_version(),
requests.__version__)
user_agent = "LoadImpactPythonSDK/%s (%s)" % (__version__, library_versions)
def __init__(self, timeout=default_timeout, debug=False):
self.timeout = timeout
if debug:
httplib.HTTPConnection.debuglevel = 1
def create_data_store(self, data, file_object):
return DataStore.create(self, data, file_object=file_object)
def get_data_store(self, resource_id):
return DataStore.get(self, resource_id)
def list_data_stores(self):
return DataStore.list(self)
def get_test(self, resource_id):
return Test.get(self, resource_id)
def list_tests(self):
return Test.list(self)
def create_test_config(self, data):
return TestConfig.create(self, data)
def get_test_config(self, resource_id):
return TestConfig.get(self, resource_id)
def list_test_configs(self):
return TestConfig.list(self)
def create_user_scenario(self, data):
return UserScenario.create(self, data)
def get_user_scenario(self, resource_id):
return UserScenario.get(self, resource_id)
def list_user_scenarios(self):
return UserScenario.list(self)
def create_user_scenario_validation(self, data):
return UserScenarioValidation.create(self, data)
@requests_exceptions_handling
def delete(self, path, headers=None, params=None):
"""Make a DELETE request to the API.
Args:
path: Path of resource URI to where we're making the request.
headers: Dict of headers to send with request.
params: Dict with query string parameters.
Returns:
A requests response object on success.
Raises:
BadRequestError: Request was deemed formatted incorrectly by server.
UnauthorizedError: API token is incorrect/not valid.
ForbiddenError: Permission denied.
APIError: Generic error from requests library.
"""
url = urljoin(self.__class__.api_base_url, path)
response = self._request('delete', url, headers=headers, params=params)
return self._check_response(response)
@requests_exceptions_handling
def get(self, path, headers=None, params=None):
"""Make a GET request to the API.
Args:
path: Path of resource URI to where we're making the request.
headers: Dict of headers to send with request.
params: Dict with query string parameters.
Returns:
A requests response object on success.
Raises:
BadRequestError: Request was deemed formatted incorrectly by server.
UnauthorizedError: API token is incorrect/not valid.
ForbiddenError: Permission denied.
APIError: Generic error from requests library.
"""
url = urljoin(self.__class__.api_base_url, path)
response = self._request('get', url, headers=headers, params=params)
return self._check_response(response)
@requests_exceptions_handling
def post(self, path, headers=None, params=None, data=None,
file_object=None):
"""Make a POST request to the API.
Args:
path: Path of resource URI to where we're making the request.
headers: Dict of headers to send with request.
params: Dict with query string parameters.
data: Body data to send with request.
file_object: File object with data to send as file.
Returns:
A requests response object on success.
Raises:
BadRequestError: Request was deemed formatted incorrectly by server.
UnauthorizedError: API token is incorrect/not valid.
ForbiddenError: Permission denied.
APIError: Generic error from requests library.
"""
url = urljoin(self.__class__.api_base_url, path)
files = {'file': file_object} if file_object else None
response = self._request('post', url, headers=headers, params=params,
data=data, files=files)
return self._check_response(response)
@requests_exceptions_handling
def put(self, path, headers=None, params=None, data=None, file_object=None):
"""Make a PUT request to the API.
Args:
path: Path of resource URI to where we're making the request.
headers: Dict of headers to send with request.
params: Dict with query string parameters.
data: Body data to send with request.
file_object: File object with data to send as file.
Returns:
A requests response object on success.
Raises:
BadRequestError: Request was deemed formatted incorrectly by server.
UnauthorizedError: API token is incorrect/not valid.
ForbiddenError: Permission denied.
APIError: Generic error from requests library.
"""
if headers is None:
headers = {"Content-Type": "application/json"}
url = urljoin(self.__class__.api_base_url, path)
files = {'file': file_object} if file_object else None
response = self._request('put', url, headers=headers, params=params,
data=data, files=files)
return self._check_response(response)
def _check_response(self, response):
status_code = response.status_code
if 400 <= status_code < 600:
try:
error = response.json()
msg = "%s (%s)" % (error['message'], response.url)
except (KeyError, ValueError):
msg = response.url
if status_code in self.__class__.error_classes:
raise self.__class__.error_classes[status_code](
msg, response=response)
elif 400 <= status_code < 500:
raise ClientError(msg, response=response)
elif 500 <= status_code < 600:
raise ServerError(msg, response=response)
return response
def _prepare_requests_kwargs(self, kwargs):
return kwargs
def _request(self, method, *args, **kwargs):
headers = {'user-agent': self.__class__.user_agent}
if 'headers' not in kwargs:
kwargs['headers'] = headers
else:
if not kwargs['headers']:
kwargs['headers'] = {}
kwargs['headers'].update(headers)
kwargs['timeout'] = self.timeout
kwargs = self._prepare_requests_kwargs(kwargs)
return self._requests_request(method, *args, **kwargs)
def _requests_request(self, method, *args, **kwargs):
return getattr(requests, method)(*args, **kwargs)
class ApiTokenClient(Client):
"""Client class handling all communication with the Load Impact REST API,
using simple API token based authentication."""
def __init__(self, api_token=None, *args, **kwargs):
super(ApiTokenClient, self).__init__(*args, **kwargs)
if not api_token:
try:
api_token = self._get_api_token_from_environment()
except KeyError:
raise MissingApiTokenError("An API token must be specified "
"either as the first argument to "
"ApiClient or by setting the "
"environment variable "
"LOADIMPACT_API_TOKEN.")
self.api_token = api_token
def _get_api_token_from_environment(self):
return os.environ['LOADIMPACT_API_TOKEN']
def _prepare_requests_kwargs(self, kwargs):
kwargs['auth'] = (self.api_token, '')
return kwargs
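# Illustrative usage sketch (the token and resource id below are made up;
# network access and a valid account are required):
#
#     client = ApiTokenClient(api_token='YOUR_API_TOKEN')
#     for test_config in client.list_test_configs():
#         print(test_config)
#     scenario = client.get_user_scenario(1234)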
|
|
#################################################
# __ ____ ____ _________ _______ __ #
# .' _| |_ || _|| _ _ ||_ __ \ |_ `. #
# | | | |__| | |_/ | | \_| | |__) | | | #
# | | | __ | | | | ___/ | | #
# | |_ _| | | |_ _| |_ _| |_ _| | #
# `.__| |____||____| |_____| |_____| |__,' #
# #
# 48 61 63 6B 54 68 65 50 6C 61 6E 65 74 #
#################################################
# File: Directory.py
# A Directory represents a virtual in-game filesystem directory on a computer.
import GameController as gc
from File import File
from Database import Database
from MessageBox import MessageBox
from termcolor import colored
class Directory:
def __init__(self, name='', parent_id=-1, id=-1):
self.name = name
self.parent_id = parent_id
self.files = []
self.subdirs = []
self.id = id
self.comp_id = -1
self.exists = False
self.fullpath = '~'
self.nesting = 0
self.size = 0
self.creation_time = gc.current_time()
self.modified_time = self.creation_time
self.read_only = False
# gets information from database about this directory if it exists
def lookup(self):
db = Database()
sql = ''
args = []
can_lookup = False # whether there is enough info to query the database
# search by directory ID
if self.id != -1:
sql = 'SELECT * FROM directories WHERE id = %s'
args = [self.id]
can_lookup = True
# search by name and parent ID
elif self.parent_id != -1 and self.name != '':
sql = 'SELECT * FROM directories WHERE parent_id = %s AND dir_name = %s'
args = [self.parent_id, self.name]
can_lookup = True
# perform the lookup if the object has enough information
if can_lookup:
result = db.get_query(sql, args)
if len(result) > 0:
self.exists = True
self.id = int(result[0][0])
self.name = result[0][1]
self.parent_id = int(result[0][2])
self.comp_id = int(result[0][3])
self.creation_time = gc.ts_to_string(result[0][4])
self.modified_time = gc.ts_to_string(result[0][5])
self.read_only = bool(result[0][6])
# get parent's name
sql = 'SELECT * FROM directories WHERE id = %s'
args = [self.parent_id]
result = db.get_query(sql, args)
if len(result) > 0:
self.parent_name = result[0][1]
# get full path of this directory
pid = self.parent_id
self.fullpath = self.name
self.nesting = 1
while pid != 0:
sql = 'SELECT * FROM directories WHERE id = %s'
args = [pid]
result = db.get_query(sql, args)
if len(result) > 0:
pid = int(result[0][2])
self.fullpath = result[0][1] + '/' + self.fullpath
self.nesting += 1
else:
pid = 0
self.fullpath = '~/' + self.fullpath
self.size = gc.DIR_SIZE # start counting total size
# get files in this directory
self.files = []
sql = 'SELECT file_name FROM files WHERE parent_id = %s'
args = [self.id]
response = db.get_query(sql, args)
i = 0
while i < len(response):
row = response[i]
name = row[0]
file = File(name, self)
file.lookup()
self.files.append(file)
self.size += file.size
i += 1
# get subdirectories
self.subdirs = []
sql = 'SELECT * FROM directories WHERE parent_id = %s'
args = [self.id]
response = db.get_query(sql, args)
for subdir in response:
dir_name = subdir[1]
d = Directory(name = dir_name, parent_id = self.id)
d.lookup()
self.subdirs.append(d)
self.size += d.size
db.close()
# synchronize object with database
def save(self):
db = Database()
if not self.exists:
sql = 'INSERT INTO directories (dir_name, parent_id, computer_id, modified_time, read_only) '
sql += 'VALUES (%s, %s, %s, %s, %s)'
args = [self.name, self.parent_id, self.comp_id, self.modified_time, self.read_only]
db.post_query(sql, args)
self.exists = True
else:
sql = 'UPDATE directories SET dir_name = %s, parent_id = %s, computer_id = %s, '
sql += 'modified_time = %s, read_only = %s WHERE id = %s'
args = [self.name, self.parent_id, self.comp_id, self.modified_time, self.read_only, self.id]
db.post_query(sql, args)
db.close()
# get id of this directory
def get_id(self):
return self.id
# returns total size of directory
def get_size(self):
self.lookup()
return self.size
# returns subdirectories in this directory
def get_subdirs(self):
subdir_list = ''
for subdir in self.subdirs:
subdir_list += subdir.name + ' '
return subdir_list
# returns files in this directory (excluding active viruses)
def get_files(self):
file_list = ''
for file in self.files:
if file.type == 'bin' and not file.is_live:
file_list += colored(file.name, 'green') + ' '
elif not file.is_live:
file_list += file.name + ' '
return file_list
# prints list of files and subdirectories to screen
def print_contents(self):
self.lookup()
dir_list = self.get_subdirs()
file_list = self.get_files()
gc.msg_pair('Directories: ', colored(dir_list, 'yellow'))
gc.msg_pair('Files: ', file_list)
# prints the entire file structure in this directory
def print_all_contents(self):
indents = u'\u251c'
spaces = ''
i = 1
while i < self.nesting:
indents += u'\u2500'
spaces += ' '
i += 1
gc.msg(indents + '[' + self.name + ']')
gc.msg(u'\u2502' + spaces + ' D: ' + colored(self.get_subdirs(), 'yellow'))
gc.msg(u'\u2502' + spaces + ' F: ' + colored(self.get_files(), 'white'))
for subdir in self.subdirs:
subdir.print_all_contents()
# permanently delete this directory and its contents
def delete(self):
self.lookup()
# delete subdirectories
for subdir in self.subdirs:
subdir.lookup()
subdir.delete()
# delete files in directory
for file in self.files:
file.delete()
db = Database()
# delete self
sql = 'DELETE FROM directories WHERE id = %s'
args = [self.id]
db.post_query(sql, args)
db.close()
# shows general information about a directory
def print_info(self):
self.lookup()
mb = MessageBox()
mb.title = self.name + ' [' + str(self.size) + ' bytes]'
mb.add_property('Total Size', gc.hr_bytes(self.size))
mb.add_property('Full Path', self.fullpath)
mb.add_property('Files', self.get_files())
mb.add_property('Subdirectories', self.get_subdirs())
mb.add_property('Created On', self.creation_time)
mb.add_property('Modified On', self.modified_time)
mb.add_property('Read Only', str(self.read_only))
mb.display()
# returns a relative directory based on a string containing slashes, if it exists
def navigate(self, nav_string):
new_dir = Directory(id = self.id)
new_dir.lookup()
error = False
for d in nav_string.split('/'):
# up one level
if d == '..':
if new_dir.parent_id == 0:
error = True
gc.error('You are already in the highest-level directory.')
else:
new_dir.id = new_dir.parent_id
new_dir.lookup()
if not new_dir.exists:
error = True
# to home directory of this computer
elif d == '~':
while new_dir.parent_id != 0:
new_dir.id = new_dir.parent_id
new_dir.lookup()
# to specified directory name
else:
if not d.isalnum():
error = True
gc.error('Directories can only contain letters and numbers.')
else:
new_dir.name = d
new_dir.parent_id = new_dir.id # current directory becomes the parent
new_dir.id = -1
new_dir.exists = False
new_dir.lookup()
if not new_dir.exists:
error = True
if error:
gc.error('The directory you entered doesn\'t exist.')
new_dir = self # reset changes
return new_dir
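# Illustrative usage sketch (assumes a populated game database; the names and
# id below are made up):
#
#     docs = Directory(name='docs', parent_id=1)
#     docs.lookup()
#     target = docs.navigate('../downloads')
#     target.print_contents()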
|
|
# -*- coding: UTF-8 -*-
import urwid
from urwid.util import (move_prev_char, move_next_char, calc_width,
calc_text_pos, is_wide_char)
from urwid.text_layout import CanNotDisplayText, TextLayout
from urwid.compat import bytes, PYTHON3, B
ONECHAR_NEWLINES = (u'\n', b'\n', u'\r', b'\r')
TWOCHAR_NEWLINES = (u'\n\r', b'\n\r', u'\r\n', b'\r\n')
def find_newline(text, pos):
l = len(text)
while pos < l:
char = text[pos:pos+1]
if char in ONECHAR_NEWLINES:
return pos
pos += 1
return pos
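# Examples of the helper above (hypothetical values):
#
#     >>> find_newline(u"ab\ncd", 0)
#     2
#     >>> find_newline(u"abcd", 0)   # no newline found: returns len(text)
#     4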
class CodeLayout(TextLayout):
"""A layout for Urwid that can deal with tabs."""
tab_width = 8
def supports_align_mode(self, align):
"""Return True if align is a supported align mode."""
return align == urwid.LEFT
def supports_wrap_mode(self, wrap):
"""Return True if wrap is a supported wrap mode."""
return wrap == urwid.SPACE
def layout(self, text, width, align, wrap):
"""Return a layout structure for text."""
try:
segs = self.calculate_text_segments(text, width, wrap)
return self.align_layout(text, width, segs, wrap, align)
except CanNotDisplayText:
return [[]]
def calculate_text_segments(self, text, width, wrap):
"""
Calculate the segments of text to display given width screen
columns to display them.
text - unicode text or byte string to display
width - number of available screen columns
wrap - wrapping mode used
Returns a layout structure without alignment applied.
"""
# TODO: This function is a horror and a mess, and really hard to
# understand. It's based on urwid's StandardLayout, which by itself
# is overly complex, and I added tab handling, which made it worse.
# It's a prime candidate for refactoring, making it easier to understand,
# and as it is heavily used, profiling would be nice too.
nl, nl_o, sp_o, tab_o = "\n", "\n", " ", "\t"
if PYTHON3 and isinstance(text, bytes):
nl = B(nl) # can only find bytes in python3 bytestrings
nl_o = ord(nl_o) # + an item of a bytestring is the ordinal value
sp_o = ord(sp_o)
tab_o = ord(tab_o)
b = []
p = 0
if wrap == 'clip':
# no wrapping to calculate, so it's easy.
l = []
while p <= len(text):
n_cr = find_newline(text, p)
if p != n_cr:
line = text[p:n_cr]
pt = 0
while pt < len(line):
n_tab = line.find(tab_o, pt)
if n_tab == -1:
end = len(line)
else:
end = n_tab
sc = calc_width(line, pt, end)
if sc != 0:
l.append((sc, p + pt, p + end))
if end == n_tab: # A tab was found
extra_space = (self.tab_width - (
sc % self.tab_width))
l.append((extra_space, p + n_tab))
pt = end + 1
l.append((0, n_cr))
b.append(l)
l = []
if text[n_cr:n_cr+2] in TWOCHAR_NEWLINES:
# Two char newline:
p = n_cr + 2
else:
p = n_cr + 1
return b
while p <= len(text):
# look for next eligible line break
n_cr = find_newline(text, p)
line = text[p:n_cr]
l = []
pt = 0
lc = 0
while pt < len(line):
n_tab = line.find(tab_o, pt)
if n_tab == -1:
end = len(line)
else:
end = n_tab
sc = calc_width(line, pt, end)
if lc + sc <= width:
# this segment fits
if sc:
l.append((sc, p + pt, p + end))
if end == n_tab: # A tab was found
extra_space = self.tab_width - (sc % self.tab_width)
l.append((extra_space, p + n_tab))
lc += extra_space
else:
# removed character hint
l.append((0, p + end))
pt = end + 1
lc += sc
if lc >= width:
# The tab can sometimes push line length to width, and
# then we adjust the line length and make a new line.
overshoot = lc - width
spaces, pos = l[-1]
l[-1] = (spaces - overshoot, pos)
b.append(l)
l = []
lc = 0
continue
# This segment does not fit. Let's fit it.
pos, sc = calc_text_pos(line, pt, end, width - lc)
if pos == pt: # pathological width=1 double-byte case
raise CanNotDisplayText(
"Wide character will not fit in 1-column width")
if wrap == 'any':
l.append((sc, p + pt, p + pos))
l.append((0, p + pos))
b.append(l)
l = []
lc = 0
pt = pos
continue
assert wrap == 'space'
if line[pos] == sp_o:
# perfect space wrap
l.append((sc, p + pt, p + pos))
# removed character hint
l.append((0, p + pos))
b.append(l)
l = []
lc = 0
pt = pos + 1
continue
if is_wide_char(line, pos):
# perfect next wide
l.append((sc, p + pt, p + pos))
b.append(l)
l = []
lc = 0
pt = pos
continue
prev = pos
while prev > pt:
prev = move_prev_char(line, pt, prev)
if line[prev] == sp_o:
sc = calc_width(line, pt, prev)
if prev != pt:
l.append((sc, p + pt, p + prev))
l.append((0, p + prev))
b.append(l)
l = []
lc = 0
pt = prev + 1
break
if is_wide_char(line, prev):
# wrap after wide char
nextc = move_next_char(line, prev, pos)
sc = calc_width(line, pt, nextc)
l.append((sc, p + pt, p + nextc))
b.append(l)
l = []
lc = 0
pt = nextc
break
else:
if lc == 0:
# unwrap previous line space if possible to
# fit more text (we're breaking a word anyway)
if b and (len(b[-1]) == 2 or (len(b[-1]) == 1 and
len(b[-1][0]) == 2)):
# look for removed space above
if len(b[-1]) == 1:
[(h_sc, h_off)] = b[-1]
p_sc = 0
p_off = p_end = h_off
else:
[(p_sc, p_off, p_end),
(h_sc, h_off)] = b[-1][-2:]
if (p_sc < width and h_sc == 0 and
text[h_off] == sp_o):
# combine with previous line
old_line = b[-1][:-2]
del b[-1]
pt = p_off - p
pos, sc = calc_text_pos(
line, pt, end, width)
old_line.append((sc, p + pt, p + pos))
b.append(old_line)
# check for trailing " " or "\n"
pt = pos
if pt < len(text) and (
text[pt] in (sp_o, nl_o)):
# removed character hint
b[-1].append((0, p + pt))
pt += 1
continue
# Break on previous tab, and try again.
if l:
b.append(l)
l = []
lc = 0
continue
# There is no space to break the line on, unwrapping the
# previous line doesn't help, I guess we just break on a
# character.
b.append([(sc, p + pt, p + pos)])
l = []
lc = 0
pt = pos
# force any char wrap
if l:
b.append(l)
elif not line:
# An empty line.
b.append([(0, n_cr)])
pt = 1
if text[pt-1:pt+1] in TWOCHAR_NEWLINES:
# Two char newline:
pt += 1
p += pt
return b
def align_layout(self, text, width, segs, wrap, align):
"""Convert the layout segs to an aligned layout."""
assert align == urwid.LEFT
return segs
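# Minimal usage sketch (an assumption for illustration, not part of the
# original module): the layout instance is passed to urwid.Text via its
# ``layout`` argument so that tab characters in the displayed text are
# expanded to ``tab_width`` columns.
if __name__ == '__main__':
    sample = urwid.Text(u"name\tvalue\nspam\t42", layout=CodeLayout())
    urwid.MainLoop(urwid.Filler(sample, 'top')).run()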
|
|
from django.test import TestCase
from django.core.exceptions import PermissionDenied
from django.contrib.auth.models import Group
from hs_access_control.models import PrivilegeCodes
from hs_core import hydroshare
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.tests.utilities import global_reset, is_equal_to_as_set
class T12UserActive(MockIRODSTestCaseMixin, TestCase):
def setUp(self):
super(T12UserActive, self).setUp()
global_reset()
self.group, _ = Group.objects.get_or_create(name='Resource Author')
self.admin = hydroshare.create_account(
'admin@gmail.com',
username='admin',
first_name='administrator',
last_name='couch',
superuser=True,
groups=[]
)
self.cat = hydroshare.create_account(
'cat@gmail.com',
username='cat',
first_name='not a dog',
last_name='last_name_cat',
superuser=False,
groups=[]
)
self.dog = hydroshare.create_account(
'dog@gmail.com',
username='dog',
first_name='a little arfer',
last_name='last_name_dog',
superuser=False,
groups=[]
)
self.scratching = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.cat,
            title='all about sofas as scratching posts',
metadata=[],
)
self.felines = self.cat.uaccess.create_group(
title='felines', description="We are the feliness")
def test_00_exceptions(self):
"All user routines raise PermissionDenied if user is inactive"
scratching = self.scratching
felines = self.felines
dog = self.dog
cat = self.cat
# turn off active
cat.is_active = False
cat.save()
# all user routines should raise exceptions
with self.assertRaises(PermissionDenied):
cat.uaccess.create_group(title='foo', description="We are the foo")
with self.assertRaises(PermissionDenied):
cat.uaccess.delete_group(felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.view_groups
with self.assertRaises(PermissionDenied):
cat.uaccess.owned_groups
with self.assertRaises(PermissionDenied):
cat.uaccess.owns_group(felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_change_group(felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_view_group(felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_view_group_metadata(felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_change_group_flags(felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_delete_group(felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_share_group(felines, PrivilegeCodes.VIEW)
with self.assertRaises(PermissionDenied):
cat.uaccess.share_group_with_user(
felines, dog, PrivilegeCodes.VIEW)
with self.assertRaises(PermissionDenied):
cat.uaccess.unshare_group_with_user(felines, dog)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_unshare_group_with_user(felines, dog)
with self.assertRaises(PermissionDenied):
cat.uaccess.get_group_unshare_users(felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.view_resources
with self.assertRaises(PermissionDenied):
cat.uaccess.owned_resources
with self.assertRaises(PermissionDenied):
cat.uaccess.edit_resources
with self.assertRaises(PermissionDenied):
cat.uaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
with self.assertRaises(PermissionDenied):
cat.uaccess.owns_resource(scratching)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_change_resource(scratching)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_change_resource_flags(scratching)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_view_resource(scratching)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_delete_resource(scratching)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_share_resource(scratching, PrivilegeCodes.VIEW)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_share_resource_with_group(
scratching, felines, PrivilegeCodes.VIEW)
with self.assertRaises(PermissionDenied):
cat.uaccess.share_resource_with_user(
scratching, dog, PrivilegeCodes.VIEW)
with self.assertRaises(PermissionDenied):
cat.uaccess.unshare_resource_with_user(scratching, dog)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_unshare_resource_with_user(scratching, dog)
with self.assertRaises(PermissionDenied):
cat.uaccess.share_resource_with_group(
scratching, felines, PrivilegeCodes.VIEW)
with self.assertRaises(PermissionDenied):
cat.uaccess.unshare_resource_with_group(scratching, felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.can_unshare_resource_with_group(scratching, felines)
with self.assertRaises(PermissionDenied):
cat.uaccess.get_resource_unshare_users(scratching)
with self.assertRaises(PermissionDenied):
cat.uaccess.get_resource_unshare_groups(scratching)
def test_01_reporting(self):
"User records disappear when user is inactive"
scratching = self.scratching
felines = self.felines
dog = self.dog
cat = self.cat
cat.uaccess.share_resource_with_user(
scratching, dog, PrivilegeCodes.OWNER)
cat.uaccess.share_group_with_user(felines, dog, PrivilegeCodes.OWNER)
self.assertTrue(
is_equal_to_as_set(
cat.uaccess.get_group_unshare_users(felines), [
cat, dog]))
self.assertTrue(
# cat is the quota holder, so cannot be unshared
is_equal_to_as_set(
cat.uaccess.get_resource_unshare_users(scratching), [dog]))
self.assertTrue(
is_equal_to_as_set(
felines.gaccess.members, [
cat, dog]))
self.assertTrue(is_equal_to_as_set(felines.gaccess.owners, [cat, dog]))
self.assertTrue(
is_equal_to_as_set(
scratching.raccess.view_users, [
cat, dog]))
self.assertTrue(
is_equal_to_as_set(
scratching.raccess.edit_users, [
cat, dog]))
self.assertTrue(
is_equal_to_as_set(
scratching.raccess.owners, [
cat, dog]))
dog.is_active = False
dog.save()
self.assertTrue(
is_equal_to_as_set(
cat.uaccess.get_group_unshare_users(felines),
[]))
self.assertTrue(
is_equal_to_as_set(
cat.uaccess.get_resource_unshare_users(scratching),
[]))
self.assertTrue(is_equal_to_as_set(felines.gaccess.members, [cat]))
self.assertTrue(is_equal_to_as_set(felines.gaccess.owners, [cat]))
self.assertTrue(
is_equal_to_as_set(
scratching.raccess.view_users,
[cat]))
self.assertTrue(
is_equal_to_as_set(
scratching.raccess.edit_users,
[cat]))
self.assertTrue(is_equal_to_as_set(scratching.raccess.owners, [cat]))
|
|
###############################################################################
#
# Styles - A class for writing the Excel XLSX Styles file.
#
# Copyright 2013-2017, John McNamara, jmcnamara@cpan.org
#
# Package imports.
from . import xmlwriter
class Styles(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX Styles file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self):
"""
Constructor.
"""
super(Styles, self).__init__()
self.xf_formats = []
self.palette = []
self.font_count = 0
self.num_format_count = 0
self.border_count = 0
self.fill_count = 0
self.custom_colors = []
self.dxf_formats = []
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self):
# Assemble and write the XML file.
# Write the XML declaration.
self._xml_declaration()
# Add the style sheet.
self._write_style_sheet()
# Write the number formats.
self._write_num_fmts()
# Write the fonts.
self._write_fonts()
# Write the fills.
self._write_fills()
# Write the borders element.
self._write_borders()
# Write the cellStyleXfs element.
self._write_cell_style_xfs()
# Write the cellXfs element.
self._write_cell_xfs()
# Write the cellStyles element.
self._write_cell_styles()
# Write the dxfs element.
self._write_dxfs()
# Write the tableStyles element.
self._write_table_styles()
# Write the colors element.
self._write_colors()
# Close the style sheet tag.
self._xml_end_tag('styleSheet')
# Close the file.
self._xml_close()
def _set_style_properties(self, properties):
# Pass in the Format objects and other properties used in the styles.
self.xf_formats = properties[0]
self.palette = properties[1]
self.font_count = properties[2]
self.num_format_count = properties[3]
self.border_count = properties[4]
self.fill_count = properties[5]
self.custom_colors = properties[6]
self.dxf_formats = properties[7]
def _get_palette_color(self, color):
# Convert the RGB color.
if color[0] == '#':
color = color[1:]
return "FF" + color.upper()
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_style_sheet(self):
# Write the <styleSheet> element.
xmlns = 'http://schemas.openxmlformats.org/spreadsheetml/2006/main'
attributes = [('xmlns', xmlns)]
self._xml_start_tag('styleSheet', attributes)
def _write_num_fmts(self):
# Write the <numFmts> element.
if not self.num_format_count:
return
attributes = [('count', self.num_format_count)]
self._xml_start_tag('numFmts', attributes)
# Write the numFmts elements.
for xf_format in self.xf_formats:
# Ignore built-in number formats, i.e., < 164.
if xf_format.num_format_index >= 164:
self._write_num_fmt(xf_format.num_format_index,
xf_format.num_format)
self._xml_end_tag('numFmts')
def _write_num_fmt(self, num_fmt_id, format_code):
# Write the <numFmt> element.
format_codes = {
0: 'General',
1: '0',
2: '0.00',
3: '#,##0',
4: '#,##0.00',
5: '($#,##0_);($#,##0)',
6: '($#,##0_);[Red]($#,##0)',
7: '($#,##0.00_);($#,##0.00)',
8: '($#,##0.00_);[Red]($#,##0.00)',
9: '0%',
10: '0.00%',
11: '0.00E+00',
12: '# ?/?',
13: '# ??/??',
14: 'm/d/yy',
15: 'd-mmm-yy',
16: 'd-mmm',
17: 'mmm-yy',
18: 'h:mm AM/PM',
19: 'h:mm:ss AM/PM',
20: 'h:mm',
21: 'h:mm:ss',
22: 'm/d/yy h:mm',
37: '(#,##0_);(#,##0)',
38: '(#,##0_);[Red](#,##0)',
39: '(#,##0.00_);(#,##0.00)',
40: '(#,##0.00_);[Red](#,##0.00)',
41: '_(* #,##0_);_(* (#,##0);_(* "-"_);_(_)',
42: '_($* #,##0_);_($* (#,##0);_($* "-"_);_(_)',
43: '_(* #,##0.00_);_(* (#,##0.00);_(* "-"??_);_(_)',
44: '_($* #,##0.00_);_($* (#,##0.00);_($* "-"??_);_(_)',
45: 'mm:ss',
46: '[h]:mm:ss',
47: 'mm:ss.0',
48: '##0.0E+0',
49: '@'}
# Set the format code for built-in number formats.
if num_fmt_id < 164:
if num_fmt_id in format_codes:
format_code = format_codes[num_fmt_id]
else:
format_code = 'General'
attributes = [
('numFmtId', num_fmt_id),
('formatCode', format_code),
]
self._xml_empty_tag('numFmt', attributes)
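        # Illustrative output for a user-defined format (values hypothetical):
        #     <numFmt numFmtId="164" formatCode="0.000"/>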
def _write_fonts(self):
# Write the <fonts> element.
attributes = [('count', self.font_count)]
self._xml_start_tag('fonts', attributes)
# Write the font elements for xf_format objects that have them.
for xf_format in self.xf_formats:
if xf_format.has_font:
self._write_font(xf_format)
self._xml_end_tag('fonts')
def _write_font(self, xf_format, is_dxf_format=False):
# Write the <font> element.
self._xml_start_tag('font')
# The condense and extend elements are mainly used in dxf formats.
if xf_format.font_condense:
self._write_condense()
if xf_format.font_extend:
self._write_extend()
if xf_format.bold:
self._xml_empty_tag('b')
if xf_format.italic:
self._xml_empty_tag('i')
if xf_format.font_strikeout:
self._xml_empty_tag('strike')
if xf_format.font_outline:
self._xml_empty_tag('outline')
if xf_format.font_shadow:
self._xml_empty_tag('shadow')
# Handle the underline variants.
if xf_format.underline:
self._write_underline(xf_format.underline)
if xf_format.font_script == 1:
self._write_vert_align('superscript')
if xf_format.font_script == 2:
self._write_vert_align('subscript')
if not is_dxf_format:
self._xml_empty_tag('sz', [('val', xf_format.font_size)])
if xf_format.theme == -1:
# Ignore for excel2003_style.
pass
elif xf_format.theme:
self._write_color('theme', xf_format.theme)
elif xf_format.color_indexed:
self._write_color('indexed', xf_format.color_indexed)
elif xf_format.font_color:
color = self._get_palette_color(xf_format.font_color)
self._write_color('rgb', color)
elif not is_dxf_format:
self._write_color('theme', 1)
if not is_dxf_format:
self._xml_empty_tag('name', [('val', xf_format.font_name)])
if xf_format.font_family:
self._xml_empty_tag('family', [('val', xf_format.font_family)])
if xf_format.font_charset:
self._xml_empty_tag('charset',
[('val', xf_format.font_charset)])
if xf_format.font_name == 'Calibri' and not xf_format.hyperlink:
self._xml_empty_tag(
'scheme',
[('val', xf_format.font_scheme)])
self._xml_end_tag('font')
def _write_underline(self, underline):
# Write the underline font element.
if underline == 2:
attributes = [('val', 'double')]
elif underline == 33:
attributes = [('val', 'singleAccounting')]
elif underline == 34:
attributes = [('val', 'doubleAccounting')]
else:
# Default to single underline.
attributes = []
self._xml_empty_tag('u', attributes)
def _write_vert_align(self, val):
# Write the <vertAlign> font sub-element.
attributes = [('val', val)]
self._xml_empty_tag('vertAlign', attributes)
def _write_color(self, name, value):
# Write the <color> element.
attributes = [(name, value)]
self._xml_empty_tag('color', attributes)
def _write_fills(self):
# Write the <fills> element.
attributes = [('count', self.fill_count)]
self._xml_start_tag('fills', attributes)
# Write the default fill element.
self._write_default_fill('none')
self._write_default_fill('gray125')
# Write the fill elements for xf_format objects that have them.
for xf_format in self.xf_formats:
if xf_format.has_fill:
self._write_fill(xf_format)
self._xml_end_tag('fills')
def _write_default_fill(self, pattern_type):
# Write the <fill> element for the default fills.
self._xml_start_tag('fill')
self._xml_empty_tag('patternFill', [('patternType', pattern_type)])
self._xml_end_tag('fill')
def _write_fill(self, xf_format, is_dxf_format=False):
# Write the <fill> element.
pattern = xf_format.pattern
bg_color = xf_format.bg_color
fg_color = xf_format.fg_color
# Colors for dxf formats are handled differently from normal formats
# since the normal xf_format reverses the meaning of BG and FG for
# solid fills.
if is_dxf_format:
bg_color = xf_format.dxf_bg_color
fg_color = xf_format.dxf_fg_color
patterns = (
'none',
'solid',
'mediumGray',
'darkGray',
'lightGray',
'darkHorizontal',
'darkVertical',
'darkDown',
'darkUp',
'darkGrid',
'darkTrellis',
'lightHorizontal',
'lightVertical',
'lightDown',
'lightUp',
'lightGrid',
'lightTrellis',
'gray125',
'gray0625',
)
self._xml_start_tag('fill')
# The "none" pattern is handled differently for dxf formats.
if is_dxf_format and pattern <= 1:
self._xml_start_tag('patternFill')
else:
self._xml_start_tag(
'patternFill',
[('patternType', patterns[pattern])])
if fg_color:
fg_color = self._get_palette_color(fg_color)
self._xml_empty_tag('fgColor', [('rgb', fg_color)])
if bg_color:
bg_color = self._get_palette_color(bg_color)
self._xml_empty_tag('bgColor', [('rgb', bg_color)])
else:
if not is_dxf_format:
self._xml_empty_tag('bgColor', [('indexed', 64)])
self._xml_end_tag('patternFill')
self._xml_end_tag('fill')
def _write_borders(self):
# Write the <borders> element.
attributes = [('count', self.border_count)]
self._xml_start_tag('borders', attributes)
# Write the border elements for xf_format objects that have them.
for xf_format in self.xf_formats:
if xf_format.has_border:
self._write_border(xf_format)
self._xml_end_tag('borders')
def _write_border(self, xf_format, is_dxf_format=False):
# Write the <border> element.
attributes = []
# Diagonal borders add attributes to the <border> element.
if xf_format.diag_type == 1:
attributes.append(('diagonalUp', 1))
elif xf_format.diag_type == 2:
attributes.append(('diagonalDown', 1))
elif xf_format.diag_type == 3:
attributes.append(('diagonalUp', 1))
attributes.append(('diagonalDown', 1))
# Ensure that a default diag border is set if the diag type is set.
if xf_format.diag_type and not xf_format.diag_border:
xf_format.diag_border = 1
# Write the start border tag.
self._xml_start_tag('border', attributes)
# Write the <border> sub elements.
self._write_sub_border(
'left',
xf_format.left,
xf_format.left_color)
self._write_sub_border(
'right',
xf_format.right,
xf_format.right_color)
self._write_sub_border(
'top',
xf_format.top,
xf_format.top_color)
self._write_sub_border(
'bottom',
xf_format.bottom,
xf_format.bottom_color)
        # Conditional DXF formats don't allow diagonal borders.
if not is_dxf_format:
self._write_sub_border(
'diagonal',
xf_format.diag_border,
xf_format.diag_color)
if is_dxf_format:
self._write_sub_border('vertical', None, None)
self._write_sub_border('horizontal', None, None)
self._xml_end_tag('border')
def _write_sub_border(self, border_type, style, color):
# Write the <border> sub elements such as <right>, <top>, etc.
attributes = []
if not style:
self._xml_empty_tag(border_type)
return
border_styles = (
'none',
'thin',
'medium',
'dashed',
'dotted',
'thick',
'double',
'hair',
'mediumDashed',
'dashDot',
'mediumDashDot',
'dashDotDot',
'mediumDashDotDot',
'slantDashDot',
)
attributes.append(('style', border_styles[style]))
self._xml_start_tag(border_type, attributes)
if color:
color = self._get_palette_color(color)
self._xml_empty_tag('color', [('rgb', color)])
else:
self._xml_empty_tag('color', [('auto', 1)])
self._xml_end_tag(border_type)
def _write_cell_style_xfs(self):
# Write the <cellStyleXfs> element.
attributes = [('count', 1)]
self._xml_start_tag('cellStyleXfs', attributes)
self._write_style_xf()
self._xml_end_tag('cellStyleXfs')
def _write_cell_xfs(self):
# Write the <cellXfs> element.
formats = self.xf_formats
# Workaround for when the last xf_format is used for the comment font
# and shouldn't be used for cellXfs.
last_format = formats[-1]
if last_format.font_only:
formats.pop()
attributes = [('count', len(formats))]
self._xml_start_tag('cellXfs', attributes)
# Write the xf elements.
for xf_format in formats:
self._write_xf(xf_format)
self._xml_end_tag('cellXfs')
def _write_style_xf(self):
# Write the style <xf> element.
num_fmt_id = 0
font_id = 0
fill_id = 0
border_id = 0
attributes = [
('numFmtId', num_fmt_id),
('fontId', font_id),
('fillId', fill_id),
('borderId', border_id),
]
self._xml_empty_tag('xf', attributes)
def _write_xf(self, xf_format):
# Write the <xf> element.
num_fmt_id = xf_format.num_format_index
font_id = xf_format.font_index
fill_id = xf_format.fill_index
border_id = xf_format.border_index
xf_id = 0
has_align = 0
has_protect = 0
attributes = [
('numFmtId', num_fmt_id),
('fontId', font_id),
('fillId', fill_id),
('borderId', border_id),
('xfId', xf_id),
]
if xf_format.num_format_index > 0:
attributes.append(('applyNumberFormat', 1))
# Add applyFont attribute if XF format uses a font element.
if xf_format.font_index > 0:
attributes.append(('applyFont', 1))
# Add applyFill attribute if XF format uses a fill element.
if xf_format.fill_index > 0:
attributes.append(('applyFill', 1))
# Add applyBorder attribute if XF format uses a border element.
if xf_format.border_index > 0:
attributes.append(('applyBorder', 1))
# Check if XF format has alignment properties set.
(apply_align, align) = xf_format._get_align_properties()
# Check if an alignment sub-element should be written.
if apply_align and align:
has_align = 1
# We can also have applyAlignment without a sub-element.
if apply_align:
attributes.append(('applyAlignment', 1))
# Check for cell protection properties.
protection = xf_format._get_protection_properties()
if protection:
attributes.append(('applyProtection', 1))
has_protect = 1
# Write XF with sub-elements if required.
if has_align or has_protect:
self._xml_start_tag('xf', attributes)
if has_align:
self._xml_empty_tag('alignment', align)
if has_protect:
self._xml_empty_tag('protection', protection)
self._xml_end_tag('xf')
else:
self._xml_empty_tag('xf', attributes)
def _write_cell_styles(self):
# Write the <cellStyles> element.
attributes = [('count', 1)]
self._xml_start_tag('cellStyles', attributes)
self._write_cell_style()
self._xml_end_tag('cellStyles')
def _write_cell_style(self):
# Write the <cellStyle> element.
name = 'Normal'
xf_id = 0
builtin_id = 0
attributes = [
('name', name),
('xfId', xf_id),
('builtinId', builtin_id),
]
self._xml_empty_tag('cellStyle', attributes)
def _write_dxfs(self):
# Write the <dxfs> element.
formats = self.dxf_formats
count = len(formats)
attributes = [('count', len(formats))]
if count:
self._xml_start_tag('dxfs', attributes)
# Write the font elements for xf_format objects that have them.
for xf_format in self.dxf_formats:
self._xml_start_tag('dxf')
if xf_format.has_dxf_font:
self._write_font(xf_format, True)
if xf_format.num_format_index:
self._write_num_fmt(xf_format.num_format_index,
xf_format.num_format)
if xf_format.has_dxf_fill:
self._write_fill(xf_format, True)
if xf_format.has_dxf_border:
self._write_border(xf_format, True)
self._xml_end_tag('dxf')
self._xml_end_tag('dxfs')
else:
self._xml_empty_tag('dxfs', attributes)
def _write_table_styles(self):
# Write the <tableStyles> element.
count = 0
default_table_style = 'TableStyleMedium9'
default_pivot_style = 'PivotStyleLight16'
attributes = [
('count', count),
('defaultTableStyle', default_table_style),
('defaultPivotStyle', default_pivot_style),
]
self._xml_empty_tag('tableStyles', attributes)
def _write_colors(self):
# Write the <colors> element.
custom_colors = self.custom_colors
if not custom_colors:
return
self._xml_start_tag('colors')
self._write_mru_colors(custom_colors)
self._xml_end_tag('colors')
def _write_mru_colors(self, custom_colors):
# Write the <mruColors> element for the most recently used colors.
        # Write the custom colors in reverse order.
custom_colors.reverse()
# Limit the mruColors to the last 10.
if len(custom_colors) > 10:
custom_colors = custom_colors[0:10]
self._xml_start_tag('mruColors')
        # Write the custom colors in reverse order.
for color in custom_colors:
self._write_color('rgb', color)
self._xml_end_tag('mruColors')
def _write_condense(self):
# Write the <condense> element.
attributes = [('val', 0)]
self._xml_empty_tag('condense', attributes)
def _write_extend(self):
# Write the <extend> element.
attributes = [('val', 0)]
self._xml_empty_tag('extend', attributes)
|
|
"""Event parser and human readable log generator."""
from datetime import timedelta
from itertools import groupby
import logging
import voluptuous as vol
from homeassistant.loader import bind_hass
from homeassistant.components import sun
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
ATTR_DOMAIN, ATTR_ENTITY_ID, ATTR_HIDDEN, ATTR_NAME, ATTR_SERVICE,
CONF_EXCLUDE, CONF_INCLUDE, EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP, EVENT_LOGBOOK_ENTRY, EVENT_STATE_CHANGED,
EVENT_AUTOMATION_TRIGGERED, EVENT_SCRIPT_STARTED, HTTP_BAD_REQUEST,
STATE_NOT_HOME, STATE_OFF, STATE_ON)
from homeassistant.core import (
DOMAIN as HA_DOMAIN, State, callback, split_entity_id)
from homeassistant.components.alexa.smart_home import EVENT_ALEXA_SMART_HOME
from homeassistant.components.homekit.const import (
ATTR_DISPLAY_NAME, ATTR_VALUE, DOMAIN as DOMAIN_HOMEKIT,
EVENT_HOMEKIT_CHANGED)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_MESSAGE = 'message'
CONF_DOMAINS = 'domains'
CONF_ENTITIES = 'entities'
CONTINUOUS_DOMAINS = ['proximity', 'sensor']
DOMAIN = 'logbook'
GROUP_BY_MINUTES = 15
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
CONF_EXCLUDE: vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]):
vol.All(cv.ensure_list, [cv.string]),
}),
CONF_INCLUDE: vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]):
vol.All(cv.ensure_list, [cv.string]),
})
}),
}, extra=vol.ALLOW_EXTRA)
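# Illustrative configuration.yaml snippet accepted by the schema above
# (entity and domain names are made up):
#
#     logbook:
#       exclude:
#         entities:
#           - sensor.outside_humidity
#         domains:
#           - automation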
ALL_EVENT_TYPES = [
EVENT_STATE_CHANGED, EVENT_LOGBOOK_ENTRY,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
EVENT_ALEXA_SMART_HOME, EVENT_HOMEKIT_CHANGED,
EVENT_AUTOMATION_TRIGGERED, EVENT_SCRIPT_STARTED
]
LOG_MESSAGE_SCHEMA = vol.Schema({
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_DOMAIN): cv.slug,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
})
@bind_hass
def log_entry(hass, name, message, domain=None, entity_id=None):
"""Add an entry to the logbook."""
hass.add_job(async_log_entry, hass, name, message, domain, entity_id)
@bind_hass
def async_log_entry(hass, name, message, domain=None, entity_id=None):
"""Add an entry to the logbook."""
data = {
ATTR_NAME: name,
ATTR_MESSAGE: message
}
if domain is not None:
data[ATTR_DOMAIN] = domain
if entity_id is not None:
data[ATTR_ENTITY_ID] = entity_id
hass.bus.async_fire(EVENT_LOGBOOK_ENTRY, data)
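# Illustrative call from another integration (names are hypothetical):
#     log_entry(hass, 'Front door', 'was unlocked remotely',
#               domain='lock', entity_id='lock.front_door')
# This fires EVENT_LOGBOOK_ENTRY and the message shows up in the logbook panel.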
async def async_setup(hass, config):
"""Listen for download events to download files."""
@callback
def log_message(service):
"""Handle sending notification message service calls."""
message = service.data[ATTR_MESSAGE]
name = service.data[ATTR_NAME]
domain = service.data.get(ATTR_DOMAIN)
entity_id = service.data.get(ATTR_ENTITY_ID)
message.hass = hass
message = message.async_render()
async_log_entry(hass, name, message, domain, entity_id)
hass.http.register_view(LogbookView(config.get(DOMAIN, {})))
hass.components.frontend.async_register_built_in_panel(
'logbook', 'logbook', 'hass:format-list-bulleted-type')
hass.services.async_register(
DOMAIN, 'log', log_message, schema=LOG_MESSAGE_SCHEMA)
return True
class LogbookView(HomeAssistantView):
"""Handle logbook view requests."""
url = '/api/logbook'
name = 'api:logbook'
extra_urls = ['/api/logbook/{datetime}']
def __init__(self, config):
"""Initialize the logbook view."""
self.config = config
async def get(self, request, datetime=None):
"""Retrieve logbook entries."""
if datetime:
datetime = dt_util.parse_datetime(datetime)
if datetime is None:
return self.json_message('Invalid datetime', HTTP_BAD_REQUEST)
else:
datetime = dt_util.start_of_local_day()
period = request.query.get('period')
if period is None:
period = 1
else:
period = int(period)
entity_id = request.query.get('entity')
start_day = dt_util.as_utc(datetime) - timedelta(days=period - 1)
end_day = start_day + timedelta(days=period)
hass = request.app['hass']
def json_events():
"""Fetch events and generate JSON."""
return self.json(
_get_events(hass, self.config, start_day, end_day, entity_id))
return await hass.async_add_job(json_events)
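# Illustrative request handled by the view above (values are made up):
#     GET /api/logbook/2019-03-01T00:00:00+00:00?period=2&entity=light.kitchen
# The response contains the humanified entries for the requested period,
# optionally filtered to a single entity.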
def humanify(hass, events):
"""Generate a converted list of events into Entry objects.
Will try to group events if possible:
- if 2+ sensor updates in GROUP_BY_MINUTES, show last
- if home assistant stop and start happen in same minute call it restarted
"""
domain_prefixes = tuple('{}.'.format(dom) for dom in CONTINUOUS_DOMAINS)
# Group events in batches of GROUP_BY_MINUTES
for _, g_events in groupby(
events,
lambda event: event.time_fired.minute // GROUP_BY_MINUTES):
events_batch = list(g_events)
# Keep track of last sensor states
last_sensor_event = {}
# Group HA start/stop events
# Maps minute of event to 1: stop, 2: stop + start
start_stop_events = {}
# Process events
for event in events_batch:
if event.event_type == EVENT_STATE_CHANGED:
entity_id = event.data.get('entity_id')
if entity_id.startswith(domain_prefixes):
last_sensor_event[entity_id] = event
elif event.event_type == EVENT_HOMEASSISTANT_STOP:
if event.time_fired.minute in start_stop_events:
continue
start_stop_events[event.time_fired.minute] = 1
elif event.event_type == EVENT_HOMEASSISTANT_START:
if event.time_fired.minute not in start_stop_events:
continue
start_stop_events[event.time_fired.minute] = 2
# Yield entries
for event in events_batch:
if event.event_type == EVENT_STATE_CHANGED:
to_state = State.from_dict(event.data.get('new_state'))
domain = to_state.domain
# Skip all but the last sensor state
if domain in CONTINUOUS_DOMAINS and \
event != last_sensor_event[to_state.entity_id]:
continue
# Don't show continuous sensor value changes in the logbook
if domain in CONTINUOUS_DOMAINS and \
to_state.attributes.get('unit_of_measurement'):
continue
yield {
'when': event.time_fired,
'name': to_state.name,
'message': _entry_message_from_state(domain, to_state),
'domain': domain,
'entity_id': to_state.entity_id,
'context_id': event.context.id,
'context_user_id': event.context.user_id
}
elif event.event_type == EVENT_HOMEASSISTANT_START:
if start_stop_events.get(event.time_fired.minute) == 2:
continue
yield {
'when': event.time_fired,
'name': "Home Assistant",
'message': "started",
'domain': HA_DOMAIN,
'context_id': event.context.id,
'context_user_id': event.context.user_id
}
elif event.event_type == EVENT_HOMEASSISTANT_STOP:
if start_stop_events.get(event.time_fired.minute) == 2:
action = "restarted"
else:
action = "stopped"
yield {
'when': event.time_fired,
'name': "Home Assistant",
'message': action,
'domain': HA_DOMAIN,
'context_id': event.context.id,
'context_user_id': event.context.user_id
}
elif event.event_type == EVENT_LOGBOOK_ENTRY:
domain = event.data.get(ATTR_DOMAIN)
entity_id = event.data.get(ATTR_ENTITY_ID)
if domain is None and entity_id is not None:
try:
domain = split_entity_id(str(entity_id))[0]
except IndexError:
pass
yield {
'when': event.time_fired,
'name': event.data.get(ATTR_NAME),
'message': event.data.get(ATTR_MESSAGE),
'domain': domain,
'entity_id': entity_id,
'context_id': event.context.id,
'context_user_id': event.context.user_id
}
elif event.event_type == EVENT_ALEXA_SMART_HOME:
data = event.data
entity_id = data['request'].get('entity_id')
if entity_id:
state = hass.states.get(entity_id)
name = state.name if state else entity_id
message = "send command {}/{} for {}".format(
data['request']['namespace'],
data['request']['name'], name)
else:
message = "send command {}/{}".format(
data['request']['namespace'], data['request']['name'])
yield {
'when': event.time_fired,
'name': 'Amazon Alexa',
'message': message,
'domain': 'alexa',
'entity_id': entity_id,
'context_id': event.context.id,
'context_user_id': event.context.user_id
}
elif event.event_type == EVENT_HOMEKIT_CHANGED:
data = event.data
entity_id = data.get(ATTR_ENTITY_ID)
value = data.get(ATTR_VALUE)
value_msg = " to {}".format(value) if value else ''
message = "send command {}{} for {}".format(
data[ATTR_SERVICE], value_msg, data[ATTR_DISPLAY_NAME])
yield {
'when': event.time_fired,
'name': 'HomeKit',
'message': message,
'domain': DOMAIN_HOMEKIT,
'entity_id': entity_id,
'context_id': event.context.id,
'context_user_id': event.context.user_id
}
elif event.event_type == EVENT_AUTOMATION_TRIGGERED:
yield {
'when': event.time_fired,
'name': event.data.get(ATTR_NAME),
'message': "has been triggered",
'domain': 'automation',
'entity_id': event.data.get(ATTR_ENTITY_ID),
'context_id': event.context.id,
'context_user_id': event.context.user_id
}
elif event.event_type == EVENT_SCRIPT_STARTED:
yield {
'when': event.time_fired,
'name': event.data.get(ATTR_NAME),
'message': 'started',
'domain': 'script',
'entity_id': event.data.get(ATTR_ENTITY_ID),
'context_id': event.context.id,
'context_user_id': event.context.user_id
}
def _get_related_entity_ids(session, entity_filter):
from homeassistant.components.recorder.models import States
from homeassistant.components.recorder.util import \
RETRIES, QUERY_RETRY_WAIT
from sqlalchemy.exc import SQLAlchemyError
import time
timer_start = time.perf_counter()
query = session.query(States).with_entities(States.entity_id).distinct()
for tryno in range(0, RETRIES):
try:
result = [
row.entity_id for row in query
if entity_filter(row.entity_id)]
if _LOGGER.isEnabledFor(logging.DEBUG):
elapsed = time.perf_counter() - timer_start
_LOGGER.debug(
'fetching %d distinct domain/entity_id pairs took %fs',
len(result),
elapsed)
return result
except SQLAlchemyError as err:
_LOGGER.error("Error executing query: %s", err)
if tryno == RETRIES - 1:
raise
time.sleep(QUERY_RETRY_WAIT)
def _generate_filter_from_config(config):
from homeassistant.helpers.entityfilter import generate_filter
excluded_entities = []
excluded_domains = []
included_entities = []
included_domains = []
exclude = config.get(CONF_EXCLUDE)
if exclude:
excluded_entities = exclude.get(CONF_ENTITIES, [])
excluded_domains = exclude.get(CONF_DOMAINS, [])
include = config.get(CONF_INCLUDE)
if include:
included_entities = include.get(CONF_ENTITIES, [])
included_domains = include.get(CONF_DOMAINS, [])
return generate_filter(included_domains, included_entities,
excluded_domains, excluded_entities)
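# Minimal sketch of the resulting filter (config values are hypothetical):
# with config = {'exclude': {'domains': ['automation'], 'entities': []}}
# the returned callable gives entities_filter('automation.morning') -> False
# and entities_filter('light.kitchen') -> True.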
def _get_events(hass, config, start_day, end_day, entity_id=None):
"""Get events for a period of time."""
from homeassistant.components.recorder.models import Events, States
from homeassistant.components.recorder.util import session_scope
entities_filter = _generate_filter_from_config(config)
def yield_events(query):
"""Yield Events that are not filtered away."""
for row in query.yield_per(500):
event = row.to_native()
if _keep_event(event, entities_filter):
yield event
with session_scope(hass=hass) as session:
if entity_id is not None:
entity_ids = [entity_id.lower()]
else:
entity_ids = _get_related_entity_ids(session, entities_filter)
query = session.query(Events).order_by(Events.time_fired) \
.outerjoin(States, (Events.event_id == States.event_id)) \
.filter(Events.event_type.in_(ALL_EVENT_TYPES)) \
.filter((Events.time_fired > start_day)
& (Events.time_fired < end_day)) \
.filter(((States.last_updated == States.last_changed) &
States.entity_id.in_(entity_ids))
| (States.state_id.is_(None)))
return list(humanify(hass, yield_events(query)))
def _keep_event(event, entities_filter):
domain, entity_id = None, None
if event.event_type == EVENT_STATE_CHANGED:
entity_id = event.data.get('entity_id')
if entity_id is None:
return False
# Do not report on new entities
if event.data.get('old_state') is None:
return False
new_state = event.data.get('new_state')
# Do not report on entity removal
if not new_state:
return False
attributes = new_state.get('attributes', {})
        # If last_changed != last_updated, only attributes have changed,
        # so we do not report on that yet.
last_changed = new_state.get('last_changed')
last_updated = new_state.get('last_updated')
if last_changed != last_updated:
return False
domain = split_entity_id(entity_id)[0]
# Also filter auto groups.
if domain == 'group' and attributes.get('auto', False):
return False
# exclude entities which are customized hidden
hidden = attributes.get(ATTR_HIDDEN, False)
if hidden:
return False
elif event.event_type == EVENT_LOGBOOK_ENTRY:
domain = event.data.get(ATTR_DOMAIN)
entity_id = event.data.get(ATTR_ENTITY_ID)
elif event.event_type == EVENT_AUTOMATION_TRIGGERED:
domain = 'automation'
entity_id = event.data.get(ATTR_ENTITY_ID)
elif event.event_type == EVENT_SCRIPT_STARTED:
domain = 'script'
entity_id = event.data.get(ATTR_ENTITY_ID)
elif event.event_type == EVENT_ALEXA_SMART_HOME:
domain = 'alexa'
elif event.event_type == EVENT_HOMEKIT_CHANGED:
domain = DOMAIN_HOMEKIT
if not entity_id and domain:
entity_id = "%s." % (domain, )
return not entity_id or entities_filter(entity_id)
def _entry_message_from_state(domain, state):
"""Convert a state to a message for the logbook."""
# We pass domain in so we don't have to split entity_id again
if domain in ['device_tracker', 'person']:
if state.state == STATE_NOT_HOME:
return 'is away'
return 'is at {}'.format(state.state)
if domain == 'sun':
if state.state == sun.STATE_ABOVE_HORIZON:
return 'has risen'
return 'has set'
device_class = state.attributes.get('device_class')
if domain == 'binary_sensor' and device_class:
if device_class == 'battery':
if state.state == STATE_ON:
return "is low"
if state.state == STATE_OFF:
return "is normal"
if device_class == 'connectivity':
if state.state == STATE_ON:
return "is connected"
if state.state == STATE_OFF:
return "is disconnected"
if device_class in ['door', 'garage_door', 'opening', 'window']:
if state.state == STATE_ON:
return "is opened"
if state.state == STATE_OFF:
return "is closed"
if device_class == 'lock':
if state.state == STATE_ON:
return "is unlocked"
if state.state == STATE_OFF:
return "is locked"
if device_class == 'plug':
if state.state == STATE_ON:
return "is plugged in"
if state.state == STATE_OFF:
return "is unplugged"
if device_class == 'presence':
if state.state == STATE_ON:
return "is at home"
if state.state == STATE_OFF:
return "is away"
if device_class == 'safety':
if state.state == STATE_ON:
return "is unsafe"
if state.state == STATE_OFF:
return "is safe"
if (device_class in [
'cold', 'gas', 'heat', 'light', 'moisture', 'motion',
'occupancy', 'power', 'problem', 'smoke', 'sound', 'vibration'
]):
if state.state == STATE_ON:
return "detected {}".format(device_class)
if state.state == STATE_OFF:
return "cleared (no {} detected)".format(device_class)
if state.state == STATE_ON:
# Future: combine groups and its entity entries ?
return "turned on"
if state.state == STATE_OFF:
return "turned off"
return "changed to {}".format(state.state)
|
|
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
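# Example (illustrative): INTERNAL_IPS = ('127.0.0.1',)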
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = (
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy-nl', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmal')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa", "ur")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
LANGUAGE_COOKIE_NAME = 'django_language'
# If you set this to True, Django will format dates, numbers and calendars
# according to the user's current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link emails. Deprecated, must be removed in 1.8.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
# 'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is an admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = (
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# )
IGNORABLE_404_URLS = ()
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# Python module path where users will place custom format definitions.
# The directory this setting points to should contain subdirectories named
# after the locales, each containing a formats.py file
# (e.g. "myproject.locale" for myproject/locale/en/formats.py).
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
)
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
# The Python dotted path to the WSGI application that Django's internal servers
# (runserver, runfcgi) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
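# Example (illustrative; only if your proxy really sets and strips the header):
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')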
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# this middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.gzip.GZipMiddleware',
)
############
# SESSIONS #
############
SESSION_CACHE_ALIAS = 'default' # Cache to store session data if using the cache session backend.
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".example.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/' # The path of the session cookie.
SESSION_COOKIE_HTTPONLY = True # Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # The module to store session data
SESSION_FILE_PATH = None # Directory to store session files if using the file session module. If None, the backend will use a sensible default.
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in
# CommentDetailsForm.clean_comment. All of these should be in lowercase.
PROFANITIES_LIST = ()
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# The first hasher in this list is the preferred algorithm. Any
# password using different algorithms will be converted automatically
# upon login.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'django.utils.log.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = ()
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
|
|
# -*- coding: utf-8 -*-
""" Sahana Eden Hospital Management System Model
@copyright: 2009-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("HospitalDataModel",
"CholeraTreatmentCapabilityModel",
"HospitalActivityReportModel",
"hms_hospital_rheader"
)
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3dal import Row
from s3layouts import S3AddResourceLink
# =============================================================================
class HospitalDataModel(S3Model):
names = ("hms_hospital",
"hms_hospital_tag",
"hms_contact",
"hms_bed_capacity",
"hms_services",
"hms_image",
"hms_resources",
"hms_hospital_id",
)
def model(self):
T = current.T
db = current.db
settings = current.deployment_settings
messages = current.messages
NONE = messages["NONE"]
UNKNOWN_OPT = messages.UNKNOWN_OPT
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
super_link = self.super_link
# ---------------------------------------------------------------------
# Hospitals
#
# Use government-assigned UUIDs instead of internal UUIDs
HMS_HOSPITAL_USE_GOVUUID = True
hms_facility_type_opts = {
1: T("Hospital"),
2: T("Field Hospital"),
3: T("Specialized Hospital"),
11: T("Health center"),
12: T("Health center with beds"),
13: T("Health center without beds"),
21: T("Dispensary"),
31: T("Long-term care"),
41: T("Emergency Treatment Centre"),
42: T("Triage"),
43: T("Holding Center"),
44: T("Transit Center"),
98: T("Other"),
99: T("Unknown"),
} #: Facility Type Options
# Status opts defined here for use in Search widgets
hms_facility_status_opts = {
1: T("Normal"),
2: T("Compromised"),
3: T("Evacuating"),
4: T("Closed"),
5: T("Pending"),
99: T("No Response")
} #: Facility Status Options
hms_power_supply_type_opts = {
1: T("Grid"),
2: T("Generator"),
98: T("Other"),
99: T("None"),
} #: Power Supply Type Options
tablename = "hms_hospital"
define_table(tablename,
super_link("doc_id", "doc_entity"),
super_link("pe_id", "pr_pentity"),
super_link("site_id", "org_site"),
# UID assigned by Local Government
# required for EDXL-HAVE
# @ToDo: Move to a KV in hms_hospital_tag table?
Field("gov_uuid", unique=True, length=128,
label = T("Government UID"),
requires = IS_EMPTY_OR(
IS_NOT_ONE_OF(db,
"%s.gov_uuid" % tablename)
),
readable = False,
writable = False,
),
# Name of the facility
Field("name", notnull=True,
length=64, # Mayon compatibility
label = T("Name"),
),
# Alternate name, or name in local language
Field("aka1",
label = T("Other Name"),
),
# Alternate name, or name in local language
Field("aka2",
label = T("Other Name"),
readable = False,
writable = False,
),
Field("code", length=10, # Mayon compatibility
#notnull=True, unique=True,
# @ToDo: code_requires
label = T("Code"),
),
Field("facility_type", "integer",
default = 1,
label = T("Facility Type"),
represent = lambda opt: \
hms_facility_type_opts.get(opt, NONE),
requires = IS_EMPTY_OR(
IS_IN_SET(hms_facility_type_opts)
),
),
self.org_organisation_id(
requires = self.org_organisation_requires(updateable=True),
),
self.gis_location_id(),
# Address fields:
# @todo: Deprecate these & use location_id in HAVE export
Field("address",
label = T("Address"),
),
Field("postcode",
label = settings.get_ui_label_postcode(),
),
Field("city"),
Field("phone_exchange",
label = T("Phone/Exchange (Switchboard)"),
requires = IS_EMPTY_OR(s3_phone_requires),
),
Field("phone_business",
label = T("Phone/Business"),
requires = IS_EMPTY_OR(s3_phone_requires),
),
Field("phone_emergency",
label = T("Phone/Emergency"),
requires = IS_EMPTY_OR(s3_phone_requires),
),
Field("website",
label = T("Website"),
represent = s3_url_represent,
requires = IS_EMPTY_OR(IS_URL()),
),
Field("email",
label = T("Email"),
requires = IS_EMPTY_OR(IS_EMAIL()),
),
Field("fax",
label = T("Fax"),
requires = IS_EMPTY_OR(s3_phone_requires),
),
Field("total_beds", "integer",
label = T("Total Beds"),
#readable = False,
writable = False,
represent = lambda v: NONE if v is None else v,
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 9999)),
),
Field("available_beds", "integer",
label = T("Available Beds"),
#readable = False,
writable = False,
represent = lambda v: NONE if v is None else v,
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 9999)),
),
Field("doctors", "integer",
label = T("Number of doctors"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 9999)),
),
Field("nurses", "integer",
label = T("Number of nurses"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 9999)),
),
Field("non_medical_staff", "integer",
label = T("Number of non-medical staff"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(0, 9999)),
),
Field("obsolete", "boolean",
default = False,
label = T("Obsolete"),
represent = lambda opt: \
(opt and [T("Obsolete")] or [NONE])[0],
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_HOSPITAL = T("Create Hospital")
crud_strings[tablename] = Storage(
label_create = ADD_HOSPITAL,
title_display = T("Hospital Details"),
title_list = T("Hospitals"),
title_update = T("Edit Hospital"),
title_map = T("Map of Hospitals"),
label_list_button = T("List Hospitals"),
label_delete_button = T("Delete Hospital"),
msg_record_created = T("Hospital information added"),
msg_record_modified = T("Hospital information updated"),
msg_record_deleted = T("Hospital information deleted"),
msg_list_empty = T("No Hospitals currently registered"))
filter_widgets = [
S3TextFilter(["name",
"code",
"comments",
"organisation_id$name",
"organisation_id$acronym",
"location_id$name",
"location_id$L1",
"location_id$L2",
],
label=T("Name"),
_class="filter-search",
),
S3OptionsFilter("facility_type",
label = T("Type"),
represent = "%(name)s",
#hidden=True,
),
S3LocationFilter("location_id",
label = T("Location"),
levels = ("L0", "L1", "L2"),
#hidden=True,
),
S3OptionsFilter("status.facility_status",
label = T("Status"),
options = hms_facility_status_opts,
#represent="%(name)s",
#hidden=True,
),
S3OptionsFilter("status.power_supply_type",
label = T("Power"),
options = hms_power_supply_type_opts,
#represent = "%(name)s",
#hidden=True,
),
S3RangeFilter("total_beds",
label = T("Total Beds"),
#represent = "%(name)s",
#hidden = True,
),
]
report_fields = ["name",
(T("Type"), "facility_type"),
#"organisation_id",
"location_id$L1",
"location_id$L2",
"location_id$L3",
(T("Status"), "status.facility_status"),
"status.power_supply_type",
"total_beds",
"available_beds",
]
# Resource configuration
configure(tablename,
deduplicate = self.hms_hospital_duplicate,
filter_widgets = filter_widgets,
list_fields = ["id",
#"gov_uuid",
"name",
"facility_type",
"status.facility_status",
"status.power_supply_type",
#"organisation_id",
"location_id$L1",
"location_id$L2",
"location_id$L3",
#"phone_exchange",
"total_beds",
"available_beds",
],
onaccept = self.hms_hospital_onaccept,
report_options = Storage(
rows=report_fields,
cols=report_fields,
fact=report_fields,
defaults=Storage(rows="location_id$L2",
cols="status.facility_status",
fact="count(name)",
totals=True)
),
super_entity = ("org_site", "doc_entity", "pr_pentity"),
)
# Reusable field
hms_hospital_id_comment = S3AddResourceLink(c="hms",
f="hospital",
label=ADD_HOSPITAL,
title=T("Hospital"),
tooltip=T("If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'."))
represent = S3Represent(lookup=tablename)
hospital_id = S3ReusableField("hospital_id", "reference %s" % tablename,
comment = hms_hospital_id_comment,
label = T("Hospital"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hms_hospital.id",
represent
)),
sortby = "name",
)
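        # hospital_id() is reused below to link each component table back to
        # hms_hospital, e.g. hospital_id(ondelete = "CASCADE") in hms_status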
# Components
single = dict(joinby="hospital_id", multiple=False)
multiple = "hospital_id"
add_components(tablename,
hms_status=single,
hms_contact=multiple,
hms_bed_capacity=multiple,
hms_services=single,
hms_resources=multiple,
)
# Optional components
if settings.get_hms_track_ctc():
add_components(tablename, hms_ctc=single)
if settings.get_hms_activity_reports():
add_components(tablename, hms_activity=multiple)
# Custom Method to Assign HRs
self.set_method("hms", "hospital",
method = "assign",
action = self.hrm_AssignMethod(component="human_resource_site"))
# ---------------------------------------------------------------------
    # Hospital Tags
# - Key-Value extensions
# - can be used to identify a Source (GPS, Imagery, Wikipedia, etc)
# - can link Hospitals to other Systems, such as:
# * Government IDs
# * PAHO
# * OpenStreetMap (although their IDs can change over time)
# * WHO
# * Wikipedia URL
# - can be a Triple Store for Semantic Web support
#
tablename = "hms_hospital_tag"
self.define_table(tablename,
hospital_id(empty = False,
ondelete = "CASCADE",
),
# key is a reserved word in MySQL
Field("tag",
label = T("Key"),
),
Field("value",
label = T("Value"),
),
s3_comments(),
*s3_meta_fields())
configure(tablename,
deduplicate = self.hms_hospital_tag_deduplicate,
)
# ---------------------------------------------------------------------
# Hospital status
#
hms_resource_status_opts = {
1: T("Adequate"),
2: T("Insufficient")
} #: Resource Status Options
hms_clinical_status_opts = {
1: T("Normal"),
2: T("Full"),
3: T("Closed")
} #: Clinical Status Options
hms_facility_damage_opts = {
1: T("Flooding"),
2: T("Power Outage"),
} #: Facility Damage Options
hms_gas_supply_type_opts = {
98: T("Other"),
99: T("None"),
} #: Gas Supply Type Options
hms_security_status_opts = {
1: T("Normal"),
2: T("Elevated"),
3: T("Restricted Access"),
4: T("Lockdown"),
5: T("Quarantine"),
6: T("Closed")
} #: Security Status Options
hms_ems_traffic_opts = {
1: T("Normal"),
2: T("Advisory"),
3: T("Closed"),
4: T("Not Applicable")
} #: EMS Traffic Options
hms_or_status_opts = {
1: T("Normal"),
#2: T("Advisory"),
3: T("Closed"),
4: T("Not Applicable")
} #: Operating Room Status Options
hms_morgue_status_opts = {
1: T("Open"),
2: T("Full"),
3: T("Exceeded"),
4: T("Closed")
} #: Morgue Status Options
def hms_facility_damage_multirepresent(opt):
""" Multi Represent """
set = hms_facility_damage_opts
if isinstance(opt, (list, tuple)):
opts = opt
try:
vals = [str(set.get(o)) for o in opts]
except:
return None
elif isinstance(opt, int):
opts = [opt]
                vals = [str(set.get(opt))]
else:
return NONE
if len(opts) > 1:
vals = ", ".join(vals)
else:
vals = len(vals) and vals[0] or ""
return vals
tablename = "hms_status"
define_table(tablename,
hospital_id(ondelete = "CASCADE"),
# Status of the facility and facility operations
Field("facility_status", "integer",
label = T("Facility Status"),
represent = lambda opt: \
NONE if opt is None else \
hms_facility_status_opts.get(opt,
UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(hms_facility_status_opts)),
),
s3_date("date_reopening",
label = T("Estimated Reopening Date"),
),
Field("facility_operations", "integer",
label = T("Facility Operations"),
represent = lambda opt: \
NONE if opt is None else \
hms_resource_status_opts.get(opt,
UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(hms_resource_status_opts)),
),
# Facility Status Details
Field("damage", "list:integer",
label = T("Damage sustained"),
represent = hms_facility_damage_multirepresent,
requires = IS_EMPTY_OR(
IS_IN_SET(hms_facility_damage_opts,
multiple=True)),
widget = CheckboxesWidgetS3.widget,
),
Field("power_supply_type", "integer",
label = T("Power Supply Type"),
represent = lambda opt: \
NONE if opt is None else \
hms_power_supply_type_opts.get(opt,
UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(hms_power_supply_type_opts,
zero=None)),
),
Field("gas_supply_type", "integer",
label = T("Gas Supply Type"),
represent = lambda opt: \
NONE if opt is None else \
hms_gas_supply_type_opts.get(opt,
UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(hms_gas_supply_type_opts,
zero=None)),
),
Field("gas_supply_capacity", "integer",
label = T("Gas Supply Left (in hours)"),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
),
# Clinical status and clinical operations
Field("clinical_status", "integer",
label = T("Clinical Status"),
represent = lambda opt: \
NONE if opt is None else \
hms_clinical_status_opts.get(opt,
UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(hms_clinical_status_opts)),
),
Field("clinical_operations", "integer",
label = T("Clinical Operations"),
represent = lambda opt: \
NONE if opt is None else \
hms_resource_status_opts.get(opt,
UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(hms_resource_status_opts)),
),
Field("security_status", "integer",
label = T("Security Status"),
represent = lambda opt: \
NONE if opt is None else \
hms_security_status_opts.get(opt,
UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(hms_security_status_opts)),
),
# Staffing status
Field("staffing", "integer",
label = T("Staffing Level"),
represent = lambda opt: \
NONE if opt is None else \
hms_resource_status_opts.get(opt,
UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(hms_resource_status_opts)),
),
# Emergency Room Status
Field("ems_status", "integer",
label = T("ER Status"),
represent = lambda opt: \
NONE if opt is None else \
hms_ems_traffic_opts.get(opt,
UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(hms_ems_traffic_opts)),
),
Field("ems_reason", length=128,
label = T("ER Status Reason"),
),
# Operating Room Status
Field("or_status", "integer",
label = T("OR Status"),
represent = lambda opt: \
NONE if opt is None else \
hms_or_status_opts.get(opt,
UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(hms_or_status_opts)),
),
Field("or_reason", length=128,
label = T("OR Status Reason"),
),
# Morgue status and capacity
Field("morgue_status", "integer",
label = T("Morgue Status"),
represent = lambda opt: \
NONE if opt is None else \
hms_morgue_status_opts.get(opt,
UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(hms_morgue_status_opts)),
),
Field("morgue_units", "integer",
label = T("Morgue Units Available"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 9999)),
),
Field("access_status", "text",
label = T("Road Conditions")),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Status Report"),
title_display = T("Status Report"),
title_list = T("Status Report"),
title_update = T("Edit Status Report"),
label_list_button = T("List Status Reports"),
msg_record_created = T("Status Report added"),
msg_record_modified = T("Status Report updated"),
msg_record_deleted = T("Status Report deleted"),
msg_list_empty = T("No status information currently available"))
# ---------------------------------------------------------------------
# Contacts
#
tablename = "hms_contact"
define_table(tablename,
hospital_id(ondelete = "CASCADE"),
self.pr_person_id(empty = False,
label = T("Contact"),
),
Field("title",
label = T("Job Title"),
),
Field("phone",
label = T("Phone"),
requires = IS_EMPTY_OR(s3_phone_requires),
),
Field("mobile",
label = settings.get_ui_label_mobile_phone(),
requires = IS_EMPTY_OR(s3_phone_requires),
),
Field("email",
label = T("Email"),
requires = IS_EMPTY_OR(IS_EMAIL()),
),
Field("fax",
label = T("Fax"),
requires = IS_EMPTY_OR(s3_phone_requires),
),
Field("skype",
label = T("Skype ID"),
),
Field("website",
label = T("Website"),
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Contact"),
title_display = T("Contact Details"),
title_list = T("Contacts"),
title_update = T("Edit Contact"),
label_list_button = T("List Contacts"),
msg_record_created = T("Contact information added"),
msg_record_modified = T("Contact information updated"),
msg_record_deleted = T("Contact information deleted"),
msg_list_empty = T("No contacts currently registered"))
# Resource configuration
configure(tablename,
extra = "title",
list_fields = ["id",
"person_id",
"title",
"phone",
"mobile",
"email",
"fax",
"skype"
],
main = "person_id",
mark_required = ("person_id",),
)
# ---------------------------------------------------------------------
# Bed Capacity
#
hms_bed_type_opts = {
1: T("Adult ICU"),
2: T("Pediatric ICU"),
3: T("Neonatal ICU"),
4: T("Emergency Department"),
5: T("Nursery Beds"),
6: T("General Medical/Surgical"),
7: T("Rehabilitation/Long Term Care"),
8: T("Burn ICU"),
9: T("Pediatrics"),
10: T("Adult Psychiatric"),
11: T("Pediatric Psychiatric"),
12: T("Negative Flow Isolation"),
13: T("Other Isolation"),
14: T("Operating Rooms"),
15: T("Cholera Treatment"),
16: T("Ebola Treatment"),
99: T("Other")
}
tablename = "hms_bed_capacity"
define_table(tablename,
hospital_id(ondelete = "CASCADE"),
Field("unit_id", length=128, unique=True,
readable = False,
writable = False),
Field("bed_type", "integer",
default = 6,
label = T("Bed Type"),
represent = lambda opt: \
hms_bed_type_opts.get(opt,
UNKNOWN_OPT),
requires = IS_IN_SET(hms_bed_type_opts,
zero=None),
),
s3_datetime(empty = False,
label = T("Date of Report"),
future = 0,
),
Field("beds_baseline", "integer",
default = 0,
label = T("Baseline Number of Beds"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 9999)),
),
Field("beds_available", "integer",
default = 0,
label = T("Available Beds"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 9999)),
),
Field("beds_add24", "integer",
default = 0,
label = T("Additional Beds / 24hrs"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 9999)),
),
s3_comments(),
*s3_meta_fields())
# Field configuration
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Bed Type"),
title_display = T("Bed Capacity"),
title_list = T("Bed Capacity"),
title_update = T("Update Unit"),
label_list_button = T("List Units"),
label_delete_button = T("Delete Unit"),
msg_record_created = T("Unit added"),
msg_record_modified = T("Unit updated"),
msg_record_deleted = T("Unit deleted"),
msg_list_empty = T("No units currently registered"))
# Resource configuration
configure(tablename,
extra = "id",
list_fields = ["id",
"unit_name",
"bed_type",
"date",
"beds_baseline",
"beds_available",
"beds_add24"
],
main = "hospital_id",
onaccept = self.hms_bed_capacity_onaccept,
ondelete = self.hms_bed_capacity_onaccept,
onvalidation = self.hms_bed_capacity_onvalidation,
)
# ---------------------------------------------------------------------
# Services
#
tablename = "hms_services"
define_table(tablename,
hospital_id(ondelete = "CASCADE"),
Field("burn", "boolean",
default = False,
label = T("Burn"),
),
Field("card", "boolean",
default = False,
label = T("Cardiology"),
),
Field("dial", "boolean",
default = False,
label = T("Dialysis"),
),
Field("emsd", "boolean",
default = False,
label = T("Emergency Department"),
),
Field("infd", "boolean",
default = False,
label = T("Infectious Diseases"),
),
Field("neon", "boolean",
default = False,
label = T("Neonatology"),
),
Field("neur", "boolean",
default = False,
label = T("Neurology"),
),
Field("pedi", "boolean",
default = False,
label = T("Pediatrics"),
),
Field("surg", "boolean",
default = False,
label = T("Surgery"),
),
Field("labs", "boolean",
default = False,
label = T("Clinical Laboratory"),
),
Field("tran", "boolean",
default = False,
label = T("Ambulance Service"),
),
Field("tair", "boolean",
default = False,
label = T("Air Transport Service"),
),
Field("trac", "boolean",
default = False,
label = T("Trauma Center"),
),
Field("psya", "boolean",
default = False,
label = T("Psychiatrics/Adult"),
),
Field("psyp", "boolean",
default = False,
label = T("Psychiatrics/Pediatric"),
),
Field("obgy", "boolean",
default = False,
label = T("Obstetrics/Gynecology"),
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Service Profile"),
title_display = T("Services Available"),
title_list = T("Services Available"),
title_update = T("Update Service Profile"),
label_list_button = T("List Service Profiles"),
label_delete_button = T("Delete Service Profile"),
msg_record_created = T("Service profile added"),
msg_record_modified = T("Service profile updated"),
msg_record_deleted = T("Service profile deleted"),
msg_list_empty = T("No service profile available"))
# Resource configuration
configure(tablename,
extra = "id",
list_fields = ["id"],
main = "hospital_id",
)
# ---------------------------------------------------------------------
# Resources (multiple) - @todo: to be completed!
#
tablename = "hms_resources"
define_table(tablename,
hospital_id(ondelete = "CASCADE"),
Field("type"),
Field("description"),
Field("quantity"),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Report Resource"),
title_display = T("Resource Details"),
title_list = T("Resources"),
title_update = T("Edit Resource"),
label_list_button = T("List Resources"),
label_delete_button = T("Delete Resource"),
msg_record_created = T("Resource added"),
msg_record_modified = T("Resource updated"),
msg_record_deleted = T("Resource deleted"),
msg_list_empty = T("No resources currently reported"))
# Resource configuration
configure(tablename,
extra = "id",
list_fields = ["id"],
main = "hospital_id",
)
# ---------------------------------------------------------------------
# Return global names to s3db
#
return dict(hms_hospital_id = hospital_id,
)
# -------------------------------------------------------------------------
def defaults(self):
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return dict(hms_hospital_id = lambda **attr: dummy("hospital_id"),
)
# -------------------------------------------------------------------------
@staticmethod
def hms_hospital_duplicate(item):
"""
Hospital record duplicate detection, used for the deduplicate hook
@param item: the S3ImportItem to check
"""
data = item.data
#org = data.get("organisation_id")
address = data.get("address")
table = item.table
query = (table.name == data.name)
#if org:
# query = query & (table.organisation_id == org)
if address:
query = query & (table.address == address)
row = current.db(query).select(table.id,
limitby=(0, 1)).first()
if row:
item.id = row.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def hms_hospital_onaccept(form):
"""
Update Affiliation, record ownership and component ownership
"""
current.s3db.org_update_affiliations("hms_hospital", form.vars)
# -------------------------------------------------------------------------
@staticmethod
def hms_hospital_tag_deduplicate(item):
"""
If the record is a duplicate then it will set the item method to update
"""
data = item.data
tag = data.get("tag", None)
hospital_id = data.get("hospital_id", None)
if not tag or not hospital_id:
return
table = item.table
query = (table.tag.lower() == tag.lower()) & \
(table.hospital_id == hospital_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def hms_bed_capacity_onvalidation(form):
""" Bed Capacity Validation """
db = current.db
htable = db.hms_hospital
ctable = db.hms_bed_capacity
hospital_id = ctable.hospital_id.update
bed_type = form.vars.bed_type
query = (ctable.hospital_id == hospital_id) & \
(ctable.bed_type == bed_type)
row = db(query).select(ctable.id,
limitby=(0, 1)).first()
if row and str(row.id) != current.request.post_vars.id:
form.errors["bed_type"] = current.T("Bed type already registered")
elif "unit_id" not in form.vars:
query = htable.id == hospital_id
hospital = db(query).select(htable.uuid,
limitby=(0, 1)).first()
if hospital:
form.vars.unit_id = "%s-%s" % (hospital.uuid, bed_type)
# -------------------------------------------------------------------------
@staticmethod
def hms_bed_capacity_onaccept(form):
""" Updates the number of total/available beds of a hospital """
if isinstance(form, Row):
formvars = form
else:
formvars = form.vars
db = current.db
ctable = db.hms_bed_capacity
htable = db.hms_hospital
query = ((ctable.id == formvars.id) &
(htable.id == ctable.hospital_id))
hospital = db(query).select(htable.id,
limitby=(0, 1))
if hospital:
hospital = hospital.first()
a_beds = ctable.beds_available.sum()
t_beds = ctable.beds_baseline.sum()
query = (ctable.hospital_id == hospital.id) & \
(ctable.deleted == False)
count = db(query).select(a_beds, t_beds)
if count:
a_beds = count[0]._extra[a_beds]
t_beds = count[0]._extra[t_beds]
db(htable.id == hospital.id).update(total_beds=t_beds,
available_beds=a_beds)
# =============================================================================
class CholeraTreatmentCapabilityModel(S3Model):
names = ("hms_ctc",)
def model(self):
T = current.T
define_table = self.define_table
# ---------------------------------------------------------------------
# Cholera Treatment Capability
#
hms_problem_types = {
1: T("Security problems"),
2: T("Hygiene problems"),
3: T("Sanitation problems"),
4: T("Improper handling of dead bodies"),
5: T("Improper decontamination"),
6: T("Understaffed"),
7: T("Lack of material"),
8: T("Communication problems"),
9: T("Information gaps")
}
tablename = "hms_ctc"
define_table(tablename,
self.hms_hospital_id(ondelete = "CASCADE"),
Field("ctc", "boolean", default=False,
label = T("Cholera-Treatment-Center"),
represent = lambda opt: \
opt and T("yes") or T("no"),
),
Field("number_of_patients", "integer",
default = 0,
label = T("Current number of patients"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
),
Field("cases_24", "integer",
default = 0,
label = T("New cases in the past 24h"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
),
Field("deaths_24", "integer",
default = 0,
label = T("Deaths in the past 24h"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
),
#Field("staff_total", "integer", default = 0),
Field("icaths_available", "integer",
default = 0,
label = T("Infusion catheters available"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
),
Field("icaths_needed_24", "integer",
default = 0,
label = T("Infusion catheters needed per 24h"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
),
Field("infusions_available", "integer",
default = 0,
label = T("Infusions available"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
),
Field("infusions_needed_24", "integer",
default = 0,
label = T("Infusions needed per 24h"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
),
#Field("infset_available", "integer", default = 0),
#Field("infset_needed_24", "integer", default = 0),
Field("antibiotics_available", "integer",
default = 0,
label = T("Antibiotics available"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
),
Field("antibiotics_needed_24", "integer",
default = 0,
label = T("Antibiotics needed per 24h"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
),
Field("problem_types", "list:integer",
label = T("Current problems, categories"),
represent = lambda optlist: \
optlist and ", ".join(map(str,optlist)) or T("N/A"),
requires = IS_EMPTY_OR(
IS_IN_SET(hms_problem_types,
zero=None,
multiple=True)),
),
Field("problem_details", "text",
label = T("Current problems, details"),
),
s3_comments(),
*s3_meta_fields())
# Field configuration
# @todo: make lazy table
table = current.db[tablename]
table.modified_on.label = T("Last updated on")
table.modified_on.readable = True
table.modified_by.label = T("Last updated by")
table.modified_by.readable = True
# CRUD Strings
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Cholera Treatment Capability Information"),
title_display = T("Cholera Treatment Capability"),
title_list = T("Cholera Treatment Capability"),
title_update = T("Update Cholera Treatment Capability Information"),
label_list_button = T("List Statuses"),
label_delete_button = T("Delete Status"),
msg_record_created = T("Status added"),
msg_record_modified = T("Status updated"),
msg_record_deleted = T("Status deleted"),
msg_list_empty = T("No status information available"))
# Resource configuration
self.configure(tablename,
list_fields = ["id"],
subheadings = {
"Activities": "ctc",
"Medical Supplies Availability": "icaths_available",
"Current Problems": "problem_types",
"Comments": "comments"
},
)
# ---------------------------------------------------------------------
# Return global names to s3db
#
return {}
# -------------------------------------------------------------------------
def defaults(self):
return {}
# =============================================================================
class HospitalActivityReportModel(S3Model):
names = ("hms_activity",)
def model(self):
T = current.T
# ---------------------------------------------------------------------
# Activity
#
is_number_of_patients = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 9999))
represent_int_amount = lambda v, row=None: IS_INT_AMOUNT.represent(v)
tablename = "hms_activity"
self.define_table(tablename,
self.hms_hospital_id(ondelete = "CASCADE"),
s3_datetime(label = T("Date & Time"),
empty = False,
future = 0,
),
# Current Number of Patients
Field("patients", "integer",
default = 0,
label = T("Number of Patients"),
represent = represent_int_amount,
requires = is_number_of_patients,
),
# Admissions in the past 24 hours
Field("admissions24", "integer",
default = 0,
label = T("Admissions/24hrs"),
represent = represent_int_amount,
requires = is_number_of_patients,
),
# Discharges in the past 24 hours
Field("discharges24", "integer",
default = 0,
label = T("Discharges/24hrs"),
represent = represent_int_amount,
requires = is_number_of_patients,
),
# Deaths in the past 24 hours
Field("deaths24", "integer",
default = 0,
label = T("Deaths/24hrs"),
represent = represent_int_amount,
requires = is_number_of_patients,
),
Field("comment", length=128),
*s3_meta_fields())
# CRUD Strings
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Activity Report"),
title_display = T("Activity Report"),
title_list = T("Activity Reports"),
title_update = T("Update Activity Report"),
label_list_button = T("List Activity Reports"),
label_delete_button = T("Delete Report"),
msg_record_created = T("Report added"),
msg_record_modified = T("Report updated"),
msg_record_deleted = T("Report deleted"),
msg_list_empty = T("No reports currently available"))
# Resource configuration
self.configure(tablename,
extra = "id",
list_fields = ["id",
"date",
"patients",
"admissions24",
"discharges24",
"deaths24",
"comment",
],
main = "hospital_id",
onaccept = self.hms_activity_onaccept,
)
# ---------------------------------------------------------------------
# Return global names to s3db
#
return {}
# -------------------------------------------------------------------------
def defaults(self):
return {}
# -------------------------------------------------------------------------
@staticmethod
def hms_activity_onaccept(form):
db = current.db
atable = db.hms_activity
htable = db.hms_hospital
query = ((atable.id == form.vars.id) & \
(htable.id == atable.hospital_id))
hospital = db(query).select(htable.id,
htable.modified_on,
limitby=(0, 1)).first()
timestmp = form.vars.date
if hospital and hospital.modified_on < timestmp:
hospital.update_record(modified_on=timestmp)
# =============================================================================
def hms_hospital_rheader(r, tabs=[]):
""" Page header for component resources """
rheader = None
if r.representation == "html":
T = current.T
s3db = current.s3db
settings = current.deployment_settings
tablename, record = s3_rheader_resource(r)
if tablename == "hms_hospital" and record:
if not tabs:
tabs = [(T("Details"), ""),
(T("Status"), "status"),
(T("Contacts"), "contact"),
(T("Images"), "image"),
(T("Services"), "services"),
(T("Bed Capacity"), "bed_capacity"),
]
if settings.get_hms_activity_reports():
tabs.append((T("Activity Report"), "activity"))
if settings.get_hms_track_ctc():
tabs.append((T("Cholera Treatment Capability"), "ctc"))
if settings.has_module("hrm"):
STAFF = settings.get_hrm_staff_label()
tabs.append((STAFF, "human_resource"))
permit = current.auth.s3_has_permission
if permit("update", tablename, r.id) and \
permit("create", "hrm_human_resource_site"):
tabs.append((T("Assign %(staff)s") % dict(staff=STAFF), "assign"))
try:
tabs = tabs + s3db.req_tabs(r, match=False)
except:
pass
try:
tabs = tabs + s3db.inv_tabs(r)
except:
pass
tabs.append((T("User Roles"), "roles"))
rheader_tabs = s3_rheader_tabs(r, tabs)
hospital = record
table = s3db.hms_hospital
ltable = s3db.gis_location
stable = s3db.hms_status
query = (stable.hospital_id == hospital.id)
s = current.db(query).select(limitby=(0, 1)).first()
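            # Represent a status field when a status record exists, otherwise "n/a"
            # (the list-wrapping and/or idiom avoids calling represent() when s is None)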
status = lambda k: (s is not None and
[stable[k].represent(s[k])] or
[T("n/a")])[0]
NONE = current.messages["NONE"]
total_beds = hospital.total_beds
if total_beds is None:
total_beds = NONE
available_beds = hospital.available_beds
if available_beds is None:
available_beds = NONE
rheader = DIV(TABLE(
TR(TH("%s: " % T("Name")),
hospital.name,
TH("%s: " % T("Facility Status")),
status("facility_status")
),
TR(TH("%s: " % T("Location")),
ltable[hospital.location_id] and \
ltable[hospital.location_id].name or "unknown",
TH("%s: " % T("Estimated Reopening Date")),
status("date_reopening")
#TH("%s: " % T("EMS Status")),
# status("ems_status")
),
TR(TH("%s: " % T("Total Beds")),
total_beds,
#TH("%s: " % T("Clinical Status")),
# status("clinical_status")
),
TR(TH("%s: " % T("Available Beds")),
available_beds,
#TH("%s: " % T("Security Status")),
# status("security_status")
)
), rheader_tabs)
return rheader
# END =========================================================================
|
|
# Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections as c
from oslo_log import log as logging
import six
import sahara.plugins.mapr.domain.configuration_file as cf
import sahara.plugins.mapr.domain.service as s
import sahara.plugins.mapr.util.general as g
import sahara.utils.files as f
LOG = logging.getLogger(__name__)
db_spec = c.namedtuple('DatabaseSpec', ['db_name', 'user', 'password'])
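# e.g. db_spec('hue', 'maprhue', 'mapr') bundles a service database name with
# the MySQL user and password used to access it (see the *_SPECS constants below)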
class MySQL(s.Service):
METRICS_SPECS = db_spec('metrics', 'maprmetrics', 'mapr')
HUE_SPECS = db_spec('hue', 'maprhue', 'mapr')
METASTORE_SPECS = db_spec('metastore', 'maprmetastore', 'mapr')
RDBMS_SPECS = db_spec('rdbms', 'maprrdbms', 'mapr')
OOZIE_SPECS = db_spec('oozie', 'maproozie', 'mapr')
SENTRY_SPECS = db_spec('sentry', 'maprsentry', 'mapr')
SELECT_DATA = 'mysql -uroot --skip-column-names -e "%s"| grep -E "\w+"'
GET_DBS_LIST = SELECT_DATA % 'SHOW DATABASES'
GET_USERS_HOSTS = (
SELECT_DATA %
"SELECT Host FROM mysql.user WHERE mysql.user.User='%s'" # nosec
)
DRIVER_CLASS = 'com.mysql.jdbc.Driver'
MYSQL_SERVER_PORT = 3306
MYSQL_INSTALL_SCRIPT = 'plugins/mapr/resources/install_mysql.sh'
INSTALL_PACKAGES_TIMEOUT = 1800
def __init__(self):
super(MySQL, self).__init__()
self._ui_name = 'MySQL'
@staticmethod
def _get_db_daemon_name(distro, distro_version):
if distro.lower() == 'ubuntu':
return 'mysql'
if distro.lower() == 'suse':
return 'mysqld'
if distro.lower() in ['centos', 'redhatenterpriseserver']:
if distro_version.split('.')[0] == '7':
return 'mariadb'
return 'mysqld'
return None
@staticmethod
def _execute_script(instance, script_path, script_text=None,
user='root', password=None):
with instance.remote() as r:
if script_text:
r.write_file_to(script_path, script_text, run_as_root=True)
LOG.debug('Executing SQL script {path}'.format(path=script_path))
r.execute_command(("mysql %s %s < %s" %
('-u' + user if user else '',
'-p' + password if password else '',
script_path)),
run_as_root=True)
@staticmethod
def _create_service_db(instance, specs):
f_name = 'create_db_%s.sql' % specs.db_name
script = MySQL._create_script_obj(f_name, 'create_database.sql',
db_name=specs.db_name,
user=specs.user,
password=specs.password)
MySQL._execute_script(instance, script.remote_path, script.render())
@staticmethod
def _create_metrics_db(instance, databases, instances):
if MySQL.METRICS_SPECS.db_name not in databases:
MySQL._create_service_db(instance, MySQL.METRICS_SPECS)
MySQL._execute_script(instance=instance,
script_path='/opt/mapr/bin/setup.sql')
MySQL._grant_access(instance, MySQL.METRICS_SPECS, instances)
@staticmethod
def _create_hue_db(instance, databases, instances):
if MySQL.HUE_SPECS.db_name not in databases:
MySQL._create_service_db(instance, MySQL.HUE_SPECS)
MySQL._grant_access(instance, MySQL.HUE_SPECS, instances)
@staticmethod
def _create_rdbms_db(instance, databases, instances):
if MySQL.RDBMS_SPECS.db_name not in databases:
MySQL._create_service_db(instance, MySQL.RDBMS_SPECS)
MySQL._grant_access(instance, MySQL.RDBMS_SPECS, instances)
@staticmethod
def _create_metastore_db(instance, cluster_context, databases, instances):
hive_meta = cluster_context.get_instance('HiveMetastore')
if not hive_meta:
return
db_name = MySQL.METASTORE_SPECS.db_name
if db_name not in databases:
MySQL._create_service_db(instance, MySQL.METASTORE_SPECS)
MySQL._grant_access(instance, MySQL.METASTORE_SPECS, instances)
@staticmethod
def _create_oozie_db(instance, databases, instances):
if MySQL.OOZIE_SPECS.db_name not in databases:
MySQL._create_service_db(instance, MySQL.OOZIE_SPECS)
MySQL._grant_access(instance, MySQL.OOZIE_SPECS, instances)
@staticmethod
def _create_sentry_db(instance, cluster_context, databases, instances):
sentry_instance = cluster_context.get_instance('Sentry')
if not sentry_instance:
return
if MySQL.SENTRY_SPECS.db_name not in databases:
MySQL._create_service_db(instance, MySQL.SENTRY_SPECS)
MySQL._grant_access(instance, MySQL.SENTRY_SPECS, instances)
@staticmethod
def start_mysql_server(cluster_context):
LOG.debug('Starting MySQL Server')
instance = MySQL.get_db_instance(cluster_context)
distro = cluster_context.distro
distro_version = cluster_context.distro_version
with instance.remote() as r:
r.execute_command(('service %s restart' %
MySQL._get_db_daemon_name(distro.name,
distro_version)),
run_as_root=True)
LOG.debug('MySQL Server successfully started')
@staticmethod
def get_databases_list(db_instance):
with db_instance.remote() as r:
ec, out = r.execute_command(MySQL.GET_DBS_LIST)
if out:
return out.splitlines()
return list()
@staticmethod
def get_user_hosts(db_instance, username):
with db_instance.remote() as r:
ec, out = r.execute_command(MySQL.GET_USERS_HOSTS % username)
if out:
return out.splitlines()
return list()
@staticmethod
def get_db_instance(cluster_context):
return cluster_context.oozie_server
@staticmethod
def create_databases(cluster_context, instances):
db_instance = MySQL.get_db_instance(cluster_context)
databases = MySQL.get_databases_list(db_instance)
MySQL._create_metrics_db(db_instance, databases, instances)
MySQL._create_hue_db(db_instance, databases, instances)
MySQL._create_rdbms_db(db_instance, databases, instances)
MySQL._create_oozie_db(db_instance, databases, instances)
MySQL._create_metastore_db(
db_instance, cluster_context, databases, instances)
MySQL._create_sentry_db(db_instance, cluster_context, databases,
instances)
@staticmethod
def _create_script_obj(filename, template, **kwargs):
script = cf.TemplateFile(filename)
script.remote_path = '/tmp/'
script.parse(f.get_file_text(
'plugins/mapr/services/mysql/resources/%s' % template))
for k, v in six.iteritems(kwargs):
script.add_property(k, v)
return script
@staticmethod
def _grant_access(instance, specs, instances):
f_name = 'grant_access_%s.sql' % specs.db_name
ips = [i.internal_ip for i in instances]
user_hosts = MySQL.get_user_hosts(instance, specs.user)
script = MySQL._create_script_obj(f_name, 'grant_access.sql',
hosts=set(ips) - set(user_hosts),
db_name=specs.db_name,
user=specs.user,
password=specs.password)
MySQL._execute_script(instance, script.remote_path, script.render())
@staticmethod
def install_mysql(instance, distro_name):
g.run_script(instance, MySQL.MYSQL_INSTALL_SCRIPT, 'root', distro_name)
|
|
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo.concurrency import processutils
from oslo.config import cfg
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LW
from ironic.common import utils
from ironic.openstack.common import log as logging
from ironic.openstack.common import loopingcall
opts = [
cfg.IntOpt('check_device_interval',
default=1,
               help='After Ironic has completed creating the partition table, '
                    'it continues to check for activity on the attached iSCSI '
                    'device at this interval before copying the image to the '
                    'node, in seconds'),
cfg.IntOpt('check_device_max_retries',
default=20,
help='The maximum number of times to check that the device is '
'not accessed by another process. If the device is still '
'busy after that, the disk partitioning will be treated as'
' having failed.'),
]
CONF = cfg.CONF
opt_group = cfg.OptGroup(name='disk_partitioner',
title='Options for the disk partitioner')
CONF.register_group(opt_group)
CONF.register_opts(opts, opt_group)
LOG = logging.getLogger(__name__)
class DiskPartitioner(object):
def __init__(self, device, disk_label='msdos', alignment='optimal'):
"""A convenient wrapper around the parted tool.
:param device: The device path.
:param disk_label: The type of the partition table. Valid types are:
"bsd", "dvh", "gpt", "loop", "mac", "msdos",
"pc98", or "sun".
:param alignment: Set alignment for newly created partitions.
Valid types are: none, cylinder, minimal and
optimal.
"""
self._device = device
self._disk_label = disk_label
self._alignment = alignment
self._partitions = []
self._fuser_pids_re = re.compile(r'((\d)+\s*)+')
def _exec(self, *args):
# NOTE(lucasagomes): utils.execute() is already a wrapper on top
# of processutils.execute() which raises specific
# exceptions. It also logs any failure so we don't
# need to log it again here.
utils.execute('parted', '-a', self._alignment, '-s', self._device,
'--', 'unit', 'MiB', *args, check_exit_code=[0],
run_as_root=True)
def add_partition(self, size, part_type='primary', fs_type='',
bootable=False):
"""Add a partition.
:param size: The size of the partition in MiB.
:param part_type: The type of the partition. Valid values are:
primary, logical, or extended.
:param fs_type: The filesystem type. Valid types are: ext2, fat32,
fat16, HFS, linux-swap, NTFS, reiserfs, ufs.
If blank (''), it will create a Linux native
partition (83).
:param bootable: Boolean value; whether the partition is bootable
or not.
:returns: The partition number.
"""
self._partitions.append({'size': size,
'type': part_type,
'fs_type': fs_type,
'bootable': bootable})
return len(self._partitions)
def get_partitions(self):
"""Get the partitioning layout.
:returns: An iterator with the partition number and the
partition layout.
"""
return enumerate(self._partitions, 1)
def _wait_for_disk_to_become_available(self, retries, max_retries, pids,
stderr):
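        # NOTE: retries, pids and stderr are single-element lists so that this
        # callback can mutate shared state between FixedIntervalLoopingCall runs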
retries[0] += 1
if retries[0] > max_retries:
raise loopingcall.LoopingCallDone()
try:
# NOTE(ifarkas): fuser returns a non-zero return code if none of
# the specified files is accessed
out, err = utils.execute('fuser', self._device,
check_exit_code=[0, 1], run_as_root=True)
if not out and not err:
raise loopingcall.LoopingCallDone()
else:
if err:
stderr[0] = err
if out:
pids_match = re.search(self._fuser_pids_re, out)
pids[0] = pids_match.group()
except processutils.ProcessExecutionError as exc:
LOG.warning(_LW('Failed to check the device %(device)s with fuser:'
' %(err)s'), {'device': self._device, 'err': exc})
def commit(self):
"""Write to the disk."""
cmd_args = ['mklabel', self._disk_label]
# NOTE(lucasagomes): Lead in with 1MiB to allow room for the
# partition table itself.
start = 1
for num, part in self.get_partitions():
end = start + part['size']
cmd_args.extend(['mkpart', part['type'], part['fs_type'],
str(start), str(end)])
if part['bootable']:
cmd_args.extend(['set', str(num), 'boot', 'on'])
start = end
self._exec(*cmd_args)
retries = [0]
pids = ['']
fuser_err = ['']
interval = CONF.disk_partitioner.check_device_interval
max_retries = CONF.disk_partitioner.check_device_max_retries
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_disk_to_become_available,
retries, max_retries, pids, fuser_err)
timer.start(interval=interval).wait()
if retries[0] > max_retries:
if pids[0]:
raise exception.InstanceDeployFailure(
_('Disk partitioning failed on device %(device)s. '
'Processes with the following PIDs are holding it: '
'%(pids)s. Time out waiting for completion.')
% {'device': self._device, 'pids': pids[0]})
else:
raise exception.InstanceDeployFailure(
_('Disk partitioning failed on device %(device)s. Fuser '
'exited with "%(fuser_err)s". Time out waiting for '
'completion.')
% {'device': self._device, 'fuser_err': fuser_err[0]})
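# Minimal usage sketch (illustrative; device path and sizes are assumptions):
#     partitioner = DiskPartitioner('/dev/sdb')
#     partitioner.add_partition(512, fs_type='ext4', bootable=True)  # partition 1
#     partitioner.add_partition(1024)                                # partition 2
#     partitioner.commit()  # writes the msdos label and both partitions to disk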
_PARTED_PRINT_RE = re.compile(r"^\d+:([\d\.]+)MiB:"
                              r"([\d\.]+)MiB:([\d\.]+)MiB:(\w*)::(\w*)")
def list_partitions(device):
"""Get partitions information from given device.
:param device: The device path.
:returns: list of dictionaries (one per partition) with keys:
start, end, size (in MiB), filesystem, flags
"""
output = utils.execute(
'parted', '-s', '-m', device, 'unit', 'MiB', 'print',
use_standard_locale=True)[0]
lines = [line for line in output.split('\n') if line.strip()][2:]
# Example of line: 1:1.00MiB:501MiB:500MiB:ext4::boot
fields = ('start', 'end', 'size', 'filesystem', 'flags')
result = []
for line in lines:
match = _PARTED_PRINT_RE.match(line)
if match is None:
LOG.warn(_LW("Partition information from parted for device "
"%(device)s does not match "
"expected format: %(line)s"),
dict(device=device, line=line))
continue
# Cast int fields to ints (some are floats and we round them down)
groups = [int(float(x)) if i < 3 else x
for i, x in enumerate(match.groups())]
result.append(dict(zip(fields, groups)))
return result
|
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
import time
from netforce.access import get_active_company
from netforce.utils import print_color
class Sequence(Model):
_name = "sequence"
_string = "Sequence"
_audit_log = True
_fields = {
"name": fields.Char("Name", required=True, search=True),
"type": fields.Selection([
["cust_invoice", "Customer Invoice"],
["supp_invoice", "Supplier Invoice"],
["cust_credit", "Customer Credit Note"],
["supp_credit", "Supplier Credit Note"],
["cust_debit", "Customer Debit Note"],
["supp_debit", "Supplier Debit Note"],
["pay_in", "Incoming Payment"],
["pay_out", "Outgoing Payment"],
["transfer", "Transfer"],
["tax_no", "Tax No"],
["wht_no", "WHT No"],
["account_move", "Journal Entry"],
["pick_in", "Goods Receipt"],
["pick_internal", "Goods Transfer"],
["pick_out", "Goods Issue"],
["stock_count", "Stock Count"],
["stock_move", "Stock Movement"],
["stock_lot", "Lot / Serial Number"],
["stock_container", "Container"],
["stock_transform", "Product Transforms"],
["landed_cost", "Landed Costs"],
["shipping_rates", "Shipping Rates"],
["delivery_route","Delivery Routes"],
["sale_quot", "Sales Quotations"],
["sale_order", "Sales Order"],
["sale_return","Sales Return"],
["ecom_sale_order", "Ecommerce Sales Order"],
["purchase_order", "Purchase Order"],
["purchase_return","Purchase Return"],
["purchase_request", "Purchase Request"],
["pos_closure", "POS Register Closure"],
["production", "Production Order"],
["bom", "Bill of Material"],
["service_item", "Service Item"],
["job", "Service Order"],
["task", "Task"],
["service_contract", "Service Contract"],
["issue", "Issue"],
["employee", "Employee"],
["payrun", "Payrun"],
["leave_request", "Leave Request"],
["expense", "Expense Claim"],
["fixed_asset", "Fixed Asset"],
["claim", "Product Claims"],
["borrow", "Product Borrowings"],
["contact", "Contact Number"],
["ecom_cart","Cart Number"],
["other", "Other"],
# TODO: Sequence type should not hardcode module
["account_bill_in","Supplier Bill Issue"],
["account_bill_out","Customer Bill Issue"],
["account_cheque_in","Cheque Receive"],
["account_cheque_out","Cheque Payment"],
("account_cheque_move_rb","Cheque Receipt Pay-In"),
("account_cheque_move_rp","Cheque Receipt Honor"),
("account_cheque_move_rr","Cheque Receipt Return"),
("account_cheque_move_rc","Cheque Receipt Cancel"),
("account_cheque_move_rs","Cheque Receipt Sale"),
("account_cheque_move_pp","Cheque Payment Honor"),
("account_cheque_move_pr","Cheque Payment Return"),
("account_cheque_move_pc","Cheque Payment Cancel"),
("account_advance","Advance Payment"),
("account_advance_clear","Advance Clearing"),
], "Type", required=True, search=True),
"prefix": fields.Char("Prefix", search=True),
"padding": fields.Integer("Number Padding"),
"running": fields.One2Many("sequence.running", "sequence_id", "Running Numbers"),
"comments": fields.One2Many("message", "related_id", "Comments"),
"company_id": fields.Many2One("company", "Company"),
}
_order = "name,company_id"
_defaults = {
"padding": 4,
}
def get_prefix(self, template, context={}):
date = context.get("date")
if not date:
date = time.strftime("%Y-%m-%d")
vals = {
"Y": date[0:4],
"y": date[2:4],
"m": date[5:7],
"d": date[8:10],
}
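        # e.g. an (assumed) prefix template "INV%(Y)s%(m)s-" with date "2015-06-10"
        # renders as "INV201506-"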
prefix = template % vals
return prefix
def get_number_by_type(self,type,model,context={}):
sequence_id = self.find_sequence(type=type,context=context)
if not sequence_id:
return None
number = self.get_number_by_id(sequence_id,model,context=context)
return number
def get_number_by_id(self,sequence_id,model,context={}):
assert model, "Model required to search latest sequence"
        # if no sequence
        if not sequence_id:
            raise Exception("[G001] No sequence configured")
#return None
# specific column to find latest sequence
sequence_field = "number"
if context.get("sequence_field",False):
sequence_field = context["sequence_field"]
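        # Keep advancing the running number until one is found that is not yet
        # used on the target model (checked via sequence_field)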
while 1:
num = self.get_next_number(sequence_id, context=context)
res = get_model(model).search([[sequence_field, "=", num]])
if not res:
return num
# set next number
self.increment_number(sequence_id, context=context)
return None
def find_sequence(self, type=None, name=None, context={}):
if type and name:
cond = [["type", "=", type], ["name", "=", name]]
elif type:
cond = [["type", "=", type]]
elif name:
cond = [["name", "=", name]]
company_id=context.get("company_id")
if not company_id:
company_id = get_active_company()
comp_cond = cond + [["company_id", "=", company_id]]
res = self.search(comp_cond, order="id")
if res:
return res[0]
res = self.search(cond, order="id")
if res:
return res[0]
return None
def get_next_number(self, sequence_id, context={}):
seq = self.browse(sequence_id)
prefix = self.get_prefix(seq.prefix, context) if seq.prefix else ""
res = get_model("sequence.running").search([["sequence_id", "=", sequence_id], ["prefix", "=", prefix]])
if res:
run_id = res[0]
else:
vals = {
"sequence_id": sequence_id,
"prefix": prefix,
}
run_id = get_model("sequence.running").create(vals)
run = get_model("sequence.running").browse([run_id])[0]
num = run.next
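        # Zero-pad the running number to seq.padding digits, e.g. prefix "INV",
        # padding 4, next number 7 -> "INV0007" (illustrative values)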
if seq.padding is None:
res = "%s%d" % (prefix, num)
elif seq.padding == 0:
res = prefix
else:
res = "%s%.*d" % (prefix, seq.padding, num)
if not res:
raise Exception("Empty sequence number")
return res
def increment_number(self, sequence_id, context={}):
seq = self.browse(sequence_id)
prefix = self.get_prefix(seq.prefix, context) if seq.prefix else ""
res = get_model("sequence.running").search([["sequence_id", "=", sequence_id], ["prefix", "=", prefix]])
if not res:
raise Exception("Sequence prefix not found")
run_id = res[0]
run = get_model("sequence.running").browse([run_id])[0]
run.write({"next": run.next + 1})
# XXX: deprecated
def get_number(self, type=None, name=None, seq_id=None, context={}):
print_color("WARNING: deprecated method called: sequence.get_number", "red")
if type:
res = self.search([["type", "=", type]])
if not res:
return None
seq_id = res[0]
elif name:
res = self.search([["name", "=", name]])
if not res:
return None
seq_id = res[0]
if not seq_id:
return None
seq = self.browse([seq_id])[0]
prefix = self.get_prefix(seq.prefix, context) if seq.prefix else ""
res = get_model("sequence.running").search([["sequence_id", "=", seq_id], ["prefix", "=", prefix]])
if res:
run_id = res[0]
else:
vals = {
"sequence_id": seq_id,
"prefix": prefix,
}
run_id = get_model("sequence.running").create(vals)
run = get_model("sequence.running").browse([run_id])[0]
num = run.next
if seq.padding is None:
res = "%s%d" % (prefix, num)
elif seq.padding == 0:
res = prefix
else:
res = "%s%.*d" % (prefix, seq.padding, num)
return res
# XXX: deprecated
def increment(self, type=None, name=None, seq_id=None, context={}):
print_color("WARNING: deprecated method called: sequence.increment", "red")
if type:
res = self.search([["type", "=", type]])
if not res:
return None
seq_id = res[0]
elif name:
res = self.search([["name", "=", name]])
if not res:
return None
seq_id = res[0]
if not seq_id:
return None
seq = self.browse([seq_id])[0]
prefix = self.get_prefix(seq.prefix, context) if seq.prefix else ""
res = get_model("sequence.running").search([["sequence_id", "=", seq_id], ["prefix", "=", prefix]])
if not res:
raise Exception("Sequence prefix not found")
run_id = res[0]
run = get_model("sequence.running").browse([run_id])[0]
run.write({"next": run.next + 1})
Sequence.register()