# MakeCode Arcade (Python). The game APIs (img, hex, sprites, controller, info,
# game, effects, scene, Sprite, SpriteFlag, ControllerButtonEvent, math.random_range)
# are provided by the MakeCode runtime.
from enum import Enum


class Foo:
    def qux2(self):
        z = 12
        x = z * 3
        self.baz = x
        for q in range(10):
            x += q
        lst = ["foo", "bar", "baz"]
        lst = lst[1:]  # was lst[1:2], which keeps only one element
        assert len(lst) == 2, 201

    def qux(self):
        self.baz = self.bar
        self.blah = "hello"
        self._priv = 1
        self._prot = self.baz

    def _prot2(self):
        pass


class Bar(Foo):
    def something(self):
        super()._prot2()

    def something2(self):
        self._prot = 12


class SpriteKind(Enum):
    Player = 0
    Projectile = 1
    Enemy = 2
    Food = 3


ii = img("""
    . . . .
    . a . .
    . b b .
""")
hbuf = hex("a007")
hbuf2 = b'\xB0\x07'

asteroids = [
    sprites.space.space_small_asteroid1,
    sprites.space.space_small_asteroid0,
    sprites.space.space_asteroid0,
    sprites.space.space_asteroid1,
    sprites.space.space_asteroid4,
    sprites.space.space_asteroid3,
]

ship = sprites.create(sprites.space.space_red_ship, SpriteKind.Player)
ship.set_flag(SpriteFlag.STAY_IN_SCREEN, True)
ship.bottom = 120
controller.move_sprite(ship, 100, 100)
info.set_life(3)


def player_damage(sprite, other_sprite):
    scene.camera_shake(4, 500)
    other_sprite.destroy(effects.disintegrate)
    sprite.start_effect(effects.fire, 200)
    info.change_life_by(-1)


sprites.on_overlap(SpriteKind.Player, SpriteKind.Enemy, player_damage)
if False:
    player_damage(ship, ship)


def enemy_damage(sprite: Sprite, other_sprite: Sprite):
    sprite.destroy()
    other_sprite.destroy(effects.disintegrate)
    info.change_score_by(1)


sprites.on_overlap(SpriteKind.Projectile, SpriteKind.Enemy, enemy_damage)


def shoot():
    projectile = sprites.create_projectile_from_sprite(
        sprites.food.small_apple, ship, 0, -140)
    projectile.start_effect(effects.cool_radial, 100)


controller.A.on_event(ControllerButtonEvent.PRESSED, shoot)


def spawn_enemy():
    projectile = sprites.create_projectile_from_side(
        asteroids[math.random_range(0, len(asteroids) - 1)], 0, 75)
    projectile.set_kind(SpriteKind.Enemy)
    projectile.x = math.random_range(10, 150)


game.on_update_interval(500, spawn_enemy)


def qq():
    pass


qq()
from urllib.request import urlopen

from bs4 import BeautifulSoup

html = urlopen("http://www.facebook.com/")
bsObj = BeautifulSoup(html.read(), "html.parser")

# Save a pretty-printed copy of the page for offline inspection.
dom = open('facebook.html', "w")
dom.write(bsObj.prettify())
dom.close()

# print(bsObj.h1)
print(bsObj.prettify())
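# A more defensive variant of the same fetch, for reference (a sketch only:
# fetch_title and the example URL are illustrative, not part of the sample
# above). urlopen() raises urllib.error.HTTPError or URLError on failure, so
# the scrape can degrade gracefully instead of crashing.
from urllib.error import HTTPError, URLError
from urllib.request import urlopen

from bs4 import BeautifulSoup


def fetch_title(url):
    # Return the page's <h1> text, or None if the fetch or parse fails.
    try:
        html = urlopen(url)
    except (HTTPError, URLError):
        return None
    soup = BeautifulSoup(html.read(), "html.parser")
    return soup.h1.get_text() if soup.h1 is not None else None


print(fetch_title("http://www.example.com/"))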
import os

from application import handlers, Application

app = Application(handlers, os.environ, debug=True)
db = app.db
celery = app.celery()

# Imported here, after the app objects above exist, so that the tasks module
# can import them from this module without a circular import.
import tasks

login_session = app.login_session
client_id = app.google_client_id
def put_char(position, max_positions, large_ok, spec_ok, num_ok):
    global temp_txt
    global text_file
    global combs
    my_chars = list(chars_small)
    my_large_ok = large_ok
    my_specs_ok = spec_ok
    my_nums_ok = num_ok
    if my_large_ok:
        my_chars.extend(chars_large)
    if my_specs_ok:
        my_chars.extend(chars_spec)
    if my_nums_ok:
        my_chars.extend(chars_num)
    if position < max_positions:
        i = position
        for x in my_chars:
            temp_pass[i] = x
            my_large = len([x for x in temp_pass if x in chars_large])
            my_num = len([x for x in temp_pass if x in chars_num])
            my_specs = len([x for x in temp_pass if x in chars_spec])
            if my_large <= max_large:
                if my_num <= max_num:
                    if my_specs <= max_spec:
                        if not (x in chars_spec and temp_pass.count(x) > same_specs):
                            if temp_pass.count(x) <= max_rept:
                                print_pass = ''.join(temp_pass)
                                # text_file.write(f'{print_pass}\n')
                                print(print_pass)
                                combs += 1
                                # status only has totals for lengths 6-8; the
                                # extra check avoids a KeyError for other lengths.
                                if combs % 1000000 == 0 and length in status:
                                    print(f'{combs * 100 / status[length]:.0f}% done')
                                if position < max_positions - 1:
                                    next_position = position + 1
                                    put_char(next_position, max_positions,
                                             my_large < max_large,
                                             my_specs < max_spec,
                                             my_num < max_num)
    # for j in range(i + 1, max_positions):
    #     temp_pass[j] = chars[0]
    #     print_pass = ''.join(temp_pass)
    #     if temp_txt != print_pass:
    #         temp_txt = print_pass
    #         print(print_pass)
    #         # text_file.write(f'{print_pass}\n')


chars_alpha = [chr(x) for x in range(128) if chr(x).isalpha()]
chars_small = tuple('abcdefghijklmnopqrstuvwxyz')
chars_large = tuple('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
chars_num = tuple('0123')
chars_spec = tuple('.!@#$')
chars = []
chars.extend(chars_small)
chars.extend(chars_large)
chars.extend(chars_num)
chars.extend(chars_spec)

status = {6: 309, 7: 8032, 8: 208827}
length = 4
max_num = 1  # 4
max_spec = 1  # 3
max_large = 1  # 2
same_specs = 1
max_rept = 3
combs = 0
temp_large = [0 for x in range(length)]
temp_pass = [' ' for _ in range(length)]
temp_txt = ''
# text_file = open("c:/testPY.txt", "w")
put_char(0, length, True, True, True)
# text_file.close()
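# The recursive put_char above enumerates every string over the allowed
# alphabet and filters it against the per-class caps. For reference, the same
# enumeration can be sketched with itertools.product (a sketch only:
# candidates() and the sketch_* names are illustrative, and unlike the
# recursion this version does not prune partial prefixes, so it is slower for
# long passwords; the uppercase cap is omitted for brevity).
from itertools import product

sketch_small = tuple('abcdefghijklmnopqrstuvwxyz')
sketch_num = tuple('0123')
sketch_spec = tuple('.!@#$')
sketch_alphabet = sketch_small + sketch_num + sketch_spec


def candidates(length, max_num=1, max_spec=1, max_rept=3):
    # Yield every string of the given length whose digit count, special
    # character count, and per-character repetition stay within the caps.
    for combo in product(sketch_alphabet, repeat=length):
        if sum(c in sketch_num for c in combo) > max_num:
            continue
        if sum(c in sketch_spec for c in combo) > max_spec:
            continue
        if any(combo.count(c) > max_rept for c in combo):
            continue
        yield ''.join(combo)


for pw in candidates(2):
    print(pw)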
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from azure.core.exceptions import HttpResponseError
import msrest.serialization


class AddressSpace(msrest.serialization.Model):
    """AddressSpace contains an array of IP address ranges that can be used by subnets of the
    virtual network.

    :param address_prefixes: A list of address blocks reserved for this virtual network in CIDR
     notation.
    :type address_prefixes: list[str]
    """

    _attribute_map = {
        'address_prefixes': {'key': 'addressPrefixes', 'type': '[str]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AddressSpace, self).__init__(**kwargs)
        self.address_prefixes = kwargs.get('address_prefixes', None)


class Resource(msrest.serialization.Model):
    """Common resource representation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(Resource, self).__init__(**kwargs)
        self.id = kwargs.get('id', None)
        self.name = None
        self.type = None
        self.location = kwargs.get('location', None)
        self.tags = kwargs.get('tags', None)


class ApplicationGateway(Resource):
    """Application gateway resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param zones: A list of availability zones denoting where the resource needs to come from.
    :type zones: list[str]
    :param sku: SKU of the application gateway resource.
    :type sku: ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewaySku
    :param ssl_policy: SSL policy of the application gateway resource.
    :type ssl_policy: ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewaySslPolicy
    :ivar operational_state: Operational state of the application gateway resource. Possible
     values include: "Stopped", "Starting", "Running", "Stopping".
    :vartype operational_state: str or
     ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayOperationalState
    :param gateway_ip_configurations: Subnets of the application gateway resource.
    :type gateway_ip_configurations:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayIPConfiguration]
    :param authentication_certificates: Authentication certificates of the application gateway
     resource.
    :type authentication_certificates:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayAuthenticationCertificate]
    :param ssl_certificates: SSL certificates of the application gateway resource.
    :type ssl_certificates:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewaySslCertificate]
    :param frontend_ip_configurations: Frontend IP addresses of the application gateway resource.
    :type frontend_ip_configurations:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayFrontendIPConfiguration]
    :param frontend_ports: Frontend ports of the application gateway resource.
    :type frontend_ports:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayFrontendPort]
    :param probes: Probes of the application gateway resource.
    :type probes: list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayProbe]
    :param backend_address_pools: Backend address pool of the application gateway resource.
    :type backend_address_pools:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayBackendAddressPool]
    :param backend_http_settings_collection: Backend http settings of the application gateway
     resource.
    :type backend_http_settings_collection:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayBackendHttpSettings]
    :param http_listeners: Http listeners of the application gateway resource.
    :type http_listeners:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayHttpListener]
    :param url_path_maps: URL path map of the application gateway resource.
    :type url_path_maps: list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayUrlPathMap]
    :param request_routing_rules: Request routing rules of the application gateway resource.
    :type request_routing_rules:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayRequestRoutingRule]
    :param redirect_configurations: Redirect configurations of the application gateway resource.
    :type redirect_configurations:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayRedirectConfiguration]
    :param web_application_firewall_configuration: Web application firewall configuration.
    :type web_application_firewall_configuration:
     ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayWebApplicationFirewallConfiguration
    :param enable_http2: Whether HTTP2 is enabled on the application gateway resource.
    :type enable_http2: bool
    :param enable_fips: Whether FIPS is enabled on the application gateway resource.
    :type enable_fips: bool
    :param autoscale_configuration: Autoscale Configuration.
    :type autoscale_configuration:
     ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayAutoscaleConfiguration
    :param resource_guid: Resource GUID property of the application gateway resource.
    :type resource_guid: str
    :param provisioning_state: Provisioning state of the application gateway resource. Possible
     values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'operational_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'sku': {'key': 'properties.sku', 'type': 'ApplicationGatewaySku'},
        'ssl_policy': {'key': 'properties.sslPolicy', 'type': 'ApplicationGatewaySslPolicy'},
        'operational_state': {'key': 'properties.operationalState', 'type': 'str'},
        'gateway_ip_configurations': {'key': 'properties.gatewayIPConfigurations', 'type': '[ApplicationGatewayIPConfiguration]'},
        'authentication_certificates': {'key': 'properties.authenticationCertificates', 'type': '[ApplicationGatewayAuthenticationCertificate]'},
        'ssl_certificates': {'key': 'properties.sslCertificates', 'type': '[ApplicationGatewaySslCertificate]'},
        'frontend_ip_configurations': {'key': 'properties.frontendIPConfigurations', 'type': '[ApplicationGatewayFrontendIPConfiguration]'},
        'frontend_ports': {'key': 'properties.frontendPorts', 'type': '[ApplicationGatewayFrontendPort]'},
        'probes': {'key': 'properties.probes', 'type': '[ApplicationGatewayProbe]'},
        'backend_address_pools': {'key': 'properties.backendAddressPools', 'type': '[ApplicationGatewayBackendAddressPool]'},
        'backend_http_settings_collection': {'key': 'properties.backendHttpSettingsCollection', 'type': '[ApplicationGatewayBackendHttpSettings]'},
        'http_listeners': {'key': 'properties.httpListeners', 'type': '[ApplicationGatewayHttpListener]'},
        'url_path_maps': {'key': 'properties.urlPathMaps', 'type': '[ApplicationGatewayUrlPathMap]'},
        'request_routing_rules': {'key': 'properties.requestRoutingRules', 'type': '[ApplicationGatewayRequestRoutingRule]'},
        'redirect_configurations': {'key': 'properties.redirectConfigurations', 'type': '[ApplicationGatewayRedirectConfiguration]'},
        'web_application_firewall_configuration': {'key': 'properties.webApplicationFirewallConfiguration', 'type': 'ApplicationGatewayWebApplicationFirewallConfiguration'},
        'enable_http2': {'key': 'properties.enableHttp2', 'type': 'bool'},
        'enable_fips': {'key': 'properties.enableFips', 'type': 'bool'},
        'autoscale_configuration': {'key': 'properties.autoscaleConfiguration', 'type': 'ApplicationGatewayAutoscaleConfiguration'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGateway, self).__init__(**kwargs)
        self.etag = kwargs.get('etag', None)
        self.zones = kwargs.get('zones', None)
        self.sku = kwargs.get('sku', None)
        self.ssl_policy = kwargs.get('ssl_policy', None)
        self.operational_state = None
        self.gateway_ip_configurations = kwargs.get('gateway_ip_configurations', None)
        self.authentication_certificates = kwargs.get('authentication_certificates', None)
        self.ssl_certificates = kwargs.get('ssl_certificates', None)
        self.frontend_ip_configurations = kwargs.get('frontend_ip_configurations', None)
        self.frontend_ports = kwargs.get('frontend_ports', None)
        self.probes = kwargs.get('probes', None)
        self.backend_address_pools = kwargs.get('backend_address_pools', None)
        self.backend_http_settings_collection = kwargs.get('backend_http_settings_collection', None)
        self.http_listeners = kwargs.get('http_listeners', None)
        self.url_path_maps = kwargs.get('url_path_maps', None)
        self.request_routing_rules = kwargs.get('request_routing_rules', None)
        self.redirect_configurations = kwargs.get('redirect_configurations', None)
        self.web_application_firewall_configuration = kwargs.get('web_application_firewall_configuration', None)
        self.enable_http2 = kwargs.get('enable_http2', None)
        self.enable_fips = kwargs.get('enable_fips', None)
        self.autoscale_configuration = kwargs.get('autoscale_configuration', None)
        self.resource_guid = kwargs.get('resource_guid', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class SubResource(msrest.serialization.Model):
    """Reference to another subresource.

    :param id: Resource ID.
    :type id: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(SubResource, self).__init__(**kwargs)
        self.id = kwargs.get('id', None)


class ApplicationGatewayAuthenticationCertificate(SubResource):
    """Authentication certificates of an application gateway.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the authentication certificate that is unique within an Application
     Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param data: Certificate public data.
    :type data: str
    :param provisioning_state: Provisioning state of the authentication certificate resource.
     Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'data': {'key': 'properties.data', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayAuthenticationCertificate, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.type = kwargs.get('type', None)
        self.data = kwargs.get('data', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class ApplicationGatewayAutoscaleBounds(msrest.serialization.Model):
    """Application Gateway autoscale bounds on number of Application Gateway instances.

    All required parameters must be populated in order to send to Azure.

    :param min: Required. Lower bound on number of Application Gateway instances.
    :type min: int
    :param max: Required. Upper bound on number of Application Gateway instances.
    :type max: int
    """

    _validation = {
        'min': {'required': True},
        'max': {'required': True},
    }

    _attribute_map = {
        'min': {'key': 'min', 'type': 'int'},
        'max': {'key': 'max', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayAutoscaleBounds, self).__init__(**kwargs)
        self.min = kwargs['min']
        self.max = kwargs['max']


class ApplicationGatewayAutoscaleConfiguration(msrest.serialization.Model):
    """Application Gateway autoscale configuration.

    All required parameters must be populated in order to send to Azure.

    :param bounds: Required. Autoscale bounds.
    :type bounds: ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayAutoscaleBounds
    """

    _validation = {
        'bounds': {'required': True},
    }

    _attribute_map = {
        'bounds': {'key': 'bounds', 'type': 'ApplicationGatewayAutoscaleBounds'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayAutoscaleConfiguration, self).__init__(**kwargs)
        self.bounds = kwargs['bounds']


class ApplicationGatewayAvailableSslOptions(Resource):
    """Response for ApplicationGatewayAvailableSslOptions API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param predefined_policies: List of available Ssl predefined policy.
    :type predefined_policies: list[~azure.mgmt.network.v2018_06_01.models.SubResource]
    :param default_policy: Name of the Ssl predefined policy applied by default to application
     gateway. Possible values include: "AppGwSslPolicy20150501", "AppGwSslPolicy20170401",
     "AppGwSslPolicy20170401S".
    :type default_policy: str or
     ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewaySslPolicyName
    :param available_cipher_suites: List of available Ssl cipher suites.
    :type available_cipher_suites: list[str or
     ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewaySslCipherSuite]
    :param available_protocols: List of available Ssl protocols.
    :type available_protocols: list[str or
     ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewaySslProtocol]
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'predefined_policies': {'key': 'properties.predefinedPolicies', 'type': '[SubResource]'},
        'default_policy': {'key': 'properties.defaultPolicy', 'type': 'str'},
        'available_cipher_suites': {'key': 'properties.availableCipherSuites', 'type': '[str]'},
        'available_protocols': {'key': 'properties.availableProtocols', 'type': '[str]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayAvailableSslOptions, self).__init__(**kwargs)
        self.predefined_policies = kwargs.get('predefined_policies', None)
        self.default_policy = kwargs.get('default_policy', None)
        self.available_cipher_suites = kwargs.get('available_cipher_suites', None)
        self.available_protocols = kwargs.get('available_protocols', None)


class ApplicationGatewayAvailableSslPredefinedPolicies(msrest.serialization.Model):
    """Response for ApplicationGatewayAvailableSslOptions API service call.

    :param value: List of available Ssl predefined policy.
    :type value:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewaySslPredefinedPolicy]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationGatewaySslPredefinedPolicy]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayAvailableSslPredefinedPolicies, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class ApplicationGatewayAvailableWafRuleSetsResult(msrest.serialization.Model):
    """Response for ApplicationGatewayAvailableWafRuleSets API service call.

    :param value: The list of application gateway rule sets.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayFirewallRuleSet]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationGatewayFirewallRuleSet]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayAvailableWafRuleSetsResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)


class ApplicationGatewayBackendAddress(msrest.serialization.Model):
    """Backend address of an application gateway.

    :param fqdn: Fully qualified domain name (FQDN).
    :type fqdn: str
    :param ip_address: IP address.
    :type ip_address: str
    """

    _attribute_map = {
        'fqdn': {'key': 'fqdn', 'type': 'str'},
        'ip_address': {'key': 'ipAddress', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayBackendAddress, self).__init__(**kwargs)
        self.fqdn = kwargs.get('fqdn', None)
        self.ip_address = kwargs.get('ip_address', None)


class ApplicationGatewayBackendAddressPool(SubResource):
    """Backend Address Pool of an application gateway.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the backend address pool that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param backend_ip_configurations: Collection of references to IPs defined in network
     interfaces.
    :type backend_ip_configurations:
     list[~azure.mgmt.network.v2018_06_01.models.NetworkInterfaceIPConfiguration]
    :param backend_addresses: Backend addresses.
    :type backend_addresses:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayBackendAddress]
    :param provisioning_state: Provisioning state of the backend address pool resource. Possible
     values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'backend_ip_configurations': {'key': 'properties.backendIPConfigurations', 'type': '[NetworkInterfaceIPConfiguration]'},
        'backend_addresses': {'key': 'properties.backendAddresses', 'type': '[ApplicationGatewayBackendAddress]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayBackendAddressPool, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.type = kwargs.get('type', None)
        self.backend_ip_configurations = kwargs.get('backend_ip_configurations', None)
        self.backend_addresses = kwargs.get('backend_addresses', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class ApplicationGatewayBackendHealth(msrest.serialization.Model):
    """List of ApplicationGatewayBackendHealthPool resources.

    :param backend_address_pools:
    :type backend_address_pools:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayBackendHealthPool]
    """

    _attribute_map = {
        'backend_address_pools': {'key': 'backendAddressPools', 'type': '[ApplicationGatewayBackendHealthPool]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayBackendHealth, self).__init__(**kwargs)
        self.backend_address_pools = kwargs.get('backend_address_pools', None)


class ApplicationGatewayBackendHealthHttpSettings(msrest.serialization.Model):
    """Application gateway BackendHealthHttp settings.

    :param backend_http_settings: Reference of an ApplicationGatewayBackendHttpSettings resource.
    :type backend_http_settings:
     ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayBackendHttpSettings
    :param servers: List of ApplicationGatewayBackendHealthServer resources.
    :type servers:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayBackendHealthServer]
    """

    _attribute_map = {
        'backend_http_settings': {'key': 'backendHttpSettings', 'type': 'ApplicationGatewayBackendHttpSettings'},
        'servers': {'key': 'servers', 'type': '[ApplicationGatewayBackendHealthServer]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayBackendHealthHttpSettings, self).__init__(**kwargs)
        self.backend_http_settings = kwargs.get('backend_http_settings', None)
        self.servers = kwargs.get('servers', None)


class ApplicationGatewayBackendHealthPool(msrest.serialization.Model):
    """Application gateway BackendHealth pool.

    :param backend_address_pool: Reference of an ApplicationGatewayBackendAddressPool resource.
    :type backend_address_pool:
     ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayBackendAddressPool
    :param backend_http_settings_collection: List of ApplicationGatewayBackendHealthHttpSettings
     resources.
    :type backend_http_settings_collection:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayBackendHealthHttpSettings]
    """

    _attribute_map = {
        'backend_address_pool': {'key': 'backendAddressPool', 'type': 'ApplicationGatewayBackendAddressPool'},
        'backend_http_settings_collection': {'key': 'backendHttpSettingsCollection', 'type': '[ApplicationGatewayBackendHealthHttpSettings]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayBackendHealthPool, self).__init__(**kwargs)
        self.backend_address_pool = kwargs.get('backend_address_pool', None)
        self.backend_http_settings_collection = kwargs.get('backend_http_settings_collection', None)


class ApplicationGatewayBackendHealthServer(msrest.serialization.Model):
    """Application gateway backendhealth http settings.

    :param address: IP address or FQDN of backend server.
    :type address: str
    :param ip_configuration: Reference of IP configuration of backend server.
    :type ip_configuration:
     ~azure.mgmt.network.v2018_06_01.models.NetworkInterfaceIPConfiguration
    :param health: Health of backend server. Possible values include: "Unknown", "Up", "Down",
     "Partial", "Draining".
    :type health: str or
     ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayBackendHealthServerHealth
    """

    _attribute_map = {
        'address': {'key': 'address', 'type': 'str'},
        'ip_configuration': {'key': 'ipConfiguration', 'type': 'NetworkInterfaceIPConfiguration'},
        'health': {'key': 'health', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayBackendHealthServer, self).__init__(**kwargs)
        self.address = kwargs.get('address', None)
        self.ip_configuration = kwargs.get('ip_configuration', None)
        self.health = kwargs.get('health', None)


class ApplicationGatewayBackendHttpSettings(SubResource):
    """Backend address pool settings of an application gateway.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the backend http settings that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param port: The destination port on the backend.
    :type port: int
    :param protocol: The protocol used to communicate with the backend. Possible values are
     'Http' and 'Https'. Possible values include: "Http", "Https".
    :type protocol: str or ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayProtocol
    :param cookie_based_affinity: Cookie based affinity. Possible values include: "Enabled",
     "Disabled".
    :type cookie_based_affinity: str or
     ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayCookieBasedAffinity
    :param request_timeout: Request timeout in seconds. Application Gateway will fail the request
     if response is not received within RequestTimeout. Acceptable values are from 1 second to
     86400 seconds.
    :type request_timeout: int
    :param probe: Probe resource of an application gateway.
    :type probe: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param authentication_certificates: Array of references to application gateway authentication
     certificates.
    :type authentication_certificates: list[~azure.mgmt.network.v2018_06_01.models.SubResource]
    :param connection_draining: Connection draining of the backend http settings resource.
    :type connection_draining:
     ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayConnectionDraining
    :param host_name: Host header to be sent to the backend servers.
    :type host_name: str
    :param pick_host_name_from_backend_address: Whether the host header should be picked from the
     host name of the backend server. Default value is false.
    :type pick_host_name_from_backend_address: bool
    :param affinity_cookie_name: Cookie name to use for the affinity cookie.
    :type affinity_cookie_name: str
    :param probe_enabled: Whether the probe is enabled. Default value is false.
    :type probe_enabled: bool
    :param path: Path which should be used as a prefix for all HTTP requests. Null means no path
     will be prefixed. Default value is null.
    :type path: str
    :param provisioning_state: Provisioning state of the backend http settings resource. Possible
     values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'port': {'key': 'properties.port', 'type': 'int'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'cookie_based_affinity': {'key': 'properties.cookieBasedAffinity', 'type': 'str'},
        'request_timeout': {'key': 'properties.requestTimeout', 'type': 'int'},
        'probe': {'key': 'properties.probe', 'type': 'SubResource'},
        'authentication_certificates': {'key': 'properties.authenticationCertificates', 'type': '[SubResource]'},
        'connection_draining': {'key': 'properties.connectionDraining', 'type': 'ApplicationGatewayConnectionDraining'},
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
        'pick_host_name_from_backend_address': {'key': 'properties.pickHostNameFromBackendAddress', 'type': 'bool'},
        'affinity_cookie_name': {'key': 'properties.affinityCookieName', 'type': 'str'},
        'probe_enabled': {'key': 'properties.probeEnabled', 'type': 'bool'},
        'path': {'key': 'properties.path', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayBackendHttpSettings, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.type = kwargs.get('type', None)
        self.port = kwargs.get('port', None)
        self.protocol = kwargs.get('protocol', None)
        self.cookie_based_affinity = kwargs.get('cookie_based_affinity', None)
        self.request_timeout = kwargs.get('request_timeout', None)
        self.probe = kwargs.get('probe', None)
        self.authentication_certificates = kwargs.get('authentication_certificates', None)
        self.connection_draining = kwargs.get('connection_draining', None)
        self.host_name = kwargs.get('host_name', None)
        self.pick_host_name_from_backend_address = kwargs.get('pick_host_name_from_backend_address', None)
        self.affinity_cookie_name = kwargs.get('affinity_cookie_name', None)
        self.probe_enabled = kwargs.get('probe_enabled', None)
        self.path = kwargs.get('path', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class ApplicationGatewayConnectionDraining(msrest.serialization.Model):
    """Connection draining allows open connections to a backend server to be active for a
    specified time after the backend server got removed from the configuration.

    All required parameters must be populated in order to send to Azure.

    :param enabled: Required. Whether connection draining is enabled or not.
    :type enabled: bool
    :param drain_timeout_in_sec: Required. The number of seconds connection draining is active.
     Acceptable values are from 1 second to 3600 seconds.
    :type drain_timeout_in_sec: int
    """

    _validation = {
        'enabled': {'required': True},
        'drain_timeout_in_sec': {'required': True, 'maximum': 3600, 'minimum': 1},
    }

    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'drain_timeout_in_sec': {'key': 'drainTimeoutInSec', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayConnectionDraining, self).__init__(**kwargs)
        self.enabled = kwargs['enabled']
        self.drain_timeout_in_sec = kwargs['drain_timeout_in_sec']


class ApplicationGatewayFirewallDisabledRuleGroup(msrest.serialization.Model):
    """Allows to disable rules within a rule group or an entire rule group.

    All required parameters must be populated in order to send to Azure.

    :param rule_group_name: Required. The name of the rule group that will be disabled.
    :type rule_group_name: str
    :param rules: The list of rules that will be disabled. If null, all rules of the rule group
     will be disabled.
    :type rules: list[int]
    """

    _validation = {
        'rule_group_name': {'required': True},
    }

    _attribute_map = {
        'rule_group_name': {'key': 'ruleGroupName', 'type': 'str'},
        'rules': {'key': 'rules', 'type': '[int]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayFirewallDisabledRuleGroup, self).__init__(**kwargs)
        self.rule_group_name = kwargs['rule_group_name']
        self.rules = kwargs.get('rules', None)


class ApplicationGatewayFirewallRule(msrest.serialization.Model):
    """A web application firewall rule.

    All required parameters must be populated in order to send to Azure.

    :param rule_id: Required. The identifier of the web application firewall rule.
    :type rule_id: int
    :param description: The description of the web application firewall rule.
    :type description: str
    """

    _validation = {
        'rule_id': {'required': True},
    }

    _attribute_map = {
        'rule_id': {'key': 'ruleId', 'type': 'int'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayFirewallRule, self).__init__(**kwargs)
        self.rule_id = kwargs['rule_id']
        self.description = kwargs.get('description', None)


class ApplicationGatewayFirewallRuleGroup(msrest.serialization.Model):
    """A web application firewall rule group.

    All required parameters must be populated in order to send to Azure.

    :param rule_group_name: Required. The name of the web application firewall rule group.
    :type rule_group_name: str
    :param description: The description of the web application firewall rule group.
    :type description: str
    :param rules: Required. The rules of the web application firewall rule group.
    :type rules: list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayFirewallRule]
    """

    _validation = {
        'rule_group_name': {'required': True},
        'rules': {'required': True},
    }

    _attribute_map = {
        'rule_group_name': {'key': 'ruleGroupName', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'rules': {'key': 'rules', 'type': '[ApplicationGatewayFirewallRule]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayFirewallRuleGroup, self).__init__(**kwargs)
        self.rule_group_name = kwargs['rule_group_name']
        self.description = kwargs.get('description', None)
        self.rules = kwargs['rules']


class ApplicationGatewayFirewallRuleSet(Resource):
    """A web application firewall rule set.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param provisioning_state: The provisioning state of the web application firewall rule set.
    :type provisioning_state: str
    :param rule_set_type: The type of the web application firewall rule set.
    :type rule_set_type: str
    :param rule_set_version: The version of the web application firewall rule set type.
    :type rule_set_version: str
    :param rule_groups: The rule groups of the web application firewall rule set.
    :type rule_groups:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayFirewallRuleGroup]
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'rule_set_type': {'key': 'properties.ruleSetType', 'type': 'str'},
        'rule_set_version': {'key': 'properties.ruleSetVersion', 'type': 'str'},
        'rule_groups': {'key': 'properties.ruleGroups', 'type': '[ApplicationGatewayFirewallRuleGroup]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayFirewallRuleSet, self).__init__(**kwargs)
        self.provisioning_state = kwargs.get('provisioning_state', None)
        self.rule_set_type = kwargs.get('rule_set_type', None)
        self.rule_set_version = kwargs.get('rule_set_version', None)
        self.rule_groups = kwargs.get('rule_groups', None)


class ApplicationGatewayFrontendIPConfiguration(SubResource):
    """Frontend IP configuration of an application gateway.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the frontend IP configuration that is unique within an Application
     Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param private_ip_address: PrivateIPAddress of the network interface IP Configuration.
    :type private_ip_address: str
    :param private_ip_allocation_method: PrivateIP allocation method. Possible values include:
     "Static", "Dynamic".
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2018_06_01.models.IPAllocationMethod
    :param subnet: Reference of the subnet resource.
    :type subnet: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param public_ip_address: Reference of the PublicIP resource.
    :type public_ip_address: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param provisioning_state: Provisioning state of the public IP resource. Possible values are:
     'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
        'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayFrontendIPConfiguration, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.type = kwargs.get('type', None)
        self.private_ip_address = kwargs.get('private_ip_address', None)
        self.private_ip_allocation_method = kwargs.get('private_ip_allocation_method', None)
        self.subnet = kwargs.get('subnet', None)
        self.public_ip_address = kwargs.get('public_ip_address', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class ApplicationGatewayFrontendPort(SubResource):
    """Frontend port of an application gateway.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the frontend port that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param port: Frontend port.
    :type port: int
    :param provisioning_state: Provisioning state of the frontend port resource. Possible values
     are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'port': {'key': 'properties.port', 'type': 'int'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayFrontendPort, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.type = kwargs.get('type', None)
        self.port = kwargs.get('port', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class ApplicationGatewayHttpListener(SubResource):
    """Http listener of an application gateway.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the HTTP listener that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param frontend_ip_configuration: Frontend IP configuration resource of an application
     gateway.
    :type frontend_ip_configuration: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param frontend_port: Frontend port resource of an application gateway.
    :type frontend_port: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param protocol: Protocol of the HTTP listener. Possible values are 'Http' and 'Https'.
     Possible values include: "Http", "Https".
    :type protocol: str or ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayProtocol
    :param host_name: Host name of HTTP listener.
    :type host_name: str
    :param ssl_certificate: SSL certificate resource of an application gateway.
    :type ssl_certificate: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param require_server_name_indication: Applicable only if protocol is https. Enables SNI for
     multi-hosting.
    :type require_server_name_indication: bool
    :param provisioning_state: Provisioning state of the HTTP listener resource. Possible values
     are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
        'frontend_port': {'key': 'properties.frontendPort', 'type': 'SubResource'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
        'ssl_certificate': {'key': 'properties.sslCertificate', 'type': 'SubResource'},
        'require_server_name_indication': {'key': 'properties.requireServerNameIndication', 'type': 'bool'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayHttpListener, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.type = kwargs.get('type', None)
        self.frontend_ip_configuration = kwargs.get('frontend_ip_configuration', None)
        self.frontend_port = kwargs.get('frontend_port', None)
        self.protocol = kwargs.get('protocol', None)
        self.host_name = kwargs.get('host_name', None)
        self.ssl_certificate = kwargs.get('ssl_certificate', None)
        self.require_server_name_indication = kwargs.get('require_server_name_indication', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class ApplicationGatewayIPConfiguration(SubResource):
    """IP configuration of an application gateway. Currently 1 public and 1 private IP
    configuration is allowed.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the IP configuration that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param subnet: Reference of the subnet resource. A subnet from where application gateway gets
     its private address.
    :type subnet: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param provisioning_state: Provisioning state of the application gateway subnet resource.
     Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayIPConfiguration, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.type = kwargs.get('type', None)
        self.subnet = kwargs.get('subnet', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class ApplicationGatewayListResult(msrest.serialization.Model):
    """Response for ListApplicationGateways API service call.

    :param value: List of application gateways in a resource group.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.ApplicationGateway]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationGateway]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class ApplicationGatewayPathRule(SubResource):
    """Path rule of URL path map of an application gateway.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the path rule that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param paths: Path rules of URL path map.
    :type paths: list[str]
    :param backend_address_pool: Backend address pool resource of URL path map path rule.
    :type backend_address_pool: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param backend_http_settings: Backend http settings resource of URL path map path rule.
    :type backend_http_settings: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param redirect_configuration: Redirect configuration resource of URL path map path rule.
    :type redirect_configuration: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param provisioning_state: Provisioning state of the path rule resource. Possible values are:
     'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'paths': {'key': 'properties.paths', 'type': '[str]'},
        'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'},
        'backend_http_settings': {'key': 'properties.backendHttpSettings', 'type': 'SubResource'},
        'redirect_configuration': {'key': 'properties.redirectConfiguration', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayPathRule, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.type = kwargs.get('type', None)
        self.paths = kwargs.get('paths', None)
        self.backend_address_pool = kwargs.get('backend_address_pool', None)
        self.backend_http_settings = kwargs.get('backend_http_settings', None)
        self.redirect_configuration = kwargs.get('redirect_configuration', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class ApplicationGatewayProbe(SubResource):
    """Probe of the application gateway.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the probe that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param protocol: The protocol used for the probe. Possible values are 'Http' and 'Https'.
     Possible values include: "Http", "Https".
    :type protocol: str or ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayProtocol
    :param host: Host name to send the probe to.
    :type host: str
    :param path: Relative path of probe. Valid path starts from '/'. Probe is sent to
     :code:`<Protocol>`://:code:`<host>`::code:`<port>`:code:`<path>`.
    :type path: str
    :param interval: The probing interval in seconds. This is the time interval between two
     consecutive probes. Acceptable values are from 1 second to 86400 seconds.
:type interval: int :param timeout: the probe timeout in seconds. Probe marked as failed if valid response is not received with this timeout period. Acceptable values are from 1 second to 86400 seconds. :type timeout: int :param unhealthy_threshold: The probe retry count. Backend server is marked down after consecutive probe failure count reaches UnhealthyThreshold. Acceptable values are from 1 second to 20. :type unhealthy_threshold: int :param pick_host_name_from_backend_http_settings: Whether the host header should be picked from the backend http settings. Default value is false. :type pick_host_name_from_backend_http_settings: bool :param min_servers: Minimum number of servers that are always marked healthy. Default value is 0. :type min_servers: int :param match: Criterion for classifying a healthy probe response. :type match: ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayProbeHealthResponseMatch :param provisioning_state: Provisioning state of the backend http settings resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :type provisioning_state: str """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'protocol': {'key': 'properties.protocol', 'type': 'str'}, 'host': {'key': 'properties.host', 'type': 'str'}, 'path': {'key': 'properties.path', 'type': 'str'}, 'interval': {'key': 'properties.interval', 'type': 'int'}, 'timeout': {'key': 'properties.timeout', 'type': 'int'}, 'unhealthy_threshold': {'key': 'properties.unhealthyThreshold', 'type': 'int'}, 'pick_host_name_from_backend_http_settings': {'key': 'properties.pickHostNameFromBackendHttpSettings', 'type': 'bool'}, 'min_servers': {'key': 'properties.minServers', 'type': 'int'}, 'match': {'key': 'properties.match', 'type': 'ApplicationGatewayProbeHealthResponseMatch'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(ApplicationGatewayProbe, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = kwargs.get('etag', None) self.type = kwargs.get('type', None) self.protocol = kwargs.get('protocol', None) self.host = kwargs.get('host', None) self.path = kwargs.get('path', None) self.interval = kwargs.get('interval', None) self.timeout = kwargs.get('timeout', None) self.unhealthy_threshold = kwargs.get('unhealthy_threshold', None) self.pick_host_name_from_backend_http_settings = kwargs.get('pick_host_name_from_backend_http_settings', None) self.min_servers = kwargs.get('min_servers', None) self.match = kwargs.get('match', None) self.provisioning_state = kwargs.get('provisioning_state', None) class ApplicationGatewayProbeHealthResponseMatch(msrest.serialization.Model): """Application gateway probe health response match. :param body: Body that must be contained in the health response. Default value is empty. :type body: str :param status_codes: Allowed ranges of healthy status codes. Default range of healthy status codes is 200-399. :type status_codes: list[str] """ _attribute_map = { 'body': {'key': 'body', 'type': 'str'}, 'status_codes': {'key': 'statusCodes', 'type': '[str]'}, } def __init__( self, **kwargs ): super(ApplicationGatewayProbeHealthResponseMatch, self).__init__(**kwargs) self.body = kwargs.get('body', None) self.status_codes = kwargs.get('status_codes', None) class ApplicationGatewayRedirectConfiguration(SubResource): """Redirect configuration of an application gateway. 
:param id: Resource ID. :type id: str :param name: Name of the redirect configuration that is unique within an Application Gateway. :type name: str :param etag: A unique read-only string that changes whenever the resource is updated. :type etag: str :param type: Type of the resource. :type type: str :param redirect_type: Supported http redirection types - Permanent, Temporary, Found, SeeOther. Possible values include: "Permanent", "Found", "SeeOther", "Temporary". :type redirect_type: str or ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayRedirectType :param target_listener: Reference to a listener to redirect the request to. :type target_listener: ~azure.mgmt.network.v2018_06_01.models.SubResource :param target_url: Url to redirect the request to. :type target_url: str :param include_path: Include path in the redirected url. :type include_path: bool :param include_query_string: Include query string in the redirected url. :type include_query_string: bool :param request_routing_rules: Request routing specifying redirect configuration. :type request_routing_rules: list[~azure.mgmt.network.v2018_06_01.models.SubResource] :param url_path_maps: Url path maps specifying default redirect configuration. :type url_path_maps: list[~azure.mgmt.network.v2018_06_01.models.SubResource] :param path_rules: Path rules specifying redirect configuration. :type path_rules: list[~azure.mgmt.network.v2018_06_01.models.SubResource] """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'redirect_type': {'key': 'properties.redirectType', 'type': 'str'}, 'target_listener': {'key': 'properties.targetListener', 'type': 'SubResource'}, 'target_url': {'key': 'properties.targetUrl', 'type': 'str'}, 'include_path': {'key': 'properties.includePath', 'type': 'bool'}, 'include_query_string': {'key': 'properties.includeQueryString', 'type': 'bool'}, 'request_routing_rules': {'key': 'properties.requestRoutingRules', 'type': '[SubResource]'}, 'url_path_maps': {'key': 'properties.urlPathMaps', 'type': '[SubResource]'}, 'path_rules': {'key': 'properties.pathRules', 'type': '[SubResource]'}, } def __init__( self, **kwargs ): super(ApplicationGatewayRedirectConfiguration, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = kwargs.get('etag', None) self.type = kwargs.get('type', None) self.redirect_type = kwargs.get('redirect_type', None) self.target_listener = kwargs.get('target_listener', None) self.target_url = kwargs.get('target_url', None) self.include_path = kwargs.get('include_path', None) self.include_query_string = kwargs.get('include_query_string', None) self.request_routing_rules = kwargs.get('request_routing_rules', None) self.url_path_maps = kwargs.get('url_path_maps', None) self.path_rules = kwargs.get('path_rules', None) class ApplicationGatewayRequestRoutingRule(SubResource): """Request routing rule of an application gateway. :param id: Resource ID. :type id: str :param name: Name of the request routing rule that is unique within an Application Gateway. :type name: str :param etag: A unique read-only string that changes whenever the resource is updated. :type etag: str :param type: Type of the resource. :type type: str :param rule_type: Rule type. Possible values include: "Basic", "PathBasedRouting". 
:type rule_type: str or ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayRequestRoutingRuleType :param backend_address_pool: Backend address pool resource of the application gateway. :type backend_address_pool: ~azure.mgmt.network.v2018_06_01.models.SubResource :param backend_http_settings: Backend http settings resource of the application gateway. :type backend_http_settings: ~azure.mgmt.network.v2018_06_01.models.SubResource :param http_listener: Http listener resource of the application gateway. :type http_listener: ~azure.mgmt.network.v2018_06_01.models.SubResource :param url_path_map: URL path map resource of the application gateway. :type url_path_map: ~azure.mgmt.network.v2018_06_01.models.SubResource :param redirect_configuration: Redirect configuration resource of the application gateway. :type redirect_configuration: ~azure.mgmt.network.v2018_06_01.models.SubResource :param provisioning_state: Provisioning state of the request routing rule resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :type provisioning_state: str """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'rule_type': {'key': 'properties.ruleType', 'type': 'str'}, 'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'}, 'backend_http_settings': {'key': 'properties.backendHttpSettings', 'type': 'SubResource'}, 'http_listener': {'key': 'properties.httpListener', 'type': 'SubResource'}, 'url_path_map': {'key': 'properties.urlPathMap', 'type': 'SubResource'}, 'redirect_configuration': {'key': 'properties.redirectConfiguration', 'type': 'SubResource'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(ApplicationGatewayRequestRoutingRule, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = kwargs.get('etag', None) self.type = kwargs.get('type', None) self.rule_type = kwargs.get('rule_type', None) self.backend_address_pool = kwargs.get('backend_address_pool', None) self.backend_http_settings = kwargs.get('backend_http_settings', None) self.http_listener = kwargs.get('http_listener', None) self.url_path_map = kwargs.get('url_path_map', None) self.redirect_configuration = kwargs.get('redirect_configuration', None) self.provisioning_state = kwargs.get('provisioning_state', None) class ApplicationGatewaySku(msrest.serialization.Model): """SKU of an application gateway. :param name: Name of an application gateway SKU. Possible values include: "Standard_Small", "Standard_Medium", "Standard_Large", "WAF_Medium", "WAF_Large", "Standard_v2", "WAF_v2". :type name: str or ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewaySkuName :param tier: Tier of an application gateway. Possible values include: "Standard", "WAF", "Standard_v2", "WAF_v2". :type tier: str or ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayTier :param capacity: Capacity (instance count) of an application gateway. 
    :type capacity: int
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'capacity': {'key': 'capacity', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewaySku, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.tier = kwargs.get('tier', None)
        self.capacity = kwargs.get('capacity', None)


class ApplicationGatewaySslCertificate(SubResource):
    """SSL certificates of an application gateway.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the SSL certificate that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param data: Base-64 encoded pfx certificate. Only applicable in PUT Request.
    :type data: str
    :param password: Password for the pfx file specified in data. Only applicable in PUT request.
    :type password: str
    :param public_cert_data: Base-64 encoded Public cert data corresponding to pfx specified in
     data. Only applicable in GET request.
    :type public_cert_data: str
    :param provisioning_state: Provisioning state of the SSL certificate resource. Possible
     values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'data': {'key': 'properties.data', 'type': 'str'},
        'password': {'key': 'properties.password', 'type': 'str'},
        'public_cert_data': {'key': 'properties.publicCertData', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewaySslCertificate, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.type = kwargs.get('type', None)
        self.data = kwargs.get('data', None)
        self.password = kwargs.get('password', None)
        self.public_cert_data = kwargs.get('public_cert_data', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class ApplicationGatewaySslPolicy(msrest.serialization.Model):
    """Application Gateway Ssl policy.

    :param disabled_ssl_protocols: Ssl protocols to be disabled on application gateway.
    :type disabled_ssl_protocols: list[str or
     ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewaySslProtocol]
    :param policy_type: Type of Ssl Policy. Possible values include: "Predefined", "Custom".
    :type policy_type: str or
     ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewaySslPolicyType
    :param policy_name: Name of Ssl predefined policy. Possible values include:
     "AppGwSslPolicy20150501", "AppGwSslPolicy20170401", "AppGwSslPolicy20170401S".
    :type policy_name: str or
     ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewaySslPolicyName
    :param cipher_suites: Ssl cipher suites to be enabled in the specified order to application
     gateway.
    :type cipher_suites: list[str or
     ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewaySslCipherSuite]
    :param min_protocol_version: Minimum version of Ssl protocol to be supported on application
     gateway. Possible values include: "TLSv1_0", "TLSv1_1", "TLSv1_2".
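    .. admonition:: Example (editor's illustrative sketch, not generated code)

        A minimal sketch of a custom Ssl policy; the cipher suite and protocol
        strings are assumptions chosen to match the value style documented
        above::

            ssl_policy = ApplicationGatewaySslPolicy(
                policy_type='Custom',
                cipher_suites=['TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384'],
                min_protocol_version='TLSv1_2',
            )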
:type min_protocol_version: str or ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewaySslProtocol """ _attribute_map = { 'disabled_ssl_protocols': {'key': 'disabledSslProtocols', 'type': '[str]'}, 'policy_type': {'key': 'policyType', 'type': 'str'}, 'policy_name': {'key': 'policyName', 'type': 'str'}, 'cipher_suites': {'key': 'cipherSuites', 'type': '[str]'}, 'min_protocol_version': {'key': 'minProtocolVersion', 'type': 'str'}, } def __init__( self, **kwargs ): super(ApplicationGatewaySslPolicy, self).__init__(**kwargs) self.disabled_ssl_protocols = kwargs.get('disabled_ssl_protocols', None) self.policy_type = kwargs.get('policy_type', None) self.policy_name = kwargs.get('policy_name', None) self.cipher_suites = kwargs.get('cipher_suites', None) self.min_protocol_version = kwargs.get('min_protocol_version', None) class ApplicationGatewaySslPredefinedPolicy(SubResource): """An Ssl predefined policy. :param id: Resource ID. :type id: str :param name: Name of the Ssl predefined policy. :type name: str :param cipher_suites: Ssl cipher suites to be enabled in the specified order for application gateway. :type cipher_suites: list[str or ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewaySslCipherSuite] :param min_protocol_version: Minimum version of Ssl protocol to be supported on application gateway. Possible values include: "TLSv1_0", "TLSv1_1", "TLSv1_2". :type min_protocol_version: str or ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewaySslProtocol """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'cipher_suites': {'key': 'properties.cipherSuites', 'type': '[str]'}, 'min_protocol_version': {'key': 'properties.minProtocolVersion', 'type': 'str'}, } def __init__( self, **kwargs ): super(ApplicationGatewaySslPredefinedPolicy, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.cipher_suites = kwargs.get('cipher_suites', None) self.min_protocol_version = kwargs.get('min_protocol_version', None) class ApplicationGatewayUrlPathMap(SubResource): """UrlPathMaps give a url path to the backend mapping information for PathBasedRouting. :param id: Resource ID. :type id: str :param name: Name of the URL path map that is unique within an Application Gateway. :type name: str :param etag: A unique read-only string that changes whenever the resource is updated. :type etag: str :param type: Type of the resource. :type type: str :param default_backend_address_pool: Default backend address pool resource of URL path map. :type default_backend_address_pool: ~azure.mgmt.network.v2018_06_01.models.SubResource :param default_backend_http_settings: Default backend http settings resource of URL path map. :type default_backend_http_settings: ~azure.mgmt.network.v2018_06_01.models.SubResource :param default_redirect_configuration: Default redirect configuration resource of URL path map. :type default_redirect_configuration: ~azure.mgmt.network.v2018_06_01.models.SubResource :param path_rules: Path rule of URL path map resource. :type path_rules: list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayPathRule] :param provisioning_state: Provisioning state of the backend http settings resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
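    .. admonition:: Example (editor's illustrative sketch, not generated code)

        A minimal sketch of a URL path map with a default backend; the
        SubResource ID is a placeholder assumption and the path rules are left
        empty::

            url_path_map = ApplicationGatewayUrlPathMap(
                name='url-map1',
                default_backend_address_pool=SubResource(id='<pool-resource-id>'),
                path_rules=[],
            )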
:type provisioning_state: str """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'default_backend_address_pool': {'key': 'properties.defaultBackendAddressPool', 'type': 'SubResource'}, 'default_backend_http_settings': {'key': 'properties.defaultBackendHttpSettings', 'type': 'SubResource'}, 'default_redirect_configuration': {'key': 'properties.defaultRedirectConfiguration', 'type': 'SubResource'}, 'path_rules': {'key': 'properties.pathRules', 'type': '[ApplicationGatewayPathRule]'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(ApplicationGatewayUrlPathMap, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = kwargs.get('etag', None) self.type = kwargs.get('type', None) self.default_backend_address_pool = kwargs.get('default_backend_address_pool', None) self.default_backend_http_settings = kwargs.get('default_backend_http_settings', None) self.default_redirect_configuration = kwargs.get('default_redirect_configuration', None) self.path_rules = kwargs.get('path_rules', None) self.provisioning_state = kwargs.get('provisioning_state', None) class ApplicationGatewayWebApplicationFirewallConfiguration(msrest.serialization.Model): """Application gateway web application firewall configuration. All required parameters must be populated in order to send to Azure. :param enabled: Required. Whether the web application firewall is enabled or not. :type enabled: bool :param firewall_mode: Required. Web application firewall mode. Possible values include: "Detection", "Prevention". :type firewall_mode: str or ~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayFirewallMode :param rule_set_type: Required. The type of the web application firewall rule set. Possible values are: 'OWASP'. :type rule_set_type: str :param rule_set_version: Required. The version of the rule set type. :type rule_set_version: str :param disabled_rule_groups: The disabled rule groups. :type disabled_rule_groups: list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayFirewallDisabledRuleGroup] :param request_body_check: Whether allow WAF to check request Body. :type request_body_check: bool :param max_request_body_size: Maximum request body size for WAF. 
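    .. admonition:: Example (editor's illustrative sketch, not generated code)

        The four parameters marked Required are read with kwargs['...'] in
        __init__ below, so omitting any of them raises KeyError; the range
        check on max_request_body_size (8-128) is applied by the msrest
        serializer, not at construction time. The values shown are
        assumptions::

            waf_config = ApplicationGatewayWebApplicationFirewallConfiguration(
                enabled=True,
                firewall_mode='Prevention',
                rule_set_type='OWASP',
                rule_set_version='3.0',
                max_request_body_size=128,
            )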
    :type max_request_body_size: int
    """

    _validation = {
        'enabled': {'required': True},
        'firewall_mode': {'required': True},
        'rule_set_type': {'required': True},
        'rule_set_version': {'required': True},
        'max_request_body_size': {'maximum': 128, 'minimum': 8},
    }

    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'firewall_mode': {'key': 'firewallMode', 'type': 'str'},
        'rule_set_type': {'key': 'ruleSetType', 'type': 'str'},
        'rule_set_version': {'key': 'ruleSetVersion', 'type': 'str'},
        'disabled_rule_groups': {'key': 'disabledRuleGroups', 'type': '[ApplicationGatewayFirewallDisabledRuleGroup]'},
        'request_body_check': {'key': 'requestBodyCheck', 'type': 'bool'},
        'max_request_body_size': {'key': 'maxRequestBodySize', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationGatewayWebApplicationFirewallConfiguration, self).__init__(**kwargs)
        self.enabled = kwargs['enabled']
        self.firewall_mode = kwargs['firewall_mode']
        self.rule_set_type = kwargs['rule_set_type']
        self.rule_set_version = kwargs['rule_set_version']
        self.disabled_rule_groups = kwargs.get('disabled_rule_groups', None)
        self.request_body_check = kwargs.get('request_body_check', None)
        self.max_request_body_size = kwargs.get('max_request_body_size', None)


class ApplicationSecurityGroup(Resource):
    """An application security group in a resource group.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar resource_guid: The resource GUID property of the application security group resource.
     It uniquely identifies a resource, even if the user changes its name or migrates the
     resource across subscriptions or resource groups.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the application security group resource.
     Possible values are: 'Succeeded', 'Updating', 'Deleting', and 'Failed'.
    :vartype provisioning_state: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationSecurityGroup, self).__init__(**kwargs)
        self.etag = None
        self.resource_guid = None
        self.provisioning_state = None
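def _example_application_security_group():
    # Editor's illustrative sketch, not generated code: only id, location and
    # tags are client-settable on ApplicationSecurityGroup; etag, resource_guid
    # and provisioning_state are marked readonly above, so __init__ pins them
    # to None and they are only filled in when a service response is
    # deserialized. The location and tag values are placeholder assumptions.
    asg = ApplicationSecurityGroup(location='westus', tags={'env': 'example'})
    assert asg.resource_guid is None  # server-populated, ignored on requests
    return asg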
class ApplicationSecurityGroupListResult(msrest.serialization.Model):
    """A list of application security groups.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of application security groups.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.ApplicationSecurityGroup]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationSecurityGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApplicationSecurityGroupListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


class AuthorizationListResult(msrest.serialization.Model):
    """Response for the ListAuthorizations API service call. Retrieves all authorizations that
    belong to an ExpressRouteCircuit.

    :param value: The authorizations in an ExpressRoute Circuit.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitAuthorization]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteCircuitAuthorization]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AuthorizationListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class Availability(msrest.serialization.Model):
    """Availability of the metric.

    :param time_grain: The time grain of the availability.
    :type time_grain: str
    :param retention: The retention of the availability.
    :type retention: str
    :param blob_duration: Duration of the availability blob.
    :type blob_duration: str
    """

    _attribute_map = {
        'time_grain': {'key': 'timeGrain', 'type': 'str'},
        'retention': {'key': 'retention', 'type': 'str'},
        'blob_duration': {'key': 'blobDuration', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(Availability, self).__init__(**kwargs)
        self.time_grain = kwargs.get('time_grain', None)
        self.retention = kwargs.get('retention', None)
        self.blob_duration = kwargs.get('blob_duration', None)


class AvailableProvidersList(msrest.serialization.Model):
    """List of available countries with details.

    All required parameters must be populated in order to send to Azure.

    :param countries: Required. List of available countries.
    :type countries: list[~azure.mgmt.network.v2018_06_01.models.AvailableProvidersListCountry]
    """

    _validation = {
        'countries': {'required': True},
    }

    _attribute_map = {
        'countries': {'key': 'countries', 'type': '[AvailableProvidersListCountry]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AvailableProvidersList, self).__init__(**kwargs)
        self.countries = kwargs['countries']


class AvailableProvidersListCity(msrest.serialization.Model):
    """City or town details.

    :param city_name: The city or town name.
    :type city_name: str
    :param providers: A list of Internet service providers.
    :type providers: list[str]
    """

    _attribute_map = {
        'city_name': {'key': 'cityName', 'type': 'str'},
        'providers': {'key': 'providers', 'type': '[str]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AvailableProvidersListCity, self).__init__(**kwargs)
        self.city_name = kwargs.get('city_name', None)
        self.providers = kwargs.get('providers', None)


class AvailableProvidersListCountry(msrest.serialization.Model):
    """Country details.

    :param country_name: The country name.
    :type country_name: str
    :param providers: A list of Internet service providers.
    :type providers: list[str]
    :param states: List of available states in the country.
:type states: list[~azure.mgmt.network.v2018_06_01.models.AvailableProvidersListState] """ _attribute_map = { 'country_name': {'key': 'countryName', 'type': 'str'}, 'providers': {'key': 'providers', 'type': '[str]'}, 'states': {'key': 'states', 'type': '[AvailableProvidersListState]'}, } def __init__( self, **kwargs ): super(AvailableProvidersListCountry, self).__init__(**kwargs) self.country_name = kwargs.get('country_name', None) self.providers = kwargs.get('providers', None) self.states = kwargs.get('states', None) class AvailableProvidersListParameters(msrest.serialization.Model): """Constraints that determine the list of available Internet service providers. :param azure_locations: A list of Azure regions. :type azure_locations: list[str] :param country: The country for available providers list. :type country: str :param state: The state for available providers list. :type state: str :param city: The city or town for available providers list. :type city: str """ _attribute_map = { 'azure_locations': {'key': 'azureLocations', 'type': '[str]'}, 'country': {'key': 'country', 'type': 'str'}, 'state': {'key': 'state', 'type': 'str'}, 'city': {'key': 'city', 'type': 'str'}, } def __init__( self, **kwargs ): super(AvailableProvidersListParameters, self).__init__(**kwargs) self.azure_locations = kwargs.get('azure_locations', None) self.country = kwargs.get('country', None) self.state = kwargs.get('state', None) self.city = kwargs.get('city', None) class AvailableProvidersListState(msrest.serialization.Model): """State details. :param state_name: The state name. :type state_name: str :param providers: A list of Internet service providers. :type providers: list[str] :param cities: List of available cities or towns in the state. :type cities: list[~azure.mgmt.network.v2018_06_01.models.AvailableProvidersListCity] """ _attribute_map = { 'state_name': {'key': 'stateName', 'type': 'str'}, 'providers': {'key': 'providers', 'type': '[str]'}, 'cities': {'key': 'cities', 'type': '[AvailableProvidersListCity]'}, } def __init__( self, **kwargs ): super(AvailableProvidersListState, self).__init__(**kwargs) self.state_name = kwargs.get('state_name', None) self.providers = kwargs.get('providers', None) self.cities = kwargs.get('cities', None) class AzureAsyncOperationResult(msrest.serialization.Model): """The response body contains the status of the specified asynchronous operation, indicating whether it has succeeded, is in progress, or has failed. Note that this status is distinct from the HTTP status code returned for the Get Operation Status operation itself. If the asynchronous operation succeeded, the response body includes the HTTP status code for the successful request. If the asynchronous operation failed, the response body includes the HTTP status code for the failed request and error information regarding the failure. :param status: Status of the Azure async operation. Possible values are: 'InProgress', 'Succeeded', and 'Failed'. Possible values include: "InProgress", "Succeeded", "Failed". :type status: str or ~azure.mgmt.network.v2018_06_01.models.NetworkOperationStatus :param error: :type error: ~azure.mgmt.network.v2018_06_01.models.Error """ _attribute_map = { 'status': {'key': 'status', 'type': 'str'}, 'error': {'key': 'error', 'type': 'Error'}, } def __init__( self, **kwargs ): super(AzureAsyncOperationResult, self).__init__(**kwargs) self.status = kwargs.get('status', None) self.error = kwargs.get('error', None) class AzureFirewall(Resource): """Azure Firewall resource. 
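    .. admonition:: Example (editor's illustrative sketch, not generated code)

        A minimal sketch of an AzureFirewall resource shell; rule collections
        are attached through the models defined further below, and the
        location value is a placeholder assumption::

            firewall = AzureFirewall(
                location='westus',
                application_rule_collections=[],
                network_rule_collections=[],
            )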
    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: Gets a unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param application_rule_collections: Collection of application rule collections used by an
     Azure Firewall.
    :type application_rule_collections:
     list[~azure.mgmt.network.v2018_06_01.models.AzureFirewallApplicationRuleCollection]
    :param network_rule_collections: Collection of network rule collections used by an Azure
     Firewall.
    :type network_rule_collections:
     list[~azure.mgmt.network.v2018_06_01.models.AzureFirewallNetworkRuleCollection]
    :param ip_configurations: IP configuration of the Azure Firewall resource.
    :type ip_configurations:
     list[~azure.mgmt.network.v2018_06_01.models.AzureFirewallIPConfiguration]
    :param provisioning_state: The provisioning state of the resource. Possible values include:
     "Succeeded", "Updating", "Deleting", "Failed".
    :type provisioning_state: str or ~azure.mgmt.network.v2018_06_01.models.ProvisioningState
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'application_rule_collections': {'key': 'properties.applicationRuleCollections', 'type': '[AzureFirewallApplicationRuleCollection]'},
        'network_rule_collections': {'key': 'properties.networkRuleCollections', 'type': '[AzureFirewallNetworkRuleCollection]'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[AzureFirewallIPConfiguration]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AzureFirewall, self).__init__(**kwargs)
        self.etag = None
        self.application_rule_collections = kwargs.get('application_rule_collections', None)
        self.network_rule_collections = kwargs.get('network_rule_collections', None)
        self.ip_configurations = kwargs.get('ip_configurations', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class AzureFirewallApplicationRule(msrest.serialization.Model):
    """Properties of an application rule.

    :param name: Name of the application rule.
    :type name: str
    :param description: Description of the rule.
    :type description: str
    :param source_addresses: List of source IP addresses for this rule.
    :type source_addresses: list[str]
    :param protocols: Array of ApplicationRuleProtocols.
    :type protocols:
     list[~azure.mgmt.network.v2018_06_01.models.AzureFirewallApplicationRuleProtocol]
    :param target_urls: List of URLs for this rule.
    :type target_urls: list[str]
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'source_addresses': {'key': 'sourceAddresses', 'type': '[str]'},
        'protocols': {'key': 'protocols', 'type': '[AzureFirewallApplicationRuleProtocol]'},
        'target_urls': {'key': 'targetUrls', 'type': '[str]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AzureFirewallApplicationRule, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.description = kwargs.get('description', None)
        self.source_addresses = kwargs.get('source_addresses', None)
        self.protocols = kwargs.get('protocols', None)
        self.target_urls = kwargs.get('target_urls', None)


class AzureFirewallApplicationRuleCollection(SubResource):
    """Application rule collection resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Gets name of the resource that is unique within a resource group. This name can
     be used to access the resource.
    :type name: str
    :ivar etag: Gets a unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param priority: Priority of the application rule collection resource.
    :type priority: int
    :param action: The action type of a rule collection.
    :type action: ~azure.mgmt.network.v2018_06_01.models.AzureFirewallRCAction
    :param rules: Collection of rules used by an application rule collection.
    :type rules: list[~azure.mgmt.network.v2018_06_01.models.AzureFirewallApplicationRule]
    :param provisioning_state: The provisioning state of the resource. Possible values include:
     "Succeeded", "Updating", "Deleting", "Failed".
    :type provisioning_state: str or ~azure.mgmt.network.v2018_06_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'priority': {'maximum': 65000, 'minimum': 100},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'action': {'key': 'properties.action', 'type': 'AzureFirewallRCAction'},
        'rules': {'key': 'properties.rules', 'type': '[AzureFirewallApplicationRule]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AzureFirewallApplicationRuleCollection, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = None
        self.priority = kwargs.get('priority', None)
        self.action = kwargs.get('action', None)
        self.rules = kwargs.get('rules', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class AzureFirewallApplicationRuleProtocol(msrest.serialization.Model):
    """Properties of the application rule protocol.

    :param protocol_type: Protocol type. Possible values include: "Http", "Https".
    :type protocol_type: str or
     ~azure.mgmt.network.v2018_06_01.models.AzureFirewallApplicationRuleProtocolType
    :param port: Port number for the protocol, cannot be greater than 64000. This field is
     optional.
    :type port: int
    """

    _validation = {
        'port': {'maximum': 64000, 'minimum': 0},
    }

    _attribute_map = {
        'protocol_type': {'key': 'protocolType', 'type': 'str'},
        'port': {'key': 'port', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AzureFirewallApplicationRuleProtocol, self).__init__(**kwargs)
        self.protocol_type = kwargs.get('protocol_type', None)
        self.port = kwargs.get('port', None)
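def _example_application_rule_collection():
    # Editor's illustrative sketch, not generated code: composes the three
    # application-rule models defined above into one collection. The names,
    # addresses and target URL are placeholder assumptions; priority must sit
    # in the validated 100-65000 range, and AzureFirewallRCAction (defined
    # later in this module) is only resolved when the function is called.
    rule = AzureFirewallApplicationRule(
        name='allow-https-example',
        source_addresses=['10.0.0.0/24'],
        protocols=[AzureFirewallApplicationRuleProtocol(protocol_type='Https', port=443)],
        target_urls=['www.example.com'],
    )
    return AzureFirewallApplicationRuleCollection(
        name='app-rules',
        priority=100,
        action=AzureFirewallRCAction(type='Allow'),
        rules=[rule],
    )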
class AzureFirewallIPConfiguration(SubResource):
    """IP configuration of an Azure Firewall.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param private_ip_address: The Firewall Internal Load Balancer IP to be used as the next hop
     in User Defined Routes.
    :type private_ip_address: str
    :param subnet: Reference of the subnet resource. This resource must be named
     'AzureFirewallSubnet'.
    :type subnet: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param internal_public_ip_address: Reference of the PublicIP resource. This field is a
     mandatory input.
    :type internal_public_ip_address: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param public_ip_address: Reference of the PublicIP resource. This field is populated in the
     output.
    :type public_ip_address: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param provisioning_state: The provisioning state of the resource. Possible values include:
     "Succeeded", "Updating", "Deleting", "Failed".
    :type provisioning_state: str or ~azure.mgmt.network.v2018_06_01.models.ProvisioningState
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
        'internal_public_ip_address': {'key': 'properties.internalPublicIpAddress', 'type': 'SubResource'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AzureFirewallIPConfiguration, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.private_ip_address = kwargs.get('private_ip_address', None)
        self.subnet = kwargs.get('subnet', None)
        self.internal_public_ip_address = kwargs.get('internal_public_ip_address', None)
        self.public_ip_address = kwargs.get('public_ip_address', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class AzureFirewallListResult(msrest.serialization.Model):
    """Response for ListAzureFirewalls API service call.

    :param value: List of Azure Firewalls in a resource group.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.AzureFirewall]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[AzureFirewall]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AzureFirewallListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class AzureFirewallNetworkRule(msrest.serialization.Model):
    """Properties of the network rule.

    :param name: Name of the network rule.
    :type name: str
    :param description: Description of the rule.
    :type description: str
    :param protocols: Array of AzureFirewallNetworkRuleProtocols.
    :type protocols: list[str or
     ~azure.mgmt.network.v2018_06_01.models.AzureFirewallNetworkRuleProtocol]
    :param source_addresses: List of source IP addresses for this rule.
    :type source_addresses: list[str]
    :param destination_addresses: List of destination IP addresses.
    :type destination_addresses: list[str]
    :param destination_ports: List of destination ports.
:type destination_ports: list[str] """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'protocols': {'key': 'protocols', 'type': '[str]'}, 'source_addresses': {'key': 'sourceAddresses', 'type': '[str]'}, 'destination_addresses': {'key': 'destinationAddresses', 'type': '[str]'}, 'destination_ports': {'key': 'destinationPorts', 'type': '[str]'}, } def __init__( self, **kwargs ): super(AzureFirewallNetworkRule, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.description = kwargs.get('description', None) self.protocols = kwargs.get('protocols', None) self.source_addresses = kwargs.get('source_addresses', None) self.destination_addresses = kwargs.get('destination_addresses', None) self.destination_ports = kwargs.get('destination_ports', None) class AzureFirewallNetworkRuleCollection(SubResource): """Network rule collection resource. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :param name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource. :type name: str :ivar etag: Gets a unique read-only string that changes whenever the resource is updated. :vartype etag: str :param priority: Priority of the network rule collection resource. :type priority: int :param action: The action type of a rule collection. :type action: ~azure.mgmt.network.v2018_06_01.models.AzureFirewallRCAction :param rules: Collection of rules used by a network rule collection. :type rules: list[~azure.mgmt.network.v2018_06_01.models.AzureFirewallNetworkRule] :param provisioning_state: The provisioning state of the resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed". :type provisioning_state: str or ~azure.mgmt.network.v2018_06_01.models.ProvisioningState """ _validation = { 'etag': {'readonly': True}, 'priority': {'maximum': 65000, 'minimum': 100}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'priority': {'key': 'properties.priority', 'type': 'int'}, 'action': {'key': 'properties.action', 'type': 'AzureFirewallRCAction'}, 'rules': {'key': 'properties.rules', 'type': '[AzureFirewallNetworkRule]'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(AzureFirewallNetworkRuleCollection, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = None self.priority = kwargs.get('priority', None) self.action = kwargs.get('action', None) self.rules = kwargs.get('rules', None) self.provisioning_state = kwargs.get('provisioning_state', None) class AzureFirewallRCAction(msrest.serialization.Model): """Properties of the AzureFirewallRCAction. :param type: The type of action. Possible values include: "Allow", "Deny". :type type: str or ~azure.mgmt.network.v2018_06_01.models.AzureFirewallRCActionType """ _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, } def __init__( self, **kwargs ): super(AzureFirewallRCAction, self).__init__(**kwargs) self.type = kwargs.get('type', None) class AzureReachabilityReport(msrest.serialization.Model): """Azure reachability report details. All required parameters must be populated in order to send to Azure. :param aggregation_level: Required. The aggregation level of Azure reachability report. Can be Country, State or City. 
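    .. admonition:: Example (editor's illustrative sketch, not generated code)

        Reachability reports are normally deserialized from service responses,
        but the three required fields can also be populated directly; the
        values are placeholder assumptions::

            report = AzureReachabilityReport(
                aggregation_level='City',
                provider_location=AzureReachabilityReportLocation(country='United States'),
                reachability_report=[],
            )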
:type aggregation_level: str :param provider_location: Required. Parameters that define a geographic location. :type provider_location: ~azure.mgmt.network.v2018_06_01.models.AzureReachabilityReportLocation :param reachability_report: Required. List of Azure reachability report items. :type reachability_report: list[~azure.mgmt.network.v2018_06_01.models.AzureReachabilityReportItem] """ _validation = { 'aggregation_level': {'required': True}, 'provider_location': {'required': True}, 'reachability_report': {'required': True}, } _attribute_map = { 'aggregation_level': {'key': 'aggregationLevel', 'type': 'str'}, 'provider_location': {'key': 'providerLocation', 'type': 'AzureReachabilityReportLocation'}, 'reachability_report': {'key': 'reachabilityReport', 'type': '[AzureReachabilityReportItem]'}, } def __init__( self, **kwargs ): super(AzureReachabilityReport, self).__init__(**kwargs) self.aggregation_level = kwargs['aggregation_level'] self.provider_location = kwargs['provider_location'] self.reachability_report = kwargs['reachability_report'] class AzureReachabilityReportItem(msrest.serialization.Model): """Azure reachability report details for a given provider location. :param provider: The Internet service provider. :type provider: str :param azure_location: The Azure region. :type azure_location: str :param latencies: List of latency details for each of the time series. :type latencies: list[~azure.mgmt.network.v2018_06_01.models.AzureReachabilityReportLatencyInfo] """ _attribute_map = { 'provider': {'key': 'provider', 'type': 'str'}, 'azure_location': {'key': 'azureLocation', 'type': 'str'}, 'latencies': {'key': 'latencies', 'type': '[AzureReachabilityReportLatencyInfo]'}, } def __init__( self, **kwargs ): super(AzureReachabilityReportItem, self).__init__(**kwargs) self.provider = kwargs.get('provider', None) self.azure_location = kwargs.get('azure_location', None) self.latencies = kwargs.get('latencies', None) class AzureReachabilityReportLatencyInfo(msrest.serialization.Model): """Details on latency for a time series. :param time_stamp: The time stamp. :type time_stamp: ~datetime.datetime :param score: The relative latency score between 1 and 100, higher values indicating a faster connection. :type score: int """ _validation = { 'score': {'maximum': 100, 'minimum': 1}, } _attribute_map = { 'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'}, 'score': {'key': 'score', 'type': 'int'}, } def __init__( self, **kwargs ): super(AzureReachabilityReportLatencyInfo, self).__init__(**kwargs) self.time_stamp = kwargs.get('time_stamp', None) self.score = kwargs.get('score', None) class AzureReachabilityReportLocation(msrest.serialization.Model): """Parameters that define a geographic location. All required parameters must be populated in order to send to Azure. :param country: Required. The name of the country. :type country: str :param state: The name of the state. :type state: str :param city: The name of the city or town. :type city: str """ _validation = { 'country': {'required': True}, } _attribute_map = { 'country': {'key': 'country', 'type': 'str'}, 'state': {'key': 'state', 'type': 'str'}, 'city': {'key': 'city', 'type': 'str'}, } def __init__( self, **kwargs ): super(AzureReachabilityReportLocation, self).__init__(**kwargs) self.country = kwargs['country'] self.state = kwargs.get('state', None) self.city = kwargs.get('city', None) class AzureReachabilityReportParameters(msrest.serialization.Model): """Geographic and time constraints for Azure reachability report. 
    All required parameters must be populated in order to send to Azure.

    :param provider_location: Required. Parameters that define a geographic location.
    :type provider_location:
     ~azure.mgmt.network.v2018_06_01.models.AzureReachabilityReportLocation
    :param providers: List of Internet service providers.
    :type providers: list[str]
    :param azure_locations: Optional Azure regions to scope the query to.
    :type azure_locations: list[str]
    :param start_time: Required. The start time for the Azure reachability report.
    :type start_time: ~datetime.datetime
    :param end_time: Required. The end time for the Azure reachability report.
    :type end_time: ~datetime.datetime
    """

    _validation = {
        'provider_location': {'required': True},
        'start_time': {'required': True},
        'end_time': {'required': True},
    }

    _attribute_map = {
        'provider_location': {'key': 'providerLocation', 'type': 'AzureReachabilityReportLocation'},
        'providers': {'key': 'providers', 'type': '[str]'},
        'azure_locations': {'key': 'azureLocations', 'type': '[str]'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AzureReachabilityReportParameters, self).__init__(**kwargs)
        self.provider_location = kwargs['provider_location']
        self.providers = kwargs.get('providers', None)
        self.azure_locations = kwargs.get('azure_locations', None)
        self.start_time = kwargs['start_time']
        self.end_time = kwargs['end_time']
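def _example_reachability_report_parameters():
    # Editor's illustrative sketch, not generated code: the three required
    # fields above are read with kwargs['...'], so all must be supplied, and
    # the start/end times are datetimes serialized as iso-8601. The country
    # and date values are placeholder assumptions.
    import datetime
    return AzureReachabilityReportParameters(
        provider_location=AzureReachabilityReportLocation(country='United States'),
        start_time=datetime.datetime(2018, 6, 1),
        end_time=datetime.datetime(2018, 6, 30),
    )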
class BackendAddressPool(SubResource):
    """Pool of backend IP addresses.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Gets name of the resource that is unique within a resource group. This name can
     be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :ivar backend_ip_configurations: Gets collection of references to IP addresses defined in
     network interfaces.
    :vartype backend_ip_configurations:
     list[~azure.mgmt.network.v2018_06_01.models.NetworkInterfaceIPConfiguration]
    :ivar load_balancing_rules: Gets load balancing rules that use this backend address pool.
    :vartype load_balancing_rules: list[~azure.mgmt.network.v2018_06_01.models.SubResource]
    :ivar outbound_nat_rule: Gets outbound rules that use this backend address pool.
    :vartype outbound_nat_rule: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param provisioning_state: Get provisioning state of the backend address pool resource.
     Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _validation = {
        'backend_ip_configurations': {'readonly': True},
        'load_balancing_rules': {'readonly': True},
        'outbound_nat_rule': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'backend_ip_configurations': {'key': 'properties.backendIPConfigurations', 'type': '[NetworkInterfaceIPConfiguration]'},
        'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[SubResource]'},
        'outbound_nat_rule': {'key': 'properties.outboundNatRule', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(BackendAddressPool, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.backend_ip_configurations = None
        self.load_balancing_rules = None
        self.outbound_nat_rule = None
        self.provisioning_state = kwargs.get('provisioning_state', None)


class BGPCommunity(msrest.serialization.Model):
    """Contains bgp community information offered in Service Community resources.

    :param service_supported_region: The region which the service supports. e.g. For O365, the
     region is Global.
    :type service_supported_region: str
    :param community_name: The name of the bgp community. e.g. Skype.
    :type community_name: str
    :param community_value: The value of the bgp community. For more information:
     https://docs.microsoft.com/en-us/azure/expressroute/expressroute-routing.
    :type community_value: str
    :param community_prefixes: The prefixes that the bgp community contains.
    :type community_prefixes: list[str]
    :param is_authorized_to_use: Whether the customer is authorized to use the bgp community or
     not.
    :type is_authorized_to_use: bool
    :param service_group: The service group of the bgp community.
    :type service_group: str
    """

    _attribute_map = {
        'service_supported_region': {'key': 'serviceSupportedRegion', 'type': 'str'},
        'community_name': {'key': 'communityName', 'type': 'str'},
        'community_value': {'key': 'communityValue', 'type': 'str'},
        'community_prefixes': {'key': 'communityPrefixes', 'type': '[str]'},
        'is_authorized_to_use': {'key': 'isAuthorizedToUse', 'type': 'bool'},
        'service_group': {'key': 'serviceGroup', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(BGPCommunity, self).__init__(**kwargs)
        self.service_supported_region = kwargs.get('service_supported_region', None)
        self.community_name = kwargs.get('community_name', None)
        self.community_value = kwargs.get('community_value', None)
        self.community_prefixes = kwargs.get('community_prefixes', None)
        self.is_authorized_to_use = kwargs.get('is_authorized_to_use', None)
        self.service_group = kwargs.get('service_group', None)


class BgpPeerStatus(msrest.serialization.Model):
    """BGP peer status details.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar local_address: The virtual network gateway's local address.
    :vartype local_address: str
    :ivar neighbor: The remote BGP peer.
    :vartype neighbor: str
    :ivar asn: The autonomous system number of the remote BGP peer.
    :vartype asn: int
    :ivar state: The BGP peer state. Possible values include: "Unknown", "Stopped", "Idle",
     "Connecting", "Connected".
    :vartype state: str or ~azure.mgmt.network.v2018_06_01.models.BgpPeerState
    :ivar connected_duration: For how long the peering has been up.
    :vartype connected_duration: str
    :ivar routes_received: The number of routes learned from this peer.
:vartype routes_received: long :ivar messages_sent: The number of BGP messages sent. :vartype messages_sent: long :ivar messages_received: The number of BGP messages received. :vartype messages_received: long """ _validation = { 'local_address': {'readonly': True}, 'neighbor': {'readonly': True}, 'asn': {'readonly': True}, 'state': {'readonly': True}, 'connected_duration': {'readonly': True}, 'routes_received': {'readonly': True}, 'messages_sent': {'readonly': True}, 'messages_received': {'readonly': True}, } _attribute_map = { 'local_address': {'key': 'localAddress', 'type': 'str'}, 'neighbor': {'key': 'neighbor', 'type': 'str'}, 'asn': {'key': 'asn', 'type': 'int'}, 'state': {'key': 'state', 'type': 'str'}, 'connected_duration': {'key': 'connectedDuration', 'type': 'str'}, 'routes_received': {'key': 'routesReceived', 'type': 'long'}, 'messages_sent': {'key': 'messagesSent', 'type': 'long'}, 'messages_received': {'key': 'messagesReceived', 'type': 'long'}, } def __init__( self, **kwargs ): super(BgpPeerStatus, self).__init__(**kwargs) self.local_address = None self.neighbor = None self.asn = None self.state = None self.connected_duration = None self.routes_received = None self.messages_sent = None self.messages_received = None class BgpPeerStatusListResult(msrest.serialization.Model): """Response for list BGP peer status API service call. :param value: List of BGP peers. :type value: list[~azure.mgmt.network.v2018_06_01.models.BgpPeerStatus] """ _attribute_map = { 'value': {'key': 'value', 'type': '[BgpPeerStatus]'}, } def __init__( self, **kwargs ): super(BgpPeerStatusListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) class BgpServiceCommunity(Resource): """Service Community Properties. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param location: Resource location. :type location: str :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param service_name: The name of the bgp community. e.g. Skype. :type service_name: str :param bgp_communities: Get a list of bgp communities. :type bgp_communities: list[~azure.mgmt.network.v2018_06_01.models.BGPCommunity] """ _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'service_name': {'key': 'properties.serviceName', 'type': 'str'}, 'bgp_communities': {'key': 'properties.bgpCommunities', 'type': '[BGPCommunity]'}, } def __init__( self, **kwargs ): super(BgpServiceCommunity, self).__init__(**kwargs) self.service_name = kwargs.get('service_name', None) self.bgp_communities = kwargs.get('bgp_communities', None) class BgpServiceCommunityListResult(msrest.serialization.Model): """Response for the ListServiceCommunity API service call. :param value: A list of service community resources. :type value: list[~azure.mgmt.network.v2018_06_01.models.BgpServiceCommunity] :param next_link: The URL to get the next set of results. 
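    .. admonition:: Example (editor's illustrative sketch, not generated code)

        List results such as this one are usually built by deserializing a
        service payload; msrest's Model.deserialize can rebuild one from a raw
        dict (the payload below is a made-up assumption)::

            result = BgpServiceCommunityListResult.deserialize(
                {'value': [], 'nextLink': None}
            )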
:type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[BgpServiceCommunity]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(BgpServiceCommunityListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class BgpSettings(msrest.serialization.Model): """BGP settings details. :param asn: The BGP speaker's ASN. :type asn: long :param bgp_peering_address: The BGP peering address and BGP identifier of this BGP speaker. :type bgp_peering_address: str :param peer_weight: The weight added to routes learned from this BGP speaker. :type peer_weight: int """ _attribute_map = { 'asn': {'key': 'asn', 'type': 'long'}, 'bgp_peering_address': {'key': 'bgpPeeringAddress', 'type': 'str'}, 'peer_weight': {'key': 'peerWeight', 'type': 'int'}, } def __init__( self, **kwargs ): super(BgpSettings, self).__init__(**kwargs) self.asn = kwargs.get('asn', None) self.bgp_peering_address = kwargs.get('bgp_peering_address', None) self.peer_weight = kwargs.get('peer_weight', None) class ConnectionMonitor(msrest.serialization.Model): """Parameters that define the operation to create a connection monitor. All required parameters must be populated in order to send to Azure. :param location: Connection monitor location. :type location: str :param tags: A set of tags. Connection monitor tags. :type tags: dict[str, str] :param source: Required. Describes the source of connection monitor. :type source: ~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorSource :param destination: Required. Describes the destination of connection monitor. :type destination: ~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorDestination :param auto_start: Determines if the connection monitor will start automatically once created. :type auto_start: bool :param monitoring_interval_in_seconds: Monitoring interval in seconds. :type monitoring_interval_in_seconds: int """ _validation = { 'source': {'required': True}, 'destination': {'required': True}, } _attribute_map = { 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'source': {'key': 'properties.source', 'type': 'ConnectionMonitorSource'}, 'destination': {'key': 'properties.destination', 'type': 'ConnectionMonitorDestination'}, 'auto_start': {'key': 'properties.autoStart', 'type': 'bool'}, 'monitoring_interval_in_seconds': {'key': 'properties.monitoringIntervalInSeconds', 'type': 'int'}, } def __init__( self, **kwargs ): super(ConnectionMonitor, self).__init__(**kwargs) self.location = kwargs.get('location', None) self.tags = kwargs.get('tags', None) self.source = kwargs['source'] self.destination = kwargs['destination'] self.auto_start = kwargs.get('auto_start', True) self.monitoring_interval_in_seconds = kwargs.get('monitoring_interval_in_seconds', 60) class ConnectionMonitorDestination(msrest.serialization.Model): """Describes the destination of connection monitor. :param resource_id: The ID of the resource used as the destination by connection monitor. :type resource_id: str :param address: Address of the connection monitor destination (IP or domain name). :type address: str :param port: The destination port used by connection monitor. 
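    .. admonition:: Example (editor's illustrative sketch, not generated code)

        A destination paired with the ConnectionMonitor model defined above;
        auto_start and monitoring_interval_in_seconds fall back to True and 60
        in ConnectionMonitor.__init__. The resource ID and address are
        placeholder assumptions::

            monitor = ConnectionMonitor(
                source=ConnectionMonitorSource(resource_id='<network-watcher-vm-resource-id>'),
                destination=ConnectionMonitorDestination(address='www.example.com', port=443),
            )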
:type port: int """ _attribute_map = { 'resource_id': {'key': 'resourceId', 'type': 'str'}, 'address': {'key': 'address', 'type': 'str'}, 'port': {'key': 'port', 'type': 'int'}, } def __init__( self, **kwargs ): super(ConnectionMonitorDestination, self).__init__(**kwargs) self.resource_id = kwargs.get('resource_id', None) self.address = kwargs.get('address', None) self.port = kwargs.get('port', None) class ConnectionMonitorListResult(msrest.serialization.Model): """List of connection monitors. :param value: Information about connection monitors. :type value: list[~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorResult] """ _attribute_map = { 'value': {'key': 'value', 'type': '[ConnectionMonitorResult]'}, } def __init__( self, **kwargs ): super(ConnectionMonitorListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) class ConnectionMonitorParameters(msrest.serialization.Model): """Parameters that define the operation to create a connection monitor. All required parameters must be populated in order to send to Azure. :param source: Required. Describes the source of connection monitor. :type source: ~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorSource :param destination: Required. Describes the destination of connection monitor. :type destination: ~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorDestination :param auto_start: Determines if the connection monitor will start automatically once created. :type auto_start: bool :param monitoring_interval_in_seconds: Monitoring interval in seconds. :type monitoring_interval_in_seconds: int """ _validation = { 'source': {'required': True}, 'destination': {'required': True}, } _attribute_map = { 'source': {'key': 'source', 'type': 'ConnectionMonitorSource'}, 'destination': {'key': 'destination', 'type': 'ConnectionMonitorDestination'}, 'auto_start': {'key': 'autoStart', 'type': 'bool'}, 'monitoring_interval_in_seconds': {'key': 'monitoringIntervalInSeconds', 'type': 'int'}, } def __init__( self, **kwargs ): super(ConnectionMonitorParameters, self).__init__(**kwargs) self.source = kwargs['source'] self.destination = kwargs['destination'] self.auto_start = kwargs.get('auto_start', True) self.monitoring_interval_in_seconds = kwargs.get('monitoring_interval_in_seconds', 60) class ConnectionMonitorQueryResult(msrest.serialization.Model): """List of connection states snapshots. :param source_status: Status of connection monitor source. Possible values include: "Uknown", "Active", "Inactive". :type source_status: str or ~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorSourceStatus :param states: Information about connection states. :type states: list[~azure.mgmt.network.v2018_06_01.models.ConnectionStateSnapshot] """ _attribute_map = { 'source_status': {'key': 'sourceStatus', 'type': 'str'}, 'states': {'key': 'states', 'type': '[ConnectionStateSnapshot]'}, } def __init__( self, **kwargs ): super(ConnectionMonitorQueryResult, self).__init__(**kwargs) self.source_status = kwargs.get('source_status', None) self.states = kwargs.get('states', None) class ConnectionMonitorResult(msrest.serialization.Model): """Information about the connection monitor. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: Name of the connection monitor. :vartype name: str :ivar id: ID of the connection monitor. :vartype id: str :param etag: :type etag: str :ivar type: Connection monitor type. :vartype type: str :param location: Connection monitor location. 
    :type location: str
    :param tags: A set of tags. Connection monitor tags.
    :type tags: dict[str, str]
    :param source: Describes the source of connection monitor.
    :type source: ~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorSource
    :param destination: Describes the destination of connection monitor.
    :type destination: ~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorDestination
    :param auto_start: Determines if the connection monitor will start automatically once
     created.
    :type auto_start: bool
    :param monitoring_interval_in_seconds: Monitoring interval in seconds.
    :type monitoring_interval_in_seconds: int
    :param provisioning_state: The provisioning state of the connection monitor. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :type provisioning_state: str or ~azure.mgmt.network.v2018_06_01.models.ProvisioningState
    :param start_time: The date and time when the connection monitor was started.
    :type start_time: ~datetime.datetime
    :param monitoring_status: The monitoring status of the connection monitor.
    :type monitoring_status: str
    """

    _validation = {
        'name': {'readonly': True},
        'id': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'source': {'key': 'properties.source', 'type': 'ConnectionMonitorSource'},
        'destination': {'key': 'properties.destination', 'type': 'ConnectionMonitorDestination'},
        'auto_start': {'key': 'properties.autoStart', 'type': 'bool'},
        'monitoring_interval_in_seconds': {'key': 'properties.monitoringIntervalInSeconds', 'type': 'int'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
        'monitoring_status': {'key': 'properties.monitoringStatus', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ConnectionMonitorResult, self).__init__(**kwargs)
        self.name = None
        self.id = None
        self.etag = kwargs.get('etag', None)
        self.type = None
        self.location = kwargs.get('location', None)
        self.tags = kwargs.get('tags', None)
        self.source = kwargs.get('source', None)
        self.destination = kwargs.get('destination', None)
        self.auto_start = kwargs.get('auto_start', True)
        self.monitoring_interval_in_seconds = kwargs.get('monitoring_interval_in_seconds', 60)
        self.provisioning_state = kwargs.get('provisioning_state', None)
        self.start_time = kwargs.get('start_time', None)
        self.monitoring_status = kwargs.get('monitoring_status', None)


class ConnectionMonitorResultProperties(ConnectionMonitorParameters):
    """Describes the properties of a connection monitor.

    All required parameters must be populated in order to send to Azure.

    :param source: Required. Describes the source of connection monitor.
    :type source: ~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorSource
    :param destination: Required. Describes the destination of connection monitor.
    :type destination: ~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorDestination
    :param auto_start: Determines if the connection monitor will start automatically once
     created.
    :type auto_start: bool
    :param monitoring_interval_in_seconds: Monitoring interval in seconds.
    :type monitoring_interval_in_seconds: int
    :param provisioning_state: The provisioning state of the connection monitor. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
Possible values include: "Succeeded", "Updating", "Deleting", "Failed". :type provisioning_state: str or ~azure.mgmt.network.v2018_06_01.models.ProvisioningState :param start_time: The date and time when the connection monitor was started. :type start_time: ~datetime.datetime :param monitoring_status: The monitoring status of the connection monitor. :type monitoring_status: str """ _validation = { 'source': {'required': True}, 'destination': {'required': True}, } _attribute_map = { 'source': {'key': 'source', 'type': 'ConnectionMonitorSource'}, 'destination': {'key': 'destination', 'type': 'ConnectionMonitorDestination'}, 'auto_start': {'key': 'autoStart', 'type': 'bool'}, 'monitoring_interval_in_seconds': {'key': 'monitoringIntervalInSeconds', 'type': 'int'}, 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, 'monitoring_status': {'key': 'monitoringStatus', 'type': 'str'}, } def __init__( self, **kwargs ): super(ConnectionMonitorResultProperties, self).__init__(**kwargs) self.provisioning_state = kwargs.get('provisioning_state', None) self.start_time = kwargs.get('start_time', None) self.monitoring_status = kwargs.get('monitoring_status', None) class ConnectionMonitorSource(msrest.serialization.Model): """Describes the source of connection monitor. All required parameters must be populated in order to send to Azure. :param resource_id: Required. The ID of the resource used as the source by connection monitor. :type resource_id: str :param port: The source port used by connection monitor. :type port: int """ _validation = { 'resource_id': {'required': True}, } _attribute_map = { 'resource_id': {'key': 'resourceId', 'type': 'str'}, 'port': {'key': 'port', 'type': 'int'}, } def __init__( self, **kwargs ): super(ConnectionMonitorSource, self).__init__(**kwargs) self.resource_id = kwargs['resource_id'] self.port = kwargs.get('port', None) class ConnectionResetSharedKey(msrest.serialization.Model): """The virtual network connection reset shared key. All required parameters must be populated in order to send to Azure. :param key_length: Required. The virtual network connection reset shared key length, should between 1 and 128. :type key_length: int """ _validation = { 'key_length': {'required': True, 'maximum': 128, 'minimum': 1}, } _attribute_map = { 'key_length': {'key': 'keyLength', 'type': 'int'}, } def __init__( self, **kwargs ): super(ConnectionResetSharedKey, self).__init__(**kwargs) self.key_length = kwargs['key_length'] class ConnectionSharedKey(SubResource): """Response for GetConnectionSharedKey API service call. All required parameters must be populated in order to send to Azure. :param id: Resource ID. :type id: str :param value: Required. The virtual network connection shared key value. :type value: str """ _validation = { 'value': {'required': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'value': {'key': 'value', 'type': 'str'}, } def __init__( self, **kwargs ): super(ConnectionSharedKey, self).__init__(**kwargs) self.value = kwargs['value'] class ConnectionStateSnapshot(msrest.serialization.Model): """Connection state snapshot. Variables are only populated by the server, and will be ignored when sending a request. :param connection_state: The connection state. Possible values include: "Reachable", "Unreachable", "Unknown". :type connection_state: str or ~azure.mgmt.network.v2018_06_01.models.ConnectionState :param start_time: The start time of the connection snapshot. 
class ConnectionStateSnapshot(msrest.serialization.Model):
    """Connection state snapshot.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param connection_state: The connection state. Possible values include: "Reachable",
     "Unreachable", "Unknown".
    :type connection_state: str or ~azure.mgmt.network.v2018_06_01.models.ConnectionState
    :param start_time: The start time of the connection snapshot.
    :type start_time: ~datetime.datetime
    :param end_time: The end time of the connection snapshot.
    :type end_time: ~datetime.datetime
    :param evaluation_state: Connectivity analysis evaluation state. Possible values include:
     "NotStarted", "InProgress", "Completed".
    :type evaluation_state: str or ~azure.mgmt.network.v2018_06_01.models.EvaluationState
    :param avg_latency_in_ms: Average latency in ms.
    :type avg_latency_in_ms: int
    :param min_latency_in_ms: Minimum latency in ms.
    :type min_latency_in_ms: int
    :param max_latency_in_ms: Maximum latency in ms.
    :type max_latency_in_ms: int
    :param probes_sent: The number of sent probes.
    :type probes_sent: int
    :param probes_failed: The number of failed probes.
    :type probes_failed: int
    :ivar hops: List of hops between the source and the destination.
    :vartype hops: list[~azure.mgmt.network.v2018_06_01.models.ConnectivityHop]
    """

    _validation = {
        'hops': {'readonly': True},
    }

    _attribute_map = {
        'connection_state': {'key': 'connectionState', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'evaluation_state': {'key': 'evaluationState', 'type': 'str'},
        'avg_latency_in_ms': {'key': 'avgLatencyInMs', 'type': 'int'},
        'min_latency_in_ms': {'key': 'minLatencyInMs', 'type': 'int'},
        'max_latency_in_ms': {'key': 'maxLatencyInMs', 'type': 'int'},
        'probes_sent': {'key': 'probesSent', 'type': 'int'},
        'probes_failed': {'key': 'probesFailed', 'type': 'int'},
        'hops': {'key': 'hops', 'type': '[ConnectivityHop]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ConnectionStateSnapshot, self).__init__(**kwargs)
        self.connection_state = kwargs.get('connection_state', None)
        self.start_time = kwargs.get('start_time', None)
        self.end_time = kwargs.get('end_time', None)
        self.evaluation_state = kwargs.get('evaluation_state', None)
        self.avg_latency_in_ms = kwargs.get('avg_latency_in_ms', None)
        self.min_latency_in_ms = kwargs.get('min_latency_in_ms', None)
        self.max_latency_in_ms = kwargs.get('max_latency_in_ms', None)
        self.probes_sent = kwargs.get('probes_sent', None)
        self.probes_failed = kwargs.get('probes_failed', None)
        self.hops = None


class ConnectivityDestination(msrest.serialization.Model):
    """Parameters that define the destination of connection.

    :param resource_id: The ID of the resource to which a connection attempt will be made.
    :type resource_id: str
    :param address: The IP address or URI of the resource to which a connection attempt will be
     made.
    :type address: str
    :param port: Port on which check connectivity will be performed.
    :type port: int
    """

    _attribute_map = {
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'address': {'key': 'address', 'type': 'str'},
        'port': {'key': 'port', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ConnectivityDestination, self).__init__(**kwargs)
        self.resource_id = kwargs.get('resource_id', None)
        self.address = kwargs.get('address', None)
        self.port = kwargs.get('port', None)


class ConnectivityHop(msrest.serialization.Model):
    """Information about a hop between the source and the destination.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar type: The type of the hop.
    :vartype type: str
    :ivar id: The ID of the hop.
    :vartype id: str
    :ivar address: The IP address of the hop.
    :vartype address: str
    :ivar resource_id: The ID of the resource corresponding to this hop.
    :vartype resource_id: str
    :ivar next_hop_ids: List of next hop identifiers.
    :vartype next_hop_ids: list[str]
    :ivar issues: List of issues.
:vartype issues: list[~azure.mgmt.network.v2018_06_01.models.ConnectivityIssue] """ _validation = { 'type': {'readonly': True}, 'id': {'readonly': True}, 'address': {'readonly': True}, 'resource_id': {'readonly': True}, 'next_hop_ids': {'readonly': True}, 'issues': {'readonly': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'address': {'key': 'address', 'type': 'str'}, 'resource_id': {'key': 'resourceId', 'type': 'str'}, 'next_hop_ids': {'key': 'nextHopIds', 'type': '[str]'}, 'issues': {'key': 'issues', 'type': '[ConnectivityIssue]'}, } def __init__( self, **kwargs ): super(ConnectivityHop, self).__init__(**kwargs) self.type = None self.id = None self.address = None self.resource_id = None self.next_hop_ids = None self.issues = None class ConnectivityInformation(msrest.serialization.Model): """Information on the connectivity status. Variables are only populated by the server, and will be ignored when sending a request. :ivar hops: List of hops between the source and the destination. :vartype hops: list[~azure.mgmt.network.v2018_06_01.models.ConnectivityHop] :ivar connection_status: The connection status. Possible values include: "Unknown", "Connected", "Disconnected", "Degraded". :vartype connection_status: str or ~azure.mgmt.network.v2018_06_01.models.ConnectionStatus :ivar avg_latency_in_ms: Average latency in milliseconds. :vartype avg_latency_in_ms: int :ivar min_latency_in_ms: Minimum latency in milliseconds. :vartype min_latency_in_ms: int :ivar max_latency_in_ms: Maximum latency in milliseconds. :vartype max_latency_in_ms: int :ivar probes_sent: Total number of probes sent. :vartype probes_sent: int :ivar probes_failed: Number of failed probes. :vartype probes_failed: int """ _validation = { 'hops': {'readonly': True}, 'connection_status': {'readonly': True}, 'avg_latency_in_ms': {'readonly': True}, 'min_latency_in_ms': {'readonly': True}, 'max_latency_in_ms': {'readonly': True}, 'probes_sent': {'readonly': True}, 'probes_failed': {'readonly': True}, } _attribute_map = { 'hops': {'key': 'hops', 'type': '[ConnectivityHop]'}, 'connection_status': {'key': 'connectionStatus', 'type': 'str'}, 'avg_latency_in_ms': {'key': 'avgLatencyInMs', 'type': 'int'}, 'min_latency_in_ms': {'key': 'minLatencyInMs', 'type': 'int'}, 'max_latency_in_ms': {'key': 'maxLatencyInMs', 'type': 'int'}, 'probes_sent': {'key': 'probesSent', 'type': 'int'}, 'probes_failed': {'key': 'probesFailed', 'type': 'int'}, } def __init__( self, **kwargs ): super(ConnectivityInformation, self).__init__(**kwargs) self.hops = None self.connection_status = None self.avg_latency_in_ms = None self.min_latency_in_ms = None self.max_latency_in_ms = None self.probes_sent = None self.probes_failed = None class ConnectivityIssue(msrest.serialization.Model): """Information about an issue encountered in the process of checking for connectivity. Variables are only populated by the server, and will be ignored when sending a request. :ivar origin: The origin of the issue. Possible values include: "Local", "Inbound", "Outbound". :vartype origin: str or ~azure.mgmt.network.v2018_06_01.models.Origin :ivar severity: The severity of the issue. Possible values include: "Error", "Warning". :vartype severity: str or ~azure.mgmt.network.v2018_06_01.models.Severity :ivar type: The type of issue. Possible values include: "Unknown", "AgentStopped", "GuestFirewall", "DnsResolution", "SocketBind", "NetworkSecurityRule", "UserDefinedRoute", "PortThrottled", "Platform". 
    :vartype type: str or ~azure.mgmt.network.v2018_06_01.models.IssueType
    :ivar context: Provides additional context on the issue.
    :vartype context: list[dict[str, str]]
    """

    _validation = {
        'origin': {'readonly': True},
        'severity': {'readonly': True},
        'type': {'readonly': True},
        'context': {'readonly': True},
    }

    _attribute_map = {
        'origin': {'key': 'origin', 'type': 'str'},
        'severity': {'key': 'severity', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'context': {'key': 'context', 'type': '[{str}]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ConnectivityIssue, self).__init__(**kwargs)
        self.origin = None
        self.severity = None
        self.type = None
        self.context = None


class ConnectivityParameters(msrest.serialization.Model):
    """Parameters that determine how the connectivity check will be performed.

    All required parameters must be populated in order to send to Azure.

    :param source: Required. Parameters that define the source of the connection.
    :type source: ~azure.mgmt.network.v2018_06_01.models.ConnectivitySource
    :param destination: Required. Parameters that define the destination of connection.
    :type destination: ~azure.mgmt.network.v2018_06_01.models.ConnectivityDestination
    :param protocol: Network protocol. Possible values include: "Tcp", "Http", "Https", "Icmp".
    :type protocol: str or ~azure.mgmt.network.v2018_06_01.models.Protocol
    :param protocol_configuration: Configuration of the protocol.
    :type protocol_configuration: ~azure.mgmt.network.v2018_06_01.models.ProtocolConfiguration
    """

    _validation = {
        'source': {'required': True},
        'destination': {'required': True},
    }

    _attribute_map = {
        'source': {'key': 'source', 'type': 'ConnectivitySource'},
        'destination': {'key': 'destination', 'type': 'ConnectivityDestination'},
        'protocol': {'key': 'protocol', 'type': 'str'},
        'protocol_configuration': {'key': 'protocolConfiguration', 'type': 'ProtocolConfiguration'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ConnectivityParameters, self).__init__(**kwargs)
        self.source = kwargs['source']
        self.destination = kwargs['destination']
        self.protocol = kwargs.get('protocol', None)
        self.protocol_configuration = kwargs.get('protocol_configuration', None)


class ConnectivitySource(msrest.serialization.Model):
    """Parameters that define the source of the connection.

    All required parameters must be populated in order to send to Azure.

    :param resource_id: Required. The ID of the resource from which a connectivity check will be
     initiated.
    :type resource_id: str
    :param port: The source port from which a connectivity check will be performed.
    :type port: int
    """

    _validation = {
        'resource_id': {'required': True},
    }

    _attribute_map = {
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'port': {'key': 'port', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ConnectivitySource, self).__init__(**kwargs)
        self.resource_id = kwargs['resource_id']
        self.port = kwargs.get('port', None)
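
# --- Illustrative sketch (editor-added; not part of the generated code).
# Building the payload for a connectivity check: the source must name a
# resource_id (the generated __init__ raises KeyError without it), while the
# destination may be given by resource ID or by address/port. The IDs are
# hypothetical placeholders.
def _example_connectivity_parameters():
    params = ConnectivityParameters(
        source=ConnectivitySource(
            resource_id='/subscriptions/xxx/resourceGroups/rg/providers/'
                        'Microsoft.Compute/virtualMachines/probe-vm',
        ),
        destination=ConnectivityDestination(address='10.0.0.4', port=22),
        protocol='Tcp',
    )
    return params.serialize()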
class DdosProtectionPlan(msrest.serialization.Model):
    """A DDoS protection plan in a resource group.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar resource_guid: The resource GUID property of the DDoS protection plan resource. It
     uniquely identifies the resource, even if the user changes its name or migrates the resource
     across subscriptions or resource groups.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the DDoS protection plan resource.
     Possible values are: 'Succeeded', 'Updating', 'Deleting', and 'Failed'.
    :vartype provisioning_state: str
    :ivar virtual_networks: The list of virtual networks associated with the DDoS protection plan
     resource. This list is read-only.
    :vartype virtual_networks: list[~azure.mgmt.network.v2018_06_01.models.SubResource]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'virtual_networks': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'virtual_networks': {'key': 'properties.virtualNetworks', 'type': '[SubResource]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(DdosProtectionPlan, self).__init__(**kwargs)
        self.id = None
        self.name = None
        self.type = None
        self.location = kwargs.get('location', None)
        self.tags = kwargs.get('tags', None)
        self.etag = None
        self.resource_guid = None
        self.provisioning_state = None
        self.virtual_networks = None


class DdosProtectionPlanListResult(msrest.serialization.Model):
    """A list of DDoS protection plans.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of DDoS protection plans.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.DdosProtectionPlan]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[DdosProtectionPlan]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(DdosProtectionPlanListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


class DeviceProperties(msrest.serialization.Model):
    """List of properties of the device.

    :param device_vendor: Name of the device Vendor.
    :type device_vendor: str
    :param device_model: Model of the device.
    :type device_model: str
    :param link_speed_in_mbps: Link speed.
    :type link_speed_in_mbps: int
    """

    _attribute_map = {
        'device_vendor': {'key': 'deviceVendor', 'type': 'str'},
        'device_model': {'key': 'deviceModel', 'type': 'str'},
        'link_speed_in_mbps': {'key': 'linkSpeedInMbps', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(DeviceProperties, self).__init__(**kwargs)
        self.device_vendor = kwargs.get('device_vendor', None)
        self.device_model = kwargs.get('device_model', None)
        self.link_speed_in_mbps = kwargs.get('link_speed_in_mbps', None)


class DhcpOptions(msrest.serialization.Model):
    """DhcpOptions contains an array of DNS servers available to VMs deployed in the virtual
    network. Standard DHCP option for a subnet overrides VNET DHCP options.

    :param dns_servers: The list of DNS servers IP addresses.
:type dns_servers: list[str] """ _attribute_map = { 'dns_servers': {'key': 'dnsServers', 'type': '[str]'}, } def __init__( self, **kwargs ): super(DhcpOptions, self).__init__(**kwargs) self.dns_servers = kwargs.get('dns_servers', None) class Dimension(msrest.serialization.Model): """Dimension of the metric. :param name: The name of the dimension. :type name: str :param display_name: The display name of the dimension. :type display_name: str :param internal_name: The internal name of the dimension. :type internal_name: str """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'internal_name': {'key': 'internalName', 'type': 'str'}, } def __init__( self, **kwargs ): super(Dimension, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.display_name = kwargs.get('display_name', None) self.internal_name = kwargs.get('internal_name', None) class DnsNameAvailabilityResult(msrest.serialization.Model): """Response for the CheckDnsNameAvailability API service call. :param available: Domain availability (True/False). :type available: bool """ _attribute_map = { 'available': {'key': 'available', 'type': 'bool'}, } def __init__( self, **kwargs ): super(DnsNameAvailabilityResult, self).__init__(**kwargs) self.available = kwargs.get('available', None) class EffectiveNetworkSecurityGroup(msrest.serialization.Model): """Effective network security group. :param network_security_group: The ID of network security group that is applied. :type network_security_group: ~azure.mgmt.network.v2018_06_01.models.SubResource :param association: Associated resources. :type association: ~azure.mgmt.network.v2018_06_01.models.EffectiveNetworkSecurityGroupAssociation :param effective_security_rules: A collection of effective security rules. :type effective_security_rules: list[~azure.mgmt.network.v2018_06_01.models.EffectiveNetworkSecurityRule] :param tag_map: Mapping of tags to list of IP Addresses included within the tag. :type tag_map: str """ _attribute_map = { 'network_security_group': {'key': 'networkSecurityGroup', 'type': 'SubResource'}, 'association': {'key': 'association', 'type': 'EffectiveNetworkSecurityGroupAssociation'}, 'effective_security_rules': {'key': 'effectiveSecurityRules', 'type': '[EffectiveNetworkSecurityRule]'}, 'tag_map': {'key': 'tagMap', 'type': 'str'}, } def __init__( self, **kwargs ): super(EffectiveNetworkSecurityGroup, self).__init__(**kwargs) self.network_security_group = kwargs.get('network_security_group', None) self.association = kwargs.get('association', None) self.effective_security_rules = kwargs.get('effective_security_rules', None) self.tag_map = kwargs.get('tag_map', None) class EffectiveNetworkSecurityGroupAssociation(msrest.serialization.Model): """The effective network security group association. :param subnet: The ID of the subnet if assigned. :type subnet: ~azure.mgmt.network.v2018_06_01.models.SubResource :param network_interface: The ID of the network interface if assigned. 
:type network_interface: ~azure.mgmt.network.v2018_06_01.models.SubResource """ _attribute_map = { 'subnet': {'key': 'subnet', 'type': 'SubResource'}, 'network_interface': {'key': 'networkInterface', 'type': 'SubResource'}, } def __init__( self, **kwargs ): super(EffectiveNetworkSecurityGroupAssociation, self).__init__(**kwargs) self.subnet = kwargs.get('subnet', None) self.network_interface = kwargs.get('network_interface', None) class EffectiveNetworkSecurityGroupListResult(msrest.serialization.Model): """Response for list effective network security groups API service call. Variables are only populated by the server, and will be ignored when sending a request. :param value: A list of effective network security groups. :type value: list[~azure.mgmt.network.v2018_06_01.models.EffectiveNetworkSecurityGroup] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _validation = { 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[EffectiveNetworkSecurityGroup]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(EffectiveNetworkSecurityGroupListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = None class EffectiveNetworkSecurityRule(msrest.serialization.Model): """Effective network security rules. :param name: The name of the security rule specified by the user (if created by the user). :type name: str :param protocol: The network protocol this rule applies to. Possible values are: 'Tcp', 'Udp', and 'All'. Possible values include: "Tcp", "Udp", "All". :type protocol: str or ~azure.mgmt.network.v2018_06_01.models.EffectiveSecurityRuleProtocol :param source_port_range: The source port or range. :type source_port_range: str :param destination_port_range: The destination port or range. :type destination_port_range: str :param source_port_ranges: The source port ranges. Expected values include a single integer between 0 and 65535, a range using '-' as separator (e.g. 100-400), or an asterisk (*). :type source_port_ranges: list[str] :param destination_port_ranges: The destination port ranges. Expected values include a single integer between 0 and 65535, a range using '-' as separator (e.g. 100-400), or an asterisk (*). :type destination_port_ranges: list[str] :param source_address_prefix: The source address prefix. :type source_address_prefix: str :param destination_address_prefix: The destination address prefix. :type destination_address_prefix: str :param source_address_prefixes: The source address prefixes. Expected values include CIDR IP ranges, Default Tags (VirtualNetwork, AzureLoadBalancer, Internet), System Tags, and the asterisk (*). :type source_address_prefixes: list[str] :param destination_address_prefixes: The destination address prefixes. Expected values include CIDR IP ranges, Default Tags (VirtualNetwork, AzureLoadBalancer, Internet), System Tags, and the asterisk (*). :type destination_address_prefixes: list[str] :param expanded_source_address_prefix: The expanded source address prefix. :type expanded_source_address_prefix: list[str] :param expanded_destination_address_prefix: Expanded destination address prefix. :type expanded_destination_address_prefix: list[str] :param access: Whether network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'. Possible values include: "Allow", "Deny". :type access: str or ~azure.mgmt.network.v2018_06_01.models.SecurityRuleAccess :param priority: The priority of the rule. 
    :type priority: int
    :param direction: The direction of the rule. Possible values are: 'Inbound' and 'Outbound'.
     Possible values include: "Inbound", "Outbound".
    :type direction: str or ~azure.mgmt.network.v2018_06_01.models.SecurityRuleDirection
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'protocol': {'key': 'protocol', 'type': 'str'},
        'source_port_range': {'key': 'sourcePortRange', 'type': 'str'},
        'destination_port_range': {'key': 'destinationPortRange', 'type': 'str'},
        'source_port_ranges': {'key': 'sourcePortRanges', 'type': '[str]'},
        'destination_port_ranges': {'key': 'destinationPortRanges', 'type': '[str]'},
        'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'},
        'destination_address_prefix': {'key': 'destinationAddressPrefix', 'type': 'str'},
        'source_address_prefixes': {'key': 'sourceAddressPrefixes', 'type': '[str]'},
        'destination_address_prefixes': {'key': 'destinationAddressPrefixes', 'type': '[str]'},
        'expanded_source_address_prefix': {'key': 'expandedSourceAddressPrefix', 'type': '[str]'},
        'expanded_destination_address_prefix': {'key': 'expandedDestinationAddressPrefix', 'type': '[str]'},
        'access': {'key': 'access', 'type': 'str'},
        'priority': {'key': 'priority', 'type': 'int'},
        'direction': {'key': 'direction', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(EffectiveNetworkSecurityRule, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.protocol = kwargs.get('protocol', None)
        self.source_port_range = kwargs.get('source_port_range', None)
        self.destination_port_range = kwargs.get('destination_port_range', None)
        self.source_port_ranges = kwargs.get('source_port_ranges', None)
        self.destination_port_ranges = kwargs.get('destination_port_ranges', None)
        self.source_address_prefix = kwargs.get('source_address_prefix', None)
        self.destination_address_prefix = kwargs.get('destination_address_prefix', None)
        self.source_address_prefixes = kwargs.get('source_address_prefixes', None)
        self.destination_address_prefixes = kwargs.get('destination_address_prefixes', None)
        self.expanded_source_address_prefix = kwargs.get('expanded_source_address_prefix', None)
        self.expanded_destination_address_prefix = kwargs.get('expanded_destination_address_prefix', None)
        self.access = kwargs.get('access', None)
        self.priority = kwargs.get('priority', None)
        self.direction = kwargs.get('direction', None)
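
# --- Illustrative sketch (editor-added; not part of the generated code).
# EffectiveNetworkSecurityRule is only read back from the service, so every
# field is optional, and the singular/plural field pairs (e.g.
# destination_port_range vs. destination_port_ranges) may be populated either
# way. One way to normalize a rule for display:
def _example_effective_rule_summary(rule):
    """Render an EffectiveNetworkSecurityRule as a one-line summary."""
    ports = rule.destination_port_ranges or [rule.destination_port_range]
    prefixes = rule.destination_address_prefixes or [rule.destination_address_prefix]
    return '%s %s %s -> %s ports %s' % (
        rule.access, rule.direction, rule.protocol,
        ','.join(p for p in prefixes if p),
        ','.join(p for p in ports if p),
    )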
class EffectiveRoute(msrest.serialization.Model):
    """Effective Route.

    :param name: The name of the user defined route. This is optional.
    :type name: str
    :param source: Who created the route. Possible values are: 'Unknown', 'User',
     'VirtualNetworkGateway', and 'Default'. Possible values include: "Unknown", "User",
     "VirtualNetworkGateway", "Default".
    :type source: str or ~azure.mgmt.network.v2018_06_01.models.EffectiveRouteSource
    :param state: The value of effective route. Possible values are: 'Active' and 'Invalid'.
     Possible values include: "Active", "Invalid".
    :type state: str or ~azure.mgmt.network.v2018_06_01.models.EffectiveRouteState
    :param address_prefix: The address prefixes of the effective routes in CIDR notation.
    :type address_prefix: list[str]
    :param next_hop_ip_address: The IP address of the next hop of the effective route.
    :type next_hop_ip_address: list[str]
    :param next_hop_type: The type of Azure hop the packet should be sent to. Possible values
     are: 'VirtualNetworkGateway', 'VnetLocal', 'Internet', 'VirtualAppliance', and 'None'.
     Possible values include: "VirtualNetworkGateway", "VnetLocal", "Internet",
     "VirtualAppliance", "None".
    :type next_hop_type: str or ~azure.mgmt.network.v2018_06_01.models.RouteNextHopType
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'source': {'key': 'source', 'type': 'str'},
        'state': {'key': 'state', 'type': 'str'},
        'address_prefix': {'key': 'addressPrefix', 'type': '[str]'},
        'next_hop_ip_address': {'key': 'nextHopIpAddress', 'type': '[str]'},
        'next_hop_type': {'key': 'nextHopType', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(EffectiveRoute, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.source = kwargs.get('source', None)
        self.state = kwargs.get('state', None)
        self.address_prefix = kwargs.get('address_prefix', None)
        self.next_hop_ip_address = kwargs.get('next_hop_ip_address', None)
        self.next_hop_type = kwargs.get('next_hop_type', None)


class EffectiveRouteListResult(msrest.serialization.Model):
    """Response for list effective route API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of effective routes.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.EffectiveRoute]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[EffectiveRoute]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(EffectiveRouteListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


class EndpointServiceResult(SubResource):
    """Endpoint service.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Name of the endpoint service.
    :vartype name: str
    :ivar type: Type of the endpoint service.
    :vartype type: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(EndpointServiceResult, self).__init__(**kwargs)
        self.name = None
        self.type = None


class EndpointServicesListResult(msrest.serialization.Model):
    """Response for the ListAvailableEndpointServices API service call.

    :param value: List of available endpoint services in a region.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.EndpointServiceResult]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[EndpointServiceResult]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(EndpointServicesListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class Error(msrest.serialization.Model):
    """Error.
:param code: :type code: str :param message: :type message: str :param target: :type target: str :param details: :type details: list[~azure.mgmt.network.v2018_06_01.models.ErrorDetails] :param inner_error: :type inner_error: str """ _attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, 'target': {'key': 'target', 'type': 'str'}, 'details': {'key': 'details', 'type': '[ErrorDetails]'}, 'inner_error': {'key': 'innerError', 'type': 'str'}, } def __init__( self, **kwargs ): super(Error, self).__init__(**kwargs) self.code = kwargs.get('code', None) self.message = kwargs.get('message', None) self.target = kwargs.get('target', None) self.details = kwargs.get('details', None) self.inner_error = kwargs.get('inner_error', None) class ErrorDetails(msrest.serialization.Model): """ErrorDetails. :param code: :type code: str :param target: :type target: str :param message: :type message: str """ _attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'target': {'key': 'target', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, } def __init__( self, **kwargs ): super(ErrorDetails, self).__init__(**kwargs) self.code = kwargs.get('code', None) self.target = kwargs.get('target', None) self.message = kwargs.get('message', None) class ErrorResponse(msrest.serialization.Model): """The error object. :param error: :type error: ~azure.mgmt.network.v2018_06_01.models.ErrorDetails """ _attribute_map = { 'error': {'key': 'error', 'type': 'ErrorDetails'}, } def __init__( self, **kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error = kwargs.get('error', None) class EvaluatedNetworkSecurityGroup(msrest.serialization.Model): """Results of network security group evaluation. Variables are only populated by the server, and will be ignored when sending a request. :param network_security_group_id: Network security group ID. :type network_security_group_id: str :param matched_rule: Matched rule. :type matched_rule: ~azure.mgmt.network.v2018_06_01.models.MatchedRule :ivar rules_evaluation_result: List of network security rules evaluation results. :vartype rules_evaluation_result: list[~azure.mgmt.network.v2018_06_01.models.NetworkSecurityRulesEvaluationResult] """ _validation = { 'rules_evaluation_result': {'readonly': True}, } _attribute_map = { 'network_security_group_id': {'key': 'networkSecurityGroupId', 'type': 'str'}, 'matched_rule': {'key': 'matchedRule', 'type': 'MatchedRule'}, 'rules_evaluation_result': {'key': 'rulesEvaluationResult', 'type': '[NetworkSecurityRulesEvaluationResult]'}, } def __init__( self, **kwargs ): super(EvaluatedNetworkSecurityGroup, self).__init__(**kwargs) self.network_security_group_id = kwargs.get('network_security_group_id', None) self.matched_rule = kwargs.get('matched_rule', None) self.rules_evaluation_result = None class ExpressRouteCircuit(Resource): """ExpressRouteCircuit resource. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param location: Resource location. :type location: str :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param sku: The SKU. :type sku: ~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitSku :ivar etag: Gets a unique read-only string that changes whenever the resource is updated. :vartype etag: str :param allow_classic_operations: Allow classic operations. 
    :type allow_classic_operations: bool
    :param circuit_provisioning_state: The CircuitProvisioningState state of the resource.
    :type circuit_provisioning_state: str
    :param service_provider_provisioning_state: The ServiceProviderProvisioningState state of the
     resource. Possible values are 'NotProvisioned', 'Provisioning', 'Provisioned', and
     'Deprovisioning'. Possible values include: "NotProvisioned", "Provisioning", "Provisioned",
     "Deprovisioning".
    :type service_provider_provisioning_state: str or
     ~azure.mgmt.network.v2018_06_01.models.ServiceProviderProvisioningState
    :param authorizations: The list of authorizations.
    :type authorizations:
     list[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitAuthorization]
    :param peerings: The list of peerings.
    :type peerings: list[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitPeering]
    :param service_key: The ServiceKey.
    :type service_key: str
    :param service_provider_notes: The ServiceProviderNotes.
    :type service_provider_notes: str
    :param service_provider_properties: The ServiceProviderProperties.
    :type service_provider_properties:
     ~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitServiceProviderProperties
    :param provisioning_state: Gets the provisioning state of the public IP resource. Possible
     values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param gateway_manager_etag: The GatewayManager Etag.
    :type gateway_manager_etag: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'ExpressRouteCircuitSku'},
        'etag': {'key': 'etag', 'type': 'str'},
        'allow_classic_operations': {'key': 'properties.allowClassicOperations', 'type': 'bool'},
        'circuit_provisioning_state': {'key': 'properties.circuitProvisioningState', 'type': 'str'},
        'service_provider_provisioning_state': {'key': 'properties.serviceProviderProvisioningState', 'type': 'str'},
        'authorizations': {'key': 'properties.authorizations', 'type': '[ExpressRouteCircuitAuthorization]'},
        'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
        'service_key': {'key': 'properties.serviceKey', 'type': 'str'},
        'service_provider_notes': {'key': 'properties.serviceProviderNotes', 'type': 'str'},
        'service_provider_properties': {'key': 'properties.serviceProviderProperties', 'type': 'ExpressRouteCircuitServiceProviderProperties'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'gateway_manager_etag': {'key': 'properties.gatewayManagerEtag', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ExpressRouteCircuit, self).__init__(**kwargs)
        self.sku = kwargs.get('sku', None)
        self.etag = None
        self.allow_classic_operations = kwargs.get('allow_classic_operations', None)
        self.circuit_provisioning_state = kwargs.get('circuit_provisioning_state', None)
        self.service_provider_provisioning_state = kwargs.get('service_provider_provisioning_state', None)
        self.authorizations = kwargs.get('authorizations', None)
        self.peerings = kwargs.get('peerings', None)
        self.service_key = kwargs.get('service_key', None)
        self.service_provider_notes = kwargs.get('service_provider_notes', None)
        self.service_provider_properties = kwargs.get('service_provider_properties', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)
        self.gateway_manager_etag = kwargs.get('gateway_manager_etag', None)


class ExpressRouteCircuitArpTable(msrest.serialization.Model):
    """The ARP table associated with the ExpressRouteCircuit.

    :param age: Entry age in minutes.
    :type age: int
    :param interface: Interface address.
    :type interface: str
    :param ip_address: The IP address.
    :type ip_address: str
    :param mac_address: The MAC address.
    :type mac_address: str
    """

    _attribute_map = {
        'age': {'key': 'age', 'type': 'int'},
        'interface': {'key': 'interface', 'type': 'str'},
        'ip_address': {'key': 'ipAddress', 'type': 'str'},
        'mac_address': {'key': 'macAddress', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ExpressRouteCircuitArpTable, self).__init__(**kwargs)
        self.age = kwargs.get('age', None)
        self.interface = kwargs.get('interface', None)
        self.ip_address = kwargs.get('ip_address', None)
        self.mac_address = kwargs.get('mac_address', None)


class ExpressRouteCircuitAuthorization(SubResource):
    """Authorization in an ExpressRouteCircuit resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Gets name of the resource that is unique within a resource group. This name can
     be used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param authorization_key: The authorization key.
    :type authorization_key: str
    :param authorization_use_status: AuthorizationUseStatus. Possible values are: 'Available' and
     'InUse'. Possible values include: "Available", "InUse".
    :type authorization_use_status: str or
     ~azure.mgmt.network.v2018_06_01.models.AuthorizationUseStatus
    :param provisioning_state: Gets the provisioning state of the public IP resource. Possible
     values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _validation = {
        'etag': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
        'authorization_use_status': {'key': 'properties.authorizationUseStatus', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ExpressRouteCircuitAuthorization, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = None
        self.authorization_key = kwargs.get('authorization_key', None)
        self.authorization_use_status = kwargs.get('authorization_use_status', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)
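
# --- Illustrative sketch (editor-added; not part of the generated code).
# The 'properties.*' keys in the _attribute_map entries above encode the ARM
# wire format, where most fields nest under a 'properties' object; the model
# exposes them as flat attributes. msrest's Model.deserialize applies that
# mapping, e.g. for an authorization (sample values are hypothetical):
def _example_deserialize_authorization():
    raw = {
        'name': 'auth1',
        'properties': {
            'authorizationKey': 'abc123',
            'authorizationUseStatus': 'Available',
        },
    }
    auth = ExpressRouteCircuitAuthorization.deserialize(raw)
    return auth.authorization_key  # 'abc123', lifted out of 'properties'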
class ExpressRouteCircuitConnection(SubResource):
    """Express Route Circuit Connection in an ExpressRouteCircuitPeering resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Gets name of the resource that is unique within a resource group. This name can
     be used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param express_route_circuit_peering: Reference to Express Route Circuit Private Peering
     Resource of the circuit initiating connection.
    :type express_route_circuit_peering: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param peer_express_route_circuit_peering: Reference to Express Route Circuit Private Peering
     Resource of the peered circuit.
    :type peer_express_route_circuit_peering: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param address_prefix: /29 IP address space to carve out Customer addresses for tunnels.
    :type address_prefix: str
    :param authorization_key: The authorization key.
    :type authorization_key: str
    :ivar circuit_connection_status: Express Route Circuit Connection State. Possible values are:
     'Connected', 'Connecting', and 'Disconnected'. Possible values include: "Connected",
     "Connecting", "Disconnected".
    :vartype circuit_connection_status: str or
     ~azure.mgmt.network.v2018_06_01.models.CircuitConnectionStatus
    :ivar provisioning_state: Provisioning state of the circuit connection resource. Possible
     values are: 'Succeeded', 'Updating', 'Deleting', and 'Failed'.
    :vartype provisioning_state: str
    """

    _validation = {
        'etag': {'readonly': True},
        'circuit_connection_status': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'express_route_circuit_peering': {'key': 'properties.expressRouteCircuitPeering', 'type': 'SubResource'},
        'peer_express_route_circuit_peering': {'key': 'properties.peerExpressRouteCircuitPeering', 'type': 'SubResource'},
        'address_prefix': {'key': 'properties.addressPrefix', 'type': 'str'},
        'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
        'circuit_connection_status': {'key': 'properties.circuitConnectionStatus', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ExpressRouteCircuitConnection, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = None
        self.express_route_circuit_peering = kwargs.get('express_route_circuit_peering', None)
        self.peer_express_route_circuit_peering = kwargs.get('peer_express_route_circuit_peering', None)
        self.address_prefix = kwargs.get('address_prefix', None)
        self.authorization_key = kwargs.get('authorization_key', None)
        self.circuit_connection_status = None
        self.provisioning_state = None


class ExpressRouteCircuitListResult(msrest.serialization.Model):
    """Response for ListExpressRouteCircuit API service call.

    :param value: A list of ExpressRouteCircuits in a resource group.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuit]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteCircuit]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ExpressRouteCircuitListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class ExpressRouteCircuitPeering(SubResource):
    """Peering in an ExpressRouteCircuit resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Gets name of the resource that is unique within a resource group. This name can
     be used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param peering_type: The peering type.
Possible values include: "AzurePublicPeering", "AzurePrivatePeering", "MicrosoftPeering". :type peering_type: str or ~azure.mgmt.network.v2018_06_01.models.ExpressRoutePeeringType :param state: The peering state. Possible values include: "Disabled", "Enabled". :type state: str or ~azure.mgmt.network.v2018_06_01.models.ExpressRoutePeeringState :param azure_asn: The Azure ASN. :type azure_asn: int :param peer_asn: The peer ASN. :type peer_asn: long :param primary_peer_address_prefix: The primary address prefix. :type primary_peer_address_prefix: str :param secondary_peer_address_prefix: The secondary address prefix. :type secondary_peer_address_prefix: str :param primary_azure_port: The primary port. :type primary_azure_port: str :param secondary_azure_port: The secondary port. :type secondary_azure_port: str :param shared_key: The shared key. :type shared_key: str :param vlan_id: The VLAN ID. :type vlan_id: int :param microsoft_peering_config: The Microsoft peering configuration. :type microsoft_peering_config: ~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitPeeringConfig :param stats: Gets peering stats. :type stats: ~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitStats :param provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :type provisioning_state: str :param gateway_manager_etag: The GatewayManager Etag. :type gateway_manager_etag: str :param last_modified_by: Gets whether the provider or the customer last modified the peering. :type last_modified_by: str :param route_filter: The reference of the RouteFilter resource. :type route_filter: ~azure.mgmt.network.v2018_06_01.models.RouteFilter :param ipv6_peering_config: The IPv6 peering configuration. :type ipv6_peering_config: ~azure.mgmt.network.v2018_06_01.models.Ipv6ExpressRouteCircuitPeeringConfig :param connections: The list of circuit connections associated with Azure Private Peering for this circuit. 
:type connections: list[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitConnection] """ _validation = { 'etag': {'readonly': True}, 'peer_asn': {'maximum': 4294967295, 'minimum': 1}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'peering_type': {'key': 'properties.peeringType', 'type': 'str'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'azure_asn': {'key': 'properties.azureASN', 'type': 'int'}, 'peer_asn': {'key': 'properties.peerASN', 'type': 'long'}, 'primary_peer_address_prefix': {'key': 'properties.primaryPeerAddressPrefix', 'type': 'str'}, 'secondary_peer_address_prefix': {'key': 'properties.secondaryPeerAddressPrefix', 'type': 'str'}, 'primary_azure_port': {'key': 'properties.primaryAzurePort', 'type': 'str'}, 'secondary_azure_port': {'key': 'properties.secondaryAzurePort', 'type': 'str'}, 'shared_key': {'key': 'properties.sharedKey', 'type': 'str'}, 'vlan_id': {'key': 'properties.vlanId', 'type': 'int'}, 'microsoft_peering_config': {'key': 'properties.microsoftPeeringConfig', 'type': 'ExpressRouteCircuitPeeringConfig'}, 'stats': {'key': 'properties.stats', 'type': 'ExpressRouteCircuitStats'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'gateway_manager_etag': {'key': 'properties.gatewayManagerEtag', 'type': 'str'}, 'last_modified_by': {'key': 'properties.lastModifiedBy', 'type': 'str'}, 'route_filter': {'key': 'properties.routeFilter', 'type': 'RouteFilter'}, 'ipv6_peering_config': {'key': 'properties.ipv6PeeringConfig', 'type': 'Ipv6ExpressRouteCircuitPeeringConfig'}, 'connections': {'key': 'properties.connections', 'type': '[ExpressRouteCircuitConnection]'}, } def __init__( self, **kwargs ): super(ExpressRouteCircuitPeering, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = None self.peering_type = kwargs.get('peering_type', None) self.state = kwargs.get('state', None) self.azure_asn = kwargs.get('azure_asn', None) self.peer_asn = kwargs.get('peer_asn', None) self.primary_peer_address_prefix = kwargs.get('primary_peer_address_prefix', None) self.secondary_peer_address_prefix = kwargs.get('secondary_peer_address_prefix', None) self.primary_azure_port = kwargs.get('primary_azure_port', None) self.secondary_azure_port = kwargs.get('secondary_azure_port', None) self.shared_key = kwargs.get('shared_key', None) self.vlan_id = kwargs.get('vlan_id', None) self.microsoft_peering_config = kwargs.get('microsoft_peering_config', None) self.stats = kwargs.get('stats', None) self.provisioning_state = kwargs.get('provisioning_state', None) self.gateway_manager_etag = kwargs.get('gateway_manager_etag', None) self.last_modified_by = kwargs.get('last_modified_by', None) self.route_filter = kwargs.get('route_filter', None) self.ipv6_peering_config = kwargs.get('ipv6_peering_config', None) self.connections = kwargs.get('connections', None) class ExpressRouteCircuitPeeringConfig(msrest.serialization.Model): """Specifies the peering configuration. :param advertised_public_prefixes: The reference of AdvertisedPublicPrefixes. :type advertised_public_prefixes: list[str] :param advertised_communities: The communities of bgp peering. Specified for microsoft peering. :type advertised_communities: list[str] :param advertised_public_prefixes_state: AdvertisedPublicPrefixState of the Peering resource. Possible values are 'NotConfigured', 'Configuring', 'Configured', and 'ValidationNeeded'. 
Possible values include: "NotConfigured", "Configuring", "Configured", "ValidationNeeded". :type advertised_public_prefixes_state: str or ~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitPeeringAdvertisedPublicPrefixState :param legacy_mode: The legacy mode of the peering. :type legacy_mode: int :param customer_asn: The CustomerASN of the peering. :type customer_asn: int :param routing_registry_name: The RoutingRegistryName of the configuration. :type routing_registry_name: str """ _attribute_map = { 'advertised_public_prefixes': {'key': 'advertisedPublicPrefixes', 'type': '[str]'}, 'advertised_communities': {'key': 'advertisedCommunities', 'type': '[str]'}, 'advertised_public_prefixes_state': {'key': 'advertisedPublicPrefixesState', 'type': 'str'}, 'legacy_mode': {'key': 'legacyMode', 'type': 'int'}, 'customer_asn': {'key': 'customerASN', 'type': 'int'}, 'routing_registry_name': {'key': 'routingRegistryName', 'type': 'str'}, } def __init__( self, **kwargs ): super(ExpressRouteCircuitPeeringConfig, self).__init__(**kwargs) self.advertised_public_prefixes = kwargs.get('advertised_public_prefixes', None) self.advertised_communities = kwargs.get('advertised_communities', None) self.advertised_public_prefixes_state = kwargs.get('advertised_public_prefixes_state', None) self.legacy_mode = kwargs.get('legacy_mode', None) self.customer_asn = kwargs.get('customer_asn', None) self.routing_registry_name = kwargs.get('routing_registry_name', None) class ExpressRouteCircuitPeeringListResult(msrest.serialization.Model): """Response for ListPeering API service call retrieves all peerings that belong to an ExpressRouteCircuit. :param value: The peerings in an express route circuit. :type value: list[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitPeering] :param next_link: The URL to get the next set of results. :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[ExpressRouteCircuitPeering]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(ExpressRouteCircuitPeeringListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class ExpressRouteCircuitReference(msrest.serialization.Model): """ExpressRouteCircuitReference. :param id: Corresponding Express Route Circuit Id. :type id: str """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, } def __init__( self, **kwargs ): super(ExpressRouteCircuitReference, self).__init__(**kwargs) self.id = kwargs.get('id', None) class ExpressRouteCircuitRoutesTable(msrest.serialization.Model): """The routes table associated with the ExpressRouteCircuit. :param network: IP address of a network entity. :type network: str :param next_hop: NextHop address. :type next_hop: str :param loc_prf: Local preference value as set with the set local-preference route-map configuration command. :type loc_prf: str :param weight: Route Weight. :type weight: int :param path: Autonomous system paths to the destination network. 
:type path: str """ _attribute_map = { 'network': {'key': 'network', 'type': 'str'}, 'next_hop': {'key': 'nextHop', 'type': 'str'}, 'loc_prf': {'key': 'locPrf', 'type': 'str'}, 'weight': {'key': 'weight', 'type': 'int'}, 'path': {'key': 'path', 'type': 'str'}, } def __init__( self, **kwargs ): super(ExpressRouteCircuitRoutesTable, self).__init__(**kwargs) self.network = kwargs.get('network', None) self.next_hop = kwargs.get('next_hop', None) self.loc_prf = kwargs.get('loc_prf', None) self.weight = kwargs.get('weight', None) self.path = kwargs.get('path', None) class ExpressRouteCircuitRoutesTableSummary(msrest.serialization.Model): """The routes table associated with the ExpressRouteCircuit. :param neighbor: IP address of the neighbor. :type neighbor: str :param v: BGP version number spoken to the neighbor. :type v: int :param as_property: Autonomous system number. :type as_property: int :param up_down: The length of time that the BGP session has been in the Established state, or the current status if not in the Established state. :type up_down: str :param state_pfx_rcd: Current state of the BGP session, and the number of prefixes that have been received from a neighbor or peer group. :type state_pfx_rcd: str """ _attribute_map = { 'neighbor': {'key': 'neighbor', 'type': 'str'}, 'v': {'key': 'v', 'type': 'int'}, 'as_property': {'key': 'as', 'type': 'int'}, 'up_down': {'key': 'upDown', 'type': 'str'}, 'state_pfx_rcd': {'key': 'statePfxRcd', 'type': 'str'}, } def __init__( self, **kwargs ): super(ExpressRouteCircuitRoutesTableSummary, self).__init__(**kwargs) self.neighbor = kwargs.get('neighbor', None) self.v = kwargs.get('v', None) self.as_property = kwargs.get('as_property', None) self.up_down = kwargs.get('up_down', None) self.state_pfx_rcd = kwargs.get('state_pfx_rcd', None) class ExpressRouteCircuitsArpTableListResult(msrest.serialization.Model): """Response for ListArpTable associated with the Express Route Circuits API. :param value: Gets list of the ARP table. :type value: list[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitArpTable] :param next_link: The URL to get the next set of results. :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[ExpressRouteCircuitArpTable]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(ExpressRouteCircuitsArpTableListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class ExpressRouteCircuitServiceProviderProperties(msrest.serialization.Model): """Contains ServiceProviderProperties in an ExpressRouteCircuit. :param service_provider_name: The serviceProviderName. :type service_provider_name: str :param peering_location: The peering location. :type peering_location: str :param bandwidth_in_mbps: The BandwidthInMbps. :type bandwidth_in_mbps: int """ _attribute_map = { 'service_provider_name': {'key': 'serviceProviderName', 'type': 'str'}, 'peering_location': {'key': 'peeringLocation', 'type': 'str'}, 'bandwidth_in_mbps': {'key': 'bandwidthInMbps', 'type': 'int'}, } def __init__( self, **kwargs ): super(ExpressRouteCircuitServiceProviderProperties, self).__init__(**kwargs) self.service_provider_name = kwargs.get('service_provider_name', None) self.peering_location = kwargs.get('peering_location', None) self.bandwidth_in_mbps = kwargs.get('bandwidth_in_mbps', None) class ExpressRouteCircuitSku(msrest.serialization.Model): """Contains SKU in an ExpressRouteCircuit. :param name: The name of the SKU. 
    :type name: str
    :param tier: The tier of the SKU. Possible values are 'Standard' and 'Premium'. Possible
     values include: "Standard", "Premium".
    :type tier: str or ~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitSkuTier
    :param family: The family of the SKU. Possible values are: 'UnlimitedData' and 'MeteredData'.
     Possible values include: "UnlimitedData", "MeteredData".
    :type family: str or ~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitSkuFamily
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'family': {'key': 'family', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ExpressRouteCircuitSku, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.tier = kwargs.get('tier', None)
        self.family = kwargs.get('family', None)


class ExpressRouteCircuitsRoutesTableListResult(msrest.serialization.Model):
    """Response for ListRoutesTable associated with the Express Route Circuits API.

    :param value: The list of routes table.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitRoutesTable]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteCircuitRoutesTable]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ExpressRouteCircuitsRoutesTableListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class ExpressRouteCircuitsRoutesTableSummaryListResult(msrest.serialization.Model):
    """Response for ListRoutesTableSummary associated with the Express Route Circuits API.

    :param value: A list of the routes table.
    :type value:
     list[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitRoutesTableSummary]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteCircuitRoutesTableSummary]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ExpressRouteCircuitsRoutesTableSummaryListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class ExpressRouteCircuitStats(msrest.serialization.Model):
    """Contains stats associated with the peering.

    :param primarybytes_in: Gets BytesIn of the peering.
    :type primarybytes_in: long
    :param primarybytes_out: Gets BytesOut of the peering.
    :type primarybytes_out: long
    :param secondarybytes_in: Gets BytesIn of the peering.
    :type secondarybytes_in: long
    :param secondarybytes_out: Gets BytesOut of the peering.
    :type secondarybytes_out: long
    """

    _attribute_map = {
        'primarybytes_in': {'key': 'primarybytesIn', 'type': 'long'},
        'primarybytes_out': {'key': 'primarybytesOut', 'type': 'long'},
        'secondarybytes_in': {'key': 'secondarybytesIn', 'type': 'long'},
        'secondarybytes_out': {'key': 'secondarybytesOut', 'type': 'long'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ExpressRouteCircuitStats, self).__init__(**kwargs)
        self.primarybytes_in = kwargs.get('primarybytes_in', None)
        self.primarybytes_out = kwargs.get('primarybytes_out', None)
        self.secondarybytes_in = kwargs.get('secondarybytes_in', None)
        self.secondarybytes_out = kwargs.get('secondarybytes_out', None)
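
# --- Illustrative sketch (editor-added; not part of the generated code).
# By ExpressRoute convention the SKU name pairs tier and family, e.g.
# 'Standard_MeteredData'; tier and family are also carried separately:
def _example_circuit_sku():
    return ExpressRouteCircuitSku(
        name='Standard_MeteredData',  # conventionally '<tier>_<family>'
        tier='Standard',
        family='MeteredData',
    )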
class ExpressRouteCrossConnection(Resource):
    """ExpressRouteCrossConnection resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: Gets a unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar primary_azure_port: The name of the primary port.
    :vartype primary_azure_port: str
    :ivar secondary_azure_port: The name of the secondary port.
    :vartype secondary_azure_port: str
    :ivar s_tag: The identifier of the circuit traffic.
    :vartype s_tag: int
    :param peering_location: The peering location of the ExpressRoute circuit.
    :type peering_location: str
    :param bandwidth_in_mbps: The circuit bandwidth in Mbps.
    :type bandwidth_in_mbps: int
    :param express_route_circuit: The ExpressRouteCircuit.
    :type express_route_circuit:
     ~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitReference
    :param service_provider_provisioning_state: The provisioning state of the circuit in the
     connectivity provider system. Possible values are 'NotProvisioned', 'Provisioning',
     'Provisioned', and 'Deprovisioning'. Possible values include: "NotProvisioned",
     "Provisioning", "Provisioned", "Deprovisioning".
    :type service_provider_provisioning_state: str or
     ~azure.mgmt.network.v2018_06_01.models.ServiceProviderProvisioningState
    :param service_provider_notes: Additional read only notes set by the connectivity provider.
    :type service_provider_notes: str
    :ivar provisioning_state: Gets the provisioning state of the public IP resource. Possible
     values are: 'Updating', 'Deleting', and 'Failed'.
    :vartype provisioning_state: str
    :param peerings: The list of peerings.
    :type peerings:
     list[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCrossConnectionPeering]
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'primary_azure_port': {'readonly': True},
        'secondary_azure_port': {'readonly': True},
        's_tag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'primary_azure_port': {'key': 'properties.primaryAzurePort', 'type': 'str'},
        'secondary_azure_port': {'key': 'properties.secondaryAzurePort', 'type': 'str'},
        's_tag': {'key': 'properties.sTag', 'type': 'int'},
        'peering_location': {'key': 'properties.peeringLocation', 'type': 'str'},
        'bandwidth_in_mbps': {'key': 'properties.bandwidthInMbps', 'type': 'int'},
        'express_route_circuit': {'key': 'properties.expressRouteCircuit', 'type': 'ExpressRouteCircuitReference'},
        'service_provider_provisioning_state': {'key': 'properties.serviceProviderProvisioningState', 'type': 'str'},
        'service_provider_notes': {'key': 'properties.serviceProviderNotes', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCrossConnectionPeering]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ExpressRouteCrossConnection, self).__init__(**kwargs)
        self.etag = None
        self.primary_azure_port = None
        self.secondary_azure_port = None
        self.s_tag = None
        self.peering_location = kwargs.get('peering_location', None)
        self.bandwidth_in_mbps = kwargs.get('bandwidth_in_mbps', None)
        self.express_route_circuit = kwargs.get('express_route_circuit', None)
self.service_provider_provisioning_state = kwargs.get('service_provider_provisioning_state', None) self.service_provider_notes = kwargs.get('service_provider_notes', None) self.provisioning_state = None self.peerings = kwargs.get('peerings', None) class ExpressRouteCrossConnectionListResult(msrest.serialization.Model): """Response for ListExpressRouteCrossConnection API service call. Variables are only populated by the server, and will be ignored when sending a request. :param value: A list of ExpressRouteCrossConnection resources. :type value: list[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCrossConnection] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _validation = { 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[ExpressRouteCrossConnection]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(ExpressRouteCrossConnectionListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = None class ExpressRouteCrossConnectionPeering(SubResource): """Peering in an ExpressRoute Cross Connection resource. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :param name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource. :type name: str :ivar etag: A unique read-only string that changes whenever the resource is updated. :vartype etag: str :param peering_type: The peering type. Possible values include: "AzurePublicPeering", "AzurePrivatePeering", "MicrosoftPeering". :type peering_type: str or ~azure.mgmt.network.v2018_06_01.models.ExpressRoutePeeringType :param state: The peering state. Possible values include: "Disabled", "Enabled". :type state: str or ~azure.mgmt.network.v2018_06_01.models.ExpressRoutePeeringState :ivar azure_asn: The Azure ASN. :vartype azure_asn: int :param peer_asn: The peer ASN. :type peer_asn: long :param primary_peer_address_prefix: The primary address prefix. :type primary_peer_address_prefix: str :param secondary_peer_address_prefix: The secondary address prefix. :type secondary_peer_address_prefix: str :ivar primary_azure_port: The primary port. :vartype primary_azure_port: str :ivar secondary_azure_port: The secondary port. :vartype secondary_azure_port: str :param shared_key: The shared key. :type shared_key: str :param vlan_id: The VLAN ID. :type vlan_id: int :param microsoft_peering_config: The Microsoft peering configuration. :type microsoft_peering_config: ~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitPeeringConfig :ivar provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :vartype provisioning_state: str :param gateway_manager_etag: The GatewayManager Etag. :type gateway_manager_etag: str :param last_modified_by: Gets whether the provider or the customer last modified the peering. :type last_modified_by: str :param ipv6_peering_config: The IPv6 peering configuration. 
:type ipv6_peering_config: ~azure.mgmt.network.v2018_06_01.models.Ipv6ExpressRouteCircuitPeeringConfig """ _validation = { 'etag': {'readonly': True}, 'azure_asn': {'readonly': True}, 'peer_asn': {'maximum': 4294967295, 'minimum': 1}, 'primary_azure_port': {'readonly': True}, 'secondary_azure_port': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'peering_type': {'key': 'properties.peeringType', 'type': 'str'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'azure_asn': {'key': 'properties.azureASN', 'type': 'int'}, 'peer_asn': {'key': 'properties.peerASN', 'type': 'long'}, 'primary_peer_address_prefix': {'key': 'properties.primaryPeerAddressPrefix', 'type': 'str'}, 'secondary_peer_address_prefix': {'key': 'properties.secondaryPeerAddressPrefix', 'type': 'str'}, 'primary_azure_port': {'key': 'properties.primaryAzurePort', 'type': 'str'}, 'secondary_azure_port': {'key': 'properties.secondaryAzurePort', 'type': 'str'}, 'shared_key': {'key': 'properties.sharedKey', 'type': 'str'}, 'vlan_id': {'key': 'properties.vlanId', 'type': 'int'}, 'microsoft_peering_config': {'key': 'properties.microsoftPeeringConfig', 'type': 'ExpressRouteCircuitPeeringConfig'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'gateway_manager_etag': {'key': 'properties.gatewayManagerEtag', 'type': 'str'}, 'last_modified_by': {'key': 'properties.lastModifiedBy', 'type': 'str'}, 'ipv6_peering_config': {'key': 'properties.ipv6PeeringConfig', 'type': 'Ipv6ExpressRouteCircuitPeeringConfig'}, } def __init__( self, **kwargs ): super(ExpressRouteCrossConnectionPeering, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = None self.peering_type = kwargs.get('peering_type', None) self.state = kwargs.get('state', None) self.azure_asn = None self.peer_asn = kwargs.get('peer_asn', None) self.primary_peer_address_prefix = kwargs.get('primary_peer_address_prefix', None) self.secondary_peer_address_prefix = kwargs.get('secondary_peer_address_prefix', None) self.primary_azure_port = None self.secondary_azure_port = None self.shared_key = kwargs.get('shared_key', None) self.vlan_id = kwargs.get('vlan_id', None) self.microsoft_peering_config = kwargs.get('microsoft_peering_config', None) self.provisioning_state = None self.gateway_manager_etag = kwargs.get('gateway_manager_etag', None) self.last_modified_by = kwargs.get('last_modified_by', None) self.ipv6_peering_config = kwargs.get('ipv6_peering_config', None) class ExpressRouteCrossConnectionPeeringList(msrest.serialization.Model): """Response for ListPeering API service call retrieves all peerings that belong to an ExpressRouteCrossConnection. Variables are only populated by the server, and will be ignored when sending a request. :param value: The peerings in an express route cross connection. :type value: list[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCrossConnectionPeering] :ivar next_link: The URL to get the next set of results. 
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteCrossConnectionPeering]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ExpressRouteCrossConnectionPeeringList, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


class ExpressRouteCrossConnectionRoutesTableSummary(msrest.serialization.Model):
    """The routes table associated with the ExpressRouteCircuit.

    :param neighbor: IP address of Neighbor router.
    :type neighbor: str
    :param asn: Autonomous system number.
    :type asn: int
    :param up_down: The length of time that the BGP session has been in the Established state, or
     the current status if not in the Established state.
    :type up_down: str
    :param state_or_prefixes_received: Current state of the BGP session, and the number of
     prefixes that have been received from a neighbor or peer group.
    :type state_or_prefixes_received: str
    """

    _attribute_map = {
        'neighbor': {'key': 'neighbor', 'type': 'str'},
        'asn': {'key': 'asn', 'type': 'int'},
        'up_down': {'key': 'upDown', 'type': 'str'},
        'state_or_prefixes_received': {'key': 'stateOrPrefixesReceived', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ExpressRouteCrossConnectionRoutesTableSummary, self).__init__(**kwargs)
        self.neighbor = kwargs.get('neighbor', None)
        self.asn = kwargs.get('asn', None)
        self.up_down = kwargs.get('up_down', None)
        self.state_or_prefixes_received = kwargs.get('state_or_prefixes_received', None)


class ExpressRouteCrossConnectionsRoutesTableSummaryListResult(msrest.serialization.Model):
    """Response for ListRoutesTable associated with the Express Route Cross Connections.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of the routes table.
    :type value:
     list[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCrossConnectionRoutesTableSummary]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteCrossConnectionRoutesTableSummary]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ExpressRouteCrossConnectionsRoutesTableSummaryListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None
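# Illustrative sketch (not part of the generated code): reading the BGP
# neighbor summaries out of an ExpressRouteCrossConnectionsRoutesTableSummaryListResult.
# 'result' is a hypothetical, already-deserialized instance of that model.
#
#     for summary in result.value or []:
#         print(summary.neighbor, summary.asn, summary.up_down,
#               summary.state_or_prefixes_received)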
class ExpressRouteServiceProvider(Resource):
    """An ExpressRouteResourceProvider object.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param peering_locations: Gets a list of peering locations.
    :type peering_locations: list[str]
    :param bandwidths_offered: Gets bandwidths offered.
    :type bandwidths_offered:
     list[~azure.mgmt.network.v2018_06_01.models.ExpressRouteServiceProviderBandwidthsOffered]
    :param provisioning_state: Gets the provisioning state of the resource.
    :type provisioning_state: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'peering_locations': {'key': 'properties.peeringLocations', 'type': '[str]'},
        'bandwidths_offered': {'key': 'properties.bandwidthsOffered', 'type': '[ExpressRouteServiceProviderBandwidthsOffered]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ExpressRouteServiceProvider, self).__init__(**kwargs)
        self.peering_locations = kwargs.get('peering_locations', None)
        self.bandwidths_offered = kwargs.get('bandwidths_offered', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class ExpressRouteServiceProviderBandwidthsOffered(msrest.serialization.Model):
    """Contains bandwidths offered in ExpressRouteServiceProvider resources.

    :param offer_name: The OfferName.
    :type offer_name: str
    :param value_in_mbps: The ValueInMbps.
    :type value_in_mbps: int
    """

    _attribute_map = {
        'offer_name': {'key': 'offerName', 'type': 'str'},
        'value_in_mbps': {'key': 'valueInMbps', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ExpressRouteServiceProviderBandwidthsOffered, self).__init__(**kwargs)
        self.offer_name = kwargs.get('offer_name', None)
        self.value_in_mbps = kwargs.get('value_in_mbps', None)


class ExpressRouteServiceProviderListResult(msrest.serialization.Model):
    """Response for the ListExpressRouteServiceProvider API service call.

    :param value: A list of ExpressRouteResourceProvider resources.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.ExpressRouteServiceProvider]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteServiceProvider]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ExpressRouteServiceProviderListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)
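# Illustrative sketch only: picking ExpressRouteServiceProvider entries that
# serve a given peering location and offer at least 1 Gbps. 'providers' is a
# hypothetical ExpressRouteServiceProviderListResult instance.
#
#     fast = [
#         p for p in (providers.value or [])
#         if p.peering_locations and 'Silicon Valley' in p.peering_locations
#         and any((b.value_in_mbps or 0) >= 1000 for b in (p.bandwidths_offered or []))
#     ]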
class FlowLogInformation(msrest.serialization.Model):
    """Information on the configuration of flow log and traffic analytics (optional).

    All required parameters must be populated in order to send to Azure.

    :param target_resource_id: Required. The ID of the resource to configure for flow log and
     traffic analytics (optional).
    :type target_resource_id: str
    :param flow_analytics_configuration: Parameters that define the configuration of traffic
     analytics.
    :type flow_analytics_configuration:
     ~azure.mgmt.network.v2018_06_01.models.TrafficAnalyticsProperties
    :param storage_id: Required. ID of the storage account which is used to store the flow log.
    :type storage_id: str
    :param enabled: Required. Flag to enable/disable flow logging.
    :type enabled: bool
    :param retention_policy: Parameters that define the retention policy for flow log.
    :type retention_policy: ~azure.mgmt.network.v2018_06_01.models.RetentionPolicyParameters
    """

    _validation = {
        'target_resource_id': {'required': True},
        'storage_id': {'required': True},
        'enabled': {'required': True},
    }

    _attribute_map = {
        'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
        'flow_analytics_configuration': {'key': 'flowAnalyticsConfiguration', 'type': 'TrafficAnalyticsProperties'},
        'storage_id': {'key': 'properties.storageId', 'type': 'str'},
        'enabled': {'key': 'properties.enabled', 'type': 'bool'},
        'retention_policy': {'key': 'properties.retentionPolicy', 'type': 'RetentionPolicyParameters'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(FlowLogInformation, self).__init__(**kwargs)
        self.target_resource_id = kwargs['target_resource_id']
        self.flow_analytics_configuration = kwargs.get('flow_analytics_configuration', None)
        self.storage_id = kwargs['storage_id']
        self.enabled = kwargs['enabled']
        self.retention_policy = kwargs.get('retention_policy', None)


class FlowLogStatusParameters(msrest.serialization.Model):
    """Parameters that define a resource to query flow log and traffic analytics (optional) status.

    All required parameters must be populated in order to send to Azure.

    :param target_resource_id: Required. The ID of the target resource to query flow log and
     traffic analytics (optional) status for.
    :type target_resource_id: str
    """

    _validation = {
        'target_resource_id': {'required': True},
    }

    _attribute_map = {
        'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(FlowLogStatusParameters, self).__init__(**kwargs)
        self.target_resource_id = kwargs['target_resource_id']


class FrontendIPConfiguration(SubResource):
    """Frontend IP address of the load balancer.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param zones: A list of availability zones denoting the IP allocated for the resource needs to
     come from.
    :type zones: list[str]
    :ivar inbound_nat_rules: Read only. Inbound rules URIs that use this frontend IP.
    :vartype inbound_nat_rules: list[~azure.mgmt.network.v2018_06_01.models.SubResource]
    :ivar inbound_nat_pools: Read only. Inbound pools URIs that use this frontend IP.
    :vartype inbound_nat_pools: list[~azure.mgmt.network.v2018_06_01.models.SubResource]
    :ivar outbound_nat_rules: Read only. Outbound rules URIs that use this frontend IP.
    :vartype outbound_nat_rules: list[~azure.mgmt.network.v2018_06_01.models.SubResource]
    :ivar load_balancing_rules: Gets load balancing rules URIs that use this frontend IP.
    :vartype load_balancing_rules: list[~azure.mgmt.network.v2018_06_01.models.SubResource]
    :param private_ip_address: The private IP address of the IP configuration.
    :type private_ip_address: str
    :param private_ip_allocation_method: The Private IP allocation method. Possible values are:
     'Static' and 'Dynamic'. Possible values include: "Static", "Dynamic".
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2018_06_01.models.IPAllocationMethod
    :param subnet: The reference of the subnet resource.
    :type subnet: ~azure.mgmt.network.v2018_06_01.models.Subnet
    :param public_ip_address: The reference of the Public IP resource.
:type public_ip_address: ~azure.mgmt.network.v2018_06_01.models.PublicIPAddress :param provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :type provisioning_state: str """ _validation = { 'inbound_nat_rules': {'readonly': True}, 'inbound_nat_pools': {'readonly': True}, 'outbound_nat_rules': {'readonly': True}, 'load_balancing_rules': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'zones': {'key': 'zones', 'type': '[str]'}, 'inbound_nat_rules': {'key': 'properties.inboundNatRules', 'type': '[SubResource]'}, 'inbound_nat_pools': {'key': 'properties.inboundNatPools', 'type': '[SubResource]'}, 'outbound_nat_rules': {'key': 'properties.outboundNatRules', 'type': '[SubResource]'}, 'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[SubResource]'}, 'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'}, 'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'}, 'subnet': {'key': 'properties.subnet', 'type': 'Subnet'}, 'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(FrontendIPConfiguration, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = kwargs.get('etag', None) self.zones = kwargs.get('zones', None) self.inbound_nat_rules = None self.inbound_nat_pools = None self.outbound_nat_rules = None self.load_balancing_rules = None self.private_ip_address = kwargs.get('private_ip_address', None) self.private_ip_allocation_method = kwargs.get('private_ip_allocation_method', None) self.subnet = kwargs.get('subnet', None) self.public_ip_address = kwargs.get('public_ip_address', None) self.provisioning_state = kwargs.get('provisioning_state', None) class GatewayRoute(msrest.serialization.Model): """Gateway routing details. Variables are only populated by the server, and will be ignored when sending a request. :ivar local_address: The gateway's local address. :vartype local_address: str :ivar network: The route's network prefix. :vartype network: str :ivar next_hop: The route's next hop. :vartype next_hop: str :ivar source_peer: The peer this route was learned from. :vartype source_peer: str :ivar origin: The source this route was learned from. :vartype origin: str :ivar as_path: The route's AS path sequence. :vartype as_path: str :ivar weight: The route's weight. 
:vartype weight: int """ _validation = { 'local_address': {'readonly': True}, 'network': {'readonly': True}, 'next_hop': {'readonly': True}, 'source_peer': {'readonly': True}, 'origin': {'readonly': True}, 'as_path': {'readonly': True}, 'weight': {'readonly': True}, } _attribute_map = { 'local_address': {'key': 'localAddress', 'type': 'str'}, 'network': {'key': 'network', 'type': 'str'}, 'next_hop': {'key': 'nextHop', 'type': 'str'}, 'source_peer': {'key': 'sourcePeer', 'type': 'str'}, 'origin': {'key': 'origin', 'type': 'str'}, 'as_path': {'key': 'asPath', 'type': 'str'}, 'weight': {'key': 'weight', 'type': 'int'}, } def __init__( self, **kwargs ): super(GatewayRoute, self).__init__(**kwargs) self.local_address = None self.network = None self.next_hop = None self.source_peer = None self.origin = None self.as_path = None self.weight = None class GatewayRouteListResult(msrest.serialization.Model): """List of virtual network gateway routes. :param value: List of gateway routes. :type value: list[~azure.mgmt.network.v2018_06_01.models.GatewayRoute] """ _attribute_map = { 'value': {'key': 'value', 'type': '[GatewayRoute]'}, } def __init__( self, **kwargs ): super(GatewayRouteListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) class GetVpnSitesConfigurationRequest(msrest.serialization.Model): """List of Vpn-Sites. :param vpn_sites: List of resource-ids of the vpn-sites for which config is to be downloaded. :type vpn_sites: list[~azure.mgmt.network.v2018_06_01.models.SubResource] :param output_blob_sas_url: The sas-url to download the configurations for vpn-sites. :type output_blob_sas_url: str """ _attribute_map = { 'vpn_sites': {'key': 'vpnSites', 'type': '[SubResource]'}, 'output_blob_sas_url': {'key': 'outputBlobSasUrl', 'type': 'str'}, } def __init__( self, **kwargs ): super(GetVpnSitesConfigurationRequest, self).__init__(**kwargs) self.vpn_sites = kwargs.get('vpn_sites', None) self.output_blob_sas_url = kwargs.get('output_blob_sas_url', None) class HTTPConfiguration(msrest.serialization.Model): """HTTP configuration of the connectivity check. :param method: HTTP method. Possible values include: "Get". :type method: str or ~azure.mgmt.network.v2018_06_01.models.HTTPMethod :param headers: List of HTTP headers. :type headers: list[~azure.mgmt.network.v2018_06_01.models.HTTPHeader] :param valid_status_codes: Valid status codes. :type valid_status_codes: list[int] """ _attribute_map = { 'method': {'key': 'method', 'type': 'str'}, 'headers': {'key': 'headers', 'type': '[HTTPHeader]'}, 'valid_status_codes': {'key': 'validStatusCodes', 'type': '[int]'}, } def __init__( self, **kwargs ): super(HTTPConfiguration, self).__init__(**kwargs) self.method = kwargs.get('method', None) self.headers = kwargs.get('headers', None) self.valid_status_codes = kwargs.get('valid_status_codes', None) class HTTPHeader(msrest.serialization.Model): """Describes the HTTP header. :param name: The name in HTTP header. :type name: str :param value: The value in HTTP header. :type value: str """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'value': {'key': 'value', 'type': 'str'}, } def __init__( self, **kwargs ): super(HTTPHeader, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.value = kwargs.get('value', None) class HubVirtualNetworkConnection(Resource): """HubVirtualNetworkConnection Resource. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. 
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: Gets a unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param remote_virtual_network: Reference to the remote virtual network.
    :type remote_virtual_network: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param allow_hub_to_remote_vnet_transit: Indicates whether VirtualHub to RemoteVnet transit is
     enabled.
    :type allow_hub_to_remote_vnet_transit: bool
    :param allow_remote_vnet_to_use_hub_vnet_gateways: Allow RemoteVnet to use Virtual Hub's
     gateways.
    :type allow_remote_vnet_to_use_hub_vnet_gateways: bool
    :param provisioning_state: The provisioning state of the resource. Possible values include:
     "Succeeded", "Updating", "Deleting", "Failed".
    :type provisioning_state: str or ~azure.mgmt.network.v2018_06_01.models.ProvisioningState
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},
        'allow_hub_to_remote_vnet_transit': {'key': 'properties.allowHubToRemoteVnetTransit', 'type': 'bool'},
        'allow_remote_vnet_to_use_hub_vnet_gateways': {'key': 'properties.allowRemoteVnetToUseHubVnetGateways', 'type': 'bool'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(HubVirtualNetworkConnection, self).__init__(**kwargs)
        self.etag = None
        self.remote_virtual_network = kwargs.get('remote_virtual_network', None)
        self.allow_hub_to_remote_vnet_transit = kwargs.get('allow_hub_to_remote_vnet_transit', None)
        self.allow_remote_vnet_to_use_hub_vnet_gateways = kwargs.get('allow_remote_vnet_to_use_hub_vnet_gateways', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class InboundNatPool(SubResource):
    """Inbound NAT pool of the load balancer.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param frontend_ip_configuration: A reference to frontend IP addresses.
    :type frontend_ip_configuration: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param protocol: The transport protocol for the endpoint. Possible values are 'Udp' or 'Tcp'
     or 'All'. Possible values include: "Udp", "Tcp", "All".
    :type protocol: str or ~azure.mgmt.network.v2018_06_01.models.TransportProtocol
    :param frontend_port_range_start: The first port number in the range of external ports that
     will be used to provide Inbound Nat to NICs associated with a load balancer. Acceptable values
     range between 1 and 65534.
    :type frontend_port_range_start: int
    :param frontend_port_range_end: The last port number in the range of external ports that will
     be used to provide Inbound Nat to NICs associated with a load balancer. Acceptable values
     range between 1 and 65535.
    :type frontend_port_range_end: int
    :param backend_port: The port used for internal connections on the endpoint. Acceptable values
     are between 1 and 65535.
:type backend_port: int :param idle_timeout_in_minutes: The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP. :type idle_timeout_in_minutes: int :param enable_floating_ip: Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint. :type enable_floating_ip: bool :param provisioning_state: Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :type provisioning_state: str """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'}, 'protocol': {'key': 'properties.protocol', 'type': 'str'}, 'frontend_port_range_start': {'key': 'properties.frontendPortRangeStart', 'type': 'int'}, 'frontend_port_range_end': {'key': 'properties.frontendPortRangeEnd', 'type': 'int'}, 'backend_port': {'key': 'properties.backendPort', 'type': 'int'}, 'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'}, 'enable_floating_ip': {'key': 'properties.enableFloatingIP', 'type': 'bool'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(InboundNatPool, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = kwargs.get('etag', None) self.frontend_ip_configuration = kwargs.get('frontend_ip_configuration', None) self.protocol = kwargs.get('protocol', None) self.frontend_port_range_start = kwargs.get('frontend_port_range_start', None) self.frontend_port_range_end = kwargs.get('frontend_port_range_end', None) self.backend_port = kwargs.get('backend_port', None) self.idle_timeout_in_minutes = kwargs.get('idle_timeout_in_minutes', None) self.enable_floating_ip = kwargs.get('enable_floating_ip', None) self.provisioning_state = kwargs.get('provisioning_state', None) class InboundNatRule(SubResource): """Inbound NAT rule of the load balancer. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :param name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource. :type name: str :param etag: A unique read-only string that changes whenever the resource is updated. :type etag: str :param frontend_ip_configuration: A reference to frontend IP addresses. :type frontend_ip_configuration: ~azure.mgmt.network.v2018_06_01.models.SubResource :ivar backend_ip_configuration: A reference to a private IP address defined on a network interface of a VM. Traffic sent to the frontend port of each of the frontend IP configurations is forwarded to the backend IP. :vartype backend_ip_configuration: ~azure.mgmt.network.v2018_06_01.models.NetworkInterfaceIPConfiguration :param protocol: The transport protocol for the endpoint. Possible values are 'Udp' or 'Tcp' or 'All'. Possible values include: "Udp", "Tcp", "All". :type protocol: str or ~azure.mgmt.network.v2018_06_01.models.TransportProtocol :param frontend_port: The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. 
Acceptable values range from 1 to 65534. :type frontend_port: int :param backend_port: The port used for the internal endpoint. Acceptable values range from 1 to 65535. :type backend_port: int :param idle_timeout_in_minutes: The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP. :type idle_timeout_in_minutes: int :param enable_floating_ip: Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint. :type enable_floating_ip: bool :param provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :type provisioning_state: str """ _validation = { 'backend_ip_configuration': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'}, 'backend_ip_configuration': {'key': 'properties.backendIPConfiguration', 'type': 'NetworkInterfaceIPConfiguration'}, 'protocol': {'key': 'properties.protocol', 'type': 'str'}, 'frontend_port': {'key': 'properties.frontendPort', 'type': 'int'}, 'backend_port': {'key': 'properties.backendPort', 'type': 'int'}, 'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'}, 'enable_floating_ip': {'key': 'properties.enableFloatingIP', 'type': 'bool'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(InboundNatRule, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = kwargs.get('etag', None) self.frontend_ip_configuration = kwargs.get('frontend_ip_configuration', None) self.backend_ip_configuration = None self.protocol = kwargs.get('protocol', None) self.frontend_port = kwargs.get('frontend_port', None) self.backend_port = kwargs.get('backend_port', None) self.idle_timeout_in_minutes = kwargs.get('idle_timeout_in_minutes', None) self.enable_floating_ip = kwargs.get('enable_floating_ip', None) self.provisioning_state = kwargs.get('provisioning_state', None) class InboundNatRuleListResult(msrest.serialization.Model): """Response for ListInboundNatRule API service call. Variables are only populated by the server, and will be ignored when sending a request. :param value: A list of inbound nat rules in a load balancer. :type value: list[~azure.mgmt.network.v2018_06_01.models.InboundNatRule] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _validation = { 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[InboundNatRule]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(InboundNatRuleListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = None class IPAddressAvailabilityResult(msrest.serialization.Model): """Response for CheckIPAddressAvailability API service call. :param available: Private IP address availability. :type available: bool :param available_ip_addresses: Contains other available private IP addresses if the asked for address is taken. 
:type available_ip_addresses: list[str] """ _attribute_map = { 'available': {'key': 'available', 'type': 'bool'}, 'available_ip_addresses': {'key': 'availableIPAddresses', 'type': '[str]'}, } def __init__( self, **kwargs ): super(IPAddressAvailabilityResult, self).__init__(**kwargs) self.available = kwargs.get('available', None) self.available_ip_addresses = kwargs.get('available_ip_addresses', None) class IPConfiguration(SubResource): """IP configuration. :param id: Resource ID. :type id: str :param name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :type name: str :param etag: A unique read-only string that changes whenever the resource is updated. :type etag: str :param private_ip_address: The private IP address of the IP configuration. :type private_ip_address: str :param private_ip_allocation_method: The private IP allocation method. Possible values are 'Static' and 'Dynamic'. Possible values include: "Static", "Dynamic". :type private_ip_allocation_method: str or ~azure.mgmt.network.v2018_06_01.models.IPAllocationMethod :param subnet: The reference of the subnet resource. :type subnet: ~azure.mgmt.network.v2018_06_01.models.Subnet :param public_ip_address: The reference of the public IP resource. :type public_ip_address: ~azure.mgmt.network.v2018_06_01.models.PublicIPAddress :param provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :type provisioning_state: str """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'}, 'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'}, 'subnet': {'key': 'properties.subnet', 'type': 'Subnet'}, 'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(IPConfiguration, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = kwargs.get('etag', None) self.private_ip_address = kwargs.get('private_ip_address', None) self.private_ip_allocation_method = kwargs.get('private_ip_allocation_method', None) self.subnet = kwargs.get('subnet', None) self.public_ip_address = kwargs.get('public_ip_address', None) self.provisioning_state = kwargs.get('provisioning_state', None) class IpsecPolicy(msrest.serialization.Model): """An IPSec Policy configuration for a virtual network gateway connection. All required parameters must be populated in order to send to Azure. :param sa_life_time_seconds: Required. The IPSec Security Association (also called Quick Mode or Phase 2 SA) lifetime in seconds for a site to site VPN tunnel. :type sa_life_time_seconds: int :param sa_data_size_kilobytes: Required. The IPSec Security Association (also called Quick Mode or Phase 2 SA) payload size in KB for a site to site VPN tunnel. :type sa_data_size_kilobytes: int :param ipsec_encryption: Required. The IPSec encryption algorithm (IKE phase 1). Possible values include: "None", "DES", "DES3", "AES128", "AES192", "AES256", "GCMAES128", "GCMAES192", "GCMAES256". :type ipsec_encryption: str or ~azure.mgmt.network.v2018_06_01.models.IpsecEncryption :param ipsec_integrity: Required. The IPSec integrity algorithm (IKE phase 1). 
Possible values include: "MD5", "SHA1", "SHA256", "GCMAES128", "GCMAES192", "GCMAES256". :type ipsec_integrity: str or ~azure.mgmt.network.v2018_06_01.models.IpsecIntegrity :param ike_encryption: Required. The IKE encryption algorithm (IKE phase 2). Possible values include: "DES", "DES3", "AES128", "AES192", "AES256", "GCMAES256", "GCMAES128". :type ike_encryption: str or ~azure.mgmt.network.v2018_06_01.models.IkeEncryption :param ike_integrity: Required. The IKE integrity algorithm (IKE phase 2). Possible values include: "MD5", "SHA1", "SHA256", "SHA384", "GCMAES256", "GCMAES128". :type ike_integrity: str or ~azure.mgmt.network.v2018_06_01.models.IkeIntegrity :param dh_group: Required. The DH Groups used in IKE Phase 1 for initial SA. Possible values include: "None", "DHGroup1", "DHGroup2", "DHGroup14", "DHGroup2048", "ECP256", "ECP384", "DHGroup24". :type dh_group: str or ~azure.mgmt.network.v2018_06_01.models.DhGroup :param pfs_group: Required. The Pfs Groups used in IKE Phase 2 for new child SA. Possible values include: "None", "PFS1", "PFS2", "PFS2048", "ECP256", "ECP384", "PFS24", "PFS14", "PFSMM". :type pfs_group: str or ~azure.mgmt.network.v2018_06_01.models.PfsGroup """ _validation = { 'sa_life_time_seconds': {'required': True}, 'sa_data_size_kilobytes': {'required': True}, 'ipsec_encryption': {'required': True}, 'ipsec_integrity': {'required': True}, 'ike_encryption': {'required': True}, 'ike_integrity': {'required': True}, 'dh_group': {'required': True}, 'pfs_group': {'required': True}, } _attribute_map = { 'sa_life_time_seconds': {'key': 'saLifeTimeSeconds', 'type': 'int'}, 'sa_data_size_kilobytes': {'key': 'saDataSizeKilobytes', 'type': 'int'}, 'ipsec_encryption': {'key': 'ipsecEncryption', 'type': 'str'}, 'ipsec_integrity': {'key': 'ipsecIntegrity', 'type': 'str'}, 'ike_encryption': {'key': 'ikeEncryption', 'type': 'str'}, 'ike_integrity': {'key': 'ikeIntegrity', 'type': 'str'}, 'dh_group': {'key': 'dhGroup', 'type': 'str'}, 'pfs_group': {'key': 'pfsGroup', 'type': 'str'}, } def __init__( self, **kwargs ): super(IpsecPolicy, self).__init__(**kwargs) self.sa_life_time_seconds = kwargs['sa_life_time_seconds'] self.sa_data_size_kilobytes = kwargs['sa_data_size_kilobytes'] self.ipsec_encryption = kwargs['ipsec_encryption'] self.ipsec_integrity = kwargs['ipsec_integrity'] self.ike_encryption = kwargs['ike_encryption'] self.ike_integrity = kwargs['ike_integrity'] self.dh_group = kwargs['dh_group'] self.pfs_group = kwargs['pfs_group'] class IpTag(msrest.serialization.Model): """Contains the IpTag associated with the public IP address. :param ip_tag_type: Gets or sets the ipTag type: Example FirstPartyUsage. :type ip_tag_type: str :param tag: Gets or sets value of the IpTag associated with the public IP. Example SQL, Storage etc. :type tag: str """ _attribute_map = { 'ip_tag_type': {'key': 'ipTagType', 'type': 'str'}, 'tag': {'key': 'tag', 'type': 'str'}, } def __init__( self, **kwargs ): super(IpTag, self).__init__(**kwargs) self.ip_tag_type = kwargs.get('ip_tag_type', None) self.tag = kwargs.get('tag', None) class Ipv6ExpressRouteCircuitPeeringConfig(msrest.serialization.Model): """Contains IPv6 peering config. :param primary_peer_address_prefix: The primary address prefix. :type primary_peer_address_prefix: str :param secondary_peer_address_prefix: The secondary address prefix. :type secondary_peer_address_prefix: str :param microsoft_peering_config: The Microsoft peering configuration. 
:type microsoft_peering_config: ~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitPeeringConfig :param route_filter: The reference of the RouteFilter resource. :type route_filter: ~azure.mgmt.network.v2018_06_01.models.RouteFilter :param state: The state of peering. Possible values are: 'Disabled' and 'Enabled'. Possible values include: "Disabled", "Enabled". :type state: str or ~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitPeeringState """ _attribute_map = { 'primary_peer_address_prefix': {'key': 'primaryPeerAddressPrefix', 'type': 'str'}, 'secondary_peer_address_prefix': {'key': 'secondaryPeerAddressPrefix', 'type': 'str'}, 'microsoft_peering_config': {'key': 'microsoftPeeringConfig', 'type': 'ExpressRouteCircuitPeeringConfig'}, 'route_filter': {'key': 'routeFilter', 'type': 'RouteFilter'}, 'state': {'key': 'state', 'type': 'str'}, } def __init__( self, **kwargs ): super(Ipv6ExpressRouteCircuitPeeringConfig, self).__init__(**kwargs) self.primary_peer_address_prefix = kwargs.get('primary_peer_address_prefix', None) self.secondary_peer_address_prefix = kwargs.get('secondary_peer_address_prefix', None) self.microsoft_peering_config = kwargs.get('microsoft_peering_config', None) self.route_filter = kwargs.get('route_filter', None) self.state = kwargs.get('state', None) class ListHubVirtualNetworkConnectionsResult(msrest.serialization.Model): """List of HubVirtualNetworkConnections and a URL nextLink to get the next set of results. :param value: List of HubVirtualNetworkConnections. :type value: list[~azure.mgmt.network.v2018_06_01.models.HubVirtualNetworkConnection] :param next_link: URL to get the next set of operation list results if there are any. :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[HubVirtualNetworkConnection]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(ListHubVirtualNetworkConnectionsResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class ListVirtualHubsResult(msrest.serialization.Model): """Result of the request to list VirtualHubs. It contains a list of VirtualHubs and a URL nextLink to get the next set of results. :param value: List of VirtualHubs. :type value: list[~azure.mgmt.network.v2018_06_01.models.VirtualHub] :param next_link: URL to get the next set of operation list results if there are any. :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[VirtualHub]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(ListVirtualHubsResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class ListVirtualWANsResult(msrest.serialization.Model): """Result of the request to list VirtualWANs. It contains a list of VirtualWANs and a URL nextLink to get the next set of results. :param value: List of VirtualWANs. :type value: list[~azure.mgmt.network.v2018_06_01.models.VirtualWAN] :param next_link: URL to get the next set of operation list results if there are any. 
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualWAN]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ListVirtualWANsResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class ListVpnConnectionsResult(msrest.serialization.Model):
    """Result of the request to list all vpn connections to a virtual wan vpn gateway. It contains a list of Vpn Connections and a URL nextLink to get the next set of results.

    :param value: List of Vpn Connections.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.VpnConnection]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VpnConnection]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ListVpnConnectionsResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class ListVpnGatewaysResult(msrest.serialization.Model):
    """Result of the request to list VpnGateways. It contains a list of VpnGateways and a URL nextLink to get the next set of results.

    :param value: List of VpnGateways.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.VpnGateway]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VpnGateway]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ListVpnGatewaysResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class ListVpnSitesResult(msrest.serialization.Model):
    """Result of the request to list VpnSites. It contains a list of VpnSites and a URL nextLink to get the next set of results.

    :param value: List of VpnSites.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.VpnSite]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VpnSite]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ListVpnSitesResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)
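# Descriptive note with a sketch: the List*Result models in this module share a
# single paging contract: 'value' holds one page of resources and 'next_link',
# when set, points at the next page. 'fetch_page' below is a hypothetical
# helper that GETs a URL and deserializes one ListVpnSitesResult.
#
#     page = fetch_page(first_url)
#     while page is not None:
#         for site in page.value or []:
#             handle(site)
#         page = fetch_page(page.next_link) if page.next_link else None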
class LoadBalancer(Resource):
    """LoadBalancer resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param sku: The load balancer SKU.
    :type sku: ~azure.mgmt.network.v2018_06_01.models.LoadBalancerSku
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param frontend_ip_configurations: Object representing the frontend IPs to be used for the
     load balancer.
    :type frontend_ip_configurations:
     list[~azure.mgmt.network.v2018_06_01.models.FrontendIPConfiguration]
    :param backend_address_pools: Collection of backend address pools used by a load balancer.
    :type backend_address_pools: list[~azure.mgmt.network.v2018_06_01.models.BackendAddressPool]
    :param load_balancing_rules: Object collection representing the load balancing rules of the
     load balancer.
    :type load_balancing_rules: list[~azure.mgmt.network.v2018_06_01.models.LoadBalancingRule]
    :param probes: Collection of probe objects used in the load balancer.
    :type probes: list[~azure.mgmt.network.v2018_06_01.models.Probe]
    :param inbound_nat_rules: Collection of inbound NAT Rules used by a load balancer. Defining
     inbound NAT rules on your load balancer is mutually exclusive with defining an inbound NAT
     pool. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are
     associated with individual virtual machines cannot reference an Inbound NAT pool. They have
     to reference individual inbound NAT rules.
    :type inbound_nat_rules: list[~azure.mgmt.network.v2018_06_01.models.InboundNatRule]
    :param inbound_nat_pools: Defines an external port range for inbound NAT to a single backend
     port on NICs associated with a load balancer. Inbound NAT rules are created automatically for
     each NIC associated with the Load Balancer using an external port from this range. Defining
     an Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound Nat
     rules. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are
     associated with individual virtual machines cannot reference an inbound NAT pool. They have
     to reference individual inbound NAT rules.
    :type inbound_nat_pools: list[~azure.mgmt.network.v2018_06_01.models.InboundNatPool]
    :param outbound_nat_rules: The outbound NAT rules.
    :type outbound_nat_rules: list[~azure.mgmt.network.v2018_06_01.models.OutboundNatRule]
    :param resource_guid: The resource GUID property of the load balancer resource.
    :type resource_guid: str
    :param provisioning_state: Gets the provisioning state of the PublicIP resource. Possible
     values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'LoadBalancerSku'},
        'etag': {'key': 'etag', 'type': 'str'},
        'frontend_ip_configurations': {'key': 'properties.frontendIPConfigurations', 'type': '[FrontendIPConfiguration]'},
        'backend_address_pools': {'key': 'properties.backendAddressPools', 'type': '[BackendAddressPool]'},
        'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[LoadBalancingRule]'},
        'probes': {'key': 'properties.probes', 'type': '[Probe]'},
        'inbound_nat_rules': {'key': 'properties.inboundNatRules', 'type': '[InboundNatRule]'},
        'inbound_nat_pools': {'key': 'properties.inboundNatPools', 'type': '[InboundNatPool]'},
        'outbound_nat_rules': {'key': 'properties.outboundNatRules', 'type': '[OutboundNatRule]'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(LoadBalancer, self).__init__(**kwargs)
        self.sku = kwargs.get('sku', None)
        self.etag = kwargs.get('etag', None)
        self.frontend_ip_configurations = kwargs.get('frontend_ip_configurations', None)
        self.backend_address_pools = kwargs.get('backend_address_pools', None)
        self.load_balancing_rules = kwargs.get('load_balancing_rules', None)
        self.probes = kwargs.get('probes', None)
        self.inbound_nat_rules = kwargs.get('inbound_nat_rules', None)
        self.inbound_nat_pools = kwargs.get('inbound_nat_pools', None)
        self.outbound_nat_rules = kwargs.get('outbound_nat_rules', None)
        self.resource_guid = kwargs.get('resource_guid', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)
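# Construction sketch (illustrative; the resource names and values are
# invented). Like every model here, LoadBalancer takes its properties as
# keyword arguments and leaves anything omitted as None:
#
#     lb = LoadBalancer(
#         location='westus',
#         sku=LoadBalancerSku(name='Standard'),
#         frontend_ip_configurations=[FrontendIPConfiguration(name='frontend1')],
#     )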
class LoadBalancerBackendAddressPoolListResult(msrest.serialization.Model):
    """Response for ListBackendAddressPool API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of backend address pools in a load balancer.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.BackendAddressPool]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[BackendAddressPool]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(LoadBalancerBackendAddressPoolListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


class LoadBalancerFrontendIPConfigurationListResult(msrest.serialization.Model):
    """Response for ListFrontendIPConfiguration API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of frontend IP configurations in a load balancer.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.FrontendIPConfiguration]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[FrontendIPConfiguration]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(LoadBalancerFrontendIPConfigurationListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


class LoadBalancerListResult(msrest.serialization.Model):
    """Response for ListLoadBalancers API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of load balancers in a resource group.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.LoadBalancer]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[LoadBalancer]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(LoadBalancerListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


class LoadBalancerLoadBalancingRuleListResult(msrest.serialization.Model):
    """Response for ListLoadBalancingRule API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of load balancing rules in a load balancer.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.LoadBalancingRule]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[LoadBalancingRule]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(LoadBalancerLoadBalancingRuleListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


class LoadBalancerProbeListResult(msrest.serialization.Model):
    """Response for ListProbe API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of probes in a load balancer.
:type value: list[~azure.mgmt.network.v2018_06_01.models.Probe] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _validation = { 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[Probe]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(LoadBalancerProbeListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = None class LoadBalancerSku(msrest.serialization.Model): """SKU of a load balancer. :param name: Name of a load balancer SKU. Possible values include: "Basic", "Standard". :type name: str or ~azure.mgmt.network.v2018_06_01.models.LoadBalancerSkuName """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, } def __init__( self, **kwargs ): super(LoadBalancerSku, self).__init__(**kwargs) self.name = kwargs.get('name', None) class LoadBalancingRule(SubResource): """A load balancing rule for a load balancer. :param id: Resource ID. :type id: str :param name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :type name: str :param etag: A unique read-only string that changes whenever the resource is updated. :type etag: str :param frontend_ip_configuration: A reference to frontend IP addresses. :type frontend_ip_configuration: ~azure.mgmt.network.v2018_06_01.models.SubResource :param backend_address_pool: A reference to a pool of DIPs. Inbound traffic is randomly load balanced across IPs in the backend IPs. :type backend_address_pool: ~azure.mgmt.network.v2018_06_01.models.SubResource :param probe: The reference of the load balancer probe used by the load balancing rule. :type probe: ~azure.mgmt.network.v2018_06_01.models.SubResource :param protocol: The transport protocol for the endpoint. Possible values are 'Udp' or 'Tcp' or 'All'. Possible values include: "Udp", "Tcp", "All". :type protocol: str or ~azure.mgmt.network.v2018_06_01.models.TransportProtocol :param load_distribution: The load distribution policy for this rule. Possible values are 'Default', 'SourceIP', and 'SourceIPProtocol'. Possible values include: "Default", "SourceIP", "SourceIPProtocol". :type load_distribution: str or ~azure.mgmt.network.v2018_06_01.models.LoadDistribution :param frontend_port: The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values are between 0 and 65534. Note that value 0 enables "Any Port". :type frontend_port: int :param backend_port: The port used for internal connections on the endpoint. Acceptable values are between 0 and 65535. Note that value 0 enables "Any Port". :type backend_port: int :param idle_timeout_in_minutes: The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP. :type idle_timeout_in_minutes: int :param enable_floating_ip: Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint. :type enable_floating_ip: bool :param disable_outbound_snat: Configures SNAT for the VMs in the backend pool to use the publicIP address specified in the frontend of the load balancing rule. 
    :type disable_outbound_snat: bool
    :param provisioning_state: Gets the provisioning state of the load balancing rule resource.
     Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
        'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'},
        'probe': {'key': 'properties.probe', 'type': 'SubResource'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'load_distribution': {'key': 'properties.loadDistribution', 'type': 'str'},
        'frontend_port': {'key': 'properties.frontendPort', 'type': 'int'},
        'backend_port': {'key': 'properties.backendPort', 'type': 'int'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'enable_floating_ip': {'key': 'properties.enableFloatingIP', 'type': 'bool'},
        'disable_outbound_snat': {'key': 'properties.disableOutboundSnat', 'type': 'bool'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(LoadBalancingRule, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.frontend_ip_configuration = kwargs.get('frontend_ip_configuration', None)
        self.backend_address_pool = kwargs.get('backend_address_pool', None)
        self.probe = kwargs.get('probe', None)
        self.protocol = kwargs.get('protocol', None)
        self.load_distribution = kwargs.get('load_distribution', None)
        self.frontend_port = kwargs.get('frontend_port', None)
        self.backend_port = kwargs.get('backend_port', None)
        self.idle_timeout_in_minutes = kwargs.get('idle_timeout_in_minutes', None)
        self.enable_floating_ip = kwargs.get('enable_floating_ip', None)
        self.disable_outbound_snat = kwargs.get('disable_outbound_snat', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)
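# A construction sketch for LoadBalancingRule (assumption: the resource IDs are
# placeholders). Related resources are attached by ID through SubResource
# references rather than as embedded objects. Wrapped in a helper so the module
# stays import-safe.
def _example_load_balancing_rule():
    return LoadBalancingRule(
        name='http-rule',
        protocol='Tcp',
        frontend_port=80,  # must be unique across rules on the load balancer
        backend_port=8080,
        idle_timeout_in_minutes=4,
        frontend_ip_configuration=SubResource(id='/subscriptions/.../frontendIPConfigurations/fe-1'),
        backend_address_pool=SubResource(id='/subscriptions/.../backendAddressPools/pool-1'),
    )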
class LocalNetworkGateway(Resource):
    """LocalNetworkGateway resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param local_network_address_space: Local network site address space.
    :type local_network_address_space: ~azure.mgmt.network.v2018_06_01.models.AddressSpace
    :param gateway_ip_address: IP address of local network gateway.
    :type gateway_ip_address: str
    :param bgp_settings: Local network gateway's BGP speaker settings.
    :type bgp_settings: ~azure.mgmt.network.v2018_06_01.models.BgpSettings
    :param resource_guid: The resource GUID property of the LocalNetworkGateway resource.
    :type resource_guid: str
    :ivar provisioning_state: The provisioning state of the LocalNetworkGateway resource. Possible
     values are: 'Updating', 'Deleting', and 'Failed'.
    :vartype provisioning_state: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'local_network_address_space': {'key': 'properties.localNetworkAddressSpace', 'type': 'AddressSpace'},
        'gateway_ip_address': {'key': 'properties.gatewayIpAddress', 'type': 'str'},
        'bgp_settings': {'key': 'properties.bgpSettings', 'type': 'BgpSettings'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(LocalNetworkGateway, self).__init__(**kwargs)
        self.etag = kwargs.get('etag', None)
        self.local_network_address_space = kwargs.get('local_network_address_space', None)
        self.gateway_ip_address = kwargs.get('gateway_ip_address', None)
        self.bgp_settings = kwargs.get('bgp_settings', None)
        self.resource_guid = kwargs.get('resource_guid', None)
        self.provisioning_state = None


class LocalNetworkGatewayListResult(msrest.serialization.Model):
    """Response for ListLocalNetworkGateways API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of local network gateways that exist in a resource group.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.LocalNetworkGateway]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[LocalNetworkGateway]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(LocalNetworkGatewayListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


class LogSpecification(msrest.serialization.Model):
    """Description of logging specification.

    :param name: The name of the specification.
    :type name: str
    :param display_name: The display name of the specification.
    :type display_name: str
    :param blob_duration: Duration of the blob.
    :type blob_duration: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'blob_duration': {'key': 'blobDuration', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(LogSpecification, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.display_name = kwargs.get('display_name', None)
        self.blob_duration = kwargs.get('blob_duration', None)


class MatchedRule(msrest.serialization.Model):
    """Matched rule.

    :param rule_name: Name of the matched network security rule.
    :type rule_name: str
    :param action: The network traffic is allowed or denied. Possible values are 'Allow' and
     'Deny'.
    :type action: str
    """

    _attribute_map = {
        'rule_name': {'key': 'ruleName', 'type': 'str'},
        'action': {'key': 'action', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MatchedRule, self).__init__(**kwargs)
        self.rule_name = kwargs.get('rule_name', None)
        self.action = kwargs.get('action', None)


class MetricSpecification(msrest.serialization.Model):
    """Description of metrics specification.

    :param name: The name of the metric.
    :type name: str
    :param display_name: The display name of the metric.
    :type display_name: str
    :param display_description: The description of the metric.
    :type display_description: str
    :param unit: The unit in which the metric is displayed.
    :type unit: str
    :param aggregation_type: The aggregation type.
    :type aggregation_type: str
    :param availabilities: List of availabilities.
    :type availabilities: list[~azure.mgmt.network.v2018_06_01.models.Availability]
    :param enable_regional_mdm_account: Whether the regional MDM account is enabled.
    :type enable_regional_mdm_account: bool
    :param fill_gap_with_zero: Whether gaps would be filled with zeros.
    :type fill_gap_with_zero: bool
    :param metric_filter_pattern: Pattern for the filter of the metric.
    :type metric_filter_pattern: str
    :param dimensions: List of dimensions.
    :type dimensions: list[~azure.mgmt.network.v2018_06_01.models.Dimension]
    :param is_internal: Whether the metric is internal.
    :type is_internal: bool
    :param source_mdm_account: The source MDM account.
    :type source_mdm_account: str
    :param source_mdm_namespace: The source MDM namespace.
    :type source_mdm_namespace: str
    :param resource_id_dimension_name_override: The resource Id dimension name override.
    :type resource_id_dimension_name_override: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'display_description': {'key': 'displayDescription', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
        'availabilities': {'key': 'availabilities', 'type': '[Availability]'},
        'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},
        'fill_gap_with_zero': {'key': 'fillGapWithZero', 'type': 'bool'},
        'metric_filter_pattern': {'key': 'metricFilterPattern', 'type': 'str'},
        'dimensions': {'key': 'dimensions', 'type': '[Dimension]'},
        'is_internal': {'key': 'isInternal', 'type': 'bool'},
        'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},
        'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
        'resource_id_dimension_name_override': {'key': 'resourceIdDimensionNameOverride', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MetricSpecification, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.display_name = kwargs.get('display_name', None)
        self.display_description = kwargs.get('display_description', None)
        self.unit = kwargs.get('unit', None)
        self.aggregation_type = kwargs.get('aggregation_type', None)
        self.availabilities = kwargs.get('availabilities', None)
        self.enable_regional_mdm_account = kwargs.get('enable_regional_mdm_account', None)
        self.fill_gap_with_zero = kwargs.get('fill_gap_with_zero', None)
        self.metric_filter_pattern = kwargs.get('metric_filter_pattern', None)
        self.dimensions = kwargs.get('dimensions', None)
        self.is_internal = kwargs.get('is_internal', None)
        self.source_mdm_account = kwargs.get('source_mdm_account', None)
        self.source_mdm_namespace = kwargs.get('source_mdm_namespace', None)
        self.resource_id_dimension_name_override = kwargs.get('resource_id_dimension_name_override', None)


class NetworkConfigurationDiagnosticParameters(msrest.serialization.Model):
    """Parameters to get network configuration diagnostic.

    All required parameters must be populated in order to send to Azure.

    :param target_resource_id: Required. The ID of the target resource to perform network
     configuration diagnostic. Valid options are VM, NetworkInterface, VMSS/NetworkInterface and
     Application Gateway.
    :type target_resource_id: str
    :param queries: Required. List of traffic queries.
    :type queries: list[~azure.mgmt.network.v2018_06_01.models.TrafficQuery]
    """

    _validation = {
        'target_resource_id': {'required': True},
        'queries': {'required': True},
    }

    _attribute_map = {
        'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
        'queries': {'key': 'queries', 'type': '[TrafficQuery]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NetworkConfigurationDiagnosticParameters, self).__init__(**kwargs)
        self.target_resource_id = kwargs['target_resource_id']
        self.queries = kwargs['queries']


class NetworkConfigurationDiagnosticResponse(msrest.serialization.Model):
    """Results of network configuration diagnostic on the target resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar results: List of network configuration diagnostic results.
    :vartype results:
     list[~azure.mgmt.network.v2018_06_01.models.NetworkConfigurationDiagnosticResult]
    """

    _validation = {
        'results': {'readonly': True},
    }

    _attribute_map = {
        'results': {'key': 'results', 'type': '[NetworkConfigurationDiagnosticResult]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NetworkConfigurationDiagnosticResponse, self).__init__(**kwargs)
        self.results = None


class NetworkConfigurationDiagnosticResult(msrest.serialization.Model):
    """Network configuration diagnostic result corresponding to the provided traffic query.

    :param traffic_query: Parameters to compare with network configuration.
    :type traffic_query: ~azure.mgmt.network.v2018_06_01.models.TrafficQuery
    :param network_security_group_result: Network security group result corresponding to the
     provided traffic query.
    :type network_security_group_result:
     ~azure.mgmt.network.v2018_06_01.models.NetworkSecurityGroupResult
    """

    _attribute_map = {
        'traffic_query': {'key': 'trafficQuery', 'type': 'TrafficQuery'},
        'network_security_group_result': {'key': 'networkSecurityGroupResult', 'type': 'NetworkSecurityGroupResult'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NetworkConfigurationDiagnosticResult, self).__init__(**kwargs)
        self.traffic_query = kwargs.get('traffic_query', None)
        self.network_security_group_result = kwargs.get('network_security_group_result', None)
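# A request-building sketch for the diagnostic models above (assumption: the
# TrafficQuery field names shown here mirror the TrafficQuery model defined
# later in this module, and the resource ID is a placeholder). Both parameters
# are read with kwargs[...], so omitting either raises KeyError at construction
# time instead of failing at send time. Wrapped in a helper so names defined
# later in this alphabetically ordered module resolve at call time.
def _example_configuration_diagnostic():
    return NetworkConfigurationDiagnosticParameters(
        target_resource_id='/subscriptions/.../networkInterfaces/nic-1',
        queries=[TrafficQuery(
            direction='Inbound',
            protocol='TCP',
            source='10.0.0.4',
            destination='10.0.1.4',
            destination_port='3389',
        )],
    )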
class NetworkInterface(Resource):
    """A network interface in a resource group.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param virtual_machine: The reference of a virtual machine.
    :type virtual_machine: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param network_security_group: The reference of the NetworkSecurityGroup resource.
    :type network_security_group: ~azure.mgmt.network.v2018_06_01.models.NetworkSecurityGroup
    :param ip_configurations: A list of IPConfigurations of the network interface.
    :type ip_configurations:
     list[~azure.mgmt.network.v2018_06_01.models.NetworkInterfaceIPConfiguration]
    :param dns_settings: The DNS settings in network interface.
    :type dns_settings: ~azure.mgmt.network.v2018_06_01.models.NetworkInterfaceDnsSettings
    :param mac_address: The MAC address of the network interface.
    :type mac_address: str
    :param primary: Gets whether this is a primary network interface on a virtual machine.
    :type primary: bool
    :param enable_accelerated_networking: Whether accelerated networking is enabled on the network
     interface.
    :type enable_accelerated_networking: bool
    :param enable_ip_forwarding: Indicates whether IP forwarding is enabled on this network
     interface.
    :type enable_ip_forwarding: bool
    :param resource_guid: The resource GUID property of the network interface resource.
    :type resource_guid: str
    :param provisioning_state: The provisioning state of the network interface resource. Possible
     values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'virtual_machine': {'key': 'properties.virtualMachine', 'type': 'SubResource'},
        'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'NetworkSecurityGroup'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[NetworkInterfaceIPConfiguration]'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'NetworkInterfaceDnsSettings'},
        'mac_address': {'key': 'properties.macAddress', 'type': 'str'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'enable_accelerated_networking': {'key': 'properties.enableAcceleratedNetworking', 'type': 'bool'},
        'enable_ip_forwarding': {'key': 'properties.enableIPForwarding', 'type': 'bool'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NetworkInterface, self).__init__(**kwargs)
        self.etag = kwargs.get('etag', None)
        self.virtual_machine = kwargs.get('virtual_machine', None)
        self.network_security_group = kwargs.get('network_security_group', None)
        self.ip_configurations = kwargs.get('ip_configurations', None)
        self.dns_settings = kwargs.get('dns_settings', None)
        self.mac_address = kwargs.get('mac_address', None)
        self.primary = kwargs.get('primary', None)
        self.enable_accelerated_networking = kwargs.get('enable_accelerated_networking', None)
        self.enable_ip_forwarding = kwargs.get('enable_ip_forwarding', None)
        self.resource_guid = kwargs.get('resource_guid', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class NetworkInterfaceAssociation(msrest.serialization.Model):
    """Network interface and its custom security rules.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Network interface ID.
    :vartype id: str
    :param security_rules: Collection of custom security rules.
    :type security_rules: list[~azure.mgmt.network.v2018_06_01.models.SecurityRule]
    """

    _validation = {
        'id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'security_rules': {'key': 'securityRules', 'type': '[SecurityRule]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NetworkInterfaceAssociation, self).__init__(**kwargs)
        self.id = None
        self.security_rules = kwargs.get('security_rules', None)


class NetworkInterfaceDnsSettings(msrest.serialization.Model):
    """DNS settings of a network interface.

    :param dns_servers: List of DNS server IP addresses. Use 'AzureProvidedDNS' to switch to azure
     provided DNS resolution. 'AzureProvidedDNS' value cannot be combined with other IPs, it must
     be the only value in dnsServers collection.
    :type dns_servers: list[str]
    :param applied_dns_servers: If the VM that uses this NIC is part of an Availability Set, then
     this list will have the union of all DNS servers from all NICs that are part of the
     Availability Set. This property is what is configured on each of those VMs.
    :type applied_dns_servers: list[str]
    :param internal_dns_name_label: Relative DNS name for this NIC used for internal
     communications between VMs in the same virtual network.
    :type internal_dns_name_label: str
    :param internal_fqdn: Fully qualified DNS name supporting internal communications between VMs
     in the same virtual network.
    :type internal_fqdn: str
    :param internal_domain_name_suffix: Even if internalDnsNameLabel is not specified, a DNS entry
     is created for the primary NIC of the VM. This DNS name can be constructed by concatenating
     the VM name with the value of internalDomainNameSuffix.
    :type internal_domain_name_suffix: str
    """

    _attribute_map = {
        'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
        'applied_dns_servers': {'key': 'appliedDnsServers', 'type': '[str]'},
        'internal_dns_name_label': {'key': 'internalDnsNameLabel', 'type': 'str'},
        'internal_fqdn': {'key': 'internalFqdn', 'type': 'str'},
        'internal_domain_name_suffix': {'key': 'internalDomainNameSuffix', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NetworkInterfaceDnsSettings, self).__init__(**kwargs)
        self.dns_servers = kwargs.get('dns_servers', None)
        self.applied_dns_servers = kwargs.get('applied_dns_servers', None)
        self.internal_dns_name_label = kwargs.get('internal_dns_name_label', None)
        self.internal_fqdn = kwargs.get('internal_fqdn', None)
        self.internal_domain_name_suffix = kwargs.get('internal_domain_name_suffix', None)
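# A small sketch for NetworkInterfaceDnsSettings (assumption: the resolver IPs
# are placeholders). Per the docstring above, 'AzureProvidedDNS' is exclusive:
# pass it alone, or pass only custom resolver IPs. Wrapped in a helper so the
# module stays import-safe.
def _example_nic_dns_settings():
    custom_dns = NetworkInterfaceDnsSettings(dns_servers=['10.0.0.10', '10.0.0.11'])
    azure_dns = NetworkInterfaceDnsSettings(dns_servers=['AzureProvidedDNS'])  # must be the only entry
    return custom_dns, azure_dns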
class NetworkInterfaceIPConfiguration(SubResource):
    """IPConfiguration in a network interface.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param application_gateway_backend_address_pools: The reference of
     ApplicationGatewayBackendAddressPool resource.
    :type application_gateway_backend_address_pools:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationGatewayBackendAddressPool]
    :param load_balancer_backend_address_pools: The reference of LoadBalancerBackendAddressPool
     resource.
    :type load_balancer_backend_address_pools:
     list[~azure.mgmt.network.v2018_06_01.models.BackendAddressPool]
    :param load_balancer_inbound_nat_rules: A list of references of LoadBalancerInboundNatRules.
    :type load_balancer_inbound_nat_rules:
     list[~azure.mgmt.network.v2018_06_01.models.InboundNatRule]
    :param private_ip_address: Private IP address of the IP configuration.
    :type private_ip_address: str
    :param private_ip_allocation_method: Defines how a private IP address is assigned. Possible
     values are: 'Static' and 'Dynamic'. Possible values include: "Static", "Dynamic".
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2018_06_01.models.IPAllocationMethod
    :param private_ip_address_version: Available from Api-Version 2016-03-30 onwards, it
     represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4.
     Possible values are: 'IPv4' and 'IPv6'. Possible values include: "IPv4", "IPv6".
    :type private_ip_address_version: str or ~azure.mgmt.network.v2018_06_01.models.IPVersion
    :param subnet: Subnet bound to the IP configuration.
    :type subnet: ~azure.mgmt.network.v2018_06_01.models.Subnet
    :param primary: Gets whether this is a primary customer address on the network interface.
    :type primary: bool
    :param public_ip_address: Public IP address bound to the IP configuration.
    :type public_ip_address: ~azure.mgmt.network.v2018_06_01.models.PublicIPAddress
    :param application_security_groups: Application security groups in which the IP configuration
     is included.
    :type application_security_groups:
     list[~azure.mgmt.network.v2018_06_01.models.ApplicationSecurityGroup]
    :param provisioning_state: The provisioning state of the network interface IP configuration.
     Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'application_gateway_backend_address_pools': {'key': 'properties.applicationGatewayBackendAddressPools', 'type': '[ApplicationGatewayBackendAddressPool]'},
        'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[BackendAddressPool]'},
        'load_balancer_inbound_nat_rules': {'key': 'properties.loadBalancerInboundNatRules', 'type': '[InboundNatRule]'},
        'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
        'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
        'private_ip_address_version': {'key': 'properties.privateIPAddressVersion', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
        'application_security_groups': {'key': 'properties.applicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NetworkInterfaceIPConfiguration, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.application_gateway_backend_address_pools = kwargs.get('application_gateway_backend_address_pools', None)
        self.load_balancer_backend_address_pools = kwargs.get('load_balancer_backend_address_pools', None)
        self.load_balancer_inbound_nat_rules = kwargs.get('load_balancer_inbound_nat_rules', None)
        self.private_ip_address = kwargs.get('private_ip_address', None)
        self.private_ip_allocation_method = kwargs.get('private_ip_allocation_method', None)
        self.private_ip_address_version = kwargs.get('private_ip_address_version', None)
        self.subnet = kwargs.get('subnet', None)
        self.primary = kwargs.get('primary', None)
        self.public_ip_address = kwargs.get('public_ip_address', None)
        self.application_security_groups = kwargs.get('application_security_groups', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class NetworkInterfaceIPConfigurationListResult(msrest.serialization.Model):
    """Response for list ip configurations API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of ip configurations.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.NetworkInterfaceIPConfiguration]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[NetworkInterfaceIPConfiguration]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NetworkInterfaceIPConfigurationListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


class NetworkInterfaceListResult(msrest.serialization.Model):
    """Response for the ListNetworkInterface API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of network interfaces in a resource group.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.NetworkInterface]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[NetworkInterface]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NetworkInterfaceListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


class NetworkInterfaceLoadBalancerListResult(msrest.serialization.Model):
    """Response for the list load balancers API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of load balancers.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.LoadBalancer]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[LoadBalancer]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NetworkInterfaceLoadBalancerListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


class NetworkSecurityGroup(Resource):
    """NetworkSecurityGroup resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param security_rules: A collection of security rules of the network security group.
    :type security_rules: list[~azure.mgmt.network.v2018_06_01.models.SecurityRule]
    :param default_security_rules: The default security rules of network security group.
    :type default_security_rules: list[~azure.mgmt.network.v2018_06_01.models.SecurityRule]
    :ivar network_interfaces: A collection of references to network interfaces.
    :vartype network_interfaces: list[~azure.mgmt.network.v2018_06_01.models.NetworkInterface]
    :ivar subnets: A collection of references to subnets.
    :vartype subnets: list[~azure.mgmt.network.v2018_06_01.models.Subnet]
    :param resource_guid: The resource GUID property of the network security group resource.
    :type resource_guid: str
    :param provisioning_state: The provisioning state of the network security group resource.
     Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'network_interfaces': {'readonly': True},
        'subnets': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'security_rules': {'key': 'properties.securityRules', 'type': '[SecurityRule]'},
        'default_security_rules': {'key': 'properties.defaultSecurityRules', 'type': '[SecurityRule]'},
        'network_interfaces': {'key': 'properties.networkInterfaces', 'type': '[NetworkInterface]'},
        'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NetworkSecurityGroup, self).__init__(**kwargs)
        self.etag = kwargs.get('etag', None)
        self.security_rules = kwargs.get('security_rules', None)
        self.default_security_rules = kwargs.get('default_security_rules', None)
        self.network_interfaces = None
        self.subnets = None
        self.resource_guid = kwargs.get('resource_guid', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class NetworkSecurityGroupListResult(msrest.serialization.Model):
    """Response for ListNetworkSecurityGroups API service call.

    :param value: A list of NetworkSecurityGroup resources.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.NetworkSecurityGroup]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[NetworkSecurityGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NetworkSecurityGroupListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class NetworkSecurityGroupResult(msrest.serialization.Model):
    """Network security group result corresponding to the provided traffic query.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param security_rule_access_result: The network traffic is allowed or denied. Possible values
     are 'Allow' and 'Deny'. Possible values include: "Allow", "Deny".
    :type security_rule_access_result: str or
     ~azure.mgmt.network.v2018_06_01.models.SecurityRuleAccess
    :ivar evaluated_network_security_groups: List of network security group diagnostic results.
    :vartype evaluated_network_security_groups:
     list[~azure.mgmt.network.v2018_06_01.models.EvaluatedNetworkSecurityGroup]
    """

    _validation = {
        'evaluated_network_security_groups': {'readonly': True},
    }

    _attribute_map = {
        'security_rule_access_result': {'key': 'securityRuleAccessResult', 'type': 'str'},
        'evaluated_network_security_groups': {'key': 'evaluatedNetworkSecurityGroups', 'type': '[EvaluatedNetworkSecurityGroup]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NetworkSecurityGroupResult, self).__init__(**kwargs)
        self.security_rule_access_result = kwargs.get('security_rule_access_result', None)
        self.evaluated_network_security_groups = None


class NetworkSecurityRulesEvaluationResult(msrest.serialization.Model):
    """Network security rules evaluation result.

    :param name: Name of the network security rule.
    :type name: str
    :param protocol_matched: Value indicating whether protocol is matched.
:type protocol_matched: bool :param source_matched: Value indicating whether source is matched. :type source_matched: bool :param source_port_matched: Value indicating whether source port is matched. :type source_port_matched: bool :param destination_matched: Value indicating whether destination is matched. :type destination_matched: bool :param destination_port_matched: Value indicating whether destination port is matched. :type destination_port_matched: bool """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'protocol_matched': {'key': 'protocolMatched', 'type': 'bool'}, 'source_matched': {'key': 'sourceMatched', 'type': 'bool'}, 'source_port_matched': {'key': 'sourcePortMatched', 'type': 'bool'}, 'destination_matched': {'key': 'destinationMatched', 'type': 'bool'}, 'destination_port_matched': {'key': 'destinationPortMatched', 'type': 'bool'}, } def __init__( self, **kwargs ): super(NetworkSecurityRulesEvaluationResult, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.protocol_matched = kwargs.get('protocol_matched', None) self.source_matched = kwargs.get('source_matched', None) self.source_port_matched = kwargs.get('source_port_matched', None) self.destination_matched = kwargs.get('destination_matched', None) self.destination_port_matched = kwargs.get('destination_port_matched', None) class NetworkWatcher(Resource): """Network watcher in a resource group. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param location: Resource location. :type location: str :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param etag: A unique read-only string that changes whenever the resource is updated. :type etag: str :ivar provisioning_state: The provisioning state of the resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed". :vartype provisioning_state: str or ~azure.mgmt.network.v2018_06_01.models.ProvisioningState """ _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'etag': {'key': 'etag', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(NetworkWatcher, self).__init__(**kwargs) self.etag = kwargs.get('etag', None) self.provisioning_state = None class NetworkWatcherListResult(msrest.serialization.Model): """List of network watcher resources. :param value: :type value: list[~azure.mgmt.network.v2018_06_01.models.NetworkWatcher] """ _attribute_map = { 'value': {'key': 'value', 'type': '[NetworkWatcher]'}, } def __init__( self, **kwargs ): super(NetworkWatcherListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) class NextHopParameters(msrest.serialization.Model): """Parameters that define the source and destination endpoint. All required parameters must be populated in order to send to Azure. :param target_resource_id: Required. The resource identifier of the target resource against which the action is to be performed. :type target_resource_id: str :param source_ip_address: Required. The source IP address. 
    :type source_ip_address: str
    :param destination_ip_address: Required. The destination IP address.
    :type destination_ip_address: str
    :param target_nic_resource_id: The NIC ID. (If the VM has multiple NICs and IP forwarding is
     enabled on any of the NICs, then this parameter must be specified. Otherwise optional).
    :type target_nic_resource_id: str
    """

    _validation = {
        'target_resource_id': {'required': True},
        'source_ip_address': {'required': True},
        'destination_ip_address': {'required': True},
    }

    _attribute_map = {
        'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
        'source_ip_address': {'key': 'sourceIPAddress', 'type': 'str'},
        'destination_ip_address': {'key': 'destinationIPAddress', 'type': 'str'},
        'target_nic_resource_id': {'key': 'targetNicResourceId', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NextHopParameters, self).__init__(**kwargs)
        self.target_resource_id = kwargs['target_resource_id']
        self.source_ip_address = kwargs['source_ip_address']
        self.destination_ip_address = kwargs['destination_ip_address']
        self.target_nic_resource_id = kwargs.get('target_nic_resource_id', None)


class NextHopResult(msrest.serialization.Model):
    """The information about next hop from the specified VM.

    :param next_hop_type: Next hop type. Possible values include: "Internet", "VirtualAppliance",
     "VirtualNetworkGateway", "VnetLocal", "HyperNetGateway", "None".
    :type next_hop_type: str or ~azure.mgmt.network.v2018_06_01.models.NextHopType
    :param next_hop_ip_address: Next hop IP Address.
    :type next_hop_ip_address: str
    :param route_table_id: The resource identifier for the route table associated with the route
     being returned. If the route being returned does not correspond to any user created routes
     then this field will be the string 'System Route'.
    :type route_table_id: str
    """

    _attribute_map = {
        'next_hop_type': {'key': 'nextHopType', 'type': 'str'},
        'next_hop_ip_address': {'key': 'nextHopIpAddress', 'type': 'str'},
        'route_table_id': {'key': 'routeTableId', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NextHopResult, self).__init__(**kwargs)
        self.next_hop_type = kwargs.get('next_hop_type', None)
        self.next_hop_ip_address = kwargs.get('next_hop_ip_address', None)
        self.route_table_id = kwargs.get('route_table_id', None)
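# A query-building sketch for the two next-hop models above (assumption: the
# resource IDs and addresses are placeholders). Network Watcher resolves where
# traffic from the source toward the destination would be routed and reports it
# back as a NextHopResult; a route_table_id of 'System Route' means no
# user-defined route matched. Wrapped in a helper so the module stays
# import-safe.
def _example_next_hop_query():
    return NextHopParameters(
        target_resource_id='/subscriptions/.../virtualMachines/vm-1',
        source_ip_address='10.0.0.4',
        destination_ip_address='8.8.8.8',
    )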
class Operation(msrest.serialization.Model):
    """Network REST API operation definition.

    :param name: Operation name: {provider}/{resource}/{operation}.
    :type name: str
    :param display: Display metadata associated with the operation.
    :type display: ~azure.mgmt.network.v2018_06_01.models.OperationDisplay
    :param origin: Origin of the operation.
    :type origin: str
    :param service_specification: Specification of the service.
    :type service_specification:
     ~azure.mgmt.network.v2018_06_01.models.OperationPropertiesFormatServiceSpecification
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
        'origin': {'key': 'origin', 'type': 'str'},
        'service_specification': {'key': 'properties.serviceSpecification', 'type': 'OperationPropertiesFormatServiceSpecification'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(Operation, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.display = kwargs.get('display', None)
        self.origin = kwargs.get('origin', None)
        self.service_specification = kwargs.get('service_specification', None)


class OperationDisplay(msrest.serialization.Model):
    """Display metadata associated with the operation.

    :param provider: Service provider: Microsoft Network.
    :type provider: str
    :param resource: Resource on which the operation is performed.
    :type resource: str
    :param operation: Type of the operation: get, read, delete, etc.
    :type operation: str
    :param description: Description of the operation.
    :type description: str
    """

    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(OperationDisplay, self).__init__(**kwargs)
        self.provider = kwargs.get('provider', None)
        self.resource = kwargs.get('resource', None)
        self.operation = kwargs.get('operation', None)
        self.description = kwargs.get('description', None)


class OperationListResult(msrest.serialization.Model):
    """Result of the request to list Network operations. It contains a list of operations and a
    URL link to get the next set of results.

    :param value: List of Network operations supported by the Network resource provider.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.Operation]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(OperationListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class OperationPropertiesFormatServiceSpecification(msrest.serialization.Model):
    """Specification of the service.

    :param metric_specifications: Operation service specification.
    :type metric_specifications:
     list[~azure.mgmt.network.v2018_06_01.models.MetricSpecification]
    :param log_specifications: Operation log specification.
    :type log_specifications: list[~azure.mgmt.network.v2018_06_01.models.LogSpecification]
    """

    _attribute_map = {
        'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
        'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(OperationPropertiesFormatServiceSpecification, self).__init__(**kwargs)
        self.metric_specifications = kwargs.get('metric_specifications', None)
        self.log_specifications = kwargs.get('log_specifications', None)


class OutboundNatRule(SubResource):
    """Outbound NAT pool of the load balancer.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param allocated_outbound_ports: The number of outbound ports to be used for NAT.
    :type allocated_outbound_ports: int
    :param frontend_ip_configurations: The Frontend IP addresses of the load balancer.
    :type frontend_ip_configurations: list[~azure.mgmt.network.v2018_06_01.models.SubResource]
    :param backend_address_pool: A reference to a pool of DIPs. Outbound traffic is randomly load
     balanced across the IPs in the backend pool.
    :type backend_address_pool: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param provisioning_state: Gets the provisioning state of the outbound NAT rule resource.
     Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'allocated_outbound_ports': {'key': 'properties.allocatedOutboundPorts', 'type': 'int'},
        'frontend_ip_configurations': {'key': 'properties.frontendIPConfigurations', 'type': '[SubResource]'},
        'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(OutboundNatRule, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.allocated_outbound_ports = kwargs.get('allocated_outbound_ports', None)
        self.frontend_ip_configurations = kwargs.get('frontend_ip_configurations', None)
        self.backend_address_pool = kwargs.get('backend_address_pool', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class PacketCapture(msrest.serialization.Model):
    """Parameters that define the create packet capture operation.

    All required parameters must be populated in order to send to Azure.

    :param target: Required. The ID of the targeted resource, only VM is currently supported.
    :type target: str
    :param bytes_to_capture_per_packet: Number of bytes captured per packet, the remaining bytes
     are truncated.
    :type bytes_to_capture_per_packet: int
    :param total_bytes_per_session: Maximum size of the capture output.
    :type total_bytes_per_session: int
    :param time_limit_in_seconds: Maximum duration of the capture session in seconds.
    :type time_limit_in_seconds: int
    :param storage_location: Required. Describes the storage location for a packet capture
     session.
    :type storage_location: ~azure.mgmt.network.v2018_06_01.models.PacketCaptureStorageLocation
    :param filters:
    :type filters: list[~azure.mgmt.network.v2018_06_01.models.PacketCaptureFilter]
    """

    _validation = {
        'target': {'required': True},
        'storage_location': {'required': True},
    }

    _attribute_map = {
        'target': {'key': 'properties.target', 'type': 'str'},
        'bytes_to_capture_per_packet': {'key': 'properties.bytesToCapturePerPacket', 'type': 'int'},
        'total_bytes_per_session': {'key': 'properties.totalBytesPerSession', 'type': 'int'},
        'time_limit_in_seconds': {'key': 'properties.timeLimitInSeconds', 'type': 'int'},
        'storage_location': {'key': 'properties.storageLocation', 'type': 'PacketCaptureStorageLocation'},
        'filters': {'key': 'properties.filters', 'type': '[PacketCaptureFilter]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PacketCapture, self).__init__(**kwargs)
        self.target = kwargs['target']
        self.bytes_to_capture_per_packet = kwargs.get('bytes_to_capture_per_packet', 0)
        self.total_bytes_per_session = kwargs.get('total_bytes_per_session', 1073741824)
        self.time_limit_in_seconds = kwargs.get('time_limit_in_seconds', 18000)
        self.storage_location = kwargs['storage_location']
        self.filters = kwargs.get('filters', None)


class PacketCaptureFilter(msrest.serialization.Model):
    """Filter that is applied to packet capture request. Multiple filters can be applied.

    :param protocol: Protocol to be filtered on. Possible values include: "TCP", "UDP", "Any".
     Default value: "Any".
    :type protocol: str or ~azure.mgmt.network.v2018_06_01.models.PcProtocol
    :param local_ip_address: Local IP Address to be filtered on. Notation: "127.0.0.1" for single
     address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5" for multiple entries.
     Multiple ranges not currently supported.
    Mixing ranges with multiple entries not currently supported. Default = null.
    :type local_ip_address: str
    :param remote_ip_address: Remote IP Address to be filtered on. Notation: "127.0.0.1" for
     single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5;" for multiple
     entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not
     currently supported. Default = null.
    :type remote_ip_address: str
    :param local_port: Local port to be filtered on. Notation: "80" for single port entry. "80-85"
     for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing
     ranges with multiple entries not currently supported. Default = null.
    :type local_port: str
    :param remote_port: Remote port to be filtered on. Notation: "80" for single port entry.
     "80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported.
     Mixing ranges with multiple entries not currently supported. Default = null.
    :type remote_port: str
    """

    _attribute_map = {
        'protocol': {'key': 'protocol', 'type': 'str'},
        'local_ip_address': {'key': 'localIPAddress', 'type': 'str'},
        'remote_ip_address': {'key': 'remoteIPAddress', 'type': 'str'},
        'local_port': {'key': 'localPort', 'type': 'str'},
        'remote_port': {'key': 'remotePort', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PacketCaptureFilter, self).__init__(**kwargs)
        self.protocol = kwargs.get('protocol', "Any")
        self.local_ip_address = kwargs.get('local_ip_address', None)
        self.remote_ip_address = kwargs.get('remote_ip_address', None)
        self.local_port = kwargs.get('local_port', None)
        self.remote_port = kwargs.get('remote_port', None)


class PacketCaptureListResult(msrest.serialization.Model):
    """List of packet capture sessions.

    :param value: Information about packet capture sessions.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.PacketCaptureResult]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[PacketCaptureResult]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PacketCaptureListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)


class PacketCaptureParameters(msrest.serialization.Model):
    """Parameters that define the create packet capture operation.

    All required parameters must be populated in order to send to Azure.

    :param target: Required. The ID of the targeted resource, only VM is currently supported.
    :type target: str
    :param bytes_to_capture_per_packet: Number of bytes captured per packet, the remaining bytes
     are truncated.
    :type bytes_to_capture_per_packet: int
    :param total_bytes_per_session: Maximum size of the capture output.
    :type total_bytes_per_session: int
    :param time_limit_in_seconds: Maximum duration of the capture session in seconds.
    :type time_limit_in_seconds: int
    :param storage_location: Required. Describes the storage location for a packet capture
     session.
    :type storage_location: ~azure.mgmt.network.v2018_06_01.models.PacketCaptureStorageLocation
    :param filters:
    :type filters: list[~azure.mgmt.network.v2018_06_01.models.PacketCaptureFilter]
    """

    _validation = {
        'target': {'required': True},
        'storage_location': {'required': True},
    }

    _attribute_map = {
        'target': {'key': 'target', 'type': 'str'},
        'bytes_to_capture_per_packet': {'key': 'bytesToCapturePerPacket', 'type': 'int'},
        'total_bytes_per_session': {'key': 'totalBytesPerSession', 'type': 'int'},
        'time_limit_in_seconds': {'key': 'timeLimitInSeconds', 'type': 'int'},
        'storage_location': {'key': 'storageLocation', 'type': 'PacketCaptureStorageLocation'},
        'filters': {'key': 'filters', 'type': '[PacketCaptureFilter]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PacketCaptureParameters, self).__init__(**kwargs)
        self.target = kwargs['target']
        self.bytes_to_capture_per_packet = kwargs.get('bytes_to_capture_per_packet', 0)
        self.total_bytes_per_session = kwargs.get('total_bytes_per_session', 1073741824)
        self.time_limit_in_seconds = kwargs.get('time_limit_in_seconds', 18000)
        self.storage_location = kwargs['storage_location']
        self.filters = kwargs.get('filters', None)


class PacketCaptureQueryStatusResult(msrest.serialization.Model):
    """Status of packet capture session.

    :param name: The name of the packet capture resource.
    :type name: str
    :param id: The ID of the packet capture resource.
    :type id: str
    :param capture_start_time: The start time of the packet capture session.
    :type capture_start_time: ~datetime.datetime
    :param packet_capture_status: The status of the packet capture session. Possible values
     include: "NotStarted", "Running", "Stopped", "Error", "Unknown".
    :type packet_capture_status: str or ~azure.mgmt.network.v2018_06_01.models.PcStatus
    :param stop_reason: The reason the current packet capture session was stopped.
    :type stop_reason: str
    :param packet_capture_error: List of errors of packet capture session.
    :type packet_capture_error: list[str or ~azure.mgmt.network.v2018_06_01.models.PcError]
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'capture_start_time': {'key': 'captureStartTime', 'type': 'iso-8601'},
        'packet_capture_status': {'key': 'packetCaptureStatus', 'type': 'str'},
        'stop_reason': {'key': 'stopReason', 'type': 'str'},
        'packet_capture_error': {'key': 'packetCaptureError', 'type': '[str]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PacketCaptureQueryStatusResult, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.id = kwargs.get('id', None)
        self.capture_start_time = kwargs.get('capture_start_time', None)
        self.packet_capture_status = kwargs.get('packet_capture_status', None)
        self.stop_reason = kwargs.get('stop_reason', None)
        self.packet_capture_error = kwargs.get('packet_capture_error', None)


class PacketCaptureResult(msrest.serialization.Model):
    """Information about packet capture session.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: Name of the packet capture session.
    :vartype name: str
    :ivar id: ID of the packet capture operation.
    :vartype id: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param target: The ID of the targeted resource, only VM is currently supported.
    :type target: str
    :param bytes_to_capture_per_packet: Number of bytes captured per packet, the remaining bytes
     are truncated.
    :type bytes_to_capture_per_packet: int
    :param total_bytes_per_session: Maximum size of the capture output.
    :type total_bytes_per_session: int
    :param time_limit_in_seconds: Maximum duration of the capture session in seconds.
    :type time_limit_in_seconds: int
    :param storage_location: Describes the storage location for a packet capture session.
    :type storage_location: ~azure.mgmt.network.v2018_06_01.models.PacketCaptureStorageLocation
    :param filters:
    :type filters: list[~azure.mgmt.network.v2018_06_01.models.PacketCaptureFilter]
    :param provisioning_state: The provisioning state of the packet capture session. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :type provisioning_state: str or ~azure.mgmt.network.v2018_06_01.models.ProvisioningState
    """

    _validation = {
        'name': {'readonly': True},
        'id': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'target': {'key': 'properties.target', 'type': 'str'},
        'bytes_to_capture_per_packet': {'key': 'properties.bytesToCapturePerPacket', 'type': 'int'},
        'total_bytes_per_session': {'key': 'properties.totalBytesPerSession', 'type': 'int'},
        'time_limit_in_seconds': {'key': 'properties.timeLimitInSeconds', 'type': 'int'},
        'storage_location': {'key': 'properties.storageLocation', 'type': 'PacketCaptureStorageLocation'},
        'filters': {'key': 'properties.filters', 'type': '[PacketCaptureFilter]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PacketCaptureResult, self).__init__(**kwargs)
        self.name = None
        self.id = None
        self.etag = kwargs.get('etag', None)
        self.target = kwargs.get('target', None)
        self.bytes_to_capture_per_packet = kwargs.get('bytes_to_capture_per_packet', 0)
        self.total_bytes_per_session = kwargs.get('total_bytes_per_session', 1073741824)
        self.time_limit_in_seconds = kwargs.get('time_limit_in_seconds', 18000)
        self.storage_location = kwargs.get('storage_location', None)
        self.filters = kwargs.get('filters', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class PacketCaptureResultProperties(PacketCaptureParameters):
    """Describes the properties of a packet capture session.

    All required parameters must be populated in order to send to Azure.

    :param target: Required. The ID of the targeted resource, only VM is currently supported.
    :type target: str
    :param bytes_to_capture_per_packet: Number of bytes captured per packet, the remaining bytes
     are truncated.
    :type bytes_to_capture_per_packet: int
    :param total_bytes_per_session: Maximum size of the capture output.
    :type total_bytes_per_session: int
    :param time_limit_in_seconds: Maximum duration of the capture session in seconds.
    :type time_limit_in_seconds: int
    :param storage_location: Required. Describes the storage location for a packet capture
     session.
    :type storage_location: ~azure.mgmt.network.v2018_06_01.models.PacketCaptureStorageLocation
    :param filters:
    :type filters: list[~azure.mgmt.network.v2018_06_01.models.PacketCaptureFilter]
    :param provisioning_state: The provisioning state of the packet capture session. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :type provisioning_state: str or ~azure.mgmt.network.v2018_06_01.models.ProvisioningState
    """

    _validation = {
        'target': {'required': True},
        'storage_location': {'required': True},
    }

    _attribute_map = {
        'target': {'key': 'target', 'type': 'str'},
        'bytes_to_capture_per_packet': {'key': 'bytesToCapturePerPacket', 'type': 'int'},
        'total_bytes_per_session': {'key': 'totalBytesPerSession', 'type': 'int'},
        'time_limit_in_seconds': {'key': 'timeLimitInSeconds', 'type': 'int'},
        'storage_location': {'key': 'storageLocation', 'type': 'PacketCaptureStorageLocation'},
        'filters': {'key': 'filters', 'type': '[PacketCaptureFilter]'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PacketCaptureResultProperties, self).__init__(**kwargs)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class PacketCaptureStorageLocation(msrest.serialization.Model):
    """Describes the storage location for a packet capture session.

    :param storage_id: The ID of the storage account to save the packet capture session. Required
     if no local file path is provided.
    :type storage_id: str
    :param storage_path: The URI of the storage path to save the packet capture. Must be a
     well-formed URI describing the location to save the packet capture.
    :type storage_path: str
    :param file_path: A valid local path on the targeting VM. Must include the name of the capture
     file (*.cap). For Linux virtual machines it must start with /var/captures. Required if no
     storage ID is provided, otherwise optional.
    :type file_path: str
    """

    _attribute_map = {
        'storage_id': {'key': 'storageId', 'type': 'str'},
        'storage_path': {'key': 'storagePath', 'type': 'str'},
        'file_path': {'key': 'filePath', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PacketCaptureStorageLocation, self).__init__(**kwargs)
        self.storage_id = kwargs.get('storage_id', None)
        self.storage_path = kwargs.get('storage_path', None)
        self.file_path = kwargs.get('file_path', None)
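# An end-to-end sketch combining the packet capture models above (assumption:
# the VM and storage account IDs are placeholders). Either storage_id or
# file_path must be set on the storage location, and the port notation follows
# the PacketCaptureFilter docstring ("80;443;" for multiple entries). Wrapped
# in a helper so the module stays import-safe.
def _example_packet_capture():
    return PacketCapture(
        target='/subscriptions/.../virtualMachines/vm-1',
        storage_location=PacketCaptureStorageLocation(
            storage_id='/subscriptions/.../storageAccounts/captures',
        ),
        filters=[PacketCaptureFilter(protocol='TCP', remote_port='80;443;')],
        time_limit_in_seconds=600,  # overrides the 18000-second default
    )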
:vartype provisioning_state: str """ _validation = { 'name': {'readonly': True}, 'etag': {'readonly': True}, 'type': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'rules': {'key': 'properties.rules', 'type': '[RouteFilterRule]'}, 'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(PatchRouteFilter, self).__init__(**kwargs) self.name = None self.etag = None self.type = None self.tags = kwargs.get('tags', None) self.rules = kwargs.get('rules', None) self.peerings = kwargs.get('peerings', None) self.provisioning_state = None class PatchRouteFilterRule(SubResource): """Route Filter Rule Resource. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :vartype name: str :ivar etag: A unique read-only string that changes whenever the resource is updated. :vartype etag: str :param access: The access type of the rule. Valid values are: 'Allow', 'Deny'. Possible values include: "Allow", "Deny". :type access: str or ~azure.mgmt.network.v2018_06_01.models.Access :param route_filter_rule_type: The rule type of the rule. Valid value is: 'Community'. Possible values include: "Community". :type route_filter_rule_type: str or ~azure.mgmt.network.v2018_06_01.models.RouteFilterRuleType :param communities: The collection for bgp community values to filter on. e.g. ['12076:5010','12076:5020']. :type communities: list[str] :ivar provisioning_state: The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'. :vartype provisioning_state: str """ _validation = { 'name': {'readonly': True}, 'etag': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'access': {'key': 'properties.access', 'type': 'str'}, 'route_filter_rule_type': {'key': 'properties.routeFilterRuleType', 'type': 'str'}, 'communities': {'key': 'properties.communities', 'type': '[str]'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(PatchRouteFilterRule, self).__init__(**kwargs) self.name = None self.etag = None self.access = kwargs.get('access', None) self.route_filter_rule_type = kwargs.get('route_filter_rule_type', None) self.communities = kwargs.get('communities', None) self.provisioning_state = None class Policies(msrest.serialization.Model): """Policies for vpn gateway. :param allow_branch_to_branch_traffic: True if branch to branch traffic is allowed. :type allow_branch_to_branch_traffic: bool :param allow_vnet_to_vnet_traffic: True if Vnet to Vnet traffic is allowed. 
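
    Example (illustrative sketch only)::

        policies = Policies(
            allow_branch_to_branch_traffic=True,
            allow_vnet_to_vnet_traffic=True,
        )
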
    :type allow_vnet_to_vnet_traffic: bool
    """

    _attribute_map = {
        'allow_branch_to_branch_traffic': {'key': 'allowBranchToBranchTraffic', 'type': 'bool'},
        'allow_vnet_to_vnet_traffic': {'key': 'allowVnetToVnetTraffic', 'type': 'bool'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(Policies, self).__init__(**kwargs)
        self.allow_branch_to_branch_traffic = kwargs.get('allow_branch_to_branch_traffic', None)
        self.allow_vnet_to_vnet_traffic = kwargs.get('allow_vnet_to_vnet_traffic', None)


class Probe(SubResource):
    """A load balancer probe.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Gets name of the resource that is unique within a resource group. This name can
     be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :ivar load_balancing_rules: The load balancer rules that use this probe.
    :vartype load_balancing_rules: list[~azure.mgmt.network.v2018_06_01.models.SubResource]
    :param protocol: The protocol of the end point. Possible values are: 'Http', 'Tcp', or
     'Https'. If 'Tcp' is specified, a received ACK is required for the probe to be successful. If
     'Http' or 'Https' is specified, a 200 OK response from the specified URI is required for the
     probe to be successful. Possible values include: "Http", "Tcp", "Https".
    :type protocol: str or ~azure.mgmt.network.v2018_06_01.models.ProbeProtocol
    :param port: The port for communicating the probe. Possible values range from 1 to 65535,
     inclusive.
    :type port: int
    :param interval_in_seconds: The interval, in seconds, for how frequently to probe the
     endpoint for health status. Typically, the interval is slightly less than half the allocated
     timeout period (in seconds) which allows two full probes before taking the instance out of
     rotation. The default value is 15, the minimum value is 5.
    :type interval_in_seconds: int
    :param number_of_probes: The number of probes with no response after which further traffic
     will stop being delivered to the endpoint. This value allows endpoints to be taken out of
     rotation faster or slower than the typical times used in Azure.
    :type number_of_probes: int
    :param request_path: The URI used for requesting health status from the VM. Path is required
     if the protocol is set to 'Http'. Otherwise, it is not allowed. There is no default value.
    :type request_path: str
    :param provisioning_state: Gets the provisioning state of the probe resource. Possible values
     are: 'Updating', 'Deleting', and 'Failed'.
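
    Example (illustrative sketch only; the name and request path are placeholders)::

        probe = Probe(
            name='httpProbe',
            protocol='Http',
            port=80,
            interval_in_seconds=15,
            number_of_probes=2,
            request_path='/healthz',
        )
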
:type provisioning_state: str """ _validation = { 'load_balancing_rules': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[SubResource]'}, 'protocol': {'key': 'properties.protocol', 'type': 'str'}, 'port': {'key': 'properties.port', 'type': 'int'}, 'interval_in_seconds': {'key': 'properties.intervalInSeconds', 'type': 'int'}, 'number_of_probes': {'key': 'properties.numberOfProbes', 'type': 'int'}, 'request_path': {'key': 'properties.requestPath', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(Probe, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = kwargs.get('etag', None) self.load_balancing_rules = None self.protocol = kwargs.get('protocol', None) self.port = kwargs.get('port', None) self.interval_in_seconds = kwargs.get('interval_in_seconds', None) self.number_of_probes = kwargs.get('number_of_probes', None) self.request_path = kwargs.get('request_path', None) self.provisioning_state = kwargs.get('provisioning_state', None) class ProtocolConfiguration(msrest.serialization.Model): """Configuration of the protocol. :param http_configuration: HTTP configuration of the connectivity check. :type http_configuration: ~azure.mgmt.network.v2018_06_01.models.HTTPConfiguration """ _attribute_map = { 'http_configuration': {'key': 'HTTPConfiguration', 'type': 'HTTPConfiguration'}, } def __init__( self, **kwargs ): super(ProtocolConfiguration, self).__init__(**kwargs) self.http_configuration = kwargs.get('http_configuration', None) class PublicIPAddress(Resource): """Public IP address resource. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param location: Resource location. :type location: str :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param sku: The public IP address SKU. :type sku: ~azure.mgmt.network.v2018_06_01.models.PublicIPAddressSku :param etag: A unique read-only string that changes whenever the resource is updated. :type etag: str :param zones: A list of availability zones denoting the IP allocated for the resource needs to come from. :type zones: list[str] :param public_ip_allocation_method: The public IP allocation method. Possible values are: 'Static' and 'Dynamic'. Possible values include: "Static", "Dynamic". :type public_ip_allocation_method: str or ~azure.mgmt.network.v2018_06_01.models.IPAllocationMethod :param public_ip_address_version: The public IP address version. Possible values are: 'IPv4' and 'IPv6'. Possible values include: "IPv4", "IPv6". :type public_ip_address_version: str or ~azure.mgmt.network.v2018_06_01.models.IPVersion :ivar ip_configuration: The IP configuration associated with the public IP address. :vartype ip_configuration: ~azure.mgmt.network.v2018_06_01.models.IPConfiguration :param dns_settings: The FQDN of the DNS record associated with the public IP address. :type dns_settings: ~azure.mgmt.network.v2018_06_01.models.PublicIPAddressDnsSettings :param ip_tags: The list of tags associated with the public IP address. :type ip_tags: list[~azure.mgmt.network.v2018_06_01.models.IpTag] :param ip_address: The IP address associated with the public IP address resource. 
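
    Example (illustrative sketch only; location, SKU and label values are
    placeholders)::

        pip = PublicIPAddress(
            location='westus',
            sku=PublicIPAddressSku(name='Standard'),
            public_ip_allocation_method='Static',
            public_ip_address_version='IPv4',
            dns_settings=PublicIPAddressDnsSettings(domain_name_label='myapp-demo'),
        )
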
    :type ip_address: str
    :param idle_timeout_in_minutes: The idle timeout of the public IP address.
    :type idle_timeout_in_minutes: int
    :param resource_guid: The resource GUID property of the public IP resource.
    :type resource_guid: str
    :param provisioning_state: The provisioning state of the PublicIP resource. Possible values
     are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'ip_configuration': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'PublicIPAddressSku'},
        'etag': {'key': 'etag', 'type': 'str'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'public_ip_allocation_method': {'key': 'properties.publicIPAllocationMethod', 'type': 'str'},
        'public_ip_address_version': {'key': 'properties.publicIPAddressVersion', 'type': 'str'},
        'ip_configuration': {'key': 'properties.ipConfiguration', 'type': 'IPConfiguration'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'PublicIPAddressDnsSettings'},
        'ip_tags': {'key': 'properties.ipTags', 'type': '[IpTag]'},
        'ip_address': {'key': 'properties.ipAddress', 'type': 'str'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PublicIPAddress, self).__init__(**kwargs)
        self.sku = kwargs.get('sku', None)
        self.etag = kwargs.get('etag', None)
        self.zones = kwargs.get('zones', None)
        self.public_ip_allocation_method = kwargs.get('public_ip_allocation_method', None)
        self.public_ip_address_version = kwargs.get('public_ip_address_version', None)
        self.ip_configuration = None
        self.dns_settings = kwargs.get('dns_settings', None)
        self.ip_tags = kwargs.get('ip_tags', None)
        self.ip_address = kwargs.get('ip_address', None)
        self.idle_timeout_in_minutes = kwargs.get('idle_timeout_in_minutes', None)
        self.resource_guid = kwargs.get('resource_guid', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class PublicIPAddressDnsSettings(msrest.serialization.Model):
    """Contains FQDN of the DNS record associated with the public IP address.

    :param domain_name_label: Gets or sets the domain name label. The concatenation of the domain
     name label and the regionalized DNS zone makes up the fully qualified domain name associated
     with the public IP address. If a domain name label is specified, an A DNS record is created
     for the public IP in the Microsoft Azure DNS system.
    :type domain_name_label: str
    :param fqdn: Gets the FQDN, the fully qualified domain name of the A DNS record associated
     with the public IP. This is the concatenation of the domainNameLabel and the regionalized DNS
     zone.
    :type fqdn: str
    :param reverse_fqdn: Gets or sets the reverse FQDN. A user-visible, fully qualified domain
     name that resolves to this public IP address. If the reverseFqdn is specified, then a PTR DNS
     record is created pointing from the IP address in the in-addr.arpa domain to the reverse
     FQDN.
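
    Example (illustrative sketch only; the label and resulting zone are
    placeholders)::

        dns = PublicIPAddressDnsSettings(domain_name_label='myapp-demo')
        # After creation, the service returns the computed FQDN, e.g.
        # 'myapp-demo.westus.cloudapp.azure.com'.
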
:type reverse_fqdn: str """ _attribute_map = { 'domain_name_label': {'key': 'domainNameLabel', 'type': 'str'}, 'fqdn': {'key': 'fqdn', 'type': 'str'}, 'reverse_fqdn': {'key': 'reverseFqdn', 'type': 'str'}, } def __init__( self, **kwargs ): super(PublicIPAddressDnsSettings, self).__init__(**kwargs) self.domain_name_label = kwargs.get('domain_name_label', None) self.fqdn = kwargs.get('fqdn', None) self.reverse_fqdn = kwargs.get('reverse_fqdn', None) class PublicIPAddressListResult(msrest.serialization.Model): """Response for ListPublicIpAddresses API service call. :param value: A list of public IP addresses that exists in a resource group. :type value: list[~azure.mgmt.network.v2018_06_01.models.PublicIPAddress] :param next_link: The URL to get the next set of results. :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[PublicIPAddress]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(PublicIPAddressListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class PublicIPAddressSku(msrest.serialization.Model): """SKU of a public IP address. :param name: Name of a public IP address SKU. Possible values include: "Basic", "Standard". :type name: str or ~azure.mgmt.network.v2018_06_01.models.PublicIPAddressSkuName """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, } def __init__( self, **kwargs ): super(PublicIPAddressSku, self).__init__(**kwargs) self.name = kwargs.get('name', None) class QueryTroubleshootingParameters(msrest.serialization.Model): """Parameters that define the resource to query the troubleshooting result. All required parameters must be populated in order to send to Azure. :param target_resource_id: Required. The target resource ID to query the troubleshooting result. :type target_resource_id: str """ _validation = { 'target_resource_id': {'required': True}, } _attribute_map = { 'target_resource_id': {'key': 'targetResourceId', 'type': 'str'}, } def __init__( self, **kwargs ): super(QueryTroubleshootingParameters, self).__init__(**kwargs) self.target_resource_id = kwargs['target_resource_id'] class ResourceNavigationLink(SubResource): """ResourceNavigationLink resource. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :param name: Name of the resource that is unique within a resource group. This name can be used to access the resource. :type name: str :ivar etag: A unique read-only string that changes whenever the resource is updated. :vartype etag: str :param linked_resource_type: Resource type of the linked resource. :type linked_resource_type: str :param link: Link to the external resource. :type link: str :ivar provisioning_state: Provisioning state of the ResourceNavigationLink resource. 
:vartype provisioning_state: str """ _validation = { 'etag': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'linked_resource_type': {'key': 'properties.linkedResourceType', 'type': 'str'}, 'link': {'key': 'properties.link', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(ResourceNavigationLink, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = None self.linked_resource_type = kwargs.get('linked_resource_type', None) self.link = kwargs.get('link', None) self.provisioning_state = None class RetentionPolicyParameters(msrest.serialization.Model): """Parameters that define the retention policy for flow log. :param days: Number of days to retain flow log records. :type days: int :param enabled: Flag to enable/disable retention. :type enabled: bool """ _attribute_map = { 'days': {'key': 'days', 'type': 'int'}, 'enabled': {'key': 'enabled', 'type': 'bool'}, } def __init__( self, **kwargs ): super(RetentionPolicyParameters, self).__init__(**kwargs) self.days = kwargs.get('days', 0) self.enabled = kwargs.get('enabled', False) class Route(SubResource): """Route resource. :param id: Resource ID. :type id: str :param name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :type name: str :param etag: A unique read-only string that changes whenever the resource is updated. :type etag: str :param address_prefix: The destination CIDR to which the route applies. :type address_prefix: str :param next_hop_type: The type of Azure hop the packet should be sent to. Possible values are: 'VirtualNetworkGateway', 'VnetLocal', 'Internet', 'VirtualAppliance', and 'None'. Possible values include: "VirtualNetworkGateway", "VnetLocal", "Internet", "VirtualAppliance", "None". :type next_hop_type: str or ~azure.mgmt.network.v2018_06_01.models.RouteNextHopType :param next_hop_ip_address: The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance. :type next_hop_ip_address: str :param provisioning_state: The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :type provisioning_state: str """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'address_prefix': {'key': 'properties.addressPrefix', 'type': 'str'}, 'next_hop_type': {'key': 'properties.nextHopType', 'type': 'str'}, 'next_hop_ip_address': {'key': 'properties.nextHopIpAddress', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(Route, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = kwargs.get('etag', None) self.address_prefix = kwargs.get('address_prefix', None) self.next_hop_type = kwargs.get('next_hop_type', None) self.next_hop_ip_address = kwargs.get('next_hop_ip_address', None) self.provisioning_state = kwargs.get('provisioning_state', None) class RouteFilter(Resource): """Route Filter Resource. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. 
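
    Example (illustrative sketch only; see the parameters below, values are
    placeholders)::

        route_filter = RouteFilter(
            location='westus',
            rules=[RouteFilterRule(access='Allow',
                                   route_filter_rule_type='Community',
                                   communities=['12076:5020'])],
        )
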
:vartype type: str :param location: Resource location. :type location: str :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :ivar etag: Gets a unique read-only string that changes whenever the resource is updated. :vartype etag: str :param rules: Collection of RouteFilterRules contained within a route filter. :type rules: list[~azure.mgmt.network.v2018_06_01.models.RouteFilterRule] :param peerings: A collection of references to express route circuit peerings. :type peerings: list[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCircuitPeering] :ivar provisioning_state: The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'. :vartype provisioning_state: str """ _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, 'etag': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'etag': {'key': 'etag', 'type': 'str'}, 'rules': {'key': 'properties.rules', 'type': '[RouteFilterRule]'}, 'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(RouteFilter, self).__init__(**kwargs) self.etag = None self.rules = kwargs.get('rules', None) self.peerings = kwargs.get('peerings', None) self.provisioning_state = None class RouteFilterListResult(msrest.serialization.Model): """Response for the ListRouteFilters API service call. :param value: Gets a list of route filters in a resource group. :type value: list[~azure.mgmt.network.v2018_06_01.models.RouteFilter] :param next_link: The URL to get the next set of results. :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[RouteFilter]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(RouteFilterListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class RouteFilterRule(SubResource): """Route Filter Rule Resource. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :param name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :type name: str :param location: Resource location. :type location: str :ivar etag: A unique read-only string that changes whenever the resource is updated. :vartype etag: str :param access: The access type of the rule. Valid values are: 'Allow', 'Deny'. Possible values include: "Allow", "Deny". :type access: str or ~azure.mgmt.network.v2018_06_01.models.Access :param route_filter_rule_type: The rule type of the rule. Valid value is: 'Community'. Possible values include: "Community". :type route_filter_rule_type: str or ~azure.mgmt.network.v2018_06_01.models.RouteFilterRuleType :param communities: The collection for bgp community values to filter on. e.g. ['12076:5010','12076:5020']. :type communities: list[str] :ivar provisioning_state: The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'. 
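
    Example (illustrative sketch only; the rule name is a placeholder)::

        rule = RouteFilterRule(
            name='allowMicrosoftPeering',
            access='Allow',
            route_filter_rule_type='Community',
            communities=['12076:5010', '12076:5020'],
        )
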
:vartype provisioning_state: str """ _validation = { 'etag': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'access': {'key': 'properties.access', 'type': 'str'}, 'route_filter_rule_type': {'key': 'properties.routeFilterRuleType', 'type': 'str'}, 'communities': {'key': 'properties.communities', 'type': '[str]'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(RouteFilterRule, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.location = kwargs.get('location', None) self.etag = None self.access = kwargs.get('access', None) self.route_filter_rule_type = kwargs.get('route_filter_rule_type', None) self.communities = kwargs.get('communities', None) self.provisioning_state = None class RouteFilterRuleListResult(msrest.serialization.Model): """Response for the ListRouteFilterRules API service call. :param value: Gets a list of RouteFilterRules in a resource group. :type value: list[~azure.mgmt.network.v2018_06_01.models.RouteFilterRule] :param next_link: The URL to get the next set of results. :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[RouteFilterRule]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(RouteFilterRuleListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class RouteListResult(msrest.serialization.Model): """Response for the ListRoute API service call. :param value: Gets a list of routes in a resource group. :type value: list[~azure.mgmt.network.v2018_06_01.models.Route] :param next_link: The URL to get the next set of results. :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[Route]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(RouteListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class RouteTable(Resource): """Route table resource. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param location: Resource location. :type location: str :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param etag: Gets a unique read-only string that changes whenever the resource is updated. :type etag: str :param routes: Collection of routes contained within a route table. :type routes: list[~azure.mgmt.network.v2018_06_01.models.Route] :ivar subnets: A collection of references to subnets. :vartype subnets: list[~azure.mgmt.network.v2018_06_01.models.Subnet] :param disable_bgp_route_propagation: Gets or sets whether to disable the routes learned by BGP on that route table. True means disable. :type disable_bgp_route_propagation: bool :param provisioning_state: The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
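
    Example (illustrative sketch only; names and addresses are placeholders)::

        route_table = RouteTable(
            location='westus',
            routes=[Route(
                name='defaultViaFirewall',
                address_prefix='0.0.0.0/0',
                next_hop_type='VirtualAppliance',
                next_hop_ip_address='10.0.2.4',
            )],
        )
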
:type provisioning_state: str """ _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, 'subnets': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'etag': {'key': 'etag', 'type': 'str'}, 'routes': {'key': 'properties.routes', 'type': '[Route]'}, 'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'}, 'disable_bgp_route_propagation': {'key': 'properties.disableBgpRoutePropagation', 'type': 'bool'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(RouteTable, self).__init__(**kwargs) self.etag = kwargs.get('etag', None) self.routes = kwargs.get('routes', None) self.subnets = None self.disable_bgp_route_propagation = kwargs.get('disable_bgp_route_propagation', None) self.provisioning_state = kwargs.get('provisioning_state', None) class RouteTableListResult(msrest.serialization.Model): """Response for the ListRouteTable API service call. :param value: Gets a list of route tables in a resource group. :type value: list[~azure.mgmt.network.v2018_06_01.models.RouteTable] :param next_link: The URL to get the next set of results. :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[RouteTable]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(RouteTableListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class SecurityGroupNetworkInterface(msrest.serialization.Model): """Network interface and all its associated security rules. :param id: ID of the network interface. :type id: str :param security_rule_associations: All security rules associated with the network interface. :type security_rule_associations: ~azure.mgmt.network.v2018_06_01.models.SecurityRuleAssociations """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'security_rule_associations': {'key': 'securityRuleAssociations', 'type': 'SecurityRuleAssociations'}, } def __init__( self, **kwargs ): super(SecurityGroupNetworkInterface, self).__init__(**kwargs) self.id = kwargs.get('id', None) self.security_rule_associations = kwargs.get('security_rule_associations', None) class SecurityGroupViewParameters(msrest.serialization.Model): """Parameters that define the VM to check security groups for. All required parameters must be populated in order to send to Azure. :param target_resource_id: Required. ID of the target VM. :type target_resource_id: str """ _validation = { 'target_resource_id': {'required': True}, } _attribute_map = { 'target_resource_id': {'key': 'targetResourceId', 'type': 'str'}, } def __init__( self, **kwargs ): super(SecurityGroupViewParameters, self).__init__(**kwargs) self.target_resource_id = kwargs['target_resource_id'] class SecurityGroupViewResult(msrest.serialization.Model): """The information about security rules applied to the specified VM. :param network_interfaces: List of network interfaces on the specified VM. 
:type network_interfaces: list[~azure.mgmt.network.v2018_06_01.models.SecurityGroupNetworkInterface] """ _attribute_map = { 'network_interfaces': {'key': 'networkInterfaces', 'type': '[SecurityGroupNetworkInterface]'}, } def __init__( self, **kwargs ): super(SecurityGroupViewResult, self).__init__(**kwargs) self.network_interfaces = kwargs.get('network_interfaces', None) class SecurityRule(SubResource): """Network security rule. :param id: Resource ID. :type id: str :param name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :type name: str :param etag: A unique read-only string that changes whenever the resource is updated. :type etag: str :param description: A description for this rule. Restricted to 140 chars. :type description: str :param protocol: Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'. Possible values include: "Tcp", "Udp", "*". :type protocol: str or ~azure.mgmt.network.v2018_06_01.models.SecurityRuleProtocol :param source_port_range: The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports. :type source_port_range: str :param destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports. :type destination_port_range: str :param source_address_prefix: The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. :type source_address_prefix: str :param source_address_prefixes: The CIDR or source IP ranges. :type source_address_prefixes: list[str] :param source_application_security_groups: The application security group specified as source. :type source_application_security_groups: list[~azure.mgmt.network.v2018_06_01.models.ApplicationSecurityGroup] :param destination_address_prefix: The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. :type destination_address_prefix: str :param destination_address_prefixes: The destination address prefixes. CIDR or destination IP ranges. :type destination_address_prefixes: list[str] :param destination_application_security_groups: The application security group specified as destination. :type destination_application_security_groups: list[~azure.mgmt.network.v2018_06_01.models.ApplicationSecurityGroup] :param source_port_ranges: The source port ranges. :type source_port_ranges: list[str] :param destination_port_ranges: The destination port ranges. :type destination_port_ranges: list[str] :param access: The network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'. Possible values include: "Allow", "Deny". :type access: str or ~azure.mgmt.network.v2018_06_01.models.SecurityRuleAccess :param priority: The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule. :type priority: int :param direction: The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are: 'Inbound' and 'Outbound'. Possible values include: "Inbound", "Outbound". 
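
    Example (illustrative sketch only; prefixes and ports are placeholders)::

        rule = SecurityRule(
            name='allow-https-inbound',
            protocol='Tcp',
            source_address_prefix='Internet',
            source_port_range='*',
            destination_address_prefix='10.0.0.0/24',
            destination_port_range='443',
            access='Allow',
            priority=100,
            direction='Inbound',
        )
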
    :type direction: str or ~azure.mgmt.network.v2018_06_01.models.SecurityRuleDirection
    :param provisioning_state: The provisioning state of the security rule resource. Possible
     values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'source_port_range': {'key': 'properties.sourcePortRange', 'type': 'str'},
        'destination_port_range': {'key': 'properties.destinationPortRange', 'type': 'str'},
        'source_address_prefix': {'key': 'properties.sourceAddressPrefix', 'type': 'str'},
        'source_address_prefixes': {'key': 'properties.sourceAddressPrefixes', 'type': '[str]'},
        'source_application_security_groups': {'key': 'properties.sourceApplicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
        'destination_address_prefix': {'key': 'properties.destinationAddressPrefix', 'type': 'str'},
        'destination_address_prefixes': {'key': 'properties.destinationAddressPrefixes', 'type': '[str]'},
        'destination_application_security_groups': {'key': 'properties.destinationApplicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
        'source_port_ranges': {'key': 'properties.sourcePortRanges', 'type': '[str]'},
        'destination_port_ranges': {'key': 'properties.destinationPortRanges', 'type': '[str]'},
        'access': {'key': 'properties.access', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'direction': {'key': 'properties.direction', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(SecurityRule, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.description = kwargs.get('description', None)
        self.protocol = kwargs.get('protocol', None)
        self.source_port_range = kwargs.get('source_port_range', None)
        self.destination_port_range = kwargs.get('destination_port_range', None)
        self.source_address_prefix = kwargs.get('source_address_prefix', None)
        self.source_address_prefixes = kwargs.get('source_address_prefixes', None)
        self.source_application_security_groups = kwargs.get('source_application_security_groups', None)
        self.destination_address_prefix = kwargs.get('destination_address_prefix', None)
        self.destination_address_prefixes = kwargs.get('destination_address_prefixes', None)
        self.destination_application_security_groups = kwargs.get('destination_application_security_groups', None)
        self.source_port_ranges = kwargs.get('source_port_ranges', None)
        self.destination_port_ranges = kwargs.get('destination_port_ranges', None)
        self.access = kwargs.get('access', None)
        self.priority = kwargs.get('priority', None)
        self.direction = kwargs.get('direction', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class SecurityRuleAssociations(msrest.serialization.Model):
    """All security rules associated with the network interface.

    :param network_interface_association: Network interface and its custom security rules.
    :type network_interface_association:
     ~azure.mgmt.network.v2018_06_01.models.NetworkInterfaceAssociation
    :param subnet_association: Subnet and its custom security rules.
    :type subnet_association: ~azure.mgmt.network.v2018_06_01.models.SubnetAssociation
    :param default_security_rules: Collection of default security rules of the network security
     group.
    :type default_security_rules: list[~azure.mgmt.network.v2018_06_01.models.SecurityRule]
    :param effective_security_rules: Collection of effective security rules.
    :type effective_security_rules:
     list[~azure.mgmt.network.v2018_06_01.models.EffectiveNetworkSecurityRule]
    """

    _attribute_map = {
        'network_interface_association': {'key': 'networkInterfaceAssociation', 'type': 'NetworkInterfaceAssociation'},
        'subnet_association': {'key': 'subnetAssociation', 'type': 'SubnetAssociation'},
        'default_security_rules': {'key': 'defaultSecurityRules', 'type': '[SecurityRule]'},
        'effective_security_rules': {'key': 'effectiveSecurityRules', 'type': '[EffectiveNetworkSecurityRule]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(SecurityRuleAssociations, self).__init__(**kwargs)
        self.network_interface_association = kwargs.get('network_interface_association', None)
        self.subnet_association = kwargs.get('subnet_association', None)
        self.default_security_rules = kwargs.get('default_security_rules', None)
        self.effective_security_rules = kwargs.get('effective_security_rules', None)


class SecurityRuleListResult(msrest.serialization.Model):
    """Response for ListSecurityRule API service call. Retrieves all security rules that belong
    to a network security group.

    :param value: The security rules in a network security group.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.SecurityRule]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[SecurityRule]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(SecurityRuleListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class ServiceEndpointPropertiesFormat(msrest.serialization.Model):
    """The service endpoint properties.

    :param service: The type of the endpoint service.
    :type service: str
    :param locations: A list of locations.
    :type locations: list[str]
    :param provisioning_state: The provisioning state of the resource.
    :type provisioning_state: str
    """

    _attribute_map = {
        'service': {'key': 'service', 'type': 'str'},
        'locations': {'key': 'locations', 'type': '[str]'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ServiceEndpointPropertiesFormat, self).__init__(**kwargs)
        self.service = kwargs.get('service', None)
        self.locations = kwargs.get('locations', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class Subnet(SubResource):
    """Subnet in a virtual network resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can
     be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param address_prefix: The address prefix for the subnet.
    :type address_prefix: str
    :param network_security_group: The reference of the NetworkSecurityGroup resource.
    :type network_security_group: ~azure.mgmt.network.v2018_06_01.models.NetworkSecurityGroup
    :param route_table: The reference of the RouteTable resource.
    :type route_table: ~azure.mgmt.network.v2018_06_01.models.RouteTable
    :param service_endpoints: An array of service endpoints.
    :type service_endpoints:
     list[~azure.mgmt.network.v2018_06_01.models.ServiceEndpointPropertiesFormat]
    :ivar ip_configurations: Gets an array of references to the network interface IP
     configurations using subnet.
    :vartype ip_configurations: list[~azure.mgmt.network.v2018_06_01.models.IPConfiguration]
    :param resource_navigation_links: Gets an array of references to the external resources using
     subnet.
    :type resource_navigation_links:
     list[~azure.mgmt.network.v2018_06_01.models.ResourceNavigationLink]
    :param provisioning_state: The provisioning state of the resource.
    :type provisioning_state: str
    """
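    # Illustrative sketch (not generated code): a subnet with an NSG, a route
    # table, and a storage service endpoint. Names, IDs and prefixes below are
    # placeholders.
    #
    #   subnet = Subnet(
    #       name='app-subnet',
    #       address_prefix='10.0.1.0/24',
    #       network_security_group=NetworkSecurityGroup(id='<nsg resource id>'),
    #       route_table=RouteTable(id='<route table resource id>'),
    #       service_endpoints=[ServiceEndpointPropertiesFormat(
    #           service='Microsoft.Storage', locations=['westus'])],
    #   )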
    _validation = {
        'ip_configurations': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'address_prefix': {'key': 'properties.addressPrefix', 'type': 'str'},
        'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'NetworkSecurityGroup'},
        'route_table': {'key': 'properties.routeTable', 'type': 'RouteTable'},
        'service_endpoints': {'key': 'properties.serviceEndpoints', 'type': '[ServiceEndpointPropertiesFormat]'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[IPConfiguration]'},
        'resource_navigation_links': {'key': 'properties.resourceNavigationLinks', 'type': '[ResourceNavigationLink]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(Subnet, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.address_prefix = kwargs.get('address_prefix', None)
        self.network_security_group = kwargs.get('network_security_group', None)
        self.route_table = kwargs.get('route_table', None)
        self.service_endpoints = kwargs.get('service_endpoints', None)
        self.ip_configurations = None
        self.resource_navigation_links = kwargs.get('resource_navigation_links', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class SubnetAssociation(msrest.serialization.Model):
    """Subnet and its custom security rules.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Subnet ID.
    :vartype id: str
    :param security_rules: Collection of custom security rules.
    :type security_rules: list[~azure.mgmt.network.v2018_06_01.models.SecurityRule]
    """

    _validation = {
        'id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'security_rules': {'key': 'securityRules', 'type': '[SecurityRule]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(SubnetAssociation, self).__init__(**kwargs)
        self.id = None
        self.security_rules = kwargs.get('security_rules', None)


class SubnetListResult(msrest.serialization.Model):
    """Response for ListSubnets API service call. Retrieves all subnets that belong to a virtual
    network.

    :param value: The subnets in a virtual network.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.Subnet]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Subnet]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(SubnetListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class TagsObject(msrest.serialization.Model):
    """Tags object for patch operations.

    :param tags: A set of tags. Resource tags.
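
    Example (illustrative sketch only; tag names and values are placeholders)::

        body = TagsObject(tags={'environment': 'production', 'costCenter': '1234'})
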
:type tags: dict[str, str] """ _attribute_map = { 'tags': {'key': 'tags', 'type': '{str}'}, } def __init__( self, **kwargs ): super(TagsObject, self).__init__(**kwargs) self.tags = kwargs.get('tags', None) class Topology(msrest.serialization.Model): """Topology of the specified resource group. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: GUID representing the operation id. :vartype id: str :ivar created_date_time: The datetime when the topology was initially created for the resource group. :vartype created_date_time: ~datetime.datetime :ivar last_modified: The datetime when the topology was last modified. :vartype last_modified: ~datetime.datetime :param resources: :type resources: list[~azure.mgmt.network.v2018_06_01.models.TopologyResource] """ _validation = { 'id': {'readonly': True}, 'created_date_time': {'readonly': True}, 'last_modified': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, 'resources': {'key': 'resources', 'type': '[TopologyResource]'}, } def __init__( self, **kwargs ): super(Topology, self).__init__(**kwargs) self.id = None self.created_date_time = None self.last_modified = None self.resources = kwargs.get('resources', None) class TopologyAssociation(msrest.serialization.Model): """Resources that have an association with the parent resource. :param name: The name of the resource that is associated with the parent resource. :type name: str :param resource_id: The ID of the resource that is associated with the parent resource. :type resource_id: str :param association_type: The association type of the child resource to the parent resource. Possible values include: "Associated", "Contains". :type association_type: str or ~azure.mgmt.network.v2018_06_01.models.AssociationType """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'resource_id': {'key': 'resourceId', 'type': 'str'}, 'association_type': {'key': 'associationType', 'type': 'str'}, } def __init__( self, **kwargs ): super(TopologyAssociation, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.resource_id = kwargs.get('resource_id', None) self.association_type = kwargs.get('association_type', None) class TopologyParameters(msrest.serialization.Model): """Parameters that define the representation of topology. :param target_resource_group_name: The name of the target resource group to perform topology on. :type target_resource_group_name: str :param target_virtual_network: The reference of the Virtual Network resource. :type target_virtual_network: ~azure.mgmt.network.v2018_06_01.models.SubResource :param target_subnet: The reference of the Subnet resource. 
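
    Example (illustrative sketch only; the resource group name is a placeholder)::

        params = TopologyParameters(target_resource_group_name='my-rg')
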
:type target_subnet: ~azure.mgmt.network.v2018_06_01.models.SubResource """ _attribute_map = { 'target_resource_group_name': {'key': 'targetResourceGroupName', 'type': 'str'}, 'target_virtual_network': {'key': 'targetVirtualNetwork', 'type': 'SubResource'}, 'target_subnet': {'key': 'targetSubnet', 'type': 'SubResource'}, } def __init__( self, **kwargs ): super(TopologyParameters, self).__init__(**kwargs) self.target_resource_group_name = kwargs.get('target_resource_group_name', None) self.target_virtual_network = kwargs.get('target_virtual_network', None) self.target_subnet = kwargs.get('target_subnet', None) class TopologyResource(msrest.serialization.Model): """The network resource topology information for the given resource group. :param name: Name of the resource. :type name: str :param id: ID of the resource. :type id: str :param location: Resource location. :type location: str :param associations: Holds the associations the resource has with other resources in the resource group. :type associations: list[~azure.mgmt.network.v2018_06_01.models.TopologyAssociation] """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'associations': {'key': 'associations', 'type': '[TopologyAssociation]'}, } def __init__( self, **kwargs ): super(TopologyResource, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.id = kwargs.get('id', None) self.location = kwargs.get('location', None) self.associations = kwargs.get('associations', None) class TrafficAnalyticsConfigurationProperties(msrest.serialization.Model): """Parameters that define the configuration of traffic analytics. All required parameters must be populated in order to send to Azure. :param enabled: Required. Flag to enable/disable traffic analytics. :type enabled: bool :param workspace_id: Required. The resource guid of the attached workspace. :type workspace_id: str :param workspace_region: Required. The location of the attached workspace. :type workspace_region: str :param workspace_resource_id: Required. Resource Id of the attached workspace. :type workspace_resource_id: str """ _validation = { 'enabled': {'required': True}, 'workspace_id': {'required': True}, 'workspace_region': {'required': True}, 'workspace_resource_id': {'required': True}, } _attribute_map = { 'enabled': {'key': 'enabled', 'type': 'bool'}, 'workspace_id': {'key': 'workspaceId', 'type': 'str'}, 'workspace_region': {'key': 'workspaceRegion', 'type': 'str'}, 'workspace_resource_id': {'key': 'workspaceResourceId', 'type': 'str'}, } def __init__( self, **kwargs ): super(TrafficAnalyticsConfigurationProperties, self).__init__(**kwargs) self.enabled = kwargs['enabled'] self.workspace_id = kwargs['workspace_id'] self.workspace_region = kwargs['workspace_region'] self.workspace_resource_id = kwargs['workspace_resource_id'] class TrafficAnalyticsProperties(msrest.serialization.Model): """Parameters that define the configuration of traffic analytics. All required parameters must be populated in order to send to Azure. :param network_watcher_flow_analytics_configuration: Required. Parameters that define the configuration of traffic analytics. 
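
    Example (illustrative sketch only; the workspace values are placeholders)::

        props = TrafficAnalyticsProperties(
            network_watcher_flow_analytics_configuration=TrafficAnalyticsConfigurationProperties(
                enabled=True,
                workspace_id='<workspace guid>',
                workspace_region='westus',
                workspace_resource_id='<workspace resource id>',
            ),
        )
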
:type network_watcher_flow_analytics_configuration: ~azure.mgmt.network.v2018_06_01.models.TrafficAnalyticsConfigurationProperties """ _validation = { 'network_watcher_flow_analytics_configuration': {'required': True}, } _attribute_map = { 'network_watcher_flow_analytics_configuration': {'key': 'networkWatcherFlowAnalyticsConfiguration', 'type': 'TrafficAnalyticsConfigurationProperties'}, } def __init__( self, **kwargs ): super(TrafficAnalyticsProperties, self).__init__(**kwargs) self.network_watcher_flow_analytics_configuration = kwargs['network_watcher_flow_analytics_configuration'] class TrafficQuery(msrest.serialization.Model): """Parameters to compare with network configuration. All required parameters must be populated in order to send to Azure. :param direction: Required. The direction of the traffic. Accepted values are 'Inbound' and 'Outbound'. Possible values include: "Inbound", "Outbound". :type direction: str or ~azure.mgmt.network.v2018_06_01.models.Direction :param protocol: Required. Protocol to be verified on. Accepted values are '*', TCP, UDP. :type protocol: str :param source: Required. Traffic source. Accepted values are '*', IP Address/CIDR, Service Tag. :type source: str :param destination: Required. Traffic destination. Accepted values are: '*', IP Address/CIDR, Service Tag. :type destination: str :param destination_port: Required. Traffic destination port. Accepted values are '*', port (for example, 3389) and port range (for example, 80-100). :type destination_port: str """ _validation = { 'direction': {'required': True}, 'protocol': {'required': True}, 'source': {'required': True}, 'destination': {'required': True}, 'destination_port': {'required': True}, } _attribute_map = { 'direction': {'key': 'direction', 'type': 'str'}, 'protocol': {'key': 'protocol', 'type': 'str'}, 'source': {'key': 'source', 'type': 'str'}, 'destination': {'key': 'destination', 'type': 'str'}, 'destination_port': {'key': 'destinationPort', 'type': 'str'}, } def __init__( self, **kwargs ): super(TrafficQuery, self).__init__(**kwargs) self.direction = kwargs['direction'] self.protocol = kwargs['protocol'] self.source = kwargs['source'] self.destination = kwargs['destination'] self.destination_port = kwargs['destination_port'] class TroubleshootingDetails(msrest.serialization.Model): """Information gained from troubleshooting of specified resource. :param id: The id of the get troubleshoot operation. :type id: str :param reason_type: Reason type of failure. :type reason_type: str :param summary: A summary of troubleshooting. :type summary: str :param detail: Details on troubleshooting results. :type detail: str :param recommended_actions: List of recommended actions. 
:type recommended_actions: list[~azure.mgmt.network.v2018_06_01.models.TroubleshootingRecommendedActions] """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'reason_type': {'key': 'reasonType', 'type': 'str'}, 'summary': {'key': 'summary', 'type': 'str'}, 'detail': {'key': 'detail', 'type': 'str'}, 'recommended_actions': {'key': 'recommendedActions', 'type': '[TroubleshootingRecommendedActions]'}, } def __init__( self, **kwargs ): super(TroubleshootingDetails, self).__init__(**kwargs) self.id = kwargs.get('id', None) self.reason_type = kwargs.get('reason_type', None) self.summary = kwargs.get('summary', None) self.detail = kwargs.get('detail', None) self.recommended_actions = kwargs.get('recommended_actions', None) class TroubleshootingParameters(msrest.serialization.Model): """Parameters that define the resource to troubleshoot. All required parameters must be populated in order to send to Azure. :param target_resource_id: Required. The target resource to troubleshoot. :type target_resource_id: str :param storage_id: Required. The ID for the storage account to save the troubleshoot result. :type storage_id: str :param storage_path: Required. The path to the blob to save the troubleshoot result in. :type storage_path: str """ _validation = { 'target_resource_id': {'required': True}, 'storage_id': {'required': True}, 'storage_path': {'required': True}, } _attribute_map = { 'target_resource_id': {'key': 'targetResourceId', 'type': 'str'}, 'storage_id': {'key': 'properties.storageId', 'type': 'str'}, 'storage_path': {'key': 'properties.storagePath', 'type': 'str'}, } def __init__( self, **kwargs ): super(TroubleshootingParameters, self).__init__(**kwargs) self.target_resource_id = kwargs['target_resource_id'] self.storage_id = kwargs['storage_id'] self.storage_path = kwargs['storage_path'] class TroubleshootingRecommendedActions(msrest.serialization.Model): """Recommended actions based on discovered issues. :param action_id: ID of the recommended action. :type action_id: str :param action_text: Description of recommended actions. :type action_text: str :param action_uri: The uri linking to a documentation for the recommended troubleshooting actions. :type action_uri: str :param action_uri_text: The information from the URI for the recommended troubleshooting actions. :type action_uri_text: str """ _attribute_map = { 'action_id': {'key': 'actionId', 'type': 'str'}, 'action_text': {'key': 'actionText', 'type': 'str'}, 'action_uri': {'key': 'actionUri', 'type': 'str'}, 'action_uri_text': {'key': 'actionUriText', 'type': 'str'}, } def __init__( self, **kwargs ): super(TroubleshootingRecommendedActions, self).__init__(**kwargs) self.action_id = kwargs.get('action_id', None) self.action_text = kwargs.get('action_text', None) self.action_uri = kwargs.get('action_uri', None) self.action_uri_text = kwargs.get('action_uri_text', None) class TroubleshootingResult(msrest.serialization.Model): """Troubleshooting information gained from specified resource. :param start_time: The start time of the troubleshooting. :type start_time: ~datetime.datetime :param end_time: The end time of the troubleshooting. :type end_time: ~datetime.datetime :param code: The result code of the troubleshooting. :type code: str :param results: Information from troubleshooting. 
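
    Example (illustrative sketch; assumes ``result`` is a TroubleshootingResult
    instance returned by the service)::

        for detail in (result.results or []):
            print(detail.summary)
            for action in (detail.recommended_actions or []):
                print(' -', action.action_text)
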
:type results: list[~azure.mgmt.network.v2018_06_01.models.TroubleshootingDetails] """ _attribute_map = { 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, 'code': {'key': 'code', 'type': 'str'}, 'results': {'key': 'results', 'type': '[TroubleshootingDetails]'}, } def __init__( self, **kwargs ): super(TroubleshootingResult, self).__init__(**kwargs) self.start_time = kwargs.get('start_time', None) self.end_time = kwargs.get('end_time', None) self.code = kwargs.get('code', None) self.results = kwargs.get('results', None) class TunnelConnectionHealth(msrest.serialization.Model): """VirtualNetworkGatewayConnection properties. Variables are only populated by the server, and will be ignored when sending a request. :ivar tunnel: Tunnel name. :vartype tunnel: str :ivar connection_status: Virtual network Gateway connection status. Possible values include: "Unknown", "Connecting", "Connected", "NotConnected". :vartype connection_status: str or ~azure.mgmt.network.v2018_06_01.models.VirtualNetworkGatewayConnectionStatus :ivar ingress_bytes_transferred: The Ingress Bytes Transferred in this connection. :vartype ingress_bytes_transferred: long :ivar egress_bytes_transferred: The Egress Bytes Transferred in this connection. :vartype egress_bytes_transferred: long :ivar last_connection_established_utc_time: The time at which connection was established in Utc format. :vartype last_connection_established_utc_time: str """ _validation = { 'tunnel': {'readonly': True}, 'connection_status': {'readonly': True}, 'ingress_bytes_transferred': {'readonly': True}, 'egress_bytes_transferred': {'readonly': True}, 'last_connection_established_utc_time': {'readonly': True}, } _attribute_map = { 'tunnel': {'key': 'tunnel', 'type': 'str'}, 'connection_status': {'key': 'connectionStatus', 'type': 'str'}, 'ingress_bytes_transferred': {'key': 'ingressBytesTransferred', 'type': 'long'}, 'egress_bytes_transferred': {'key': 'egressBytesTransferred', 'type': 'long'}, 'last_connection_established_utc_time': {'key': 'lastConnectionEstablishedUtcTime', 'type': 'str'}, } def __init__( self, **kwargs ): super(TunnelConnectionHealth, self).__init__(**kwargs) self.tunnel = None self.connection_status = None self.ingress_bytes_transferred = None self.egress_bytes_transferred = None self.last_connection_established_utc_time = None class Usage(msrest.serialization.Model): """Describes network resource usage. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: Resource identifier. :vartype id: str :param unit: Required. An enum describing the unit of measurement. Possible values include: "Count". :type unit: str or ~azure.mgmt.network.v2018_06_01.models.UsageUnit :param current_value: Required. The current value of the usage. :type current_value: long :param limit: Required. The limit of usage. :type limit: long :param name: Required. The name of the type of usage. 
:type name: ~azure.mgmt.network.v2018_06_01.models.UsageName """ _validation = { 'id': {'readonly': True}, 'unit': {'required': True}, 'current_value': {'required': True}, 'limit': {'required': True}, 'name': {'required': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'unit': {'key': 'unit', 'type': 'str'}, 'current_value': {'key': 'currentValue', 'type': 'long'}, 'limit': {'key': 'limit', 'type': 'long'}, 'name': {'key': 'name', 'type': 'UsageName'}, } def __init__( self, **kwargs ): super(Usage, self).__init__(**kwargs) self.id = None self.unit = kwargs['unit'] self.current_value = kwargs['current_value'] self.limit = kwargs['limit'] self.name = kwargs['name'] class UsageName(msrest.serialization.Model): """The usage names. :param value: A string describing the resource name. :type value: str :param localized_value: A localized string describing the resource name. :type localized_value: str """ _attribute_map = { 'value': {'key': 'value', 'type': 'str'}, 'localized_value': {'key': 'localizedValue', 'type': 'str'}, } def __init__( self, **kwargs ): super(UsageName, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.localized_value = kwargs.get('localized_value', None) class UsagesListResult(msrest.serialization.Model): """The list usages operation response. :param value: The list network resource usages. :type value: list[~azure.mgmt.network.v2018_06_01.models.Usage] :param next_link: URL to get the next set of results. :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[Usage]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(UsagesListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class VerificationIPFlowParameters(msrest.serialization.Model): """Parameters that define the IP flow to be verified. All required parameters must be populated in order to send to Azure. :param target_resource_id: Required. The ID of the target resource to perform next-hop on. :type target_resource_id: str :param direction: Required. The direction of the packet represented as a 5-tuple. Possible values include: "Inbound", "Outbound". :type direction: str or ~azure.mgmt.network.v2018_06_01.models.Direction :param protocol: Required. Protocol to be verified on. Possible values include: "TCP", "UDP". :type protocol: str or ~azure.mgmt.network.v2018_06_01.models.IpFlowProtocol :param local_port: Required. The local port. Acceptable values are a single integer in the range (0-65535). Support for * for the source port, which depends on the direction. :type local_port: str :param remote_port: Required. The remote port. Acceptable values are a single integer in the range (0-65535). Support for * for the source port, which depends on the direction. :type remote_port: str :param local_ip_address: Required. The local IP address. Acceptable values are valid IPv4 addresses. :type local_ip_address: str :param remote_ip_address: Required. The remote IP address. Acceptable values are valid IPv4 addresses. :type remote_ip_address: str :param target_nic_resource_id: The NIC ID. (If VM has multiple NICs and IP forwarding is enabled on any of them, then this parameter must be specified. Otherwise optional). 
:type target_nic_resource_id: str """ _validation = { 'target_resource_id': {'required': True}, 'direction': {'required': True}, 'protocol': {'required': True}, 'local_port': {'required': True}, 'remote_port': {'required': True}, 'local_ip_address': {'required': True}, 'remote_ip_address': {'required': True}, } _attribute_map = { 'target_resource_id': {'key': 'targetResourceId', 'type': 'str'}, 'direction': {'key': 'direction', 'type': 'str'}, 'protocol': {'key': 'protocol', 'type': 'str'}, 'local_port': {'key': 'localPort', 'type': 'str'}, 'remote_port': {'key': 'remotePort', 'type': 'str'}, 'local_ip_address': {'key': 'localIPAddress', 'type': 'str'}, 'remote_ip_address': {'key': 'remoteIPAddress', 'type': 'str'}, 'target_nic_resource_id': {'key': 'targetNicResourceId', 'type': 'str'}, } def __init__( self, **kwargs ): super(VerificationIPFlowParameters, self).__init__(**kwargs) self.target_resource_id = kwargs['target_resource_id'] self.direction = kwargs['direction'] self.protocol = kwargs['protocol'] self.local_port = kwargs['local_port'] self.remote_port = kwargs['remote_port'] self.local_ip_address = kwargs['local_ip_address'] self.remote_ip_address = kwargs['remote_ip_address'] self.target_nic_resource_id = kwargs.get('target_nic_resource_id', None) class VerificationIPFlowResult(msrest.serialization.Model): """Results of IP flow verification on the target resource. :param access: Indicates whether the traffic is allowed or denied. Possible values include: "Allow", "Deny". :type access: str or ~azure.mgmt.network.v2018_06_01.models.Access :param rule_name: Name of the rule. If input is not matched against any security rule, it is not displayed. :type rule_name: str """ _attribute_map = { 'access': {'key': 'access', 'type': 'str'}, 'rule_name': {'key': 'ruleName', 'type': 'str'}, } def __init__( self, **kwargs ): super(VerificationIPFlowResult, self).__init__(**kwargs) self.access = kwargs.get('access', None) self.rule_name = kwargs.get('rule_name', None) class VirtualHub(Resource): """VirtualHub Resource. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param location: Resource location. :type location: str :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :ivar etag: Gets a unique read-only string that changes whenever the resource is updated. :vartype etag: str :param virtual_wan: The VirtualWAN to which the VirtualHub belongs. :type virtual_wan: ~azure.mgmt.network.v2018_06_01.models.SubResource :param hub_virtual_network_connections: list of all vnet connections with this VirtualHub. :type hub_virtual_network_connections: list[~azure.mgmt.network.v2018_06_01.models.HubVirtualNetworkConnection] :param address_prefix: Address-prefix for this VirtualHub. :type address_prefix: str :param provisioning_state: The provisioning state of the resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed". 
:type provisioning_state: str or ~azure.mgmt.network.v2018_06_01.models.ProvisioningState """ _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, 'etag': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'etag': {'key': 'etag', 'type': 'str'}, 'virtual_wan': {'key': 'properties.virtualWan', 'type': 'SubResource'}, 'hub_virtual_network_connections': {'key': 'properties.hubVirtualNetworkConnections', 'type': '[HubVirtualNetworkConnection]'}, 'address_prefix': {'key': 'properties.addressPrefix', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(VirtualHub, self).__init__(**kwargs) self.etag = None self.virtual_wan = kwargs.get('virtual_wan', None) self.hub_virtual_network_connections = kwargs.get('hub_virtual_network_connections', None) self.address_prefix = kwargs.get('address_prefix', None) self.provisioning_state = kwargs.get('provisioning_state', None) class VirtualNetwork(Resource): """Virtual Network resource. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param location: Resource location. :type location: str :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param etag: Gets a unique read-only string that changes whenever the resource is updated. :type etag: str :param address_space: The AddressSpace that contains an array of IP address ranges that can be used by subnets. :type address_space: ~azure.mgmt.network.v2018_06_01.models.AddressSpace :param dhcp_options: The dhcpOptions that contains an array of DNS servers available to VMs deployed in the virtual network. :type dhcp_options: ~azure.mgmt.network.v2018_06_01.models.DhcpOptions :param subnets: A list of subnets in a Virtual Network. :type subnets: list[~azure.mgmt.network.v2018_06_01.models.Subnet] :param virtual_network_peerings: A list of peerings in a Virtual Network. :type virtual_network_peerings: list[~azure.mgmt.network.v2018_06_01.models.VirtualNetworkPeering] :param resource_guid: The resourceGuid property of the Virtual Network resource. :type resource_guid: str :param provisioning_state: The provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :type provisioning_state: str :param enable_ddos_protection: Indicates if DDoS protection is enabled for all the protected resources in the virtual network. It requires a DDoS protection plan associated with the resource. :type enable_ddos_protection: bool :param enable_vm_protection: Indicates if VM protection is enabled for all the subnets in the virtual network. :type enable_vm_protection: bool :param ddos_protection_plan: The DDoS protection plan associated with the virtual network. 
:type ddos_protection_plan: ~azure.mgmt.network.v2018_06_01.models.SubResource """ _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'etag': {'key': 'etag', 'type': 'str'}, 'address_space': {'key': 'properties.addressSpace', 'type': 'AddressSpace'}, 'dhcp_options': {'key': 'properties.dhcpOptions', 'type': 'DhcpOptions'}, 'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'}, 'virtual_network_peerings': {'key': 'properties.virtualNetworkPeerings', 'type': '[VirtualNetworkPeering]'}, 'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'enable_ddos_protection': {'key': 'properties.enableDdosProtection', 'type': 'bool'}, 'enable_vm_protection': {'key': 'properties.enableVmProtection', 'type': 'bool'}, 'ddos_protection_plan': {'key': 'properties.ddosProtectionPlan', 'type': 'SubResource'}, } def __init__( self, **kwargs ): super(VirtualNetwork, self).__init__(**kwargs) self.etag = kwargs.get('etag', None) self.address_space = kwargs.get('address_space', None) self.dhcp_options = kwargs.get('dhcp_options', None) self.subnets = kwargs.get('subnets', None) self.virtual_network_peerings = kwargs.get('virtual_network_peerings', None) self.resource_guid = kwargs.get('resource_guid', None) self.provisioning_state = kwargs.get('provisioning_state', None) self.enable_ddos_protection = kwargs.get('enable_ddos_protection', False) self.enable_vm_protection = kwargs.get('enable_vm_protection', False) self.ddos_protection_plan = kwargs.get('ddos_protection_plan', None) class VirtualNetworkConnectionGatewayReference(msrest.serialization.Model): """A reference to VirtualNetworkGateway or LocalNetworkGateway resource. All required parameters must be populated in order to send to Azure. :param id: Required. The ID of VirtualNetworkGateway or LocalNetworkGateway resource. :type id: str """ _validation = { 'id': {'required': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, } def __init__( self, **kwargs ): super(VirtualNetworkConnectionGatewayReference, self).__init__(**kwargs) self.id = kwargs['id'] class VirtualNetworkGateway(Resource): """A common class for general resource information. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param location: Resource location. :type location: str :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param etag: Gets a unique read-only string that changes whenever the resource is updated. :type etag: str :param ip_configurations: IP configurations for virtual network gateway. :type ip_configurations: list[~azure.mgmt.network.v2018_06_01.models.VirtualNetworkGatewayIPConfiguration] :param gateway_type: The type of this virtual network gateway. Possible values are: 'Vpn' and 'ExpressRoute'. Possible values include: "Vpn", "ExpressRoute". :type gateway_type: str or ~azure.mgmt.network.v2018_06_01.models.VirtualNetworkGatewayType :param vpn_type: The type of this virtual network gateway. Possible values are: 'PolicyBased' and 'RouteBased'. Possible values include: "PolicyBased", "RouteBased". 
:type vpn_type: str or ~azure.mgmt.network.v2018_06_01.models.VpnType :param enable_bgp: Whether BGP is enabled for this virtual network gateway or not. :type enable_bgp: bool :param active: ActiveActive flag. :type active: bool :param gateway_default_site: The reference of the LocalNetworkGateway resource which represents local network site having default routes. Assign Null value in case of removing existing default site setting. :type gateway_default_site: ~azure.mgmt.network.v2018_06_01.models.SubResource :param sku: The reference of the VirtualNetworkGatewaySku resource which represents the SKU selected for Virtual network gateway. :type sku: ~azure.mgmt.network.v2018_06_01.models.VirtualNetworkGatewaySku :param vpn_client_configuration: The reference of the VpnClientConfiguration resource which represents the P2S VpnClient configurations. :type vpn_client_configuration: ~azure.mgmt.network.v2018_06_01.models.VpnClientConfiguration :param bgp_settings: Virtual network gateway's BGP speaker settings. :type bgp_settings: ~azure.mgmt.network.v2018_06_01.models.BgpSettings :param resource_guid: The resource GUID property of the VirtualNetworkGateway resource. :type resource_guid: str :ivar provisioning_state: The provisioning state of the VirtualNetworkGateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :vartype provisioning_state: str """ _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'etag': {'key': 'etag', 'type': 'str'}, 'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[VirtualNetworkGatewayIPConfiguration]'}, 'gateway_type': {'key': 'properties.gatewayType', 'type': 'str'}, 'vpn_type': {'key': 'properties.vpnType', 'type': 'str'}, 'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'}, 'active': {'key': 'properties.activeActive', 'type': 'bool'}, 'gateway_default_site': {'key': 'properties.gatewayDefaultSite', 'type': 'SubResource'}, 'sku': {'key': 'properties.sku', 'type': 'VirtualNetworkGatewaySku'}, 'vpn_client_configuration': {'key': 'properties.vpnClientConfiguration', 'type': 'VpnClientConfiguration'}, 'bgp_settings': {'key': 'properties.bgpSettings', 'type': 'BgpSettings'}, 'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(VirtualNetworkGateway, self).__init__(**kwargs) self.etag = kwargs.get('etag', None) self.ip_configurations = kwargs.get('ip_configurations', None) self.gateway_type = kwargs.get('gateway_type', None) self.vpn_type = kwargs.get('vpn_type', None) self.enable_bgp = kwargs.get('enable_bgp', None) self.active = kwargs.get('active', None) self.gateway_default_site = kwargs.get('gateway_default_site', None) self.sku = kwargs.get('sku', None) self.vpn_client_configuration = kwargs.get('vpn_client_configuration', None) self.bgp_settings = kwargs.get('bgp_settings', None) self.resource_guid = kwargs.get('resource_guid', None) self.provisioning_state = None class VirtualNetworkGatewayConnection(Resource): """A common class for general resource information. Variables are only populated by the server, and will be ignored when sending a request. 
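
    A minimal construction sketch (illustrative only; the gateway models, location, and shared
    key below are hypothetical placeholders, not values defined by this module)::

        connection = VirtualNetworkGatewayConnection(
            location='westus',
            virtual_network_gateway1=gateway1,   # a VirtualNetworkGateway instance
            virtual_network_gateway2=gateway2,   # required only for Vnet2Vnet connections
            connection_type='Vnet2Vnet',
            routing_weight=10,
            shared_key='placeholder-shared-key',
        )
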
    All required parameters must be populated in order to send to Azure.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param etag: Gets a unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param authorization_key: The authorizationKey.
    :type authorization_key: str
    :param virtual_network_gateway1: Required. The reference to virtual network gateway resource.
    :type virtual_network_gateway1: ~azure.mgmt.network.v2018_06_01.models.VirtualNetworkGateway
    :param virtual_network_gateway2: The reference to virtual network gateway resource.
    :type virtual_network_gateway2: ~azure.mgmt.network.v2018_06_01.models.VirtualNetworkGateway
    :param local_network_gateway2: The reference to local network gateway resource.
    :type local_network_gateway2: ~azure.mgmt.network.v2018_06_01.models.LocalNetworkGateway
    :param connection_type: Required. Gateway connection type. Possible values are:
     'IPsec', 'Vnet2Vnet', 'ExpressRoute', and 'VPNClient'. Possible values include: "IPsec",
     "Vnet2Vnet", "ExpressRoute", "VPNClient".
    :type connection_type: str or
     ~azure.mgmt.network.v2018_06_01.models.VirtualNetworkGatewayConnectionType
    :param routing_weight: The routing weight.
    :type routing_weight: int
    :param shared_key: The IPSec shared key.
    :type shared_key: str
    :ivar connection_status: Virtual network Gateway connection status. Possible values are
     'Unknown', 'Connecting', 'Connected' and 'NotConnected'. Possible values include: "Unknown",
     "Connecting", "Connected", "NotConnected".
    :vartype connection_status: str or
     ~azure.mgmt.network.v2018_06_01.models.VirtualNetworkGatewayConnectionStatus
    :ivar tunnel_connection_status: Collection of all tunnels' connection health status.
    :vartype tunnel_connection_status:
     list[~azure.mgmt.network.v2018_06_01.models.TunnelConnectionHealth]
    :ivar egress_bytes_transferred: The egress bytes transferred in this connection.
    :vartype egress_bytes_transferred: long
    :ivar ingress_bytes_transferred: The ingress bytes transferred in this connection.
    :vartype ingress_bytes_transferred: long
    :param peer: The reference to peerings resource.
    :type peer: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param enable_bgp: EnableBgp flag.
    :type enable_bgp: bool
    :param use_policy_based_traffic_selectors: Enable policy-based traffic selectors.
    :type use_policy_based_traffic_selectors: bool
    :param ipsec_policies: The IPSec Policies to be considered by this connection.
    :type ipsec_policies: list[~azure.mgmt.network.v2018_06_01.models.IpsecPolicy]
    :param resource_guid: The resource GUID property of the VirtualNetworkGatewayConnection
     resource.
    :type resource_guid: str
    :ivar provisioning_state: The provisioning state of the VirtualNetworkGatewayConnection
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:vartype provisioning_state: str """ _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, 'virtual_network_gateway1': {'required': True}, 'connection_type': {'required': True}, 'connection_status': {'readonly': True}, 'tunnel_connection_status': {'readonly': True}, 'egress_bytes_transferred': {'readonly': True}, 'ingress_bytes_transferred': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'etag': {'key': 'etag', 'type': 'str'}, 'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'}, 'virtual_network_gateway1': {'key': 'properties.virtualNetworkGateway1', 'type': 'VirtualNetworkGateway'}, 'virtual_network_gateway2': {'key': 'properties.virtualNetworkGateway2', 'type': 'VirtualNetworkGateway'}, 'local_network_gateway2': {'key': 'properties.localNetworkGateway2', 'type': 'LocalNetworkGateway'}, 'connection_type': {'key': 'properties.connectionType', 'type': 'str'}, 'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'}, 'shared_key': {'key': 'properties.sharedKey', 'type': 'str'}, 'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'}, 'tunnel_connection_status': {'key': 'properties.tunnelConnectionStatus', 'type': '[TunnelConnectionHealth]'}, 'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'}, 'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'}, 'peer': {'key': 'properties.peer', 'type': 'SubResource'}, 'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'}, 'use_policy_based_traffic_selectors': {'key': 'properties.usePolicyBasedTrafficSelectors', 'type': 'bool'}, 'ipsec_policies': {'key': 'properties.ipsecPolicies', 'type': '[IpsecPolicy]'}, 'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(VirtualNetworkGatewayConnection, self).__init__(**kwargs) self.etag = kwargs.get('etag', None) self.authorization_key = kwargs.get('authorization_key', None) self.virtual_network_gateway1 = kwargs['virtual_network_gateway1'] self.virtual_network_gateway2 = kwargs.get('virtual_network_gateway2', None) self.local_network_gateway2 = kwargs.get('local_network_gateway2', None) self.connection_type = kwargs['connection_type'] self.routing_weight = kwargs.get('routing_weight', None) self.shared_key = kwargs.get('shared_key', None) self.connection_status = None self.tunnel_connection_status = None self.egress_bytes_transferred = None self.ingress_bytes_transferred = None self.peer = kwargs.get('peer', None) self.enable_bgp = kwargs.get('enable_bgp', None) self.use_policy_based_traffic_selectors = kwargs.get('use_policy_based_traffic_selectors', None) self.ipsec_policies = kwargs.get('ipsec_policies', None) self.resource_guid = kwargs.get('resource_guid', None) self.provisioning_state = None class VirtualNetworkGatewayConnectionListEntity(Resource): """A common class for general resource information. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. 
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param etag: Gets a unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param authorization_key: The authorizationKey.
    :type authorization_key: str
    :param virtual_network_gateway1: Required. The reference to virtual network gateway resource.
    :type virtual_network_gateway1:
     ~azure.mgmt.network.v2018_06_01.models.VirtualNetworkConnectionGatewayReference
    :param virtual_network_gateway2: The reference to virtual network gateway resource.
    :type virtual_network_gateway2:
     ~azure.mgmt.network.v2018_06_01.models.VirtualNetworkConnectionGatewayReference
    :param local_network_gateway2: The reference to local network gateway resource.
    :type local_network_gateway2:
     ~azure.mgmt.network.v2018_06_01.models.VirtualNetworkConnectionGatewayReference
    :param connection_type: Required. Gateway connection type. Possible values are:
     'IPsec', 'Vnet2Vnet', 'ExpressRoute', and 'VPNClient'. Possible values include: "IPsec",
     "Vnet2Vnet", "ExpressRoute", "VPNClient".
    :type connection_type: str or
     ~azure.mgmt.network.v2018_06_01.models.VirtualNetworkGatewayConnectionType
    :param routing_weight: The routing weight.
    :type routing_weight: int
    :param shared_key: The IPSec shared key.
    :type shared_key: str
    :ivar connection_status: Virtual network Gateway connection status. Possible values are
     'Unknown', 'Connecting', 'Connected' and 'NotConnected'. Possible values include: "Unknown",
     "Connecting", "Connected", "NotConnected".
    :vartype connection_status: str or
     ~azure.mgmt.network.v2018_06_01.models.VirtualNetworkGatewayConnectionStatus
    :ivar tunnel_connection_status: Collection of all tunnels' connection health status.
    :vartype tunnel_connection_status:
     list[~azure.mgmt.network.v2018_06_01.models.TunnelConnectionHealth]
    :ivar egress_bytes_transferred: The egress bytes transferred in this connection.
    :vartype egress_bytes_transferred: long
    :ivar ingress_bytes_transferred: The ingress bytes transferred in this connection.
    :vartype ingress_bytes_transferred: long
    :param peer: The reference to peerings resource.
    :type peer: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param enable_bgp: EnableBgp flag.
    :type enable_bgp: bool
    :param use_policy_based_traffic_selectors: Enable policy-based traffic selectors.
    :type use_policy_based_traffic_selectors: bool
    :param ipsec_policies: The IPSec Policies to be considered by this connection.
    :type ipsec_policies: list[~azure.mgmt.network.v2018_06_01.models.IpsecPolicy]
    :param resource_guid: The resource GUID property of the VirtualNetworkGatewayConnection
     resource.
    :type resource_guid: str
    :ivar provisioning_state: The provisioning state of the VirtualNetworkGatewayConnection
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:vartype provisioning_state: str """ _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, 'virtual_network_gateway1': {'required': True}, 'connection_type': {'required': True}, 'connection_status': {'readonly': True}, 'tunnel_connection_status': {'readonly': True}, 'egress_bytes_transferred': {'readonly': True}, 'ingress_bytes_transferred': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'etag': {'key': 'etag', 'type': 'str'}, 'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'}, 'virtual_network_gateway1': {'key': 'properties.virtualNetworkGateway1', 'type': 'VirtualNetworkConnectionGatewayReference'}, 'virtual_network_gateway2': {'key': 'properties.virtualNetworkGateway2', 'type': 'VirtualNetworkConnectionGatewayReference'}, 'local_network_gateway2': {'key': 'properties.localNetworkGateway2', 'type': 'VirtualNetworkConnectionGatewayReference'}, 'connection_type': {'key': 'properties.connectionType', 'type': 'str'}, 'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'}, 'shared_key': {'key': 'properties.sharedKey', 'type': 'str'}, 'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'}, 'tunnel_connection_status': {'key': 'properties.tunnelConnectionStatus', 'type': '[TunnelConnectionHealth]'}, 'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'}, 'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'}, 'peer': {'key': 'properties.peer', 'type': 'SubResource'}, 'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'}, 'use_policy_based_traffic_selectors': {'key': 'properties.usePolicyBasedTrafficSelectors', 'type': 'bool'}, 'ipsec_policies': {'key': 'properties.ipsecPolicies', 'type': '[IpsecPolicy]'}, 'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(VirtualNetworkGatewayConnectionListEntity, self).__init__(**kwargs) self.etag = kwargs.get('etag', None) self.authorization_key = kwargs.get('authorization_key', None) self.virtual_network_gateway1 = kwargs['virtual_network_gateway1'] self.virtual_network_gateway2 = kwargs.get('virtual_network_gateway2', None) self.local_network_gateway2 = kwargs.get('local_network_gateway2', None) self.connection_type = kwargs['connection_type'] self.routing_weight = kwargs.get('routing_weight', None) self.shared_key = kwargs.get('shared_key', None) self.connection_status = None self.tunnel_connection_status = None self.egress_bytes_transferred = None self.ingress_bytes_transferred = None self.peer = kwargs.get('peer', None) self.enable_bgp = kwargs.get('enable_bgp', None) self.use_policy_based_traffic_selectors = kwargs.get('use_policy_based_traffic_selectors', None) self.ipsec_policies = kwargs.get('ipsec_policies', None) self.resource_guid = kwargs.get('resource_guid', None) self.provisioning_state = None class VirtualNetworkGatewayConnectionListResult(msrest.serialization.Model): """Response for the ListVirtualNetworkGatewayConnections API service call. Variables are only populated by the server, and will be ignored when sending a request. 
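
    A minimal construction sketch (illustrative; ``connection`` is a hypothetical
    VirtualNetworkGatewayConnection instance, and ``next_link`` stays ``None`` locally because
    it is read-only and populated by the service)::

        result = VirtualNetworkGatewayConnectionListResult(value=[connection])
        for conn in result.value:
            print(conn.connection_type)
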
    :param value: Gets a list of VirtualNetworkGatewayConnection resources that exist in a
     resource group.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.VirtualNetworkGatewayConnection]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualNetworkGatewayConnection]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VirtualNetworkGatewayConnectionListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


class VirtualNetworkGatewayIPConfiguration(SubResource):
    """IP configuration for virtual network gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can
     be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param private_ip_allocation_method: The private IP allocation method. Possible values are:
     'Static' and 'Dynamic'. Possible values include: "Static", "Dynamic".
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2018_06_01.models.IPAllocationMethod
    :param subnet: The reference of the subnet resource.
    :type subnet: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param public_ip_address: The reference of the public IP resource.
    :type public_ip_address: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :ivar provisioning_state: The provisioning state of the public IP resource. Possible values
     are: 'Updating', 'Deleting', and 'Failed'.
    :vartype provisioning_state: str
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VirtualNetworkGatewayIPConfiguration, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.private_ip_allocation_method = kwargs.get('private_ip_allocation_method', None)
        self.subnet = kwargs.get('subnet', None)
        self.public_ip_address = kwargs.get('public_ip_address', None)
        self.provisioning_state = None


class VirtualNetworkGatewayListConnectionsResult(msrest.serialization.Model):
    """Response for the VirtualNetworkGatewayListConnections API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: Gets a list of VirtualNetworkGatewayConnection resources that exist in a
     resource group.
    :type value:
     list[~azure.mgmt.network.v2018_06_01.models.VirtualNetworkGatewayConnectionListEntity]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualNetworkGatewayConnectionListEntity]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VirtualNetworkGatewayListConnectionsResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


class VirtualNetworkGatewayListResult(msrest.serialization.Model):
    """Response for the ListVirtualNetworkGateways API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: Gets a list of VirtualNetworkGateway resources that exist in a resource group.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.VirtualNetworkGateway]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualNetworkGateway]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VirtualNetworkGatewayListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None


class VirtualNetworkGatewaySku(msrest.serialization.Model):
    """VirtualNetworkGatewaySku details.

    :param name: Gateway SKU name. Possible values include: "Basic", "HighPerformance",
     "Standard", "UltraPerformance", "VpnGw1", "VpnGw2", "VpnGw3", "VpnGw1AZ", "VpnGw2AZ",
     "VpnGw3AZ", "ErGw1AZ", "ErGw2AZ", "ErGw3AZ".
    :type name: str or ~azure.mgmt.network.v2018_06_01.models.VirtualNetworkGatewaySkuName
    :param tier: Gateway SKU tier. Possible values include: "Basic", "HighPerformance",
     "Standard", "UltraPerformance", "VpnGw1", "VpnGw2", "VpnGw3", "VpnGw1AZ", "VpnGw2AZ",
     "VpnGw3AZ", "ErGw1AZ", "ErGw2AZ", "ErGw3AZ".
    :type tier: str or ~azure.mgmt.network.v2018_06_01.models.VirtualNetworkGatewaySkuTier
    :param capacity: The capacity.
    :type capacity: int
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'capacity': {'key': 'capacity', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VirtualNetworkGatewaySku, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.tier = kwargs.get('tier', None)
        self.capacity = kwargs.get('capacity', None)


class VirtualNetworkListResult(msrest.serialization.Model):
    """Response for the ListVirtualNetworks API service call.

    :param value: Gets a list of VirtualNetwork resources in a resource group.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.VirtualNetwork]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualNetwork]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VirtualNetworkListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class VirtualNetworkListUsageResult(msrest.serialization.Model):
    """Response for the virtual networks GetUsage API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: VirtualNetwork usage stats.
    :vartype value: list[~azure.mgmt.network.v2018_06_01.models.VirtualNetworkUsage]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _validation = {
        'value': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualNetworkUsage]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VirtualNetworkListUsageResult, self).__init__(**kwargs)
        self.value = None
        self.next_link = kwargs.get('next_link', None)


class VirtualNetworkPeering(SubResource):
    """Peerings in a virtual network resource.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can
     be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param allow_virtual_network_access: Whether the VMs in the linked virtual network space
     would be able to access all the VMs in local Virtual network space.
    :type allow_virtual_network_access: bool
    :param allow_forwarded_traffic: Whether the forwarded traffic from the VMs in the remote
     virtual network will be allowed/disallowed.
    :type allow_forwarded_traffic: bool
    :param allow_gateway_transit: If gateway links can be used in remote virtual networking to
     link to this virtual network.
    :type allow_gateway_transit: bool
    :param use_remote_gateways: If remote gateways can be used on this virtual network. If the
     flag is set to true, and allowGatewayTransit on remote peering is also true, virtual network
     will use gateways of remote virtual network for transit. Only one peering can have this flag
     set to true. This flag cannot be set if virtual network already has a gateway.
    :type use_remote_gateways: bool
    :param remote_virtual_network: The reference of the remote virtual network. The remote
     virtual network can be in the same or different region (preview). See here to register for
     the preview and learn more
     (https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-create-peering).
    :type remote_virtual_network: ~azure.mgmt.network.v2018_06_01.models.SubResource
    :param remote_address_space: The reference of the remote virtual network address space.
    :type remote_address_space: ~azure.mgmt.network.v2018_06_01.models.AddressSpace
    :param peering_state: The status of the virtual network peering. Possible values are
     'Initiated', 'Connected', and 'Disconnected'. Possible values include: "Initiated",
     "Connected", "Disconnected".
    :type peering_state: str or ~azure.mgmt.network.v2018_06_01.models.VirtualNetworkPeeringState
    :param provisioning_state: The provisioning state of the resource.
    :type provisioning_state: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'allow_virtual_network_access': {'key': 'properties.allowVirtualNetworkAccess', 'type': 'bool'},
        'allow_forwarded_traffic': {'key': 'properties.allowForwardedTraffic', 'type': 'bool'},
        'allow_gateway_transit': {'key': 'properties.allowGatewayTransit', 'type': 'bool'},
        'use_remote_gateways': {'key': 'properties.useRemoteGateways', 'type': 'bool'},
        'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},
        'remote_address_space': {'key': 'properties.remoteAddressSpace', 'type': 'AddressSpace'},
        'peering_state': {'key': 'properties.peeringState', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VirtualNetworkPeering, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.allow_virtual_network_access = kwargs.get('allow_virtual_network_access', None)
        self.allow_forwarded_traffic = kwargs.get('allow_forwarded_traffic', None)
        self.allow_gateway_transit = kwargs.get('allow_gateway_transit', None)
        self.use_remote_gateways = kwargs.get('use_remote_gateways', None)
        self.remote_virtual_network = kwargs.get('remote_virtual_network', None)
        self.remote_address_space = kwargs.get('remote_address_space', None)
        self.peering_state = kwargs.get('peering_state', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)


class VirtualNetworkPeeringListResult(msrest.serialization.Model):
    """Response for ListVirtualNetworkPeerings API service call. Retrieves all peerings that
    belong to a virtual network.

    :param value: The peerings in a virtual network.
    :type value: list[~azure.mgmt.network.v2018_06_01.models.VirtualNetworkPeering]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualNetworkPeering]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VirtualNetworkPeeringListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class VirtualNetworkUsage(msrest.serialization.Model):
    """Usage details for subnet.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar current_value: Indicates number of IPs used from the Subnet.
    :vartype current_value: float
    :ivar id: Subnet identifier.
    :vartype id: str
    :ivar limit: Indicates the size of the subnet.
    :vartype limit: float
    :ivar name: The name containing common and localized value for usage.
    :vartype name: ~azure.mgmt.network.v2018_06_01.models.VirtualNetworkUsageName
    :ivar unit: Usage units. Returns 'Count'.
    :vartype unit: str
    """

    _validation = {
        'current_value': {'readonly': True},
        'id': {'readonly': True},
        'limit': {'readonly': True},
        'name': {'readonly': True},
        'unit': {'readonly': True},
    }

    _attribute_map = {
        'current_value': {'key': 'currentValue', 'type': 'float'},
        'id': {'key': 'id', 'type': 'str'},
        'limit': {'key': 'limit', 'type': 'float'},
        'name': {'key': 'name', 'type': 'VirtualNetworkUsageName'},
        'unit': {'key': 'unit', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VirtualNetworkUsage, self).__init__(**kwargs)
        self.current_value = None
        self.id = None
        self.limit = None
        self.name = None
        self.unit = None


class VirtualNetworkUsageName(msrest.serialization.Model):
    """Usage strings container.
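
    A reading sketch (illustrative; ``usage`` is assumed to be a VirtualNetworkUsage instance
    returned by the service, whose read-only ``name`` field holds this model)::

        name = usage.name
        print(name.value, name.localized_value)
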
    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar localized_value: Localized subnet size and usage string.
    :vartype localized_value: str
    :ivar value: Subnet size and usage string.
    :vartype value: str
    """

    _validation = {
        'localized_value': {'readonly': True},
        'value': {'readonly': True},
    }

    _attribute_map = {
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VirtualNetworkUsageName, self).__init__(**kwargs)
        self.localized_value = None
        self.value = None


class VirtualWAN(Resource):
    """VirtualWAN Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: Gets a unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param disable_vpn_encryption: Vpn encryption to be disabled or not.
    :type disable_vpn_encryption: bool
    :ivar virtual_hubs: List of VirtualHubs in the VirtualWAN.
    :vartype virtual_hubs: list[~azure.mgmt.network.v2018_06_01.models.SubResource]
    :ivar vpn_sites: List of VpnSites in the VirtualWAN.
    :vartype vpn_sites: list[~azure.mgmt.network.v2018_06_01.models.SubResource]
    :param provisioning_state: The provisioning state of the resource. Possible values include:
     "Succeeded", "Updating", "Deleting", "Failed".
    :type provisioning_state: str or ~azure.mgmt.network.v2018_06_01.models.ProvisioningState
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'virtual_hubs': {'readonly': True},
        'vpn_sites': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'disable_vpn_encryption': {'key': 'properties.disableVpnEncryption', 'type': 'bool'},
        'virtual_hubs': {'key': 'properties.virtualHubs', 'type': '[SubResource]'},
        'vpn_sites': {'key': 'properties.vpnSites', 'type': '[SubResource]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VirtualWAN, self).__init__(**kwargs)
        self.etag = None
        self.disable_vpn_encryption = kwargs.get('disable_vpn_encryption', None)
        self.virtual_hubs = None
        self.vpn_sites = None
        self.provisioning_state = kwargs.get('provisioning_state', None)


class VpnClientConfiguration(msrest.serialization.Model):
    """VpnClientConfiguration for P2S client.

    :param vpn_client_address_pool: The reference of the address space resource which represents
     Address space for P2S VpnClient.
    :type vpn_client_address_pool: ~azure.mgmt.network.v2018_06_01.models.AddressSpace
    :param vpn_client_root_certificates: VpnClientRootCertificate for virtual network gateway.
    :type vpn_client_root_certificates:
     list[~azure.mgmt.network.v2018_06_01.models.VpnClientRootCertificate]
    :param vpn_client_revoked_certificates: VpnClientRevokedCertificate for Virtual network
     gateway.
    :type vpn_client_revoked_certificates:
     list[~azure.mgmt.network.v2018_06_01.models.VpnClientRevokedCertificate]
    :param vpn_client_protocols: VpnClientProtocols for Virtual network gateway.
    :type vpn_client_protocols: list[str or
     ~azure.mgmt.network.v2018_06_01.models.VpnClientProtocol]
    :param vpn_client_ipsec_policies: VpnClientIpsecPolicies for virtual network gateway P2S
     client.
    :type vpn_client_ipsec_policies: list[~azure.mgmt.network.v2018_06_01.models.IpsecPolicy]
    :param radius_server_address: The radius server address property of the VirtualNetworkGateway
     resource for vpn client connection.
    :type radius_server_address: str
    :param radius_server_secret: The radius secret property of the VirtualNetworkGateway resource
     for vpn client connection.
    :type radius_server_secret: str
    """

    _attribute_map = {
        'vpn_client_address_pool': {'key': 'vpnClientAddressPool', 'type': 'AddressSpace'},
        'vpn_client_root_certificates': {'key': 'vpnClientRootCertificates', 'type': '[VpnClientRootCertificate]'},
        'vpn_client_revoked_certificates': {'key': 'vpnClientRevokedCertificates', 'type': '[VpnClientRevokedCertificate]'},
        'vpn_client_protocols': {'key': 'vpnClientProtocols', 'type': '[str]'},
        'vpn_client_ipsec_policies': {'key': 'vpnClientIpsecPolicies', 'type': '[IpsecPolicy]'},
        'radius_server_address': {'key': 'radiusServerAddress', 'type': 'str'},
        'radius_server_secret': {'key': 'radiusServerSecret', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VpnClientConfiguration, self).__init__(**kwargs)
        self.vpn_client_address_pool = kwargs.get('vpn_client_address_pool', None)
        self.vpn_client_root_certificates = kwargs.get('vpn_client_root_certificates', None)
        self.vpn_client_revoked_certificates = kwargs.get('vpn_client_revoked_certificates', None)
        self.vpn_client_protocols = kwargs.get('vpn_client_protocols', None)
        self.vpn_client_ipsec_policies = kwargs.get('vpn_client_ipsec_policies', None)
        self.radius_server_address = kwargs.get('radius_server_address', None)
        self.radius_server_secret = kwargs.get('radius_server_secret', None)


class VpnClientIPsecParameters(msrest.serialization.Model):
    """IPSec parameters for a virtual network gateway P2S connection.

    All required parameters must be populated in order to send to Azure.

    :param sa_life_time_seconds: Required. The IPSec Security Association (also called Quick Mode
     or Phase 2 SA) lifetime in seconds for P2S client.
    :type sa_life_time_seconds: int
    :param sa_data_size_kilobytes: Required. The IPSec Security Association (also called Quick
     Mode or Phase 2 SA) payload size in KB for P2S client.
    :type sa_data_size_kilobytes: int
    :param ipsec_encryption: Required. The IPSec encryption algorithm (IKE phase 1). Possible
     values include: "None", "DES", "DES3", "AES128", "AES192", "AES256", "GCMAES128",
     "GCMAES192", "GCMAES256".
    :type ipsec_encryption: str or ~azure.mgmt.network.v2018_06_01.models.IpsecEncryption
    :param ipsec_integrity: Required. The IPSec integrity algorithm (IKE phase 1). Possible
     values include: "MD5", "SHA1", "SHA256", "GCMAES128", "GCMAES192", "GCMAES256".
    :type ipsec_integrity: str or ~azure.mgmt.network.v2018_06_01.models.IpsecIntegrity
    :param ike_encryption: Required. The IKE encryption algorithm (IKE phase 2). Possible values
     include: "DES", "DES3", "AES128", "AES192", "AES256", "GCMAES256", "GCMAES128".
    :type ike_encryption: str or ~azure.mgmt.network.v2018_06_01.models.IkeEncryption
    :param ike_integrity: Required. The IKE integrity algorithm (IKE phase 2). Possible values
     include: "MD5", "SHA1", "SHA256", "SHA384", "GCMAES256", "GCMAES128".
    :type ike_integrity: str or ~azure.mgmt.network.v2018_06_01.models.IkeIntegrity
    :param dh_group: Required. The DH Groups used in IKE Phase 1 for initial SA.
Possible values include: "None", "DHGroup1", "DHGroup2", "DHGroup14", "DHGroup2048", "ECP256", "ECP384", "DHGroup24". :type dh_group: str or ~azure.mgmt.network.v2018_06_01.models.DhGroup :param pfs_group: Required. The Pfs Groups used in IKE Phase 2 for new child SA. Possible values include: "None", "PFS1", "PFS2", "PFS2048", "ECP256", "ECP384", "PFS24", "PFS14", "PFSMM". :type pfs_group: str or ~azure.mgmt.network.v2018_06_01.models.PfsGroup """ _validation = { 'sa_life_time_seconds': {'required': True}, 'sa_data_size_kilobytes': {'required': True}, 'ipsec_encryption': {'required': True}, 'ipsec_integrity': {'required': True}, 'ike_encryption': {'required': True}, 'ike_integrity': {'required': True}, 'dh_group': {'required': True}, 'pfs_group': {'required': True}, } _attribute_map = { 'sa_life_time_seconds': {'key': 'saLifeTimeSeconds', 'type': 'int'}, 'sa_data_size_kilobytes': {'key': 'saDataSizeKilobytes', 'type': 'int'}, 'ipsec_encryption': {'key': 'ipsecEncryption', 'type': 'str'}, 'ipsec_integrity': {'key': 'ipsecIntegrity', 'type': 'str'}, 'ike_encryption': {'key': 'ikeEncryption', 'type': 'str'}, 'ike_integrity': {'key': 'ikeIntegrity', 'type': 'str'}, 'dh_group': {'key': 'dhGroup', 'type': 'str'}, 'pfs_group': {'key': 'pfsGroup', 'type': 'str'}, } def __init__( self, **kwargs ): super(VpnClientIPsecParameters, self).__init__(**kwargs) self.sa_life_time_seconds = kwargs['sa_life_time_seconds'] self.sa_data_size_kilobytes = kwargs['sa_data_size_kilobytes'] self.ipsec_encryption = kwargs['ipsec_encryption'] self.ipsec_integrity = kwargs['ipsec_integrity'] self.ike_encryption = kwargs['ike_encryption'] self.ike_integrity = kwargs['ike_integrity'] self.dh_group = kwargs['dh_group'] self.pfs_group = kwargs['pfs_group'] class VpnClientParameters(msrest.serialization.Model): """Vpn Client Parameters for package generation. :param processor_architecture: VPN client Processor Architecture. Possible values are: 'AMD64' and 'X86'. Possible values include: "Amd64", "X86". :type processor_architecture: str or ~azure.mgmt.network.v2018_06_01.models.ProcessorArchitecture :param authentication_method: VPN client Authentication Method. Possible values are: 'EAPTLS' and 'EAPMSCHAPv2'. Possible values include: "EAPTLS", "EAPMSCHAPv2". :type authentication_method: str or ~azure.mgmt.network.v2018_06_01.models.AuthenticationMethod :param radius_server_auth_certificate: The public certificate data for the radius server authentication certificate as a Base-64 encoded string. Required only if external radius authentication has been configured with EAPTLS authentication. :type radius_server_auth_certificate: str :param client_root_certificates: A list of client root certificates public certificate data encoded as Base-64 strings. Optional parameter for external radius based authentication with EAPTLS. 
:type client_root_certificates: list[str] """ _attribute_map = { 'processor_architecture': {'key': 'processorArchitecture', 'type': 'str'}, 'authentication_method': {'key': 'authenticationMethod', 'type': 'str'}, 'radius_server_auth_certificate': {'key': 'radiusServerAuthCertificate', 'type': 'str'}, 'client_root_certificates': {'key': 'clientRootCertificates', 'type': '[str]'}, } def __init__( self, **kwargs ): super(VpnClientParameters, self).__init__(**kwargs) self.processor_architecture = kwargs.get('processor_architecture', None) self.authentication_method = kwargs.get('authentication_method', None) self.radius_server_auth_certificate = kwargs.get('radius_server_auth_certificate', None) self.client_root_certificates = kwargs.get('client_root_certificates', None) class VpnClientRevokedCertificate(SubResource): """VPN client revoked certificate of virtual network gateway. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :param name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :type name: str :param etag: A unique read-only string that changes whenever the resource is updated. :type etag: str :param thumbprint: The revoked VPN client certificate thumbprint. :type thumbprint: str :ivar provisioning_state: The provisioning state of the VPN client revoked certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :vartype provisioning_state: str """ _validation = { 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(VpnClientRevokedCertificate, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = kwargs.get('etag', None) self.thumbprint = kwargs.get('thumbprint', None) self.provisioning_state = None class VpnClientRootCertificate(SubResource): """VPN client root certificate of virtual network gateway. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param id: Resource ID. :type id: str :param name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :type name: str :param etag: A unique read-only string that changes whenever the resource is updated. :type etag: str :param public_cert_data: Required. The certificate public data. :type public_cert_data: str :ivar provisioning_state: The provisioning state of the VPN client root certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
:vartype provisioning_state: str """ _validation = { 'public_cert_data': {'required': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'public_cert_data': {'key': 'properties.publicCertData', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(VpnClientRootCertificate, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = kwargs.get('etag', None) self.public_cert_data = kwargs['public_cert_data'] self.provisioning_state = None class VpnConnection(SubResource): """VpnConnection Resource. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :param name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :type name: str :ivar etag: Gets a unique read-only string that changes whenever the resource is updated. :vartype etag: str :param remote_vpn_site: Id of the connected vpn site. :type remote_vpn_site: ~azure.mgmt.network.v2018_06_01.models.SubResource :param routing_weight: routing weight for vpn connection. :type routing_weight: int :ivar connection_status: The connection status. Possible values include: "Unknown", "Connecting", "Connected", "NotConnected". :vartype connection_status: str or ~azure.mgmt.network.v2018_06_01.models.VpnConnectionStatus :ivar ingress_bytes_transferred: Ingress bytes transferred. :vartype ingress_bytes_transferred: long :ivar egress_bytes_transferred: Egress bytes transferred. :vartype egress_bytes_transferred: long :ivar connection_bandwidth_in_mbps: Expected bandwidth in MBPS. :vartype connection_bandwidth_in_mbps: int :param shared_key: SharedKey for the vpn connection. :type shared_key: str :param enable_bgp: EnableBgp flag. :type enable_bgp: bool :param ipsec_policies: The IPSec Policies to be considered by this connection. :type ipsec_policies: list[~azure.mgmt.network.v2018_06_01.models.IpsecPolicy] :param provisioning_state: The provisioning state of the resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed". 
:type provisioning_state: str or ~azure.mgmt.network.v2018_06_01.models.ProvisioningState """ _validation = { 'etag': {'readonly': True}, 'connection_status': {'readonly': True}, 'ingress_bytes_transferred': {'readonly': True}, 'egress_bytes_transferred': {'readonly': True}, 'connection_bandwidth_in_mbps': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'remote_vpn_site': {'key': 'properties.remoteVpnSite', 'type': 'SubResource'}, 'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'}, 'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'}, 'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'}, 'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'}, 'connection_bandwidth_in_mbps': {'key': 'properties.connectionBandwidthInMbps', 'type': 'int'}, 'shared_key': {'key': 'properties.sharedKey', 'type': 'str'}, 'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'}, 'ipsec_policies': {'key': 'properties.ipsecPolicies', 'type': '[IpsecPolicy]'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(VpnConnection, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.etag = None self.remote_vpn_site = kwargs.get('remote_vpn_site', None) self.routing_weight = kwargs.get('routing_weight', None) self.connection_status = None self.ingress_bytes_transferred = None self.egress_bytes_transferred = None self.connection_bandwidth_in_mbps = None self.shared_key = kwargs.get('shared_key', None) self.enable_bgp = kwargs.get('enable_bgp', None) self.ipsec_policies = kwargs.get('ipsec_policies', None) self.provisioning_state = kwargs.get('provisioning_state', None) class VpnDeviceScriptParameters(msrest.serialization.Model): """Vpn device configuration script generation parameters. :param vendor: The vendor for the vpn device. :type vendor: str :param device_family: The device family for the vpn device. :type device_family: str :param firmware_version: The firmware version for the vpn device. :type firmware_version: str """ _attribute_map = { 'vendor': {'key': 'vendor', 'type': 'str'}, 'device_family': {'key': 'deviceFamily', 'type': 'str'}, 'firmware_version': {'key': 'firmwareVersion', 'type': 'str'}, } def __init__( self, **kwargs ): super(VpnDeviceScriptParameters, self).__init__(**kwargs) self.vendor = kwargs.get('vendor', None) self.device_family = kwargs.get('device_family', None) self.firmware_version = kwargs.get('firmware_version', None) class VpnGateway(Resource): """VpnGateway Resource. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param location: Resource location. :type location: str :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :ivar etag: Gets a unique read-only string that changes whenever the resource is updated. :vartype etag: str :param virtual_hub: The VirtualHub to which the gateway belongs. :type virtual_hub: ~azure.mgmt.network.v2018_06_01.models.SubResource :param connections: list of all vpn connections to the gateway. :type connections: list[~azure.mgmt.network.v2018_06_01.models.VpnConnection] :param bgp_settings: Local network gateway's BGP speaker settings. 
:type bgp_settings: ~azure.mgmt.network.v2018_06_01.models.BgpSettings :param provisioning_state: The provisioning state of the resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed". :type provisioning_state: str or ~azure.mgmt.network.v2018_06_01.models.ProvisioningState :param policies: The policies applied to this vpn gateway. :type policies: ~azure.mgmt.network.v2018_06_01.models.Policies """ _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, 'etag': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'etag': {'key': 'etag', 'type': 'str'}, 'virtual_hub': {'key': 'properties.virtualHub', 'type': 'SubResource'}, 'connections': {'key': 'properties.connections', 'type': '[VpnConnection]'}, 'bgp_settings': {'key': 'properties.bgpSettings', 'type': 'BgpSettings'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'policies': {'key': 'properties.policies', 'type': 'Policies'}, } def __init__( self, **kwargs ): super(VpnGateway, self).__init__(**kwargs) self.etag = None self.virtual_hub = kwargs.get('virtual_hub', None) self.connections = kwargs.get('connections', None) self.bgp_settings = kwargs.get('bgp_settings', None) self.provisioning_state = kwargs.get('provisioning_state', None) self.policies = kwargs.get('policies', None) class VpnSite(Resource): """VpnSite Resource. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param location: Resource location. :type location: str :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :ivar etag: Gets a unique read-only string that changes whenever the resource is updated. :vartype etag: str :param virtual_wan: The VirtualWAN to which the vpnSite belongs. :type virtual_wan: ~azure.mgmt.network.v2018_06_01.models.SubResource :param device_properties: The device properties. :type device_properties: ~azure.mgmt.network.v2018_06_01.models.DeviceProperties :param ip_address: The ip-address for the vpn-site. :type ip_address: str :param site_key: The key for vpn-site that can be used for connections. :type site_key: str :param address_space: The AddressSpace that contains an array of IP address ranges. :type address_space: ~azure.mgmt.network.v2018_06_01.models.AddressSpace :param bgp_properties: The set of bgp properties. :type bgp_properties: ~azure.mgmt.network.v2018_06_01.models.BgpSettings :param provisioning_state: The provisioning state of the resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed". 
:type provisioning_state: str or ~azure.mgmt.network.v2018_06_01.models.ProvisioningState """ _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, 'etag': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'etag': {'key': 'etag', 'type': 'str'}, 'virtual_wan': {'key': 'properties.virtualWAN', 'type': 'SubResource'}, 'device_properties': {'key': 'properties.deviceProperties', 'type': 'DeviceProperties'}, 'ip_address': {'key': 'properties.ipAddress', 'type': 'str'}, 'site_key': {'key': 'properties.siteKey', 'type': 'str'}, 'address_space': {'key': 'properties.addressSpace', 'type': 'AddressSpace'}, 'bgp_properties': {'key': 'properties.bgpProperties', 'type': 'BgpSettings'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(VpnSite, self).__init__(**kwargs) self.etag = None self.virtual_wan = kwargs.get('virtual_wan', None) self.device_properties = kwargs.get('device_properties', None) self.ip_address = kwargs.get('ip_address', None) self.site_key = kwargs.get('site_key', None) self.address_space = kwargs.get('address_space', None) self.bgp_properties = kwargs.get('bgp_properties', None) self.provisioning_state = kwargs.get('provisioning_state', None) class VpnSiteId(msrest.serialization.Model): """VpnSite Resource. Variables are only populated by the server, and will be ignored when sending a request. :ivar vpn_site: The resource-uri of the vpn-site for which config is to be fetched. :vartype vpn_site: str """ _validation = { 'vpn_site': {'readonly': True}, } _attribute_map = { 'vpn_site': {'key': 'vpnSite', 'type': 'str'}, } def __init__( self, **kwargs ): super(VpnSiteId, self).__init__(**kwargs) self.vpn_site = None
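# A minimal usage sketch for the kwargs-based models above (all values are
# illustrative assumptions; AddressSpace comes from the same models package):
#
#   site = VpnSite(
#       location='westus',
#       ip_address='203.0.113.10',
#       site_key='abc123',
#       address_space=AddressSpace(address_prefixes=['10.0.0.0/24']),
#   )
#   connection = VpnConnection(name='to-branch-1', routing_weight=10,
#                              enable_bgp=False)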
#!/usr/bin/env python import argparse import pandas as pd import numpy as np from math import floor import tqdm def main(): parser = argparse.ArgumentParser(description='Pairwise distances between MLST alleles') parser.add_argument('infile', type=str, help="Tab separated file containing alleles") parser.add_argument('outfile', type=str, help="Name for output file") args = parser.parse_args() alleles_in = pd.read_csv(args.infile, sep="\t", header=0, index_col=0, dtype=str) num_samples = len(alleles_in.index) dists = np.zeros((num_samples, num_samples), dtype=int) with tqdm.tqdm(total = int(0.5*num_samples*(num_samples-1))) as pbar: for i in range(num_samples): row1 = alleles_in.iloc[i, :].values for j in range(i+1, num_samples): row2 = alleles_in.iloc[j, :].values diffs = np.sum(np.not_equal(row1, row2)) dists[i,j] = diffs dists[j,i] = diffs pbar.update(1) dists_out = pd.DataFrame(dists, index=alleles_in.index, columns=alleles_in.index) dists_out.to_csv(args.outfile) print("Done\n") if __name__ == "__main__": main()
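# Example invocation (file names are hypothetical):
#   python pairwise_distances.py cgmlst_alleles.tsv allele_dists.csv
# Each output cell is the Hamming distance between two allele profiles; the
# inner comparison is equivalent to this minimal sketch:
#
#   >>> import numpy as np
#   >>> np.sum(np.not_equal(np.array(['1', '2', '3']), np.array(['1', '5', '3'])))
#   1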
#!/usr/bin/env python from ecies.utils import generate_eth_key import ecies privKey = generate_eth_key() privKeyHex = privKey.to_hex() pubKeyHex = privKey.public_key.to_hex() def encrypt(plaintext=None): return ecies.encrypt(pubKeyHex, plaintext) def decrypt(ciphertext=None): return ecies.decrypt(privKeyHex, ciphertext)
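# A quick round-trip check of the helpers above (the message is illustrative;
# eciespy's encrypt/decrypt take and return bytes):
if __name__ == '__main__':
    message = b'hello, ecies'
    ciphertext = encrypt(plaintext=message)
    assert decrypt(ciphertext=ciphertext) == message
    print('round trip OK')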
import functools

import numpy as np

from garage.experiment import deterministic
from garage.sampler import DefaultWorker
from iod.utils import get_np_concat_obs


class OptionWorker(DefaultWorker):
    def __init__(
            self,
            *,  # Require passing by keyword, since everything's an int.
            seed,
            max_path_length,
            worker_number,
            sampler_key):
        super().__init__(seed=seed,
                         max_path_length=max_path_length,
                         worker_number=worker_number)
        self._sampler_key = sampler_key
        self._max_path_length_override = None
        self._cur_extras = None
        self._cur_extra_idx = None
        self._cur_extra_keys = set()
        self._render = False
        self._deterministic_initial_state = None
        self._deterministic_policy = None

    def update_env(self, env_update):
        if env_update is not None:
            if isinstance(env_update, dict):
                for k, v in env_update.items():
                    setattr(self.env, k, v)
            else:
                super().update_env(env_update)

    def worker_init(self):
        """Initialize a worker."""
        if self._seed is not None:
            deterministic.set_seed(self._seed + self._worker_number * 10000)

    def update_worker(self, worker_update):
        if worker_update is not None:
            if isinstance(worker_update, dict):
                for k, v in worker_update.items():
                    setattr(self, k, v)
                    if k == '_cur_extras':
                        if v is None:
                            self._cur_extra_keys = set()
                        else:
                            if len(self._cur_extras) > 0:
                                self._cur_extra_keys = set(self._cur_extras[0].keys())
                            else:
                                # keep this a set so the membership checks in
                                # start_rollout/step_rollout stay valid
                                self._cur_extra_keys = set()
            else:
                raise TypeError('Unknown worker update type.')

    def get_attrs(self, keys):
        attr_dict = {}
        for key in keys:
            attr_dict[key] = functools.reduce(getattr, [self] + key.split('.'))
        return attr_dict

    def start_rollout(self):
        """Begin a new rollout."""
        if 'goal' in self._cur_extra_keys:
            goal = self._cur_extras[self._cur_extra_idx]['goal']
            reset_kwargs = {'goal': goal}
        else:
            reset_kwargs = {}

        env = self.env
        while hasattr(env, 'env'):
            env = getattr(env, 'env')
        if hasattr(env, 'fixed_initial_state') and self._deterministic_initial_state is not None:
            env.fixed_initial_state = self._deterministic_initial_state

        self._path_length = 0
        self._prev_obs = self.env.reset(**reset_kwargs)
        self._prev_extra = None
        self.agent.reset()

    def step_rollout(self):
        """Take a single time-step in the current rollout.

        Returns:
            bool: True iff the path is done, either due to the environment
            indicating termination or due to reaching `max_path_length`.
""" cur_max_path_length = self._max_path_length if self._max_path_length_override is None else self._max_path_length_override if self._path_length < cur_max_path_length: if 'option' in self._cur_extra_keys: cur_extra_key = 'option' else: cur_extra_key = None if cur_extra_key is None: agent_input = self._prev_obs else: if isinstance(self._cur_extras[self._cur_extra_idx][cur_extra_key], list): cur_extra = self._cur_extras[self._cur_extra_idx][cur_extra_key][self._path_length] if cur_extra is None: cur_extra = self._prev_extra self._cur_extras[self._cur_extra_idx][cur_extra_key][self._path_length] = cur_extra else: cur_extra = self._cur_extras[self._cur_extra_idx][cur_extra_key] agent_input = get_np_concat_obs( self._prev_obs, cur_extra, ) self._prev_extra = cur_extra if self._deterministic_policy is not None: self.agent._force_use_mode_actions = self._deterministic_policy a, agent_info = self.agent.get_action(agent_input) if self._render: next_o, r, d, env_info = self.env.step(a, render=self._render) else: next_o, r, d, env_info = self.env.step(a) self._observations.append(self._prev_obs) self._rewards.append(r) self._actions.append(a) for k, v in agent_info.items(): self._agent_infos[k].append(v) for k in self._cur_extra_keys: if isinstance(self._cur_extras[self._cur_extra_idx][k], list): self._agent_infos[k].append(self._cur_extras[self._cur_extra_idx][k][self._path_length]) else: self._agent_infos[k].append(self._cur_extras[self._cur_extra_idx][k]) for k, v in env_info.items(): self._env_infos[k].append(v) self._path_length += 1 self._terminals.append(d) if not d: self._prev_obs = next_o return False self._terminals[-1] = True self._lengths.append(self._path_length) self._last_observations.append(self._prev_obs) return True def rollout(self): """Sample a single rollout of the agent in the environment. Returns: garage.TrajectoryBatch: The collected trajectory. """ if self._cur_extras is not None: self._cur_extra_idx += 1 self.start_rollout() while not self.step_rollout(): pass return self.collect_rollout()
# -*- coding: utf8 -*-
import json
import random
import socket
from collections import OrderedDict
from time import sleep

import requests
from fake_useragent import UserAgent

import TickerConfig
from agency.agency_tools import proxy
from config import logger


def _set_header_default():
    header_dict = OrderedDict()
    # header_dict["Accept"] = "application/json, text/plain, */*"
    header_dict["Accept-Encoding"] = "gzip, deflate"
    header_dict["User-Agent"] = _set_user_agent()
    header_dict["Content-Type"] = "application/x-www-form-urlencoded; charset=UTF-8"
    header_dict["Origin"] = "https://kyfw.12306.cn"
    header_dict["Connection"] = "keep-alive"
    return header_dict


def _set_user_agent():
    # try:
    #     user_agent = UserAgent(verify_ssl=False).random
    #     return user_agent
    # except:
    #     print("Failed to set the User-Agent, falling back to the default one")
    #     return 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.' + str(
    #         random.randint(5000, 7000)) + '.0 Safari/537.36'
    return "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"


class HTTPClient(object):

    def __init__(self, is_proxy, cdnList=None):
        """
        cdnList rotates the non-query CDNs, so a polluted query CDN cannot
        contaminate the login and ordering CDNs.
        :param is_proxy: set to 1 to route traffic through the proxy pool
        :param cdnList: a list of candidate CDN hosts, or None
        """
        self.initS()
        self._cdn = None
        self.cdnList = cdnList
        self._proxies = None
        if is_proxy == 1:
            self.proxy = proxy()
            self._proxies = self.proxy.setProxy()
            # print(u"Current proxy ip is {}; please make sure the proxy ip is usable!!!!!".format(self._proxies))

    def initS(self):
        self._s = requests.Session()
        self._s.headers.update(_set_header_default())
        return self

    def set_cookies(self, kwargs):
        """
        Set cookies
        :param kwargs:
        :return:
        """
        for kwarg in kwargs:
            for k, v in kwarg.items():
                self._s.cookies.set(k, v)

    def get_cookies(self):
        """
        Get cookies
        :return:
        """
        return self._s.cookies.values()

    def del_cookies(self):
        """
        Delete all cookies
        :return:
        """
        self._s.cookies.clear()

    def del_cookies_by_key(self, key):
        """
        Delete the session cookie with the given key
        :return:
        """
        self._s.cookies.set(key, None)

    def setHeaders(self, headers):
        self._s.headers.update(headers)
        return self

    def resetHeaders(self):
        self._s.headers.clear()
        self._s.headers.update(_set_header_default())

    def getHeadersHost(self):
        return self._s.headers["Host"]

    def setHeadersHost(self, host):
        self._s.headers.update({"Host": host})
        return self

    def setHeadersUserAgent(self):
        self._s.headers.update({"User-Agent": _set_user_agent()})

    def getHeadersUserAgent(self):
        return self._s.headers["User-Agent"]

    def getHeadersReferer(self):
        return self._s.headers["Referer"]

    def setHeadersReferer(self, referer):
        self._s.headers.update({"Referer": referer})
        return self

    @property
    def cdn(self):
        return self._cdn

    @cdn.setter
    def cdn(self, cdn):
        self._cdn = cdn

    def send(self, urls, data=None, **kwargs):
        """Send a request to a url. If the response is 200, return the response, else return None."""
        allow_redirects = False
        is_logger = urls.get("is_logger", False)
        req_url = urls.get("req_url", "")
        re_try = urls.get("re_try", 0)
        s_time = urls.get("s_time", 0)
        is_cdn = urls.get("is_cdn", False)
        is_test_cdn = urls.get("is_test_cdn", False)
        error_data = {"code": 99999, "message": u"Retry limit reached"}
        if data:
            method = "post"
            self.setHeaders({"Content-Length": "{0}".format(len(data))})
        else:
            method = "get"
            self.resetHeaders()
        if TickerConfig.RANDOM_AGENT == 1:
            self.setHeadersUserAgent()
        self.setHeadersReferer(urls["Referer"])
        if is_logger:
            logger.log(
                u"url: {0}\npayload: {1}\nmethod: {2}\n".format(req_url, data, method))
        self.setHeadersHost(urls["Host"])
        if is_test_cdn:
            url_host = self._cdn
        elif is_cdn:
            if self._cdn:
                # print(u"Current request cdn is {}".format(self._cdn))
                url_host = self._cdn
            else:
                url_host = urls["Host"]
        else:
            url_host = urls["Host"]
        http = urls.get("httpType") or "https"
        for i in range(re_try):
            try:
                # sleep(urls["s_time"]) if "s_time" in urls else sleep(0.001)
                sleep(s_time)
                try:
                    requests.packages.urllib3.disable_warnings()
                except:
                    pass
                response = self._s.request(method=method,
                                           timeout=5,
                                           proxies=self._proxies,
                                           url=http + "://" + url_host + req_url,
                                           data=data,
                                           allow_redirects=allow_redirects,
                                           verify=False,
                                           **kwargs)
                if response.status_code == 200 or response.status_code == 302:
                    if urls.get("not_decode", False):
                        return response.content
                    if response.content:
                        if is_logger:
                            logger.log(
                                u"response: {0}".format(response.content.decode()))
                        if urls["is_json"]:
                            return json.loads(
                                response.content.decode() if isinstance(response.content, bytes) else response.content)
                        else:
                            return response.content.decode("utf8", "ignore") if isinstance(response.content, bytes) else response.content
                    else:
                        print(f"url: {urls['req_url']} returned an empty body, status code: {response.status_code}")
                        logger.log(
                            u"url: {} returned an empty body".format(urls["req_url"]))
                        if self.cdnList:
                            # If login or ordering hits a CDN 302, switch CDNs immediately;
                            # pick a random index that actually exists instead of a hard-coded 0..4
                            url_host = self.cdnList.pop(random.randint(0, len(self.cdnList) - 1))
                            continue
                        else:
                            sleep(urls["re_time"])
            except (requests.exceptions.Timeout, requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError):
                pass
            except socket.error:
                pass
        return error_data
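# A minimal usage sketch for HTTPClient (the host/path/timing values are
# illustrative assumptions, not taken from the module above):
#
#   client = HTTPClient(is_proxy=0)
#   urls = {
#       "req_url": "/otn/login/init",
#       "Host": "kyfw.12306.cn",
#       "Referer": "https://kyfw.12306.cn/otn/login/init",
#       "is_json": False,
#       "re_try": 3,
#       "s_time": 0.1,
#       "re_time": 1,
#   }
#   page = client.send(urls)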
# MIT License
#
# Copyright (c) 2017 Matt Boyer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import os
from pkg_resources import resource_stream
import re

import yaml

from . import _LOGGER
from . import PROJECT_NAME, USER_YAML_PATH, BUILTIN_YAML


class Heuristic(object):
    def __init__(self, magic, offset, grouping, table, name_regex=None):
        self._offset = offset
        self._table_name = table
        self._grouping = grouping
        self._magic_re = re.compile(magic)
        self._table_name_regex = None
        if name_regex is not None:
            self._table_name_regex = re.compile(name_regex)

    def __repr__(self):
        return "<Record heuristic for table \"{0}\"({1})>".format(
            self._table_name, self._grouping
        )

    def __call__(self, freeblock_bytes):
        # We need to unwind the full set of matches so we can traverse it
        # in reverse
        all_matches = [
            match for match in self._magic_re.finditer(freeblock_bytes)
        ]
        for magic_match in all_matches[::-1]:
            header_start = magic_match.start() - self._offset
            if header_start < 0:
                _LOGGER.debug("Header start outside of freeblock!")
                break
            yield header_start

    def match(self, table):
        if self._table_name_regex is not None:
            return bool(self._table_name_regex.match(table.name))
        else:
            return self._table_name == table.name


class HeuristicsRegistry(dict):

    def __init__(self):
        # dict.__init__ needs no arguments here; passing self in was a no-op
        super().__init__()

    @staticmethod
    def check_heuristic(magic, offset):
        assert isinstance(magic, bytes)
        assert isinstance(offset, int)
        assert offset >= 0

    def _load_from_yaml(self, yaml_string):
        if isinstance(yaml_string, bytes):
            yaml_string = yaml_string.decode('utf-8')
        # safe_load still handles the standard YAML tags (including !!binary)
        # but refuses to construct arbitrary Python objects from user files
        raw_yaml = yaml.safe_load(yaml_string)
        # TODO Find a more descriptive term than "table grouping"
        for table_grouping, tables in raw_yaml.items():
            _LOGGER.debug(
                "Loading YAML data for table grouping \"%s\"", table_grouping
            )
            grouping_tables = {}
            for table_name, table_props in tables.items():
                self.check_heuristic(
                    table_props['magic'], table_props['offset']
                )
                grouping_tables[table_name] = Heuristic(
                    table_props['magic'],
                    table_props['offset'],
                    table_grouping,
                    table_name,
                    name_regex=table_props.get('name_regex')
                )
                _LOGGER.debug("Loaded heuristics for \"%s\"", table_name)
            self[table_grouping] = grouping_tables

    def load_heuristics(self):
        with resource_stream(PROJECT_NAME, BUILTIN_YAML) as builtin:
            try:
                self._load_from_yaml(builtin.read())
            except KeyError:
                raise SystemError("Malformed builtin magic file")

        if not os.path.exists(USER_YAML_PATH):
            return
        with open(USER_YAML_PATH, 'r') as user_yaml:
            try:
                self._load_from_yaml(user_yaml.read())
            except KeyError:
                raise SystemError("Malformed user magic file")
@property def groupings(self): for db_name in sorted(self.keys()): yield db_name @property def all_tables(self): for db in self.groupings: for table in self[db].keys(): yield (db, table) def _get_heuristic_in_grouping(self, db_table, grouping): heuristic_name = None if grouping in self: for heuristic_name in self[grouping]: if self[grouping][heuristic_name].match(db_table): break else: # We haven't found a match within the grouping... what # shall we do? raise ValueError("No heuristic found") return self[grouping][heuristic_name] else: raise ValueError( "No heuristic defined for table \"%s\" in grouping \"%s\"" % (db_table.name, grouping) ) def _get_heuristic_in_all_groupings(self, db_table): grouping = None heuristic_name = None for grouping, heuristic_name in self.all_tables: if self[grouping][heuristic_name].match(db_table): break else: raise ValueError( "No heuristic defined for table \"%s\" in any grouping" % (db_table.name,) ) return self[grouping][heuristic_name] def get_heuristic(self, db_table, grouping): if grouping is not None: return self._get_heuristic_in_grouping(db_table, grouping) else: return self._get_heuristic_in_all_groupings(db_table)
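# A minimal usage sketch (the Table stub and table name are hypothetical
# stand-ins for the project's real table objects, which only need a `name`
# attribute here):
#
#   from collections import namedtuple
#   Table = namedtuple('Table', 'name')
#
#   registry = HeuristicsRegistry()
#   registry.load_heuristics()  # builtin YAML first, then the user's file
#   heuristic = registry.get_heuristic(Table(name='messages'), grouping=None)
#   for header_start in heuristic(freeblock_bytes):
#       ...  # candidate record-header offsets, scanned right to left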
#!/bin/python3
"""Main CI job script."""
import os
import subprocess
import time
from datetime import datetime

from github import Github

from kubeinit_ci_utils import remove_label, upload_logs

#
# We only execute the e2e jobs for those PR having
# `whitelist_domain` as part of the committer's email
#

# Every CI label follows the same naming scheme:
#   <distro>-<driver>-<masters>-master-<workers>-worker-<scenario>
# so the supported jobs can be whitelisted once and parsed from the label
# name, instead of duplicating one if/elif branch per label.
SUPPORTED_LABELS = (
    # Charmed Distribution of Kubernetes
    'cdk-libvirt-3-master-1-worker-default',
    'cdk-libvirt-3-master-0-worker-default',
    'cdk-libvirt-1-master-1-worker-default',
    'cdk-libvirt-1-master-0-worker-default',
    # Rancher Kubernetes Engine
    'rke-libvirt-3-master-1-worker-default',
    'rke-libvirt-3-master-0-worker-default',
    'rke-libvirt-1-master-1-worker-default',
    'rke-libvirt-1-master-0-worker-default',
    # Origin Kubernetes Distribution
    'okd-libvirt-3-master-0-worker-default',
    'okd-libvirt-3-master-1-worker-default',
    'okd-libvirt-1-master-0-worker-default',
    'okd-libvirt-1-master-1-worker-default',
    # Kubernetes
    'k8s-libvirt-3-master-1-worker-default',
    'k8s-libvirt-3-master-0-worker-default',
    'k8s-libvirt-1-master-1-worker-default',
    'k8s-libvirt-1-master-0-worker-default',
    # EKS
    'eks-libvirt-3-master-1-worker-default',
    'eks-libvirt-3-master-0-worker-default',
    'eks-libvirt-1-master-1-worker-default',
    'eks-libvirt-1-master-0-worker-default',
    # Misc jobs
    'okd.rke-libvirt-3-master-1-worker-submariner',
    'okd.rke-libvirt-1-master-2-worker-submariner',
)


def main():
    """Run the main method."""
    gh = Github(os.environ['GH_TOKEN'])
    gh_token = os.environ['GH_TOKEN']
    vars_file_path = os.getenv('VARS_FILE', "")
    pipeline_id = os.getenv('CI_PIPELINE_ID', '0')
    repo = gh.get_repo("kubeinit/kubeinit")
    branches = repo.get_branches()
    output = 0
    # Something like:
    # url = "https://gitlab.com/kubeinit/kubeinit-ci/pipelines/"
    url = os.getenv('CI_PIPELINE_URL', "")
    print("The job results will be published in runtime at: " + url)

    for branch in branches:
        for pr in repo.get_pulls(state='open', sort='created', base=branch.name):
            labels = [item.name for item in pr.labels]
            sha = pr.head.sha
            committer_email = repo.get_commit(sha=sha).commit.committer.email
            print(committer_email)
            execute = False
            # We remove the matched label to avoid executing this against the
            # same PR over and over; this marks the PR as e2e-executed.
            for label_name in SUPPORTED_LABELS:
                if label_name in labels:
                    parts = label_name.split('-')
                    distro = parts[0]
                    driver = parts[1]
                    master = parts[2]
                    worker = parts[4]
                    scenario = parts[6]
                    execute = True
                    remove_label(label_name, pr, repo)
                    break

            if execute:
                now = datetime.now()
                job_name = (str(pipeline_id) + "-" + distro + "-" + driver + "-"
                            + master + "-" + worker + "-" + scenario + "-"
                            + now.strftime("%Y.%m.%d.%H.%M.%S"))
                print("Let's run the e2e job, distro %s driver %s " % (distro, driver))
                print("-------------")
                print("-------------")
                print("Running the e2e job for: " + str(pr.number) + " " + pr.title)
                print("-------------")
                print("-------------")
                print("-------------")

                # We update the status to show that we are executing the e2e test
                print("Current status")
                print(repo.get_commit(sha=sha).get_statuses())
                repo.get_commit(sha=sha).create_status(state="pending",
                                                       target_url=url + str(pipeline_id),
                                                       description="Running...",
                                                       context="%s-%s-%s-master-%s-worker-%s" % (distro, driver, master, worker, scenario))
                print("The pipeline ID is: " + str(pipeline_id))
                print("The clouds.yml path is: " + str(vars_file_path))
                # We trigger the e2e job
                start_time = time.time()
                try:
                    print("We call the downstream job configuring its parameters")
                    subprocess.check_call("./ci/run_kubeinit.sh %s %s %s %s %s %s %s %s" % (str(branch.name),
                                                                                            str(pr.number),
                                                                                            str(vars_file_path),
                                                                                            str(distro),
                                                                                            str(driver),
                                                                                            str(master),
                                                                                            str(worker),
                                                                                            str(scenario)),
                                          shell=True)
                except Exception as e:
                    print('An exception happened executing Ansible')
                    print(e)
                    output = 1
                try:
                    print("Render ara data")
                    subprocess.check_call("./ci/ara.sh %s" % (str(job_name) + "-" + str(output)),
                                          shell=True)
                except Exception as e:
                    print('An exception happened rendering ara data')
                    print(e)
                    output = 1

                print("starting the uploader job")
                upload_error = upload_logs(str(job_name) + "-" + str(output), gh_token)
                print("finishing the uploader job")

                if output == 0:
                    state = "success"
                else:
                    state = "failure"
                desc = ("Ended with %s in %s minutes" % (state, round((time.time() - start_time) / 60, 2)))
                print(desc)
                print(state)

                if upload_error == 1:
                    dest_url = url + str(pipeline_id)
                else:
                    dest_url = 'https://kubeinit-bot.github.io/kubeinit-ci-results/' + str(job_name) + "-" + str(output) + '/index.html'
                print("The destination URL is: " + dest_url)
                # We update the status with the job result
                repo.get_commit(sha=sha).create_status(state=state,
                                                       target_url=dest_url,
                                                       description=desc,
                                                       context="%s-%s-%s-master-%s-worker-%s" % (distro, driver, master, worker, scenario))
            else:
                print("No need to do anything")
            if execute:
                exit()


if __name__ == "__main__":
    main()
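# Label anatomy (using one of the supported labels above as the example):
#
#   >>> 'okd.rke-libvirt-1-master-2-worker-submariner'.split('-')
#   ['okd.rke', 'libvirt', '1', 'master', '2', 'worker', 'submariner']
#
# which is why parts[0], parts[1], parts[2], parts[4] and parts[6] recover the
# distro, driver, master count, worker count and scenario.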
from __future__ import absolute_import import difflib from functools import wraps, partial import re from flask import request, url_for, current_app from flask import abort as original_flask_abort from flask.views import MethodView from flask.signals import got_request_exception from werkzeug.exceptions import HTTPException, MethodNotAllowed, NotFound from werkzeug.http import HTTP_STATUS_CODES from werkzeug.wrappers import Response as ResponseBase from flask.ext.restful.utils import error_data, unpack from flask.ext.restful.representations.json import output_json import sys from flask.helpers import _endpoint_from_view_func from types import MethodType try: #noinspection PyUnresolvedReferences from collections import OrderedDict except ImportError: from .utils.ordereddict import OrderedDict __all__ = ('Api', 'Resource', 'marshal', 'marshal_with', 'marshal_with_field', 'abort') def abort(http_status_code, **kwargs): """Raise a HTTPException for the given http_status_code. Attach any keyword arguments to the exception for later processing. """ #noinspection PyUnresolvedReferences try: original_flask_abort(http_status_code) except HTTPException as e: if len(kwargs): e.data = kwargs raise DEFAULT_REPRESENTATIONS = {'application/json': output_json} class Api(object): """ The main entry point for the application. You need to initialize it with a Flask Application: :: >>> app = Flask(__name__) >>> api = restful.Api(app) Alternatively, you can use :meth:`init_app` to set the Flask application after it has been constructed. :param app: the Flask application object :type app: flask.Flask :param prefix: Prefix all routes with a value, eg v1 or 2010-04-01 :type prefix: str :param default_mediatype: The default media type to return :type default_mediatype: str :param decorators: Decorators to attach to every resource :type decorators: list :param catch_all_404s: Use :meth:`handle_error` to handle 404 errors throughout your app :param url_part_order: A string that controls the order that the pieces of the url are concatenated when the full url is constructed. 'b' is the blueprint (or blueprint registration) prefix, 'a' is the api prefix, and 'e' is the path component the endpoint is added with :type catch_all_404s: bool :param errors: A dictionary to define a custom response for each exception or error raised during a request :type errors: dict """ def __init__(self, app=None, prefix='', default_mediatype='application/json', decorators=None, catch_all_404s=False, url_part_order='bae', errors=None): self.representations = dict(DEFAULT_REPRESENTATIONS) self.urls = {} self.prefix = prefix self.default_mediatype = default_mediatype self.decorators = decorators if decorators else [] self.catch_all_404s = catch_all_404s self.url_part_order = url_part_order self.errors = errors or {} self.blueprint_setup = None self.endpoints = set() self.resources = [] self.app = None if app is not None: self.app = app self.init_app(app) def init_app(self, app): """Initialize this class with the given :class:`flask.Flask` application or :class:`flask.Blueprint` object. :param app: the Flask application or blueprint object :type app: flask.Flask :type app: flask.Blueprint Examples:: api = Api() api.add_resource(...) 
api.init_app(app) """ self.blueprint = None # If app is a blueprint, defer the initialization try: app.record(self._deferred_blueprint_init) # Flask.Blueprint has a 'record' attribute, Flask.Api does not except AttributeError: self._init_app(app) else: self.blueprint = app def _complete_url(self, url_part, registration_prefix): """This method is used to defer the construction of the final url in the case that the Api is created with a Blueprint. :param url_part: The part of the url the endpoint is registered with :param registration_prefix: The part of the url contributed by the blueprint. Generally speaking, BlueprintSetupState.url_prefix """ parts = { 'b': registration_prefix, 'a': self.prefix, 'e': url_part } return ''.join(parts[key] for key in self.url_part_order if parts[key]) @staticmethod def _blueprint_setup_add_url_rule_patch(blueprint_setup, rule, endpoint=None, view_func=None, **options): """Method used to patch BlueprintSetupState.add_url_rule for setup state instance corresponding to this Api instance. Exists primarily to enable _complete_url's function. :param blueprint_setup: The BlueprintSetupState instance (self) :param rule: A string or callable that takes a string and returns a string(_complete_url) that is the url rule for the endpoint being registered :param endpoint: See BlueprintSetupState.add_url_rule :param view_func: See BlueprintSetupState.add_url_rule :param **options: See BlueprintSetupState.add_url_rule """ if callable(rule): rule = rule(blueprint_setup.url_prefix) elif blueprint_setup.url_prefix: rule = blueprint_setup.url_prefix + rule options.setdefault('subdomain', blueprint_setup.subdomain) if endpoint is None: endpoint = _endpoint_from_view_func(view_func) defaults = blueprint_setup.url_defaults if 'defaults' in options: defaults = dict(defaults, **options.pop('defaults')) blueprint_setup.app.add_url_rule(rule, '%s.%s' % (blueprint_setup.blueprint.name, endpoint), view_func, defaults=defaults, **options) def _deferred_blueprint_init(self, setup_state): """Synchronize prefix between blueprint/api and registration options, then perform initialization with setup_state.app :class:`flask.Flask` object. When a :class:`flask_restful.Api` object is initialized with a blueprint, this method is recorded on the blueprint to be run when the blueprint is later registered to a :class:`flask.Flask` object. This method also monkeypatches BlueprintSetupState.add_url_rule with _blueprint_setup_add_url_rule_patch. :param setup_state: The setup state object passed to deferred functions during blueprint registration :type setup_state: flask.blueprints.BlueprintSetupState """ self.blueprint_setup = setup_state if setup_state.add_url_rule.__name__ != '_blueprint_setup_add_url_rule_patch': setup_state._original_add_url_rule = setup_state.add_url_rule setup_state.add_url_rule = MethodType(Api._blueprint_setup_add_url_rule_patch, setup_state) if not setup_state.first_registration: raise ValueError('flask-restful blueprints can only be registered once.') self._init_app(setup_state.app) def _init_app(self, app): """Perform initialization actions with the given :class:`flask.Flask` object. 
        :param app: The flask application object
        :type app: flask.Flask
        """
        app.handle_exception = partial(self.error_router, app.handle_exception)
        app.handle_user_exception = partial(self.error_router, app.handle_user_exception)

        if len(self.resources) > 0:
            for resource, urls, kwargs in self.resources:
                self._register_view(app, resource, *urls, **kwargs)

    def owns_endpoint(self, endpoint):
        """Tests if an endpoint name (not path) belongs to this Api.
        Takes into account the Blueprint name part of the endpoint name.

        :param endpoint: The name of the endpoint being checked
        :return: bool
        """
        if self.blueprint:
            if endpoint.startswith(self.blueprint.name):
                endpoint = endpoint.split(self.blueprint.name + '.', 1)[-1]
            else:
                return False
        return endpoint in self.endpoints

    def _should_use_fr_error_handler(self):
        """ Determine if error should be handled with FR or default Flask

        The goal is to return Flask error handlers for non-FR-related routes,
        and FR errors (with the correct media type) for FR endpoints. This
        method currently handles 404 and 405 errors.

        :return: bool
        """
        adapter = current_app.create_url_adapter(request)

        try:
            adapter.match()
        except MethodNotAllowed as e:
            # Check if the other HTTP methods at this url would hit the Api
            valid_route_method = e.valid_methods[0]
            rule, _ = adapter.match(method=valid_route_method, return_rule=True)
            return self.owns_endpoint(rule.endpoint)
        except NotFound:
            return self.catch_all_404s
        except:
            # Werkzeug throws other kinds of exceptions, such as Redirect
            pass

    def _has_fr_route(self):
        """Encapsulating the rules for whether the request was to a Flask endpoint"""
        # 404's, 405's, which might not have a url_rule
        if self._should_use_fr_error_handler():
            return True
        # for all other errors, just check if FR dispatched the route
        if not request.url_rule:
            return False
        return self.owns_endpoint(request.url_rule.endpoint)

    def error_router(self, original_handler, e):
        """This function decides whether the error occurred in a flask-restful
        endpoint or not. If it happened in a flask-restful endpoint, our
        handler will be dispatched. If it happened in an unrelated view, the
        app's original error handler will be dispatched.

        :param original_handler: the original Flask error handler for the app
        :type original_handler: function
        :param e: the exception raised while handling the request
        :type e: Exception

        """
        if self._has_fr_route():
            return self.handle_error(e)
        return original_handler(e)

    def handle_error(self, e):
        """Error handler for the API transforms a raised exception into a Flask
        response, with the appropriate HTTP status code and body.
        :param e: the raised Exception object
        :type e: Exception

        """
        got_request_exception.send(current_app._get_current_object(), exception=e)

        if not hasattr(e, 'code') and current_app.propagate_exceptions:
            exc_type, exc_value, tb = sys.exc_info()
            if exc_value is e:
                raise
            else:
                raise e

        code = getattr(e, 'code', 500)
        data = getattr(e, 'data', error_data(code))

        if code >= 500:
            # There's currently a bug in Python3 that disallows calling
            # logging.exception() when an exception hasn't actually been raised
            if sys.exc_info() == (None, None, None):
                current_app.logger.error("Internal Error")
            else:
                current_app.logger.exception("Internal Error")

        help_on_404 = current_app.config.get("ERROR_404_HELP", True)
        if code == 404 and help_on_404 and ('message' not in data or
                                            data['message'] == HTTP_STATUS_CODES[404]):
            rules = dict([(re.sub('(<.*>)', '', rule.rule), rule.rule)
                          for rule in current_app.url_map.iter_rules()])
            close_matches = difflib.get_close_matches(request.path, rules.keys())
            if close_matches:
                # If we already have a message, add punctuation and continue it.
                if "message" in data:
                    data["message"] += ". "
                else:
                    data["message"] = ""

                data['message'] += 'You have requested this URI [' + request.path + \
                                   '] but did you mean ' + \
                                   ' or '.join((rules[match]
                                                for match in close_matches)) + ' ?'

        error_cls_name = type(e).__name__
        if error_cls_name in self.errors:
            custom_data = self.errors.get(error_cls_name, {})
            code = custom_data.get('status', 500)
            data.update(custom_data)

        resp = self.make_response(data, code)

        if code == 401:
            resp = self.unauthorized(resp)

        return resp

    def mediatypes_method(self):
        """Return a method that returns a list of mediatypes
        """
        return lambda resource_cls: self.mediatypes() + [self.default_mediatype]

    def add_resource(self, resource, *urls, **kwargs):
        """Adds a resource to the api.

        :param resource: the class name of your resource
        :type resource: :class:`Resource`

        :param urls: one or more url routes to match for the resource, standard
                     flask routing rules apply.  Any url variables will be
                     passed to the resource method as args.
        :type urls: str

        :param endpoint: endpoint name (defaults to :meth:`Resource.__name__.lower`
            Can be used to reference this route in :class:`fields.Url` fields
        :type endpoint: str

        Additional keyword arguments not specified above will be passed as-is
        to :meth:`flask.Flask.add_url_rule`.

        Examples::

            api.add_resource(HelloWorld, '/', '/hello')
            api.add_resource(Foo, '/foo', endpoint="foo")
            api.add_resource(FooSpecial, '/special/foo', endpoint="foo")

        """
        if self.app is not None:
            self._register_view(self.app, resource, *urls, **kwargs)
        else:
            self.resources.append((resource, urls, kwargs))

    def _register_view(self, app, resource, *urls, **kwargs):
        endpoint = kwargs.pop('endpoint', None) or resource.__name__.lower()
        self.endpoints.add(endpoint)

        if endpoint in app.view_functions.keys():
            previous_view_class = app.view_functions[endpoint].__dict__['view_class']

            # if you override the endpoint with a different class, avoid the
            # collision by raising an exception
            if previous_view_class != resource:
                raise ValueError('This endpoint (%s) is already set to the class %s.'
% (endpoint, previous_view_class.__name__)) resource.mediatypes = self.mediatypes_method() # Hacky resource.endpoint = endpoint resource_func = self.output(resource.as_view(endpoint)) for decorator in self.decorators: resource_func = decorator(resource_func) for url in urls: # If this Api has a blueprint if self.blueprint: # And this Api has been setup if self.blueprint_setup: # Set the rule to a string directly, as the blueprint is already # set up. self.blueprint_setup.add_url_rule(url, view_func=resource_func, **kwargs) continue else: # Set the rule to a function that expects the blueprint prefix # to construct the final url. Allows deferment of url finalization # in the case that the associated Blueprint has not yet been # registered to an application, so we can wait for the registration # prefix rule = partial(self._complete_url, url) else: # If we've got no Blueprint, just build a url with no prefix rule = self._complete_url(url, '') # Add the url to the application or blueprint app.add_url_rule(rule, view_func=resource_func, **kwargs) def output(self, resource): """Wraps a resource (as a flask view function), for cases where the resource does not directly return a response object :param resource: The resource as a flask view function """ @wraps(resource) def wrapper(*args, **kwargs): resp = resource(*args, **kwargs) if isinstance(resp, ResponseBase): # There may be a better way to test return resp data, code, headers = unpack(resp) return self.make_response(data, code, headers=headers) return wrapper def url_for(self, resource, **values): """Generates a URL to the given resource.""" return url_for(resource.endpoint, **values) def make_response(self, data, *args, **kwargs): """Looks up the representation transformer for the requested media type, invoking the transformer to create a response object. This defaults to (application/json) if no transformer is found for the requested mediatype. :param data: Python object containing response data to be transformed """ for mediatype in self.mediatypes() + [self.default_mediatype]: if mediatype in self.representations: resp = self.representations[mediatype](data, *args, **kwargs) resp.headers['Content-Type'] = mediatype return resp def mediatypes(self): """Returns a list of requested mediatypes sent in the Accept header""" return [h for h, q in request.accept_mimetypes] def representation(self, mediatype): """Allows additional representation transformers to be declared for the api. Transformers are functions that must be decorated with this method, passing the mediatype the transformer represents. Three arguments are passed to the transformer: * The data to be represented in the response body * The http status code * A dictionary of headers The transformer should convert the data appropriately for the mediatype and return a Flask response object. Ex:: @api.representation('application/xml') def xml(data, code, headers): resp = make_response(convert_data_to_xml(data), code) resp.headers.extend(headers) return resp """ def wrapper(func): self.representations[mediatype] = func return func return wrapper def unauthorized(self, response): """ Given a response, change it to ask for credentials """ realm = current_app.config.get("HTTP_BASIC_AUTH_REALM", "flask-restful") challenge = u"{0} realm=\"{1}\"".format("Basic", realm) response.headers['WWW-Authenticate'] = challenge return response class Resource(MethodView): """ Represents an abstract RESTful resource. 
    Concrete resources should extend from this class
    and expose methods for each supported HTTP method. If a resource is invoked
    with an unsupported HTTP method, the API will return a response with status
    405 Method Not Allowed. Otherwise the appropriate method is called and passed
    all arguments from the url rule used when adding the resource to an Api
    instance. See :meth:`~flask.ext.restful.Api.add_resource` for details.
    """
    representations = None
    method_decorators = []

    def dispatch_request(self, *args, **kwargs):

        # Taken from flask
        #noinspection PyUnresolvedReferences
        meth = getattr(self, request.method.lower(), None)
        if meth is None and request.method == 'HEAD':
            meth = getattr(self, 'get', None)
        assert meth is not None, 'Unimplemented method %r' % request.method

        for decorator in self.method_decorators:
            meth = decorator(meth)

        resp = meth(*args, **kwargs)

        if isinstance(resp, ResponseBase):  # There may be a better way to test
            return resp

        representations = self.representations or {}

        #noinspection PyUnresolvedReferences
        for mediatype in self.mediatypes():
            if mediatype in representations:
                data, code, headers = unpack(resp)
                resp = representations[mediatype](data, code, headers)
                resp.headers['Content-Type'] = mediatype
                return resp

        return resp


def marshal(data, fields):
    """Takes raw data (in the form of a dict, list, object) and a dict of
    fields to output and filters the data based on those fields.

    :param data: the actual object(s) from which the fields are taken from
    :param fields: a dict of whose keys will make up the final serialized
                   response output

    >>> from flask.ext.restful import fields, marshal
    >>> data = { 'a': 100, 'b': 'foo' }
    >>> mfields = { 'a': fields.Raw }

    >>> marshal(data, mfields)
    OrderedDict([('a', 100)])

    """
    def make(cls):
        if isinstance(cls, type):
            return cls()
        return cls

    if isinstance(data, (list, tuple)):
        return [marshal(d, fields) for d in data]

    items = ((k, marshal(data, v) if isinstance(v, dict)
              else make(v).output(k, data))
             for k, v in fields.items())
    return OrderedDict(items)


class marshal_with(object):
    """A decorator that applies marshalling to the return values of your methods.

    >>> from flask.ext.restful import fields, marshal_with
    >>> mfields = { 'a': fields.Raw }
    >>> @marshal_with(mfields)
    ... def get():
    ...     return { 'a': 100, 'b': 'foo' }
    ...
    ...
    >>> get()
    OrderedDict([('a', 100)])

    see :meth:`flask.ext.restful.marshal`
    """
    def __init__(self, fields):
        """:param fields: a dict of whose keys will make up the final
                          serialized response output"""
        self.fields = fields

    def __call__(self, f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            resp = f(*args, **kwargs)
            if isinstance(resp, tuple):
                data, code, headers = unpack(resp)
                return marshal(data, self.fields), code, headers
            else:
                return marshal(resp, self.fields)
        return wrapper


class marshal_with_field(object):
    """
    A decorator that formats the return values of your methods with a single field.

    >>> from flask.ext.restful import marshal_with_field, fields
    >>> @marshal_with_field(fields.List(fields.Integer))
    ... def get():
    ...     return ['1', 2, 3.0]
    ...
    >>> get()
    [1, 2, 3]

    see :meth:`flask.ext.restful.marshal_with`
    """
    def __init__(self, field):
        """
        :param field: a single field with which to marshal the output.
        """
        if isinstance(field, type):
            self.field = field()
        else:
            self.field = field

    def __call__(self, f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            resp = f(*args, **kwargs)

            if isinstance(resp, tuple):
                data, code, headers = unpack(resp)
                return self.field.format(data), code, headers
            return self.field.format(resp)

        return wrapper
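# A minimal end-to-end example of the Api/Resource machinery above (the app
# and routes are illustrative):
if __name__ == '__main__':
    from flask import Flask

    app = Flask(__name__)
    api = Api(app)

    class HelloWorld(Resource):
        def get(self):
            return {'hello': 'world'}

    api.add_resource(HelloWorld, '/', '/hello')
    app.run(debug=True)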
from django.test import TestCase
from django.contrib.auth.models import User

from .models import Project


# Create your tests here.
class TestProject(TestCase):
    def setUp(self):
        self.user = User(username='jefferson')
        self.user.save()
        self.project = Project(user=self.user, title='test',
                               description='this is a test project')
        self.project.save()

    # TEST SAVE METHOD
    def test_save(self):
        self.project.save_project()
        projects = Project.objects.all()
        self.assertTrue(len(projects) == 1)

    # TEST DELETE METHOD
    def test_delete(self):
        Project.delete_project(self.project.pk)
        after_delete = Project.objects.all()
        self.assertTrue(len(after_delete) == 0)

    # TEST GET ELEMENT BY ID
    def test_get_by_id(self):
        project = Project.get_project(self.project.id)
        self.assertEqual(project.title, 'test')
import os from flask import Flask from flask_sqlalchemy import SQLAlchemy from flask_login import LoginManager from flask_sslify import SSLify import config db = SQLAlchemy() login_manager = LoginManager() def create_app() -> Flask: """Create an application factory with SQLAlchemy, Login, and SSLify Returns: Flask -- A Flask application object """ app = Flask(__name__) app.secret_key = os.urandom(24) env = os.getenv('FLASK_ENV', 'development') if env == 'production': SQL_ALCHEMY_DATABASE_URI = ( config.ProductionConfig.SQL_ALCHEMY_DATABASE_URI ) elif env == 'testing': SQL_ALCHEMY_DATABASE_URI = ( config.TestConfig.SQL_ALCHEMY_DATABASE_URI ) else: SQL_ALCHEMY_DATABASE_URI = config.Config.SQL_ALCHEMY_DATABASE_URI app.config['SQLALCHEMY_DATABASE_URI'] = SQL_ALCHEMY_DATABASE_URI app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False db.init_app(app) login_manager.login_view = 'auth.login' login_manager.init_app(app) if 'DYNO' in os.environ: SSLify(app) from .models import User @login_manager.user_loader def load_user(user_id): return User.query.get(int(user_id)) from .routes import main as main_blueprint app.register_blueprint(main_blueprint) from .auth import auth as auth_blueprint app.register_blueprint(auth_blueprint) with app.app_context(): from . import routes db.create_all() return app
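# A minimal usage sketch (the module path is hypothetical; FLASK_ENV selects
# which database URI from config.py is used):
#
#   from app import create_app
#
#   app = create_app()
#   app.run()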
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, REGISTRY
from apiclient.discovery import build
from apiclient.errors import HttpError
from datetime import datetime
from oauth2client.service_account import ServiceAccountCredentials
import time, httplib2, os, bios, helper, json, random  # random: needed by the backoff jitter below


class GarCollector(object):
    lastResponses = {}

    def collect(self):
        self._gauges = {}
        analytics = self._initialize_analyticsreporting()
        print("[", datetime.now(), "]", "Authorized to talk with Analytics v4 API")
        reports = helper.yamlToReportRequests(bios.read(CONFIG_FILE))
        for report in reports:
            print("[", datetime.now(), "]", "[REPORT REQUEST]", report)
            segmentsList = report['segmentsList']
            del report['segmentsList']
            response = self._requestWithExponentialBackoff(analytics, report)
            print("[", datetime.now(), "]", "RESPONSE OBTAINED")
            self._get_metrics(
                response,
                report.get('reportRequests')[0].get('viewId'),
                report.get('reportRequests')[0].get('dateRanges')[0],
                segmentsList
            )
        for metric in self._gauges:
            yield self._gauges[metric]

    def _initialize_analyticsreporting(self):
        credentials = ServiceAccountCredentials.from_json_keyfile_name(
            SERVICE_ACCOUNT_FILE, scopes=SCOPES
        )
        http = credentials.authorize(httplib2.Http())
        analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)
        return analytics

    def _get_report(self, analytics, report):
        return analytics.reports().batchGet(
            body=report
        ).execute()

    def _requestWithExponentialBackoff(self, analytics, report):
        """Wrapper to request Google Analytics data with exponential backoff.

        The makeRequest method accepts the analytics service object, makes
        API requests and returns the response. If any error occurs, the
        makeRequest method is retried using exponential backoff.

        Args:
            analytics: The analytics service object
            report: Report request structure

        Returns:
            The API response from the _get_report method.
""" reportId = hash(json.dumps(report)) for n in range(0, 5): try: response = self._get_report(analytics, report) self.lastResponses[reportId] = response return response except HttpError as error: print("[WARNING] Http request error", error.resp.reason) if error.resp.reason in ['userRateLimitExceeded', 'quotaExceeded', 'internalServerError', 'backendError']: time.sleep((2 ** n) + random.random()) else: break print("[",datetime.now(),"]","[ERROR] There has been an error, returning earlier result", reportId) return self.lastResponses[reportId] def _get_metrics(self, response, viewId, dateRanges, segmentsList): METRIC_PREFIX = 'ga_reporting' LABELS = ['ga:viewId', 'ga:dateStart', 'ga:dateEnd'] self._gauges = {} for report in response.get('reports', []): columnHeader = report.get('columnHeader', {}) dimensionHeaders = LABELS dimensionHeaders.extend(columnHeader.get('dimensions', [])) # Added dimensions as labels - fixed bug dimensionHeadersModified = [x[3:] for x in dimensionHeaders] metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', []) rows = report.get('data', {}).get('rows', []) testi=0 for row in rows: dimensions = [viewId, dateRanges.get('startDate'), dateRanges.get('endDate')] dimensions.extend(row.get('dimensions', [])) dateRangeValues = row.get('metrics', []) for i, element in enumerate(dimensions): if element == 'Dynamic Segment': dimensions[i] = list(segmentsList[0][0].values())[0] testi+=1 dimension="" # for header, dimension in zip(dimensionHeaders, dimensions): # print("[HEADER] " + header + ': ' + dimension) for i, values in enumerate(dateRangeValues): # print('Date range (' + str(i) + ')') for metricHeader, returnValue in zip(metricHeaders, values.get('values')): metric = metricHeader.get('name')[3:] # print("[METRIC] " + metric + ': ' + returnValue) self._gauges[metric+str(testi)] = GaugeMetricFamily('%s_%s' % (METRIC_PREFIX, metric), '%s' % metric, value=None, labels=dimensionHeadersModified) self._gauges[metric+str(testi)].add_metric(dimensions, value=float(returnValue)) if __name__ == '__main__': SCOPES = ['https://www.googleapis.com/auth/analytics.readonly'] DISCOVERY_URI = ('https://analyticsreporting.googleapis.com/$discovery/rest') SERVICE_ACCOUNT_FILE = os.getenv('SERVICE_ACCOUNT_FILE') CONFIG_FILE=os.getenv('CONFIG_FILE') print("[",datetime.now(),"]","Starting server in 0.0.0.0:" + os.getenv('BIND_PORT')) start_http_server(int(os.getenv('BIND_PORT'))) REGISTRY.register(GarCollector()) print("[",datetime.now(),"]","Waiting for serving metrics") while True: time.sleep(1)
# Copyright (c) 2018, Toby Slight. All rights reserved. # ISC License (ISCL) - see LICENSE file for details. import argparse import os from pdu import du def chkpath(path): """ Checks if a path exists. """ if os.path.exists(path): return path else: msg = "{0} does not exist.".format(path) raise argparse.ArgumentTypeError(msg) def getargs(): """ Return a list of valid arguments. """ parser = argparse.ArgumentParser( description='Python Disk Usage Calculator.') parser.add_argument("path", type=chkpath, nargs='?', default=".", help="A valid path.") return parser.parse_args() def main(): args = getargs() path = os.path.abspath(args.path) size = du(path) print("{0} = {1}".format(path, size)) if __name__ == '__main__': main()
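# The chkpath pattern above generalizes: any callable passed as argparse's
# type= hook can validate and convert a raw argument, raising
# ArgumentTypeError to produce a clean CLI error. A small sketch with a
# hypothetical positive_int validator (not part of the pdu script):
import argparse


def positive_int(value):
    """argparse type= callable: reject non-positive integers."""
    n = int(value)
    if n <= 0:
        raise argparse.ArgumentTypeError("{0} is not a positive integer.".format(value))
    return n


parser = argparse.ArgumentParser(description='Demo of a validating type= callable.')
parser.add_argument("depth", type=positive_int, nargs='?', default=1,
                    help="A positive traversal depth.")

print(parser.parse_args(["3"]).depth)  # -> 3
# parser.parse_args(["0"]) exits with:
#   error: argument depth: 0 is not a positive integer.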
#!/usr/bin/env python # encoding: utf-8 # # Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 import time import pytest from sasctl.core import request_link from sasctl.services import sentiment_analysis as sa pytestmark = pytest.mark.usefixtures('session') def assert_job_succeeds(job): assert job.state == 'pending' while request_link(job, 'state') in ('pending', 'running'): time.sleep(1) state = request_link(job, 'state') if state == 'failed': # Refresh to get 'errors' ref job = request_link(job, 'self') errors = request_link(job, 'errors') pytest.fail('Job failed: ' + str(errors)) assert state == 'completed' def test_from_table(): from sasctl.services import cas_management as cm input = cm.get_table('COMPLAINTS', 'Public') job = sa.analyze_sentiment(input, id_column='__uniqueid__', text_column='Consumer_complaint_narrative') assert_job_succeeds(job) def test_from_inline_docs(): from sasctl.services import cas_management as cm caslib = cm.get_caslib('Public') input = [ "Oh yes, the, uh, the Norwegian Blue. What's wrong with it?", "I'll tell you what's wrong with it, my lad. He's dead, that's " "what's wrong with it!", "No, no, he's uh,...he's resting.", "Look, matey, I know a dead parrot when I see one, and I'm looking " "at one right now.", "No no he's not dead, he's, he's resting! Remarkable bird, " "the Norwegian Blue, isn't it? Beautiful plumage!", "The plumage don't enter into it. It's stone dead." ] job = sa.analyze_sentiment(input, caslib=caslib) assert_job_succeeds(job)
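# assert_job_succeeds above polls the job's 'state' link once per second with
# no upper bound, so a job stuck in 'running' would hang the test run. A
# hedged sketch of the same polling loop with a timeout guard; wait_for_state
# and its parameters are hypothetical, and request_link is assumed to behave
# as in the tests above (callable as request_link(job, rel), returning the
# state string).
import time


def wait_for_state(job, request_link, terminal=('completed', 'failed'),
                   timeout=300, interval=1):
    """Poll a job's 'state' link until a terminal state or the timeout."""
    deadline = time.monotonic() + timeout
    state = request_link(job, 'state')
    while state not in terminal:
        if time.monotonic() > deadline:
            raise TimeoutError('Job still %r after %d seconds' % (state, timeout))
        time.sleep(interval)
        state = request_link(job, 'state')
    return state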
# -*- coding: utf-8 -*-
# Author: longjiang
from scrapy.spiders import CrawlSpider
from scrapy.http import Request
import re
import time
import json

from ..items import BasicInfoItem, DetailInfoItem, ResblockInfoItem, BuildingInfoItem, UnitInfoItem, CommentInfoItem, DynamicInfoItem


class Spider(CrawlSpider):
    name = "lianjia"
    # Unused placeholder; start_requests() below builds the real entry URL.
    start_urls = ["https://m.weibo.cn/api/container/getIndex?type=uid&value=1816289645"]

    def start_requests(self):
        query_string = {
            "city_id": "610100",
            "is_showing_banner": "0",
            "is_showing_topic": "0",
            "limit_count": "20",
            "limit_offset": "20",
            "position": "19",
            "request_ts": int(time.time())
        }
        url = "https://app.api.lianjia.com/newhouse/apisearch?city_id={}&is_showing_banner={}&is_showing_topic={}&limit_count={}&limit_offset={}&position={}&request_ts={}" \
            .format(query_string["city_id"], query_string["is_showing_banner"], query_string["is_showing_topic"],
                    query_string["limit_count"], query_string["limit_offset"], query_string["position"],
                    query_string["request_ts"])
        # make_requests_from_url is deprecated in newer Scrapy releases.
        yield Request(url=url, callback=self.parse, dont_filter=True)

    def parse(self, response):
        # Renamed from "list" to avoid shadowing the builtin.
        resblock_data = json.loads(response.body)["data"]["resblock_list"]
        for item in resblock_data["list"]:
            data = BasicInfoItem()
            data["basic_id"] = item["id"]
            data["city_id"] = item["city_id"]
            data["city_name"] = item["city_name"]
            data["district_name"] = item["district_name"]
            # district id
            data["district_id"] = item["district_id"]
            data["bizcircle_name"] = item["bizcircle_name"]
            data["process_status"] = item["process_status"]
            # built-up area
            data["resblock_frame_area"] = item["resblock_frame_area"]
            # coordinates
            data["longitude"] = item["longitude"]
            data["latitude"] = item["latitude"]
            # project title
            data["title"] = item["title"]
            data["resblock_name"] = item["resblock_name"]
            # address
            data["address"] = item["address"]
            # average unit price
            data["avg_unit_price"] = item["avg_unit_price"]
            # average price
            data["average_price"] = item["average_price"]
            # address remark
            data["address_remark"] = item["address_remark"]
            # project identifier (e.g. sfsyaalqy)
            data["project_name"] = item["project_name"]
            # special_tags
            data["special_tags"] = ','.join(item["special_tags"])
            # data["min_frame_area"] = item["min_frame_area"]
            data["max_frame_area"] = item["max_frame_area"]
            if len(item["frame_rooms"]) > 0:
                for obj in item["frame_rooms"]:
                    data["count"] = obj["count"]
                    data["room"] = obj["room"]
            else:
                data["count"] = None
                data["room"] = None
            data["tags"] = ','.join(item["tags"])
            if len(item["project_tags"]) > 0:
                desc = []
                for ele in item["project_tags"]:
                    desc.append(ele["desc"])
                # description
                data["description"] = ','.join(desc)
            else:
                data["description"] = None
            # residence / apartment
            data["house_type"] = item["house_type"]
            # on sale or not
            data["sale_status"] = item["sale_status"]
            # has reviews?
data["has_evaluate"] = item["has_evaluate"] data["has_vr_aerial"] = item["has_vr_aerial"] data["has_vr_house"] = item["has_vr_house"] data["has_video"] = item["has_video"] data["has_virtual_view"] = item["has_virtual_view"] # 最低总价 data["lowest_total_price"] = item["lowest_total_price"] # 显示价格 data["show_price"] = item["show_price"] # 显式价格单位(万/元) data["show_price_unit"] = item["show_price_unit"] # 显示价格描述 data["show_price_desc"] = item["show_price_desc"] # 状态 data["status"] = item["status"] data["evaluate_status"] = item["evaluate_status"] # 总价 data["total_price_start"] = item["total_price_start"] # 总价单位 data["total_price_start_unit"] = item["total_price_start_unit"] # 平均最低单价 data["avg_price_start"] = item["avg_price_start"] # 平均最低单价单位 data["avg_price_start_unit"] = item["avg_price_start_unit"] data["insert_time"]=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) yield data #详情 detail_params = { "city_id": "610100", "project_name": item["project_name"], "request_ts": "1516673168" } detail_url="https://app.api.lianjia.com/newhouse/app/resblock/detail?city_id={}&project_name={}&request_ts={}".format(detail_params["city_id"],detail_params["project_name"],detail_params["request_ts"]) yield Request(url=detail_url,callback=self.parse_detail,meta={"house_id":item["id"],"project_name":item["project_name"]}) #评论 comment_params = { "is_real": "", "limit": "20", "offset": "0", "project_name": item["project_name"], "request_ts": int(time.time()) } comment_url = "https://app.api.lianjia.com/newhouse/commentlist?is_real={}&limit={}&offset={}&project_name={}&request_ts={}".format( comment_params["is_real"], comment_params["limit"],comment_params["offset"], comment_params["project_name"], comment_params["request_ts"]) yield Request(url=comment_url,callback=self.parse_comment,meta={"project_name":item["project_name"]}) #动态信息 dynamic_params={ "limit":"10", "offset":"0", "project_name":item["project_name"], "request_ts":int(time.time()) } dynamic_url="https://app.api.lianjia.com/newhouse/dongtailist?limit={}&offset={}&project_name={}&request_ts={}".format(dynamic_params["limit"],dynamic_params["offset"],dynamic_params["project_name"],dynamic_params["request_ts"]) yield Request(url=dynamic_url,callback=self.parse_dynamic,meta={"project_name":item["project_name"]}) #获取更多 offset = int(re.findall("limit_offset=(\d+)&?", response.url)[0]) # 是否还有更多数据 if list["has_more_data"] == "1": offset+=20 query_string = { "city_id": "610100", "is_showing_banner": "0", "is_showing_topic": "0", "limit_count": "20", "limit_offset": offset, "position": "19", "request_ts": int(time.time()) } url = "https://app.api.lianjia.com/newhouse/apisearch?city_id={}&is_showing_banner={}&is_showing_topic={}&limit_count={}&limit_offset={}&position={}&request_ts={}" \ .format(query_string["city_id"], query_string["is_showing_banner"], query_string["is_showing_topic"], query_string["limit_count"], query_string["limit_offset"], query_string["position"], query_string["request_ts"]) yield Request(url=url,callback=self.parse) #详情 def parse_detail(self,response): house_id=response.meta["house_id"] project_name=response.meta["project_name"] data = json.loads(response.body)["data"] # item={} # detail_info表(基本框架信息) for item_f in data["frames"]: detail_info = DetailInfoItem() # id detail_info["fid"] = house_id # 关联键 detail_info["insert_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) # 入库时间 detail_info["frames_id"] = item_f["id"] detail_info["project_name"] = item_f["project_name"] detail_info["resblock_id"] = item_f["resblock_id"] 
detail_info["resblock_name"] = item_f["resblock_name"] detail_info["frame_name"] = item_f["frame_name"] detail_info["bedroom_count"] = item_f["bedroom_count"] detail_info["parlor_count"] = item_f["parlor_count"] detail_info["cookroom_count"] = item_f["cookroom_count"] detail_info["total_count"] = item_f["total_count"] detail_info["toilet_count"] = item_f["toilet_count"] detail_info["build_area"] = item_f["build_area"] detail_info["inside_area"] = item_f["inside_area"] detail_info["frame_structure"] = item_f["frame_structure"] detail_info["orientation"] = item_f["orientation"] detail_info["is_main_frame"] = item_f["is_main_frame"] detail_info["status"] = item_f["status"] detail_info["sell_status"] = item_f["sell_status"] detail_info["sell_status_txt"] = item_f["sell_status_txt"] detail_info["price"] = item_f["price"] detail_info["total_price_min"] = item_f["total_price_min"] detail_info["total_price_max"] = item_f["total_price_max"] detail_info["total_price"] = item_f["total_price"] detail_info["show_price"] = item_f["show_price"] detail_info["show_price_unit"] = item_f["show_price_unit"] detail_info["show_price_desc"] = item_f["show_price_desc"] detail_info["show_price_confirm_time"] = item_f["show_price_confirm_time"] # 房型描述 detail_info["tags"] = ','.join(item_f["tags"]) # 建筑面积 detail_info["frames_build_area"] = item_f["frame_build_area"] detail_info["detail_url"] = item_f["detail_url"] yield detail_info # print detail_info # resblock表 resblock_info = data["resblock_info"] resblock_info_item = ResblockInfoItem() resblock_info_item["rid"] = house_id # 关联键 resblock_info_item["insert_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) # 入库时间 resblock_info_item["resblock_id"] = resblock_info["resblock_id"] resblock_info_item["city_id"] = resblock_info["city_id"] resblock_info_item["status"] = resblock_info["status"] resblock_info_item["average_price"] = resblock_info["average_price"] resblock_info_item["resblock_name"] = resblock_info["resblock_name"] resblock_info_item["resblock_alias"] = resblock_info["resblock_alias"] resblock_info_item["sale_status"] = resblock_info["sale_status"] resblock_info_item["process_status"] = resblock_info["process_status"] resblock_info_item["house_type"] = resblock_info["house_type"] resblock_info_item["house_type_value"] = resblock_info["house_type_value"] resblock_info_item["tags"] = ','.join(resblock_info["tags"]) desc = [] for t in resblock_info["project_tags"]: desc.append(t["desc"]) resblock_info_item["description"] = ','.join(desc) resblock_info_item["project_name"] = resblock_info["project_name"] resblock_info_item["address_remark"] = resblock_info["address_remark"] resblock_info_item["administrative_address"] = resblock_info["administrative_address"] resblock_info_item["open_date"] = resblock_info["open_date"] resblock_info_item["open_date_more"] = resblock_info["open_date_more"] resblock_info_item["special_tags"] = ','.join(resblock_info["special_tags"]) resblock_info_item["developer_company"] = ','.join(resblock_info["developer_company"]) # 交房时间 resblock_info_item["hand_over_time"] = resblock_info["hand_over_time"] resblock_info_item["properright"] = resblock_info["properright"] resblock_info_item["longitude"] = resblock_info["longitude"] resblock_info_item["latitude"] = resblock_info["latitude"] resblock_info_item["district_id"] = resblock_info["district_id"] resblock_info_item["district_name"] = resblock_info["district_name"] resblock_info_item["bizcircle_id"] = resblock_info["bizcircle_id"] resblock_info_item["bizcircle_name"] = 
resblock_info["bizcircle_name"] resblock_info_item["store_addr"] = resblock_info["store_addr"] resblock_info_item["build_type"] = resblock_info["build_type"] resblock_info_item["cubage_rate"] = resblock_info["cubage_rate"] resblock_info_item["virescence_rate"] = resblock_info["virescence_rate"] resblock_info_item["house_amount"] = resblock_info["house_amount"] resblock_info_item["overground_car_num"] = resblock_info["overground_car_num"] resblock_info_item["underground_car_num"] = resblock_info["underground_car_num"] resblock_info_item["property_company"] = ','.join(resblock_info["property_company"]) resblock_info_item["property_price"] = resblock_info["property_price"] resblock_info_item["heating_type"] = resblock_info["heating_type"] resblock_info_item["powersuply_kind"] = resblock_info["powersuply_kind"] resblock_info_item["watersuply_kind"] = resblock_info["watersuply_kind"] resblock_info_item["is_open_date_predict"] = resblock_info["is_open_date_predict"] resblock_info_item["is_hand_over_predict"] = resblock_info["is_hand_over_predict"] resblock_info_item["lowest_total_price"] = resblock_info["lowest_total_price"] resblock_info_item["price_confirm_time"] = resblock_info["price_confirm_time"] if len(resblock_info["permit_list"]) > 0: for r in resblock_info["permit_list"]: resblock_info_item["permit_number"] = r["permit_number"] resblock_info_item["permit_time"] = r["permit_time"] resblock_info_item["building_list"] = r["building_list"] resblock_info_item["show_permit"] = resblock_info["show_permit"] resblock_info_item["show_price"] = resblock_info["show_price"] resblock_info_item["show_price_unit"] = resblock_info["show_price_unit"] resblock_info_item["show_price_desc"] = resblock_info["show_price_desc"] resblock_info_item["resblock_frame_area"] = resblock_info["resblock_frame_area"] resblock_info_item["show_open_date"] = resblock_info["show_open_date"] resblock_info_item["show_hand_over_time"] = resblock_info["show_hand_over_time"] resblock_info_item["total_area"] = resblock_info["total_area"] resblock_info_item["site_area"] = resblock_info["site_area"] resblock_info_item["pid"] = resblock_info["pid"] yield resblock_info_item # build_info表 building_info = data["building_info"] if len(building_info["build_list"]) > 0: for item_b in building_info["build_list"]: building_info_item = BuildingInfoItem() building_info_item["house_id"] = house_id # 关联键 building_info_item["project_name"]=project_name building_info_item["insert_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) # 入库时间 building_info_item["bid"] = item_b["id"] building_info_item["resblock_id"] = item_b["resblock_id"] building_info_item["building_code"] = item_b["building_code"] building_info_item["part_num"] = item_b["part_num"] building_info_item["building_type"] = item_b["building_type"] building_info_item["total_unit_count"] = item_b["total_unit_count"] building_info_item["total_house_count"] = item_b["total_house_count"] building_info_item["floor_height"] = item_b["floor_height"] building_info_item["sell_count"] = item_b["sell_count"] building_info_item["open_time"] = item_b["open_time"] building_info_item["hand_over_time"] = item_b["hand_over_time"] building_info_item["sale_status"] = item_b["sale_status"] yield building_info_item if len(item_b["unit_list"]) > 0: for item_u in item_b["unit_list"]: unit_list_item = UnitInfoItem() unit_list_item["ulid"] = house_id # 关联键 unit_list_item["project_name"]=project_name unit_list_item["insert_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) # 插入时间 
unit_list_item["uid"] = item_u["id"] unit_list_item["unit_name"] = item_u["unit_name"] unit_list_item["sorted"] = item_u["sorted"] unit_list_item["overground_floors"] = item_u["overground_floors"] unit_list_item["underground_floors"] = item_u["underground_floors"] unit_list_item["floor_height"] = item_u["floor_height"] unit_list_item["elevator_count"] = item_u["elevator_count"] unit_list_item["floor_house_count"] = item_u["floor_house_count"] yield unit_list_item #评论 def parse_comment(self,response): project_name=response.meta["project_name"] data=json.loads(response.body)["data"] if int(data["total"])>0: #评论列表 for comm in data["list"]: comment_info_item = CommentInfoItem() #项目标识 comment_info_item["project_name"]=project_name # 周边评分 comment_info_item["around"]=data["around"] # 交通评分 comment_info_item["traffic"]=data["traffic"] # 绿化评分 comment_info_item["green"] =data["green"] # 综合评分 comment_info_item["composite_score"] =data["composite_score"] # 综合评价描述 comment_info_item["composite_score_info"] =data["composite_score_info"] # 评论总数 comment_info_item["total"] =data["total"] comment_info_item["comment_id"]=comm["id"] #评论id comment_info_item["project_name"] =comm["project_name"] #项目名 comment_info_item["user_name"] =comm["user_name"] #评论的用户名 comment_info_item["user_around"] =comm["around"] #该用户对该楼盘周边的评价 comment_info_item["user_traffic"] =comm["traffic"]#该用户对该楼盘交通的评价 comment_info_item["user_green"] =comm["green"]#该用户对该楼盘绿化的评价 comment_info_item["user_avg"] =comm["avg"]#该用户对该楼盘的平均评价 comment_info_item["content"] =comm["content"] #该用户的评价 comment_info_item["ctime"] =comm["ctime"] #评价时间 comment_info_item["pc_ctime"] =comm["pc_ctime"] #评价时间 comment_info_item["image"]=','.join(comm["image"]) #评价里的图片 comment_info_item["phone"] =comm["phone"] #用户手机 comment_info_item["like_num"] =comm["like_num"] #该评论的点赞数 comment_info_item["is_interest"] =comm["is_interest"] #转发? comment_info_item["show_status"] =comm["show_status"]#显式状态 comment_info_item["official_reply"] =comm["official_reply"] #官方回复 comment_info_item["is_anonymity"] =comm["is_anonymity"] #是否匿名 comment_info_item["is_like"] =comm["is_like"] #是否喜欢 comment_info_item["uc_avatar"] =comm["uc_avatar"] #头像 comment_info_item["used_time"] =comm["used_time"] #浏览次数? 
comment_info_item["insert_time"]=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) yield comment_info_item #是否还有更多 offset = int(re.findall("offset=(\d+)&?", response.url)[0]) if data["more"]=="1": offset += 20 params = { "is_real": "", "limit": "20", "offset": offset, "project_name": project_name, "request_ts": int(time.time()) } url = "https://app.api.lianjia.com/newhouse/commentlist?is_real={}&limit={}&offset={}&project_name={}&request_ts={}".format( params["is_real"], params["limit"], params["offset"], params["project_name"], params["request_ts"]) print url yield Request(url=url,callback=self.parse_comment,meta={"project_name":project_name}) #动态信息 def parse_dynamic(self,response): project_name=response.meta["project_name"] data=json.loads(response.body)["data"] #动态列表 if len(data["list"])>0: for item in data["list"]: dynamic_info_item=DynamicInfoItem() dynamic_info_item["project_name"]=project_name #项目标识 dynamic_info_item["total"]=data["total"] # 动态信息总数 dynamic_info_item["dynamic_id"]=item["id"] #动态信息id dynamic_info_item["title"]=item["title"] #信息标题 dynamic_info_item["time"]=item["time"] #消息发布时间 dynamic_info_item["content"]=item["content"] #消息内容 dynamic_info_item["type"]=item["type"] #消息类型 dynamic_info_item["insert_time"]=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) yield dynamic_info_item #是否有更多 offset = int(re.findall("offset=(\d+)&?", response.url)[0]) if data["more"]=="1": offset+=10 dynamic_params = { "limit": "10", "offset": offset, "project_name": project_name, "request_ts": int(time.time()) } dynamic_url = "https://app.api.lianjia.com/newhouse/dongtailist?limit={}&offset={}&project_name={}&request_ts={}".format( dynamic_params["limit"], dynamic_params["offset"], dynamic_params["project_name"],dynamic_params["request_ts"]) yield Request(url=dynamic_url, callback=self.parse_dynamic,meta={"project_name":project_name})
from panther import lookup_aws_account_name from panther_base_helpers import deep_get def rule(event): return (event['eventName'] == 'ConsoleLogin' and deep_get(event, 'userIdentity', 'type') == 'Root' and deep_get(event, 'responseElements', 'ConsoleLogin') == 'Success') def title(event): return 'AWS root login detected from [{ip}] in account [{account}]'.format( ip=event['sourceIPAddress'], account=lookup_aws_account_name(event.get('recipientAccountId'))) def dedup(event): # Each Root login should generate a unique alert return '-'.join( [event['recipientAccountId'], event['eventName'], event['eventTime']]) def alert_context(event): return { 'sourceIPAddress': event['sourceIPAddress'], 'userIdentityAccountId': deep_get(event, 'userIdentity', 'accountId'), 'userIdentityArn': deep_get(event, 'userIdentity', 'arn'), 'eventTime': event.get('eventTime'), 'mfaUsed': deep_get(event, 'additionalEventData', 'MFAUsed') }
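# Because the rule, title and dedup hooks above are plain functions over a
# dict, they can be exercised offline. The event below is synthetic (all
# values fabricated for illustration) and satisfies the rule's three
# conditions; deep_get is Panther's nested-dict lookup helper imported above.
sample_event = {
    'eventName': 'ConsoleLogin',
    'eventTime': '2021-06-04T09:59:53Z',
    'sourceIPAddress': '111.111.111.111',
    'recipientAccountId': '123456789012',
    'userIdentity': {'type': 'Root',
                     'accountId': '123456789012',
                     'arn': 'arn:aws:iam::123456789012:root'},
    'responseElements': {'ConsoleLogin': 'Success'},
    'additionalEventData': {'MFAUsed': 'No'},
}

# rule(sample_event)  -> True
# dedup(sample_event) -> '123456789012-ConsoleLogin-2021-06-04T09:59:53Z'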
def cheese_and_crackers(cheese_count, boxes_of_crackers): print("You have %d cheeses!" % cheese_count) print("You have %d boxes of crackers" % boxes_of_crackers) print("Man that's enough for a party!") print("Get a blanket.\n") print("We can just give the function numbers directly:") cheese_and_crackers(20, 30) print("OR, we can use variables from our script:") amount_of_cheese = 10 amount_of_crackers = 50 cheese_and_crackers(amount_of_cheese, amount_of_crackers) print("We can even do math inside too:") cheese_and_crackers(10 + 20, 5 + 6) print("And we can combine the two, variables and math:") cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
from __future__ import absolute_import from __future__ import division from __future__ import print_function # We disable pylint because we need python3 compatibility. from six.moves import xrange # pylint: disable=redefined-builtin from six.moves import zip # pylint: disable=redefined-builtin from tensorflow.python import shape from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import rnn from tensorflow.python.ops import rnn_cell from tensorflow.python.ops import variable_scope from tensorflow.python.util import nest import tensorflow as tf import rnn_cell as my_rnn_cell # TODO(ebrevdo): Remove once _linear is fully deprecated. linear = rnn_cell._linear # pylint: disable=protected-access def _extract_argmax_and_embed(embedding, num_symbols, output_projection=None, update_embedding=True): """Get a loop_function that extracts the previous symbol and embeds it. Args: embedding: embedding tensor for symbols. output_projection: None or a pair (W, B). If provided, each fed previous output will first be multiplied by W and added B. update_embedding: Boolean; if False, the gradients will not propagate through the embeddings. Returns: A loop function. """ def loop_function(prev, _): # if output_projection is not None: # prev = nn_ops.xw_plus_b( # prev, output_projection[0], output_projection[1]) # prev_symbol = math_ops.argmax(prev, 1) prev_symbol = math_ops.argmax(array_ops.split_v(prev, [2, num_symbols - 2], 1)[1], 1) + 2 # Note that gradients will not propagate through the second parameter of # embedding_lookup. emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol) if not update_embedding: emb_prev = array_ops.stop_gradient(emb_prev) return emb_prev return loop_function def _extract_beam_search(embedding, beam_size, num_symbols, embedding_size, output_projection=None, update_embedding=True): """Get a loop_function that extracts the previous symbol and embeds it. Args: embedding: embedding tensor for symbols. output_projection: None or a pair (W, B). If provided, each fed previous output will first be multiplied by W and added B. update_embedding: Boolean; if False, the gradients will not propagate through the embeddings. Returns: A loop function. """ def loop_function(prev, i, log_beam_probs, beam_path, beam_symbols, beam_results): # if output_projection is not None: # prev = nn_ops.xw_plus_b( # prev, output_projection[0], output_projection[1]) # prev= prev.get_shape().with_rank(2)[1] prev = array_ops.split_v(prev, [2, num_symbols - 2], 1)[1] probs = tf.log(prev + 1e-12) if i > 1: probs = tf.reshape(probs + log_beam_probs[-1], [-1, beam_size * (num_symbols - 2)]) best_probs, indices = tf.nn.top_k(probs, beam_size * 2) indices = tf.stop_gradient(tf.squeeze(tf.reshape(indices, [-1, 1]))) best_probs = tf.stop_gradient(tf.reshape(best_probs, [-1, 1])) symbols = indices % (num_symbols - 2) + 2 # Which word in vocabulary. beam_parent = indices // (num_symbols - 2) # Which hypothesis it came from. 
partition = tf.cast(tf.cast(symbols - 2, tf.bool), tf.int32) prob_group = tf.dynamic_partition(best_probs, partition, 2) symbols_group = tf.dynamic_partition(symbols, partition, 2) parent_group = tf.dynamic_partition(beam_parent, partition, 2) beam_results.append([prob_group[0], symbols_group[0], parent_group[0]]) _probs = prob_group[1][:beam_size] _symbols = symbols_group[1][:beam_size] _parents = parent_group[1][:beam_size] beam_symbols.append(_symbols) beam_path.append(_parents) log_beam_probs.append(_probs) # Note that gradients will not propagate through the second parameter of # embedding_lookup. emb_prev = embedding_ops.embedding_lookup(embedding, _symbols) emb_prev = tf.reshape(emb_prev, [beam_size, embedding_size]) # emb_prev = embedding_ops.embedding_lookup(embedding, symbols) if not update_embedding: emb_prev = array_ops.stop_gradient(emb_prev) return emb_prev return loop_function def rnn_decoder(decoder_inputs, initial_state, cell, loop_function=None, scope=None): """RNN decoder for the sequence-to-sequence model. Args: decoder_inputs: A list of 2D Tensors [batch_size x input_size]. initial_state: 2D Tensor with shape [batch_size x cell.state_size]. cell: rnn_cell.RNNCell defining the cell function and size. loop_function: If not None, this function will be applied to the i-th output in order to generate the i+1-st input, and decoder_inputs will be ignored, except for the first element ("GO" symbol). This can be used for decoding, but also for training to emulate http://arxiv.org/abs/1506.03099. Signature -- loop_function(prev, i) = next * prev is a 2D Tensor of shape [batch_size x output_size], * i is an integer, the step number (when advanced control is needed), * next is a 2D Tensor of shape [batch_size x input_size]. scope: VariableScope for the created subgraph; defaults to "rnn_decoder". Returns: A tuple of the form (outputs, state), where: outputs: A list of the same length as decoder_inputs of 2D Tensors with shape [batch_size x output_size] containing generated outputs. state: The state of each cell at the final time-step. It is a 2D Tensor of shape [batch_size x cell.state_size]. (Note that in some cases, like basic RNN cell or GRU cell, outputs and states can be the same. They are different for LSTM cells though.) """ with variable_scope.variable_scope(scope or "rnn_decoder"): state = initial_state outputs = [] prev = None for i, inp in enumerate(decoder_inputs): if loop_function is not None and prev is not None: with variable_scope.variable_scope("loop_function", reuse=True): inp = loop_function(prev, i) if i > 0: variable_scope.get_variable_scope().reuse_variables() output, state = cell(inp, state) outputs.append(output) if loop_function is not None: prev = output return outputs, state def basic_rnn_seq2seq( encoder_inputs, decoder_inputs, cell, dtype=dtypes.float32, scope=None): """Basic RNN sequence-to-sequence model. This model first runs an RNN to encode encoder_inputs into a state vector, then runs decoder, initialized with the last encoder state, on decoder_inputs. Encoder and decoder use the same RNN cell type, but don't share parameters. Args: encoder_inputs: A list of 2D Tensors [batch_size x input_size]. decoder_inputs: A list of 2D Tensors [batch_size x input_size]. cell: rnn_cell.RNNCell defining the cell function and size. dtype: The dtype of the initial state of the RNN cell (default: tf.float32). scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq". 
Returns: A tuple of the form (outputs, state), where: outputs: A list of the same length as decoder_inputs of 2D Tensors with shape [batch_size x output_size] containing the generated outputs. state: The state of each decoder cell in the final time-step. It is a 2D Tensor of shape [batch_size x cell.state_size]. """ with variable_scope.variable_scope(scope or "basic_rnn_seq2seq"): _, enc_state = rnn.rnn(cell, encoder_inputs, dtype=dtype) return rnn_decoder(decoder_inputs, enc_state, cell) def tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell, loop_function=None, dtype=dtypes.float32, scope=None): """RNN sequence-to-sequence model with tied encoder and decoder parameters. This model first runs an RNN to encode encoder_inputs into a state vector, and then runs decoder, initialized with the last encoder state, on decoder_inputs. Encoder and decoder use the same RNN cell and share parameters. Args: encoder_inputs: A list of 2D Tensors [batch_size x input_size]. decoder_inputs: A list of 2D Tensors [batch_size x input_size]. cell: rnn_cell.RNNCell defining the cell function and size. loop_function: If not None, this function will be applied to i-th output in order to generate i+1-th input, and decoder_inputs will be ignored, except for the first element ("GO" symbol), see rnn_decoder for details. dtype: The dtype of the initial state of the rnn cell (default: tf.float32). scope: VariableScope for the created subgraph; default: "tied_rnn_seq2seq". Returns: A tuple of the form (outputs, state), where: outputs: A list of the same length as decoder_inputs of 2D Tensors with shape [batch_size x output_size] containing the generated outputs. state: The state of each decoder cell in each time-step. This is a list with length len(decoder_inputs) -- one item for each time-step. It is a 2D Tensor of shape [batch_size x cell.state_size]. """ with variable_scope.variable_scope("combined_tied_rnn_seq2seq"): scope = scope or "tied_rnn_seq2seq" _, enc_state = rnn.rnn( cell, encoder_inputs, dtype=dtype, scope=scope) variable_scope.get_variable_scope().reuse_variables() return rnn_decoder(decoder_inputs, enc_state, cell, loop_function=loop_function, scope=scope) def embedding_rnn_decoder(decoder_inputs, initial_state, cell, num_symbols, embedding_size, output_projection=None, feed_previous=False, update_embedding_for_previous=True, scope=None): """RNN decoder with embedding and a pure-decoding option. Args: decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs). initial_state: 2D Tensor [batch_size x cell.state_size]. cell: rnn_cell.RNNCell defining the cell function. num_symbols: Integer, how many symbols come into the embedding. embedding_size: Integer, the length of the embedding vector for each symbol. output_projection: None or a pair (W, B) of output projection weights and biases; W has shape [output_size x num_symbols] and B has shape [num_symbols]; if provided and feed_previous=True, each fed previous output will first be multiplied by W and added B. feed_previous: Boolean; if True, only the first of decoder_inputs will be used (the "GO" symbol), and all other decoder inputs will be generated by: next = embedding_lookup(embedding, argmax(previous_output)), In effect, this implements a greedy decoder. It can also be used during training to emulate http://arxiv.org/abs/1506.03099. If False, decoder_inputs are used as given (the standard decoder case). 
    update_embedding_for_previous: Boolean; if False and feed_previous=True,
      only the embedding for the first symbol of decoder_inputs (the "GO"
      symbol) will be updated by back propagation. Embeddings for the symbols
      generated from the decoder itself remain unchanged. This parameter has
      no effect if feed_previous=False.
    scope: VariableScope for the created subgraph; defaults to
      "embedding_rnn_decoder".

  Returns:
    A tuple of the form (outputs, state), where:
      outputs: A list of the same length as decoder_inputs of 2D Tensors. The
        output is of shape [batch_size x cell.output_size] when
        output_projection is not None (and represents the dense representation
        of predicted tokens). It is of shape [batch_size x num_decoder_symbols]
        when output_projection is None.
      state: The state of each decoder cell in each time-step. This is a list
        with length len(decoder_inputs) -- one item for each time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].

  Raises:
    ValueError: When output_projection has the wrong shape.
  """
  with variable_scope.variable_scope(scope or "embedding_rnn_decoder") as scope:
    if output_projection is not None:
      dtype = scope.dtype
      proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)
      proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])
      proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
      proj_biases.get_shape().assert_is_compatible_with([num_symbols])

    embedding = variable_scope.get_variable("embedding",
                                            [num_symbols, embedding_size])
    # num_symbols must be passed explicitly to match the modified
    # _extract_argmax_and_embed signature defined above.
    loop_function = _extract_argmax_and_embed(
        embedding, num_symbols, output_projection,
        update_embedding_for_previous) if feed_previous else None
    emb_inp = (
        embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs)
    return rnn_decoder(emb_inp, initial_state, cell,
                       loop_function=loop_function)


def embedding_rnn_seq2seq(encoder_inputs,
                          decoder_inputs,
                          cell,
                          num_encoder_symbols,
                          num_decoder_symbols,
                          embedding_size,
                          output_projection=None,
                          feed_previous=False,
                          dtype=None,
                          scope=None):
  """Embedding RNN sequence-to-sequence model.

  This model first embeds encoder_inputs by a newly created embedding (of
  shape [num_encoder_symbols x input_size]). Then it runs an RNN to encode
  embedded encoder_inputs into a state vector. Next, it embeds decoder_inputs
  by another newly created embedding (of shape [num_decoder_symbols x
  input_size]). Then it runs RNN decoder, initialized with the last
  encoder state, on embedded decoder_inputs.

  Args:
    encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
    decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    num_encoder_symbols: Integer; number of symbols on the encoder side.
    num_decoder_symbols: Integer; number of symbols on the decoder side.
    embedding_size: Integer, the length of the embedding vector for each
      symbol.
    output_projection: None or a pair (W, B) of output projection weights and
      biases; W has shape [output_size x num_decoder_symbols] and B has
      shape [num_decoder_symbols]; if provided and feed_previous=True, each
      fed previous output will first be multiplied by W and added B.
    feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
      of decoder_inputs will be used (the "GO" symbol), and all other decoder
      inputs will be taken from previous outputs (as in embedding_rnn_decoder).
      If False, decoder_inputs are used as given (the standard decoder case).
    dtype: The dtype of the initial state for both the encoder and decoder
      rnn cells (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to "embedding_rnn_seq2seq" Returns: A tuple of the form (outputs, state), where: outputs: A list of the same length as decoder_inputs of 2D Tensors. The output is of shape [batch_size x cell.output_size] when output_projection is not None (and represents the dense representation of predicted tokens). It is of shape [batch_size x num_decoder_symbols] when output_projection is None. state: The state of each decoder cell in each time-step. This is a list with length len(decoder_inputs) -- one item for each time-step. It is a 2D Tensor of shape [batch_size x cell.state_size]. """ with variable_scope.variable_scope(scope or "embedding_rnn_seq2seq") as scope: if dtype is not None: scope.set_dtype(dtype) else: dtype = scope.dtype # Encoder. encoder_cell = rnn_cell.EmbeddingWrapper( cell, embedding_classes=num_encoder_symbols, embedding_size=embedding_size) _, encoder_state = rnn.rnn(encoder_cell, encoder_inputs, dtype=dtype) # Decoder. if output_projection is None: cell = rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols) if isinstance(feed_previous, bool): return embedding_rnn_decoder( decoder_inputs, encoder_state, cell, num_decoder_symbols, embedding_size, output_projection=output_projection, feed_previous=feed_previous) # If feed_previous is a Tensor, we construct 2 graphs and use cond. def decoder(feed_previous_bool): reuse = None if feed_previous_bool else True with variable_scope.variable_scope( variable_scope.get_variable_scope(), reuse=reuse) as scope: outputs, state = embedding_rnn_decoder( decoder_inputs, encoder_state, cell, num_decoder_symbols, embedding_size, output_projection=output_projection, feed_previous=feed_previous_bool, update_embedding_for_previous=False) state_list = [state] if nest.is_sequence(state): state_list = nest.flatten(state) return outputs + state_list outputs_and_state = control_flow_ops.cond(feed_previous, lambda: decoder(True), lambda: decoder(False)) outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs. state_list = outputs_and_state[outputs_len:] state = state_list[0] if nest.is_sequence(encoder_state): state = nest.pack_sequence_as(structure=encoder_state, flat_sequence=state_list) return outputs_and_state[:outputs_len], state def embedding_tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell, num_symbols, embedding_size, num_decoder_symbols=None, output_projection=None, feed_previous=False, dtype=None, scope=None): """Embedding RNN sequence-to-sequence model with tied (shared) parameters. This model first embeds encoder_inputs by a newly created embedding (of shape [num_symbols x input_size]). Then it runs an RNN to encode embedded encoder_inputs into a state vector. Next, it embeds decoder_inputs using the same embedding. Then it runs RNN decoder, initialized with the last encoder state, on embedded decoder_inputs. The decoder output is over symbols from 0 to num_decoder_symbols - 1 if num_decoder_symbols is none; otherwise it is over 0 to num_symbols - 1. Args: encoder_inputs: A list of 1D int32 Tensors of shape [batch_size]. decoder_inputs: A list of 1D int32 Tensors of shape [batch_size]. cell: rnn_cell.RNNCell defining the cell function and size. num_symbols: Integer; number of symbols for both encoder and decoder. embedding_size: Integer, the length of the embedding vector for each symbol. num_decoder_symbols: Integer; number of output symbols for decoder. If provided, the decoder output is over symbols 0 to num_decoder_symbols - 1. 
      Otherwise, decoder output is over symbols 0 to num_symbols - 1. Note
      that this assumes that the vocabulary is set up such that the first
      num_decoder_symbols of num_symbols are part of decoding.
    output_projection: None or a pair (W, B) of output projection weights and
      biases; W has shape [output_size x num_symbols] and B has shape
      [num_symbols]; if provided and feed_previous=True, each fed previous
      output will first be multiplied by W and added B.
    feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
      of decoder_inputs will be used (the "GO" symbol), and all other decoder
      inputs will be taken from previous outputs (as in embedding_rnn_decoder).
      If False, decoder_inputs are used as given (the standard decoder case).
    dtype: The dtype to use for the initial RNN states (default: tf.float32).
    scope: VariableScope for the created subgraph; defaults to
      "embedding_tied_rnn_seq2seq".

  Returns:
    A tuple of the form (outputs, state), where:
      outputs: A list of the same length as decoder_inputs of 2D Tensors with
        shape [batch_size x output_symbols] containing the generated
        outputs where output_symbols = num_decoder_symbols if
        num_decoder_symbols is not None otherwise output_symbols = num_symbols.
      state: The state of each decoder cell at the final time-step. It is a
        2D Tensor of shape [batch_size x cell.state_size].

  Raises:
    ValueError: When output_projection has the wrong shape.
  """
  with variable_scope.variable_scope(
      scope or "embedding_tied_rnn_seq2seq", dtype=dtype) as scope:
    dtype = scope.dtype

    if output_projection is not None:
      proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)
      proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])
      proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
      proj_biases.get_shape().assert_is_compatible_with([num_symbols])

    embedding = variable_scope.get_variable(
        "embedding", [num_symbols, embedding_size], dtype=dtype)

    emb_encoder_inputs = [embedding_ops.embedding_lookup(embedding, x)
                          for x in encoder_inputs]
    emb_decoder_inputs = [embedding_ops.embedding_lookup(embedding, x)
                          for x in decoder_inputs]

    output_symbols = num_symbols
    if num_decoder_symbols is not None:
      output_symbols = num_decoder_symbols
    if output_projection is None:
      cell = rnn_cell.OutputProjectionWrapper(cell, output_symbols)

    if isinstance(feed_previous, bool):
      # num_symbols must be passed explicitly to match the modified
      # _extract_argmax_and_embed signature defined above.
      loop_function = _extract_argmax_and_embed(
          embedding, num_symbols, output_projection,
          True) if feed_previous else None
      return tied_rnn_seq2seq(emb_encoder_inputs, emb_decoder_inputs, cell,
                              loop_function=loop_function, dtype=dtype)

    # If feed_previous is a Tensor, we construct 2 graphs and use cond.
    def decoder(feed_previous_bool):
      loop_function = _extract_argmax_and_embed(
          embedding, num_symbols, output_projection,
          False) if feed_previous_bool else None
      reuse = None if feed_previous_bool else True
      with variable_scope.variable_scope(variable_scope.get_variable_scope(),
                                         reuse=reuse):
        outputs, state = tied_rnn_seq2seq(
            emb_encoder_inputs, emb_decoder_inputs, cell,
            loop_function=loop_function, dtype=dtype)
        state_list = [state]
        if nest.is_sequence(state):
          state_list = nest.flatten(state)
        return outputs + state_list

    outputs_and_state = control_flow_ops.cond(feed_previous,
                                              lambda: decoder(True),
                                              lambda: decoder(False))
    outputs_len = len(decoder_inputs)  # Outputs length same as decoder inputs.
    state_list = outputs_and_state[outputs_len:]
    state = state_list[0]
    # Calculate zero-state to know its structure.
static_batch_size = encoder_inputs[0].get_shape()[0] for inp in encoder_inputs[1:]: static_batch_size.merge_with(inp.get_shape()[0]) batch_size = static_batch_size.value if batch_size is None: batch_size = array_ops.shape(encoder_inputs[0])[0] zero_state = cell.zero_state(batch_size, dtype) if nest.is_sequence(zero_state): state = nest.pack_sequence_as(structure=zero_state, flat_sequence=state_list) return outputs_and_state[:outputs_len], state def attention_decoder(decoder_inputs, emotion, imemory, ememory, initial_state, attention_states, cell, output_size=None, output_projection=None, num_heads=1, loop_function=None, dtype=None, scope=None, initial_state_attention=True): """RNN decoder with attention for the sequence-to-sequence model. In this context "attention" means that, during decoding, the RNN can look up information in the additional tensor attention_states, and it does this by focusing on a few entries from the tensor. This model has proven to yield especially good results in a number of sequence-to-sequence tasks. This implementation is based on http://arxiv.org/abs/1412.7449 (see below for details). It is recommended for complex sequence-to-sequence tasks. Args: decoder_inputs: A list of 2D Tensors [batch_size x input_size]. initial_state: 2D Tensor [batch_size x cell.state_size]. attention_states: 3D Tensor [batch_size x attn_length x attn_size]. cell: rnn_cell.RNNCell defining the cell function and size. output_size: Size of the output vectors; if None, we use cell.output_size. num_heads: Number of attention heads that read from attention_states. loop_function: If not None, this function will be applied to i-th output in order to generate i+1-th input, and decoder_inputs will be ignored, except for the first element ("GO" symbol). This can be used for decoding, but also for training to emulate http://arxiv.org/abs/1506.03099. Signature -- loop_function(prev, i) = next * prev is a 2D Tensor of shape [batch_size x output_size], * i is an integer, the step number (when advanced control is needed), * next is a 2D Tensor of shape [batch_size x input_size]. dtype: The dtype to use for the RNN initial state (default: tf.float32). scope: VariableScope for the created subgraph; default: "attention_decoder". initial_state_attention: If False (default), initial attentions are zero. If True, initialize the attentions from the initial state and attention states -- useful when we wish to resume decoding from a previously stored decoder state and attention states. Returns: A tuple of the form (outputs, state), where: outputs: A list of the same length as decoder_inputs of 2D Tensors of shape [batch_size x output_size]. These represent the generated outputs. Output i is computed from input i (which is either the i-th element of decoder_inputs or loop_function(output {i-1}, i)) as follows. First, we run the cell on a combination of the input and previous attention masks: cell_output, new_state = cell(linear(input, prev_attn), prev_state). Then, we calculate new attention masks: new_attn = softmax(V^T * tanh(W * attention_states + U * new_state)) and then we calculate the output: output = linear(cell_output, new_attn). state: The state of each decoder cell the final time-step. It is a 2D Tensor of shape [batch_size x cell.state_size]. Raises: ValueError: when num_heads is not positive, there are no inputs, shapes of attention_states are not set, or input size cannot be inferred from the input. 
""" if not decoder_inputs: raise ValueError("Must provide at least 1 input to attention decoder.") if num_heads < 1: raise ValueError("With less than 1 heads, use a non-attention decoder.") if attention_states.get_shape()[2].value is None: raise ValueError("Shape[2] of attention_states must be known: %s" % attention_states.get_shape()) if output_size is None: output_size = cell.output_size with variable_scope.variable_scope( scope or "attention_decoder", dtype=dtype) as scope: dtype = scope.dtype batch_size = array_ops.shape(decoder_inputs[0])[0] # Needed for reshaping. attn_length = attention_states.get_shape()[1].value if attn_length is None: attn_length = shape(attention_states)[1] attn_size = attention_states.get_shape()[2].value # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before. hidden = array_ops.reshape( attention_states, [-1, attn_length, 1, attn_size]) hidden_features = [] v = [] attention_vec_size = attn_size # Size of query vectors for attention. for a in xrange(num_heads): k = variable_scope.get_variable("AttnW_%d" % a, [1, 1, attn_size, attention_vec_size]) hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], "SAME")) v.append( variable_scope.get_variable("AttnV_%d" % a, [attention_vec_size])) state = initial_state def attention(query): """Put attention masks on hidden using hidden_features and query.""" ds = [] # Results of attention reads will be stored here. if nest.is_sequence(query): # If the query is a tuple, flatten it. query_list = nest.flatten(query) for q in query_list: # Check that ndims == 2 if specified. ndims = q.get_shape().ndims if ndims: assert ndims == 2 query = array_ops.concat(1, query_list) for a in xrange(num_heads): with variable_scope.variable_scope("Attention_%d" % a): y = linear(query, attention_vec_size, True) y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size]) # Attention mask is a softmax of v^T * tanh(...). s = math_ops.reduce_sum( v[a] * math_ops.tanh(hidden_features[a] + y), [2, 3]) a = nn_ops.softmax(s) # Now calculate the attention-weighted vector d. d = math_ops.reduce_sum( array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2]) ds.append(array_ops.reshape(d, [-1, attn_size])) return ds outputs = [] gates = [] prev = None batch_attn_size = array_ops.pack([batch_size, attn_size]) attns = [array_ops.zeros(batch_attn_size, dtype=dtype) for _ in xrange(num_heads)] for a in attns: # Ensure the second shape of attention vectors is set. a.set_shape([None, attn_size]) if initial_state_attention: attns = attention(initial_state) for i, inp in enumerate(decoder_inputs): if i > 0: variable_scope.get_variable_scope().reuse_variables() # If loop_function is set, we use it instead of decoder_inputs. if loop_function is not None and prev is not None: with variable_scope.variable_scope("loop_function", reuse=True): inp = loop_function(prev, i) # Merge input and previous attentions into one vector of the right size. input_size = inp.get_shape().with_rank(2)[1] if input_size.value is None: raise ValueError("Could not infer input size from input: %s" % inp.name) # x = linear([inp] + attns, input_size, True) x = array_ops.concat(1, [inp, attns[0]]) # Run the RNN. if emotion is None and imemory is None: cell_output, state = cell(x, state) else: # imemory = tf.Print(imemory, [tf.reduce_sum(imemory**2)], summarize=1000) cell_output, state, imemory = cell(x, state, emotion, imemory) # Run the attention mechanism. 
if i == 0 and initial_state_attention: with variable_scope.variable_scope(variable_scope.get_variable_scope(), reuse=True): attns = attention(state) else: attns = attention(state) with variable_scope.variable_scope("AttnOutputProjection"): output = linear([cell_output] + attns, output_size, True) if output_projection is not None: logit = nn_ops.xw_plus_b( output, output_projection[0], output_projection[1]) gate = None if ememory is not None: with variable_scope.variable_scope("OutputEmemoryGate"): g = tf.reshape(tf.sigmoid(linear([output], 1, True)), [-1]) # hard gate # g = tf.cast(tf.greater(g, 0.5), tf.float32) output0, output1 = tf.dynamic_partition(tf.transpose(logit), tf.cast(ememory, tf.int32), 2) indice0, indice1 = tf.dynamic_partition(tf.range(ememory.get_shape()[-1].value), tf.cast(ememory, tf.int32), 2) s0 = tf.nn.softmax(output0, dim=0) * (1 - g) s1 = tf.nn.softmax(output1, dim=0) * g output = tf.transpose(tf.reshape(tf.dynamic_stitch([indice0, indice1], [s0, s1]), [ememory.get_shape()[-1].value, batch_size])) gate = tf.stack([g, (1 - g)]) else: output = tf.nn.softmax(logit) if loop_function is not None: prev = output outputs.append(output) gates.append(gate) return outputs, state, imemory, ememory, gates def beam_attention_decoder(decoder_inputs, emotion, imemory, ememory, initial_state, attention_states, cell, output_size=None, num_heads=1, loop_function=None, dtype=None, scope=None, initial_state_attention=True, output_projection=None, beam_size=10): """RNN decoder with attention for the sequence-to-sequence model. In this context "attention" means that, during decoding, the RNN can look up information in the additional tensor attention_states, and it does this by focusing on a few entries from the tensor. This model has proven to yield especially good results in a number of sequence-to-sequence tasks. This implementation is based on http://arxiv.org/abs/1412.7449 (see below for details). It is recommended for complex sequence-to-sequence tasks. Args: decoder_inputs: A list of 2D Tensors [batch_size x input_size]. initial_state: 2D Tensor [batch_size x cell.state_size]. attention_states: 3D Tensor [batch_size x attn_length x attn_size]. cell: rnn_cell.RNNCell defining the cell function and size. output_size: Size of the output vectors; if None, we use cell.output_size. num_heads: Number of attention heads that read from attention_states. loop_function: If not None, this function will be applied to i-th output in order to generate i+1-th input, and decoder_inputs will be ignored, except for the first element ("GO" symbol). This can be used for decoding, but also for training to emulate http://arxiv.org/abs/1506.03099. Signature -- loop_function(prev, i) = next * prev is a 2D Tensor of shape [batch_size x output_size], * i is an integer, the step number (when advanced control is needed), * next is a 2D Tensor of shape [batch_size x input_size]. dtype: The dtype to use for the RNN initial state (default: tf.float32). scope: VariableScope for the created subgraph; default: "attention_decoder". initial_state_attention: If False (default), initial attentions are zero. If True, initialize the attentions from the initial state and attention states -- useful when we wish to resume decoding from a previously stored decoder state and attention states. Returns: A tuple of the form (outputs, state), where: outputs: A list of the same length as decoder_inputs of 2D Tensors of shape [batch_size x output_size]. These represent the generated outputs. 
Output i is computed from input i (which is either the i-th element of decoder_inputs or loop_function(output {i-1}, i)) as follows. First, we run the cell on a combination of the input and previous attention masks: cell_output, new_state = cell(linear(input, prev_attn), prev_state). Then, we calculate new attention masks: new_attn = softmax(V^T * tanh(W * attention_states + U * new_state)) and then we calculate the output: output = linear(cell_output, new_attn). state: The state of each decoder cell the final time-step. It is a 2D Tensor of shape [batch_size x cell.state_size]. Raises: ValueError: when num_heads is not positive, there are no inputs, shapes of attention_states are not set, or input size cannot be inferred from the input. """ if not decoder_inputs: raise ValueError("Must provide at least 1 input to attention decoder.") if num_heads < 1: raise ValueError("With less than 1 heads, use a non-attention decoder.") if attention_states.get_shape()[2].value is None: raise ValueError("Shape[2] of attention_states must be known: %s" % attention_states.get_shape()) if output_size is None: output_size = cell.output_size with variable_scope.variable_scope( scope or "attention_decoder", dtype=dtype) as scope: dtype = scope.dtype batch_size = array_ops.shape(decoder_inputs[0])[0] # Needed for reshaping. attn_length = attention_states.get_shape()[1].value if attn_length is None: attn_length = shape(attention_states)[1] attn_size = attention_states.get_shape()[2].value # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before. hidden = array_ops.reshape( attention_states, [-1, attn_length, 1, attn_size]) hidden_features = [] v = [] attention_vec_size = attn_size # Size of query vectors for attention. for a in xrange(num_heads): k = variable_scope.get_variable("AttnW_%d" % a, [1, 1, attn_size, attention_vec_size]) hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], "SAME")) v.append( variable_scope.get_variable("AttnV_%d" % a, [attention_vec_size])) state = initial_state state_size = int(state[0].get_shape().with_rank(2)[1]) def attention(query): """Put attention masks on hidden using hidden_features and query.""" ds = [] # Results of attention reads will be stored here. if nest.is_sequence(query): # If the query is a tuple, flatten it. query_list = nest.flatten(query) for q in query_list: # Check that ndims == 2 if specified. ndims = q.get_shape().ndims if ndims: assert ndims == 2 query = array_ops.concat(1, query_list) for a in xrange(num_heads): with variable_scope.variable_scope("Attention_%d" % a): y = linear(query, attention_vec_size, True) y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size]) # Attention mask is a softmax of v^T * tanh(...). s = math_ops.reduce_sum( v[a] * math_ops.tanh(hidden_features[a] + y), [2, 3]) a = nn_ops.softmax(s) # Now calculate the attention-weighted vector d. d = math_ops.reduce_sum( array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2]) ds.append(array_ops.reshape(d, [-1, attn_size])) return ds outputs = [] prev = None batch_attn_size = array_ops.pack([batch_size, attn_size]) attns = [array_ops.zeros(batch_attn_size, dtype=dtype) for _ in xrange(num_heads)] for a in attns: # Ensure the second shape of attention vectors is set. 
a.set_shape([None, attn_size]) if initial_state_attention: attns = attention(initial_state) log_beam_probs, beam_path, beam_symbols, beam_results = [], [], [], [] for i, inp in enumerate(decoder_inputs): if i > 0: variable_scope.get_variable_scope().reuse_variables() # If loop_function is set, we use it instead of decoder_inputs. if loop_function is not None and prev is not None: with variable_scope.variable_scope("loop_function", reuse=True): emb = loop_function(prev, i, log_beam_probs, beam_path, beam_symbols, beam_results) _state = [] for j in state: _state.append(tf.reshape(tf.gather(j, beam_path[-1]), [-1, state_size])) state = tuple(_state) # Merge input and previous attentions into one vector of the right size. input_size = inp.get_shape().with_rank(2)[1] if input_size.value is None: raise ValueError("Could not infer input size from input: %s" % inp.name) # x = linear([inp] + attns, input_size, True) if i == 0: x = array_ops.concat(1, [inp, attns[0]]) else: x = tf.concat(1, [emb, tf.reshape(tf.gather(attns[0], beam_path[-1]), [-1, attn_size])]) # Run the RNN. if emotion is None and imemory is None: cell_output, state = cell(x, state) else: cell_output, state, imemory = cell(x, state, emotion, imemory) # Run the attention mechanism. if i == 0 and initial_state_attention: with variable_scope.variable_scope(variable_scope.get_variable_scope(), reuse=True): attns = attention(state) else: attns = attention(state) with variable_scope.variable_scope("AttnOutputProjection"): output = linear([cell_output] + attns, output_size, True) if output_projection is not None: logit = nn_ops.xw_plus_b( output, output_projection[0], output_projection[1]) if ememory is not None: with variable_scope.variable_scope("OutputEmemoryGate"): g = tf.reshape(tf.sigmoid(linear([output], 1, True)), [-1]) # hard gate # g = tf.cast(tf.greater(g, 0.5), tf.float32) output0, output1 = tf.dynamic_partition(tf.transpose(logit), tf.cast(ememory, tf.int32), 2) indice0, indice1 = tf.dynamic_partition(tf.range(ememory.get_shape()[-1].value), tf.cast(ememory, tf.int32), 2) s0 = tf.nn.softmax(output0, dim=0) * (1 - g) s1 = tf.nn.softmax(output1, dim=0) * g output = tf.transpose( tf.reshape(tf.dynamic_stitch([indice0, indice1], [s0, s1]), [ememory.get_shape()[-1].value, -1])) else: output = tf.nn.softmax(logit) if loop_function is not None: prev = output if i == 0: # emotion = tf.reshape([emotion]*beam_size, [beam_size, -1]) if emotion is not None: emotion = tf.concat(0, [emotion] * beam_size) if imemory is not None: imemory = tf.concat(0, [imemory] * beam_size) outputs.append(output) return outputs, state, beam_results, tf.reshape(tf.concat(0, beam_symbols), [-1, beam_size]), tf.reshape( tf.concat(0, beam_path), [-1, beam_size]) def embedding_attention_decoder(decoder_inputs, decoder_emotions, initial_state, attention_states, cell, num_symbols, embedding_size, emotion_category, emotion_size, imemory_size, use_emb, use_imemory, use_ememory, num_heads=1, output_size=None, output_projection=None, feed_previous=False, update_embedding_for_previous=True, dtype=None, scope=None, initial_state_attention=True, beam_search=True, beam_size=10): """RNN decoder with embedding and attention and a pure-decoding option. Args: decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs). initial_state: 2D Tensor [batch_size x cell.state_size]. attention_states: 3D Tensor [batch_size x attn_length x attn_size]. cell: rnn_cell.RNNCell defining the cell function. num_symbols: Integer, how many symbols come into the embedding. 
    embedding_size: Integer, the length of the embedding vector for each symbol.
    num_heads: Number of attention heads that read from attention_states.
    output_size: Size of the output vectors; if None, use cell.output_size.
    output_projection: None or a pair (W, B) of output projection weights and
      biases; W has shape [output_size x num_symbols] and B has shape
      [num_symbols]; if provided and feed_previous=True, each fed previous
      output will first be multiplied by W and have B added.
    feed_previous: Boolean; if True, only the first of decoder_inputs will be
      used (the "GO" symbol), and all other decoder inputs will be generated by:
      next = embedding_lookup(embedding, argmax(previous_output)).
      In effect, this implements a greedy decoder. It can also be used
      during training to emulate http://arxiv.org/abs/1506.03099.
      If False, decoder_inputs are used as given (the standard decoder case).
    update_embedding_for_previous: Boolean; if False and feed_previous=True,
      only the embedding for the first symbol of decoder_inputs (the "GO"
      symbol) will be updated by back propagation. Embeddings for the symbols
      generated from the decoder itself remain unchanged. This parameter has
      no effect if feed_previous=False.
    dtype: The dtype to use for the RNN initial states (default: tf.float32).
    scope: VariableScope for the created subgraph; defaults to
      "embedding_attention_decoder".
    initial_state_attention: If False (default), initial attentions are zero.
      If True, initialize the attentions from the initial state and attention
      states -- useful when we wish to resume decoding from a previously
      stored decoder state and attention states.

  Returns:
    A tuple of the form (outputs, state), where:
      outputs: A list of the same length as decoder_inputs of 2D Tensors with
        shape [batch_size x output_size] containing the generated outputs.
      state: The state of each decoder cell at the final time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].

  Raises:
    ValueError: When output_projection has the wrong shape.
""" if output_size is None: output_size = cell.output_size if output_projection is not None: proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype) proj_biases.get_shape().assert_is_compatible_with([num_symbols]) with variable_scope.variable_scope( scope or "embedding_attention_decoder", dtype=dtype) as scope: embedding = variable_scope.get_variable("embedding", [num_symbols, embedding_size]) emotion = None imemory = None ememory = None if use_emb: emotion_embedding = variable_scope.get_variable("emotion_embedding", [emotion_category, emotion_size]) emotion = embedding_ops.embedding_lookup(emotion_embedding, decoder_emotions) if use_imemory: internal_memory = variable_scope.get_variable("internal_memory", [emotion_category, imemory_size]) imemory = embedding_ops.embedding_lookup(internal_memory, decoder_emotions) # imemory = tf.Print(imemory, [imemory, internal_memory], summarize=10) if use_ememory: external_memory = variable_scope.get_variable("external_memory", [emotion_category, num_symbols], trainable=False) ememory = embedding_ops.embedding_lookup(external_memory, decoder_emotions[0]) if beam_search: loop_function = _extract_beam_search( embedding, beam_size, num_symbols, embedding_size, output_projection, update_embedding_for_previous) else: loop_function = _extract_argmax_and_embed( embedding, num_symbols, output_projection, update_embedding_for_previous) if feed_previous else None emb_inp = [ embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs] # array_ops.concat(1, [embedding_ops.embedding_lookup(embedding, i), emotion]) for i in decoder_inputs] if beam_search: return beam_attention_decoder( emb_inp, emotion, imemory, ememory, initial_state, attention_states, cell, output_size=output_size, num_heads=num_heads, loop_function=loop_function, initial_state_attention=initial_state_attention, output_projection=output_projection, beam_size=beam_size) else: return attention_decoder( emb_inp, emotion, imemory, ememory, initial_state, attention_states, cell, output_size=output_size, output_projection=output_projection, num_heads=num_heads, loop_function=loop_function, initial_state_attention=initial_state_attention) def embedding_attention_seq2seq(encoder_inputs, decoder_inputs, decoder_emotions, en_cell, de_cell, num_encoder_symbols, num_decoder_symbols, embedding_size, emotion_category, emotion_size, imemory_size, use_emb=False, use_imemory=False, use_ememory=False, num_heads=1, output_projection=None, feed_previous=False, dtype=None, scope=None, initial_state_attention=True, beam_search=True, beam_size=10): """Embedding sequence-to-sequence model with attention. This model first embeds encoder_inputs by a newly created embedding (of shape [num_encoder_symbols x input_size]). Then it runs an RNN to encode embedded encoder_inputs into a state vector. It keeps the outputs of this RNN at every step to use for attention later. Next, it embeds decoder_inputs by another newly created embedding (of shape [num_decoder_symbols x input_size]). Then it runs attention decoder, initialized with the last encoder state, on embedded decoder_inputs and attending to encoder outputs. Warning: when output_projection is None, the size of the attention vectors and variables will be made proportional to num_decoder_symbols, can be large. Args: encoder_inputs: A list of 1D int32 Tensors of shape [batch_size]. decoder_inputs: A list of 1D int32 Tensors of shape [batch_size]. cell: rnn_cell.RNNCell defining the cell function and size. 
num_encoder_symbols: Integer; number of symbols on the encoder side. num_decoder_symbols: Integer; number of symbols on the decoder side. embedding_size: Integer, the length of the embedding vector for each symbol. num_heads: Number of attention heads that read from attention_states. output_projection: None or a pair (W, B) of output projection weights and biases; W has shape [output_size x num_decoder_symbols] and B has shape [num_decoder_symbols]; if provided and feed_previous=True, each fed previous output will first be multiplied by W and added B. feed_previous: Boolean or scalar Boolean Tensor; if True, only the first of decoder_inputs will be used (the "GO" symbol), and all other decoder inputs will be taken from previous outputs (as in embedding_rnn_decoder). If False, decoder_inputs are used as given (the standard decoder case). dtype: The dtype of the initial RNN state (default: tf.float32). scope: VariableScope for the created subgraph; defaults to "embedding_attention_seq2seq". initial_state_attention: If False (default), initial attentions are zero. If True, initialize the attentions from the initial state and attention states. Returns: A tuple of the form (outputs, state), where: outputs: A list of the same length as decoder_inputs of 2D Tensors with shape [batch_size x num_decoder_symbols] containing the generated outputs. state: The state of each decoder cell at the final time-step. It is a 2D Tensor of shape [batch_size x cell.state_size]. """ with variable_scope.variable_scope( scope or "embedding_attention_seq2seq", dtype=dtype) as scope: dtype = scope.dtype # Encoder. encoder_cell = rnn_cell.EmbeddingWrapper( en_cell, embedding_classes=num_encoder_symbols, embedding_size=embedding_size) encoder_outputs, encoder_state = rnn.rnn( encoder_cell, encoder_inputs, dtype=dtype) # First calculate a concatenation of encoder outputs to put attention on. top_states = [array_ops.reshape(e, [-1, 1, en_cell.output_size]) for e in encoder_outputs] attention_states = array_ops.concat(1, top_states) # Decoder. output_size = None if output_projection is None: # not work de_cell = rnn_cell.OutputProjectionWrapper(de_cell, num_decoder_symbols) output_size = num_decoder_symbols if isinstance(feed_previous, bool): return embedding_attention_decoder( decoder_inputs, decoder_emotions, encoder_state, attention_states, de_cell, num_decoder_symbols, embedding_size, emotion_category=emotion_category, emotion_size=emotion_size, imemory_size=imemory_size, use_emb=use_emb, use_imemory=use_imemory, use_ememory=use_ememory, num_heads=num_heads, output_size=output_size, output_projection=output_projection, feed_previous=feed_previous, initial_state_attention=initial_state_attention, beam_search=beam_search, beam_size=beam_size) def sequence_loss_by_example(logits, targets, weights, ememory, average_across_timesteps=True, softmax_loss_function=None, name=None): """Weighted cross-entropy loss for a sequence of logits (per example). Args: logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols]. targets: List of 1D batch-sized int32 Tensors of the same length as logits. weights: List of 1D batch-sized float-Tensors of the same length as logits. average_across_timesteps: If set, divide the returned cost by the total label weight. softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch to be used instead of the standard softmax (the default if this is None). name: Optional name for this operation, default: "sequence_loss_by_example". 
Returns: 1D batch-sized float Tensor: The log-perplexity for each sequence. Raises: ValueError: If len(logits) is different from len(targets) or len(weights). """ if len(targets) != len(logits) or len(weights) != len(logits): raise ValueError("Lengths of logits, weights, and targets must be the same " "%d, %d, %d." % (len(logits), len(weights), len(targets))) with ops.name_scope(name, "sequence_loss_by_example", logits + targets + weights if ememory is None else logits + targets + weights + [ememory]): log_perp_list = [] for logit, target, weight in zip(logits, targets, weights): if softmax_loss_function is None: # TODO(irving,ebrevdo): This reshape is needed because # sequence_loss_by_example is called with scalars sometimes, which # violates our general scalar strictness policy. # target = array_ops.reshape(target, [-1]) # crossent = nn_ops.sparse_softmax_cross_entropy_with_logits( # logit, target) if ememory is None: target = array_ops.reshape(target, [-1]) label = tf.one_hot(target, depth=logit.get_shape().with_rank(2)[1], dtype=tf.float32) crossent = -tf.reduce_sum(label * tf.log(logit + 1e-12), 1) else: golden = tf.gather(ememory, target) golden = tf.stack([golden, 1 - golden]) crossent = -tf.reduce_sum(golden * tf.log(logit + 1e-12), 0) else: # sampled softmax not work crossent = softmax_loss_function(logit, target) log_perp_list.append(crossent * weight) log_perps = math_ops.add_n(log_perp_list) if average_across_timesteps: total_size = math_ops.add_n(weights) total_size += 1e-12 # Just to avoid division by 0 for all-0 weights. log_perps /= total_size return log_perps def sequence_loss(logits, targets, weights, ememory, average_across_timesteps=True, average_across_batch=True, softmax_loss_function=None, name=None): """Weighted cross-entropy loss for a sequence of logits, batch-collapsed. Args: logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols]. targets: List of 1D batch-sized int32 Tensors of the same length as logits. weights: List of 1D batch-sized float-Tensors of the same length as logits. average_across_timesteps: If set, divide the returned cost by the total label weight. average_across_batch: If set, divide the returned cost by the batch size. softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch to be used instead of the standard softmax (the default if this is None). name: Optional name for this operation, defaults to "sequence_loss". Returns: A scalar float Tensor: The average log-perplexity per symbol (weighted). Raises: ValueError: If len(logits) is different from len(targets) or len(weights). """ with ops.name_scope(name, "sequence_loss", logits + targets + weights if ememory is None else logits + targets + weights + [ememory]): p = sequence_loss_by_example( logits, targets, weights, ememory, average_across_timesteps=average_across_timesteps, softmax_loss_function=softmax_loss_function) cost_p = math_ops.reduce_sum(p) if average_across_batch: batch_size = array_ops.shape(targets[0])[0] return cost_p / math_ops.cast(batch_size, cost_p.dtype) else: return cost_p def model_with_buckets(encoder_inputs, decoder_inputs, targets, weights, decoder_emotions, buckets, seq2seq, softmax_loss_function=None, per_example_loss=False, use_imemory=False, use_ememory=False, name=None): """Create a sequence-to-sequence model with support for bucketing. 
  The seq2seq argument is a function that defines a sequence-to-sequence model,
  e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(x, y, rnn_cell.GRUCell(24))

  Args:
    encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.
    decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.
    targets: A list of 1D batch-sized int32 Tensors (desired output sequence).
    weights: List of 1D batch-sized float-Tensors to weight the targets.
    buckets: A list of pairs of (input size, output size) for each bucket.
    seq2seq: A sequence-to-sequence model function; it takes two inputs that
      agree with encoder_inputs and decoder_inputs, and returns a pair
      consisting of outputs and states (as, e.g., basic_rnn_seq2seq).
    softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    per_example_loss: Boolean. If set, the returned loss will be a
      batch-sized tensor of losses for each sequence in the batch. If unset,
      it will be a scalar with the averaged loss from all examples.
    name: Optional name for this operation, defaults to "model_with_buckets".

  Returns:
    A tuple of the form (outputs, losses), where:
      outputs: The outputs for each bucket. Its j'th element consists of a list
        of 2D Tensors. The shape of output tensors can be either
        [batch_size x output_size] or [batch_size x num_decoder_symbols]
        depending on the seq2seq model used.
      losses: List of scalar Tensors, representing losses for each bucket, or,
        if per_example_loss is set, a list of 1D batch-sized float Tensors.

  Raises:
    ValueError: If length of encoder_inputs, targets, or weights is smaller
      than the largest (last) bucket.
  """
  if len(encoder_inputs) < buckets[-1][0]:
    raise ValueError("Length of encoder_inputs (%d) must be at least that of la"
                     "st bucket (%d)." % (len(encoder_inputs), buckets[-1][0]))
  if len(targets) < buckets[-1][1]:
    raise ValueError("Length of targets (%d) must be at least that of last "
                     "bucket (%d)." % (len(targets), buckets[-1][1]))
  if len(weights) < buckets[-1][1]:
    raise ValueError("Length of weights (%d) must be at least that of last "
                     "bucket (%d)."
% (len(weights), buckets[-1][1])) all_inputs = encoder_inputs + decoder_inputs + targets + weights + [decoder_emotions] losses = [] outputs = [] ppxes = [] with ops.name_scope(name, "model_with_buckets", all_inputs): for j, bucket in enumerate(buckets): with variable_scope.variable_scope(variable_scope.get_variable_scope(), reuse=True if j > 0 else None): bucket_outputs, _, imemory, ememory, gates = seq2seq(encoder_inputs[:bucket[0]], decoder_inputs[:bucket[1]], decoder_emotions) outputs.append(bucket_outputs) if per_example_loss: # not work losses.append(math_ops.reduce_sum(imemory ** 2) / math_ops.cast(array_ops.shape(targets[0])[0], imemory.dtype) + sequence_loss_by_example( outputs[-1], targets[:bucket[1]], weights[:bucket[1]], softmax_loss_function=softmax_loss_function)) else: loss = sequence_loss( outputs[-1], targets[:bucket[1]], weights[:bucket[1]], None, softmax_loss_function=softmax_loss_function) ppxes.append(loss) if use_imemory: loss += math_ops.reduce_sum(imemory ** 2) / math_ops.cast(array_ops.shape(targets[0])[0], imemory.dtype) if use_ememory: loss += sequence_loss( gates, targets[:bucket[1]], weights[:bucket[1]], ememory, softmax_loss_function=softmax_loss_function) losses.append(loss) return outputs, losses, ppxes def decode_model_with_buckets(encoder_inputs, decoder_inputs, targets, weights, decoder_emotions, buckets, seq2seq, softmax_loss_function=None, per_example_loss=False, name=None): """Create a sequence-to-sequence model with support for bucketing. The seq2seq argument is a function that defines a sequence-to-sequence model, e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(x, y, rnn_cell.GRUCell(24)) Args: encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input. decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input. targets: A list of 1D batch-sized int32 Tensors (desired output sequence). weights: List of 1D batch-sized float-Tensors to weight the targets. buckets: A list of pairs of (input size, output size) for each bucket. seq2seq: A sequence-to-sequence model function; it takes 2 input that agree with encoder_inputs and decoder_inputs, and returns a pair consisting of outputs and states (as, e.g., basic_rnn_seq2seq). softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch to be used instead of the standard softmax (the default if this is None). per_example_loss: Boolean. If set, the returned loss will be a batch-sized tensor of losses for each sequence in the batch. If unset, it will be a scalar with the averaged loss from all examples. name: Optional name for this operation, defaults to "model_with_buckets". Returns: A tuple of the form (outputs, losses), where: outputs: The outputs for each bucket. Its j'th element consists of a list of 2D Tensors of shape [batch_size x num_decoder_symbols] (jth outputs). losses: List of scalar Tensors, representing losses for each bucket, or, if per_example_loss is set, a list of 1D batch-sized float Tensors. Raises: ValueError: If length of encoder_inputsut, targets, or weights is smaller than the largest (last) bucket. """ if len(encoder_inputs) < buckets[-1][0]: raise ValueError("Length of encoder_inputs (%d) must be at least that of la" "st bucket (%d)." % (len(encoder_inputs), buckets[-1][0])) if len(targets) < buckets[-1][1]: raise ValueError("Length of targets (%d) must be at least that of last" "bucket (%d)." 
                     % (len(targets), buckets[-1][1]))
  if len(weights) < buckets[-1][1]:
    raise ValueError("Length of weights (%d) must be at least that of last "
                     "bucket (%d)." % (len(weights), buckets[-1][1]))

  all_inputs = encoder_inputs + decoder_inputs + targets + weights + [decoder_emotions]
  losses = []
  outputs = []
  beam_results = []
  beam_symbols = []
  beam_parents = []
  with ops.name_scope(name, "model_with_buckets", all_inputs):
    for j, bucket in enumerate(buckets):
      with variable_scope.variable_scope(variable_scope.get_variable_scope(),
                                         reuse=True if j > 0 else None):
        bucket_outputs, _, beam_result, beam_symbol, beam_parent = seq2seq(
            encoder_inputs[:bucket[0]], decoder_inputs[:bucket[1]], decoder_emotions)
        outputs.append(bucket_outputs)
        beam_results.append(beam_result)
        beam_symbols.append(beam_symbol)
        beam_parents.append(beam_parent)
  return outputs, beam_results, beam_symbols, beam_parents
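# --- Added illustrative sketch (not part of the original file) ---
# The attention_decoder docstring above defines the attention read as
#   new_attn = softmax(v^T * tanh(W * attention_states + U * new_state))
# followed by the context vector d = sum_t a_t * h_t. The NumPy code below
# restates those two equations for a single query vector. All names (W, U, v)
# and shapes are assumptions chosen for illustration; this is not the TF graph
# code, which builds the same computation with 1x1 convolutions.
import numpy as np

def additive_attention(hidden, query, W, U, v):
    """hidden: [T, attn_size]; query: [state_size]; returns (weights, context)."""
    scores = np.tanh(hidden @ W + query @ U) @ v   # [T]
    weights = np.exp(scores - scores.max())
    weights /= weights.sum()                       # softmax over the T time steps
    context = weights @ hidden                     # [attn_size]
    return weights, context

if __name__ == '__main__':
    rng = np.random.default_rng(0)
    T, attn_size, state_size, vec_size = 5, 4, 3, 4
    a, d = additive_attention(rng.normal(size=(T, attn_size)),
                              rng.normal(size=state_size),
                              rng.normal(size=(attn_size, vec_size)),
                              rng.normal(size=(state_size, vec_size)),
                              rng.normal(size=vec_size))
    assert np.isclose(a.sum(), 1.0) and d.shape == (attn_size,)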
# -*- coding:utf-8 -*- # Author: hankcs # Date: 2019-12-28 19:26 __version__ = '2.1.0-alpha.8' """HanLP version"""
# GenomicRangeQuery - Find the minimal nucleotide from a range of sequence DNA.
# A DNA sequence can be represented as a string consisting of the letters A, C, G and T,
# which correspond to the types of successive nucleotides in the sequence.
# Each nucleotide has an impact factor, which is an integer.
# Nucleotides of types A, C, G and T have impact factors of 1, 2, 3 and 4, respectively.
# You are going to answer several queries of the form: What is the minimal impact factor
# of nucleotides contained in a particular part of the given DNA sequence?

# The DNA sequence is given as a non-empty string S = S[0]S[1]...S[N-1] consisting of N
# characters. There are M queries, which are given in non-empty arrays P and Q,
# each consisting of M integers. The K-th query (0 ≤ K < M) requires you to find the
# minimal impact factor of nucleotides contained in the DNA sequence between positions P[K] and Q[K] (inclusive).

# For example, consider string S = CAGCCTA and arrays P, Q such that:
#     P[0] = 2    Q[0] = 4
#     P[1] = 5    Q[1] = 5
#     P[2] = 0    Q[2] = 6
# The answers to these M = 3 queries are as follows:
# The part of the DNA between positions 2 and 4 contains nucleotides G and C (twice), whose impact factors are 3 and 2 respectively, so the answer is 2.
# The part between positions 5 and 5 contains a single nucleotide T, whose impact factor is 4, so the answer is 4.
# The part between positions 0 and 6 (the whole string) contains all nucleotides, in particular nucleotide A whose impact factor is 1, so the answer is 1.

# For example, given the string S = CAGCCTA and arrays P = [2, 5, 0] and
# Q = [4, 5, 6], the function should return the values [2, 4, 1], as explained above.

# Important
# N is an integer within the range [1..100,000];
# M is an integer within the range [1..50,000];
# each element of arrays P, Q is an integer within the range [0..N − 1];
# P[K] ≤ Q[K], where 0 ≤ K < M;
# string S consists only of upper-case English letters A, C, G, T.

def Solution(S, P, Q):
    result = []
    for p in range(len(P)):
        # 'A' < 'C' < 'G' < 'T' alphabetically, which matches the impact-factor
        # order, so the smallest letter in the slice has the minimal impact factor.
        min_let_factor = min(S[P[p]:Q[p] + 1])
        if min_let_factor == 'A':
            factor = 1
        elif min_let_factor == 'C':
            factor = 2
        elif min_let_factor == 'G':
            factor = 3
        else:
            factor = 4
        result.append(factor)
    return result
# Detected time complexity: O(N * M) ==> 62%

# internet
def Solution1(S, P, Q):
    l = len(S)
    # lastSeen[x][i] holds the most recent position <= x at which "ACGT"[i] occurred.
    lastSeen = [[-1, -1, -1, -1] for x in range(l)]
    for x in range(len(S)):
        for i, j in enumerate(list("ACGT")):
            if S[x] == j:
                lastSeen[x][i] = x
            elif x > 0:
                lastSeen[x][i] = lastSeen[x - 1][i]
    res = []
    for x in range(len(P)):
        startIdx = P[x]
        relevantLastSeen = lastSeen[Q[x]]
        res.append(min([i + 1 for i, v in enumerate(relevantLastSeen) if v >= startIdx]))
    return res

# Testing
S = 'CAGCCTA'
P = [2, 5, 0]
Q = [4, 5, 6]
print(Solution1(S, P, Q))
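# Sketch (added for illustration): the standard O(N + M) approach to this task
# keeps prefix counts of each nucleotide, so every query is answered in O(1).
# It is an alternative to the solutions above under the same problem statement;
# the name prefix_solution is my own, not from the source.
def prefix_solution(S, P, Q):
    n = len(S)
    # prefix[i][k] = number of occurrences of "ACGT"[i] in S[0:k]
    prefix = [[0] * (n + 1) for _ in range(4)]
    for k, ch in enumerate(S):
        for i, letter in enumerate("ACGT"):
            prefix[i][k + 1] = prefix[i][k] + (ch == letter)
    result = []
    for p, q in zip(P, Q):
        for i in range(4):
            if prefix[i][q + 1] - prefix[i][p] > 0:  # letter i occurs in S[p:q+1]
                result.append(i + 1)
                break
    return result

assert prefix_solution('CAGCCTA', [2, 5, 0], [4, 5, 6]) == [2, 4, 1]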
# Definition for a binary tree node. # class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right class Solution: def findTilt(self, root: TreeNode) -> int: result = 0 def getSum(node): nonlocal result if node is None: return 0 leftSum = getSum(node.left) rightSum = getSum(node.right) result += abs(rightSum - leftSum) return leftSum + rightSum + node.val getSum(root) return result
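# Usage sketch (added for illustration): LeetCode supplies TreeNode at runtime;
# the definition below simply mirrors the commented-out one above so the example
# is self-contained. Note that in a standalone run TreeNode must exist before
# Solution is defined, since the findTilt annotation references it.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

#       1
#      / \
#     2   3    tilt(2) = tilt(3) = 0; tilt(1) = |2 - 3| = 1; total tilt = 1
root = TreeNode(1, TreeNode(2), TreeNode(3))
assert Solution().findTilt(root) == 1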
# Copyright 2017, OpenCensus Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from opencensus.trace.ext import utils from opencensus.trace.ext.pyramid.config import PyramidTraceSettings from opencensus.trace import attributes_helper from opencensus.trace import execution_context from opencensus.trace import tracer as tracer_module HTTP_METHOD = attributes_helper.COMMON_ATTRIBUTES['HTTP_METHOD'] HTTP_URL = attributes_helper.COMMON_ATTRIBUTES['HTTP_URL'] HTTP_STATUS_CODE = attributes_helper.COMMON_ATTRIBUTES['HTTP_STATUS_CODE'] _PYRAMID_TRACE_HEADER = 'X_CLOUD_TRACE_CONTEXT' BLACKLIST_PATHS = 'BLACKLIST_PATHS' log = logging.getLogger(__name__) class OpenCensusTweenFactory(object): """Pyramid tweens are like wsgi middleware, but have access to things like the request, response, and application registry. The tween factory is a globally importable callable whose constructor takes a request handler and application registry. It will be called with a pyramid request object. For details on pyramid tweens, see https://docs.pylonsproject.org/projects/pyramid/en/latest/narr/hooks.html#creating-a-tween """ def __init__(self, handler, registry): """Constructor for the pyramid tween :param handler: Either the main Pyramid request handling function or another tween :type handler: function :param registry: The pyramid application registry :type registry: :class:`pyramid.registry.Registry` """ self.handler = handler self.registry = registry settings = PyramidTraceSettings(registry) self.sampler = settings.SAMPLER self.exporter = settings.EXPORTER self.propagator = settings.PROPAGATOR self._blacklist_paths = settings.params.get(BLACKLIST_PATHS) def __call__(self, request): self._before_request(request) response = self.handler(request) self._after_request(request, response) return response def _before_request(self, request): if utils.disable_tracing_url(request.path, self._blacklist_paths): return try: header = get_context_header(request) span_context = self.propagator.from_header(header) tracer = tracer_module.Tracer( span_context=span_context, sampler=self.sampler, exporter=self.exporter, propagator=self.propagator) span = tracer.start_span() # Set the span name as the name of the current module name span.name = '[{}]{}'.format( request.method, request.path) tracer.add_attribute_to_current_span( attribute_key=HTTP_METHOD, attribute_value=request.method) tracer.add_attribute_to_current_span( attribute_key=HTTP_URL, attribute_value=request.path) except Exception: # pragma: NO COVER log.error('Failed to trace request', exc_info=True) def _after_request(self, request, response): if utils.disable_tracing_url(request.path, self._blacklist_paths): return try: tracer = execution_context.get_opencensus_tracer() tracer.add_attribute_to_current_span( HTTP_STATUS_CODE, str(response.status_code)) tracer.end_span() tracer.finish() except Exception: # pragma: NO COVER log.error('Failed to trace request', exc_info=True) def get_context_header(request): """Get trace context header from pyramid request headers.""" 
return request.headers.get(_PYRAMID_TRACE_HEADER)
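# Registration sketch (added for illustration): a Pyramid tween factory such as
# OpenCensusTweenFactory above is enabled through Configurator.add_tween, giving
# the factory's dotted path. 'myapp.tracing.OpenCensusTweenFactory' below is a
# placeholder for wherever this module actually lives in your project.
from pyramid.config import Configurator

def make_app():
    config = Configurator()
    config.add_tween('myapp.tracing.OpenCensusTweenFactory')
    return config.make_wsgi_app()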
from datetime import datetime, timedelta, tzinfo from http import cookies as http_cookies import re import pytest import falcon import falcon.testing as testing from falcon.util import http_date_to_dt, TimezoneGMT from _util import create_app # NOQA UNICODE_TEST_STRING = 'Unicode_\xc3\xa6\xc3\xb8' class TimezoneGMTPlus1(tzinfo): def utcoffset(self, dt): return timedelta(hours=1) def tzname(self, dt): return 'GMT+1' def dst(self, dt): return timedelta(hours=1) GMT_PLUS_ONE = TimezoneGMTPlus1() class CookieResource: def on_get(self, req, resp): resp.set_cookie('foo', 'bar', domain='example.com', path='/') def on_head(self, req, resp): resp.set_cookie('foo', 'bar', max_age=300) resp.set_cookie('bar', 'baz', http_only=False) resp.set_cookie('bad', 'cookie') resp.unset_cookie('bad') def on_post(self, req, resp): e = datetime(year=2050, month=1, day=1) # naive resp.set_cookie('foo', 'bar', http_only=False, secure=False, expires=e) resp.unset_cookie('bad') def on_put(self, req, resp): e = datetime(year=2050, month=1, day=1, tzinfo=GMT_PLUS_ONE) # aware resp.set_cookie('foo', 'bar', http_only=False, secure=False, expires=e) resp.unset_cookie('bad') class CookieResourceMaxAgeFloatString: def on_get(self, req, resp): resp.set_cookie('foofloat', 'bar', max_age=15.3, secure=False, http_only=False) resp.set_cookie('foostring', 'bar', max_age='15', secure=False, http_only=False) class CookieResourceSameSite: def on_get(self, req, resp): resp.set_cookie('foo', 'bar', same_site='Lax') def on_post(self, req, resp): resp.set_cookie('bar', 'foo', same_site='STRICT') def on_put(self, req, resp): resp.set_cookie('baz', 'foo', same_site='none') def on_delete(self, req, resp): resp.set_cookie('baz', 'foo', same_site='') class CookieUnset: def on_get(self, req, resp): resp.unset_cookie('foo') resp.unset_cookie('bar', path='/bar') resp.unset_cookie('baz', domain='www.example.com') resp.unset_cookie('foobar', path='/foo', domain='www.example.com') @pytest.fixture def client(asgi): app = create_app(asgi) app.add_route('/', CookieResource()) app.add_route('/test-convert', CookieResourceMaxAgeFloatString()) app.add_route('/same-site', CookieResourceSameSite()) app.add_route('/unset-cookie', CookieUnset()) return testing.TestClient(app) # ===================================================================== # Response # ===================================================================== def test_response_base_case(client): result = client.simulate_get('/') cookie = result.cookies['foo'] assert cookie.name == 'foo' assert cookie.value == 'bar' assert cookie.domain == 'example.com' assert cookie.http_only # NOTE(kgriffs): Explicitly test for None to ensure # falcon.testing.Cookie is returning exactly what we # expect. Apps using falcon.testing.Cookie can be a # bit more cavalier if they wish. 
assert cookie.max_age is None assert cookie.expires is None assert cookie.path == '/' assert cookie.secure def test_response_disable_secure_globally(client): client.app.resp_options.secure_cookies_by_default = False result = client.simulate_get('/') cookie = result.cookies['foo'] assert not cookie.secure client.app.resp_options.secure_cookies_by_default = True result = client.simulate_get('/') cookie = result.cookies['foo'] assert cookie.secure def test_response_complex_case(client): result = client.simulate_head('/') assert len(result.cookies) == 3 cookie = result.cookies['foo'] assert cookie.value == 'bar' assert cookie.domain is None assert cookie.expires is None assert cookie.http_only assert cookie.max_age == 300 assert cookie.path is None assert cookie.secure cookie = result.cookies['bar'] assert cookie.value == 'baz' assert cookie.domain is None assert cookie.expires is None assert not cookie.http_only assert cookie.max_age is None assert cookie.path is None assert cookie.secure cookie = result.cookies['bad'] assert cookie.value == '' # An unset cookie has an empty value assert cookie.domain is None assert cookie.same_site == 'Lax' assert cookie.expires < datetime.utcnow() # NOTE(kgriffs): I know accessing a private attr like this is # naughty of me, but we just need to sanity-check that the # string is GMT. assert cookie._expires.endswith('GMT') assert cookie.http_only assert cookie.max_age is None assert cookie.path is None assert cookie.secure def test_unset_cookies(client): result = client.simulate_get('/unset-cookie') assert len(result.cookies) == 4 def test(cookie, path, domain): assert cookie.value == '' # An unset cookie has an empty value assert cookie.domain == domain assert cookie.path == path assert cookie.same_site == 'Lax' assert cookie.expires < datetime.utcnow() test(result.cookies['foo'], path=None, domain=None) test(result.cookies['bar'], path='/bar', domain=None) test(result.cookies['baz'], path=None, domain='www.example.com') test(result.cookies['foobar'], path='/foo', domain='www.example.com') def test_cookie_expires_naive(client): result = client.simulate_post('/') cookie = result.cookies['foo'] assert cookie.value == 'bar' assert cookie.domain is None assert cookie.expires == datetime(year=2050, month=1, day=1) assert not cookie.http_only assert cookie.max_age is None assert cookie.path is None assert not cookie.secure def test_cookie_expires_aware(client): result = client.simulate_put('/') cookie = result.cookies['foo'] assert cookie.value == 'bar' assert cookie.domain is None assert cookie.expires == datetime(year=2049, month=12, day=31, hour=23) assert not cookie.http_only assert cookie.max_age is None assert cookie.path is None assert not cookie.secure def test_cookies_setable(client): resp = falcon.Response() assert resp._cookies is None resp.set_cookie('foo', 'wrong-cookie', max_age=301) resp.set_cookie('foo', 'bar', max_age=300) morsel = resp._cookies['foo'] assert isinstance(morsel, http_cookies.Morsel) assert morsel.key == 'foo' assert morsel.value == 'bar' assert morsel['max-age'] == 300 @pytest.mark.parametrize('cookie_name', ('foofloat', 'foostring')) def test_cookie_max_age_float_and_string(client, cookie_name): # NOTE(tbug): Falcon implicitly converts max-age values to integers, # to ensure RFC 6265-compliance of the attribute value. 
    result = client.simulate_get('/test-convert')
    cookie = result.cookies[cookie_name]
    assert cookie.value == 'bar'
    assert cookie.domain is None
    assert cookie.expires is None
    assert not cookie.http_only
    assert cookie.max_age == 15
    assert cookie.path is None
    assert not cookie.secure


def test_response_unset_cookie(client):
    resp = falcon.Response()
    resp.unset_cookie('bad')
    resp.set_cookie('bad', 'cookie', max_age=300)
    resp.unset_cookie('bad')

    morsels = list(resp._cookies.values())
    assert len(morsels) == 1

    bad_cookie = morsels[0]
    assert bad_cookie['expires'] == -1

    output = bad_cookie.OutputString()
    assert 'bad=;' in output or 'bad="";' in output

    match = re.search('expires=([^;]+)', output)
    assert match

    expiration = http_date_to_dt(match.group(1), obs_date=True)
    assert expiration < datetime.utcnow()


def test_cookie_timezone(client):
    tz = TimezoneGMT()
    assert tz.tzname(timedelta(0)) == 'GMT'


# =====================================================================
# Request
# =====================================================================


def test_request_cookie_parsing():
    # testing with a github-ish set of cookies
    headers = [
        (
            'Cookie',
            """
            logged_in=no;_gh_sess=eyJzZXXzaW9uX2lkIjoiN2;
            tz=Europe/Berlin; _ga =GA1.2.332347814.1422308165;
            tz2=Europe/Paris ; _ga2="line1\\012line2";
            tz3=Europe/Madrid ;_ga3= GA3.2.332347814.1422308165;
            _gat=1;
            _octo=GH1.1.201722077.1422308165
            """,
        ),
    ]
    environ = testing.create_environ(headers=headers)
    req = falcon.Request(environ)

    # NOTE(kgriffs): Test case-sensitivity
    assert req.get_cookie_values('TZ') is None
    assert 'TZ' not in req.cookies
    with pytest.raises(KeyError):
        req.cookies['TZ']

    for name, value in [
        ('logged_in', 'no'),
        ('_gh_sess', 'eyJzZXXzaW9uX2lkIjoiN2'),
        ('tz', 'Europe/Berlin'),
        ('tz2', 'Europe/Paris'),
        ('tz3', 'Europe/Madrid'),
        ('_ga', 'GA1.2.332347814.1422308165'),
        ('_ga2', 'line1\nline2'),
        ('_ga3', 'GA3.2.332347814.1422308165'),
        ('_gat', '1'),
        ('_octo', 'GH1.1.201722077.1422308165'),
    ]:
        assert name in req.cookies
        assert req.cookies[name] == value
        assert req.get_cookie_values(name) == [value]


def test_invalid_cookies_are_ignored():
    vals = [chr(i) for i in range(0x1F)]
    vals += [chr(i) for i in range(0x7F, 0xFF)]
    vals += '()<>@,;:\\"/[]?={} \x09'.split()
    for c in vals:
        headers = [
            ('Cookie', 'good_cookie=foo;bad' + c + 'cookie=bar'),
        ]
        environ = testing.create_environ(headers=headers)
        req = falcon.Request(environ)
        assert req.cookies['good_cookie'] == 'foo'
        assert 'bad' + c + 'cookie' not in req.cookies


def test_duplicate_cookie():
    headers = [
        ('Cookie', 'x=1;bad{cookie=bar; x=2;x=3 ; x=4;'),
    ]
    environ = testing.create_environ(headers=headers)
    req = falcon.Request(environ)
    assert req.cookies['x'] == '1'
    assert req.get_cookie_values('x') == ['1', '2', '3', '4']


def test_cookie_header_is_missing():
    environ = testing.create_environ(headers={})
    req = falcon.Request(environ)
    assert req.cookies == {}
    assert req.get_cookie_values('x') is None

    # NOTE(kgriffs): Test again with a new object to cover calling in the
    # opposite order.
req = falcon.Request(environ) assert req.get_cookie_values('x') is None assert req.cookies == {} def test_unicode_inside_ascii_range(): resp = falcon.Response() # should be ok resp.set_cookie('non_unicode_ascii_name_1', 'ascii_value') resp.set_cookie('unicode_ascii_name_1', 'ascii_value') resp.set_cookie('non_unicode_ascii_name_2', 'unicode_ascii_value') resp.set_cookie('unicode_ascii_name_2', 'unicode_ascii_value') @pytest.mark.parametrize( 'name', (UNICODE_TEST_STRING, UNICODE_TEST_STRING.encode('utf-8'), 42) ) def test_non_ascii_name(name): resp = falcon.Response() with pytest.raises(KeyError): resp.set_cookie(name, 'ok_value') @pytest.mark.parametrize( 'value', (UNICODE_TEST_STRING, UNICODE_TEST_STRING.encode('utf-8'), 42) ) def test_non_ascii_value(value): resp = falcon.Response() # NOTE(tbug): we need to grab the exception to check # that it is not instance of UnicodeEncodeError, so # we cannot simply use pytest.raises try: resp.set_cookie('ok_name', value) except ValueError as e: assert isinstance(e, ValueError) assert not isinstance(e, UnicodeEncodeError) else: pytest.fail('set_bad_cookie_value did not fail as expected') def test_lax_same_site_value(client): result = client.simulate_get('/same-site') cookie = result.cookies['foo'] assert cookie.same_site == 'Lax' def test_strict_same_site_value(client): result = client.simulate_post('/same-site') cookie = result.cookies['bar'] assert cookie.same_site == 'Strict' def test_none_same_site_value(client): result = client.simulate_put('/same-site') cookie = result.cookies['baz'] assert cookie.same_site == 'None' def test_same_site_empty_string(client): result = client.simulate_delete('/same-site') cookie = result.cookies['baz'] assert cookie.same_site is None @pytest.mark.parametrize( 'same_site', ['laX', 'lax', 'STRICT', 'strict', 'None', 'none'] ) def test_same_site_value_case_insensitive(same_site): resp = falcon.Response() resp.set_cookie('foo', 'bar', same_site=same_site) # NOTE(kgriffs): Verify directly, unit-test style, since we # already tested end-to-end above. morsel = resp._cookies['foo'] assert morsel['samesite'].lower() == same_site.lower() @pytest.mark.parametrize('same_site', ['bogus', 'laxx', 'stric']) def test_invalid_same_site_value(same_site): resp = falcon.Response() with pytest.raises(ValueError): resp.set_cookie('foo', 'bar', same_site=same_site)
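# End-to-end sketch (added for illustration): the minimal WSGI round-trip that
# the suite above exercises piece by piece. The resource, route, and cookie
# values are made up for the example.
import falcon
import falcon.testing as testing

class HelloResource:
    def on_get(self, req, resp):
        resp.set_cookie('session', 'abc123', max_age=3600)

app = falcon.App()
app.add_route('/hello', HelloResource())
result = testing.TestClient(app).simulate_get('/hello')
assert result.cookies['session'].value == 'abc123'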
#!/usr/bin/python # Support for python2 from __future__ import print_function #Modify system path import sys sys.path.append('../pycThermopack/') # Importing pyThermopack from pyctp import cubic # Importing Numpy (math, arrays, etc...) import numpy as np # Importing Matplotlib (plotting) import matplotlib.pyplot as plt cb = cubic.cubic() cb.init("Ne,H2","SRK","Classic","Classic") cb.set_kij(1,2,0.19) KSTYPE, VLE, LLVE, CRIT, AZ = cb.global_binary_plot(minimum_pressure=1.0e5, minimum_temperature=2.0, include_azeotropes=True) p_scaling = 1.0e-6 colors = [ "black", "blue", "red", "green"] linestyles = [ "-", "--", ":", "-."] label = "VLE" for i in range(len(VLE)): plt.plot(VLE[i][:,0], VLE[i][:,1]*p_scaling, linestyle=linestyles[0], color=colors[0], label=label) label = None label = "VLLE" for i in range(len(LLVE)): plt.plot(LLVE[i][:,0], LLVE[i][:,1]*p_scaling, linestyle=linestyles[1], color=colors[1], label=label) label = None label = "Critical" for i in range(len(CRIT)): plt.plot(CRIT[i][:,0], CRIT[i][:,1]*p_scaling, linestyle=linestyles[2], color=colors[2], label=label) label = None label = "AZ" for i in range(len(AZ)): plt.plot(AZ[i][:,0], AZ[i][:,1]*p_scaling, linestyle=linestyles[3], color=colors[3], label=label) label = None if KSTYPE == 1: ks_str = "I" elif KSTYPE == 2: ks_str = "II" elif KSTYPE == 3: ks_str = "III" elif KSTYPE == 4: ks_str = "IV" elif KSTYPE == 5: ks_str = "V" else: ks_str = str(KSTYPE) plt.title("van Konynenburg and Scott type: " + ks_str) leg = plt.legend(loc="best", numpoints=1) leg.get_frame().set_linewidth(0.0) plt.ylim([1.0e5*p_scaling, 0.3e7*p_scaling]) plt.ylabel(r"$P$ (MPa)") plt.xlabel(r"$T$ (K)") plt.show() plt.clf()
# Task 4
from random import choices

# 1. Write a function (F): input is a list of names and an integer N; output is
# a list of N random names drawn from the input list (repeats allowed; suggested
# values: 20 names, N = 100; the random module is recommended).
list_1 = ['Winnie', 'Christopher', 'Robin', 'Piglet', 'Eeyore', 'Kanga', 'Roo',
          'Tigger', 'Rabbit', 'Owl', 'Dorothy', 'Bill', 'Eureka', 'Toto',
          'Scarecrow', 'Mowgli', 'Bagheera', 'Baloo', 'Kaa', 'Akela']

def names_choice(names, quantity):
    return choices(names, k=quantity)

list_2 = names_choice(list_1, 100)  # get a new list of 100 names
print(list_2)

# 2. Write a function that prints the most frequent name in the list returned by F.
def most_frequent(names):
    counter = 0
    num = names[0]
    for i in names:
        freq = names.count(i)
        if freq > counter:
            counter = freq
            num = i
    return num

print(most_frequent(list_2))

# 3. Write a function that prints the rarest first letter among the names
# in the list returned by F.
def unique(names):
    first_letters = [name[0] for name in names]
    letter_counts = {letter: first_letters.count(letter) for letter in first_letters}
    rarest = sorted(letter_counts.items(), key=lambda item: item[1])
    return rarest[0][0]

print(unique(list_2))
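# Alternative sketch (added for illustration): collections.Counter solves both
# sub-tasks directly; the function names here are my own, not part of the task.
from collections import Counter

def most_frequent_counter(names):
    return Counter(names).most_common(1)[0][0]

def rarest_first_letter(names):
    # most_common() sorts from most to least frequent, so the last entry is rarest
    return Counter(name[0] for name in names).most_common()[-1][0]

print(most_frequent_counter(list_2), rarest_first_letter(list_2))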
""" Binner functions for grouping numeric variables into bins """ import re import math import numpy as np import pandas as pd def cutpoints( x, qntl_cutoff=[0.025,0.975], cuts='linear', ncuts=10, sig_fig=3, **kwargs): ''' Function to return cut points and bin labels for a numeric 1-D array Parameters ---------- x : numpy 1-D array numeric 1-D array qntl_cutoff : list list of length two with lower and upper quantile cutoffs: To prevent extreme outliers from influencing the cutpoints for the bins, construct the cutpoints between the qntl_cutoff[0] quantile and the qntl_cutoff[1] quantile. If qntl_cutoff is None then do not ignore outliers cuts: str one of: 'linear', 'log', 'logp1', 'quantile' 'linear' : equally spaced cutpoints 'log' : logarithmically spaced cutpoints 'logp1' : logarithmically spaced cutpoints after adding 1 'root' : apply transform np.sign(x)*np.sqrt(abs(x)) and then space points linearly 'quantile' : cutpoints corresponding to equally spaced quantiles ncut : int number of cutpoints sig_fig : int number of significant figures to display in the aesthetically printed bin labels Returns ------- c_final : numpy 1-D array final cut points ''' # Create lower bound: lb = np.nanmin(x) lb_ord_of_mag = _order_of_mag(lb) lb_pwr = sig_fig - 1 - lb_ord_of_mag lb = np.floor(lb * 10**lb_pwr) / 10**lb_pwr # Create upper bound: ub = np.nanmax(x) ub_ord_of_mag = _order_of_mag(ub) ub_pwr = sig_fig - 1 - ub_ord_of_mag ub = np.ceil(ub * 10**ub_pwr) / 10**ub_pwr # Apply quantile cutoffs if provided: if (qntl_cutoff is not None and len(qntl_cutoff) == 2 and isinstance(qntl_cutoff[0],float) and isinstance(qntl_cutoff[1],float)): ep = np.quantile(x, qntl_cutoff) else: ep = np.array([lb,ub]) # Create cut points if isinstance(cuts,str): if cuts == 'linear': c = np.linspace(ep[0],ep[1],num = ncuts) elif cuts == 'log': if ep[0] <= 0: msg = "Variable range includes zero when using 'log'" + \ " - consider using 'logp1' instead" raise ValueError(msg) else: c = 10**np.linspace( np.log10(ep[0]), np.log10(ep[1]), num = ncuts ) elif cuts == 'logp1': if ep[0] <= -1: msg = "Variable range includes zero when using 'log'" + \ " - consider using 'root' instead" raise ValueError(msg) else: c = 10**np.linspace( np.log10(ep[0] + 1), np.log10(ep[1] + 1), num = ncuts ) c = np.sort(np.unique(np.append(0,c))) elif cuts == 'root': c = np.linspace( np.sign(ep[0]) * np.sqrt(abs(ep[0])), np.sign(ep[1]) * np.sqrt(abs(ep[1])) ) c = np.sign(c) * c**2 elif cuts == 'quantile': c = np.quantile(x,np.linspace(0,1,ncuts)) else: # cuts are the actual cut points themselves c = cuts # add far endpoints to c: c = np.unique(np.append(np.append(lb,c),ub)) # round/format values in c: c_ord_of_mag = np.array([_order_of_mag(i) for i in c]) c_log_rnd = np.round(c / 10.0**c_ord_of_mag, sig_fig - 1) c_final = np.unique(c_log_rnd * (10.0**c_ord_of_mag)) return c_final def human_readable_num(number, sig_fig=3, **kwargs): ''' Function for making numbers aesthetically-pleasing Parameters ---------- number : float or int A number to format sig_fig : int Number of significant figures to print Returns ------- z : str number formatted as str ''' if np.isnan(number): z = 'MISSING' elif number == 0: z = '0' elif np.abs(number) < 1: magnitude = int(np.floor(np.log10(abs(number)))) # if |number| >= 0.01 if magnitude >= -2: z = ('%.' + str(sig_fig - 1 - magnitude) + 'f') % (number) z = _remove_trailing_zeros(z) else: final_num = number / 10**magnitude #z = ('%.' + str(sig_fig - 1) + 'f%s') % (final_num, 'E' + str(magnitude)) z = ('%.' 
+ str(sig_fig - 1) + 'f') % (final_num) z = _remove_trailing_zeros(z) + 'E' + str(magnitude) else: units = ['', 'K', 'M', 'G', 'T', 'P'] k = 1000.0 magnitude = int(math.floor(math.log(np.abs(number), k))) final_num = number / k**magnitude if magnitude > 5: unit = 'E' + str(int(3*magnitude)) else: unit = units[magnitude] if np.abs(final_num) < 10: #z = ('%.' + str(sig_fig - 1) + 'f%s') % (final_num, unit) z = ('%.' + str(sig_fig - 1) + 'f') % (final_num) z = _remove_trailing_zeros(z) + unit elif np.abs(final_num) < 100: #z = ('%.' + str(sig_fig-2) + 'f%s') % (final_num, unit) z = ('%.' + str(sig_fig - 2) + 'f') % (final_num) z = _remove_trailing_zeros(z) + unit else: #z = ('%.' + str(sig_fig-3) + 'f%s') % (final_num, unit) z = ('%.' + str(sig_fig - 3) + 'f') % (final_num) z = _remove_trailing_zeros(z) + unit #z = _remove_trailing_zeros(z) return z def cutter( df, x, max_levels=20, point_mass_threshold=0.1, sig_fig=3, **kwargs): """ Cut a numeric variable into bins Parameters ---------- df : pandas.DataFrame x : str the name of the numeric variable in 'df' to construct bins from max_levels : int maximum number of bins to create from 'x' point_mass_threshold : float Levels of 'x' with frequency greater than point_mass_threshold get their own bin sig_fig : int Significant figures to use in binning Returns ------- z : pandas.Series Categorical series of binned values """ df = df.loc[:,[x]].copy() # pm contains any values that exceed point_mass_threshold # pm is 1-D numpy.array pm = _point_mass(df[x], threshold = point_mass_threshold) if len(pm) == 0: # if there are no values exceeding point_mass_threshold # proceed as usual x_no_nan = ~np.isnan(df.loc[:,x].values) cps = cutpoints( df.loc[x_no_nan,x].values, ncuts = max_levels, **kwargs) elif len(pm) > 0: # if there are values exceeding point_mass_threshold # put all remaining values in rem rem = df.loc[~df[x].isin(pm),[x]] x_no_nan = ~np.isnan(rem.loc[:,x].values) if len(rem.loc[x_no_nan,x].values) > 0: # apply cutpoints to rem if there are non-NaN # values cps = cutpoints( rem.loc[x_no_nan,x].values, ncuts = max_levels, # - len(pm), **kwargs) else: # Otherwise, rem has no non-NaN values and # we just generate empty cutpoints cps = np.array([]) # Construct bin_labels and pm_labels c_final, bin_labels, pm_labels = _finalize_bins(cps,pm,sig_fig=sig_fig) # Bin values df.loc[~df[x].isin(pm),x + '_BINNED'] = pd.cut( df.loc[~df[x].isin(pm),x].values, c_final, labels=bin_labels, include_lowest=True) # Bring in point masses for i,v in enumerate(pm): df.loc[df[x] == v,x + '_BINNED'] = pm_labels[i] # Construct final labels final_labels = bin_labels+pm_labels final_labels.sort() # Apply labels z = pd.Categorical( df.loc[:,x + '_BINNED'].values, categories = final_labels) return z def binner_df( df, x, new_col=None, fill_nan="MISSING", max_levels=20, **kwargs): """ Bin a numeric variable Parameters -------------------------- df : pandas.DataFrame x : str The name of the numeric variable in 'df' to construct bins from new_col : str Use as the name of the binned variable fill_nan : str Value to fill nans with max_levels : int Maximum number of bins to create from 'x' Returns --------------------------- pandas.DataFrame including new binned column """ if new_col is None: new_col = x df_ = df.copy().assign( **{new_col: lambda z: cutter(z, x, max_levels, **kwargs)}) if fill_nan is not None: df_.replace({new_col: {np.nan: fill_nan}}, inplace=True) return df_ def _log_spcl(x): """ Log special returns the base 10 log of the absolute value of x for non-zero x. 
Otherwise, if x is 0, return 0 Parameters ---------- x : int or float Returns ------- float """ if x == 0: return 0 else: return math.log(abs(x), 10) def _order_of_mag(x): """ Calculate the order of magnitude of a number Parameters ---------- x : float or int Returns ------- int : order of magnitude of x """ if x == 0: ord_of_mag = 0 else: ord_of_mag = int(np.floor(_log_spcl(x))) return ord_of_mag def _point_mass(x, threshold=0.1): """ Find point masses in pandas.Series with frequency exceeding specified value Parameters ---------- x : pandas.Series threshold : float If value frequency exceeds threshold, consider value to have point mass Returns ------- 1-D numpy array that contains the point masses """ cnts = x.value_counts(normalize=True) v = cnts[cnts > threshold].index.values v.sort() return v def _remove_trailing_zeros(num_as_str): """ Remove unnecessary trailing zeros from number Parameters ---------- num_as_str : str Number as string Returns ------- Number as str with unnecessary trailing zeros removed """ if re.search("\\.", num_as_str): num_as_str = re.sub("0*$", "", num_as_str) num_as_str = re.sub("\\.$", "", num_as_str) return num_as_str def _remove_closest(x, y, exclude_endpoints=True, **kwargs): """ Remove the elements of x that are closest to the elements of y. Optionally excluding the endpoints of x in the determination Parameters ---------- x : 1-D numpy array y : 1-D numpy array exclude_endpoints : Boolean default True Returns ------- x : numpy.array numpy 1-D array, the elements of x after removing the values closest to the elements of y """ x = x.copy() if len(x) > 2 or not exclude_endpoints: if exclude_endpoints: z = x[1:-1] else: z = x r = range(len(z)) ridx = [min(r, key=lambda i: abs(z[i] - j)) + 1 for j in y] ridx = list(set(ridx)) x = x.tolist() for index in sorted(ridx, reverse=True): del x[index] x = np.array(x) return x def _finalize_bins(x, pm, sig_fig=3, **kwargs): """ Orchestrator for creating bins for numeric variables Parameters ---------- x : numpy.array 1-D array of preliminary bin endpoints pm : numpy.array 1-D array of values with point masses sig_fig : int Number of significant figures to use Returns ------- b : numpy.array 1-D array of finalizied bin endpoints bin_labels : list Final bin labels pm_labels : list Final point mass labels """ b = _remove_closest(x, pm, **kwargs) b = np.unique(np.concatenate([b, pm])) b.sort() bin_labels, pm_labels = _label_constructor( b, pm, sig_fig=sig_fig, **kwargs) return b, bin_labels, pm_labels def _label_constructor(x, pm, sig_fig=3, **kwargs): """ Create bin labels for histogramming a numeric variable Parameters ---------- x : 1-D numpy array The cutpoints for binning pm : 1-D numpy array The values with point masses sig_fig : int The number of significant figures to use Returns ------- bin_labels : list Labels for the bins pm_labels : list Labels for the point masses """ bin_labels = [] pm_labels = [] x_format = [human_readable_num(i, sig_fig=sig_fig) for i in x] cntr = 0 for i in range(len(x)): if x[i] in pm: pm_labels.append(str(i+cntr+1).zfill(2) + ": " + x_format[i]) cntr += 1 if i < len(x) - 1: bin_labels.append( str(i + cntr + 1).zfill(2) + ': ' + ('[' if i == 0 and (x[i] not in pm) else '(') + x_format[i] + ', ' + x_format[i+1] + (']' if (x[i+1] not in pm) else ')') ) return bin_labels, pm_labels
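# Usage sketch (added for illustration): if executed alongside the functions
# above (i.e., in the same module), this bins a skewed numeric column into
# labeled categories. The column name and data are made up; cuts='log' flows
# through **kwargs down to cutpoints().
import numpy as np
import pandas as pd

df = pd.DataFrame({'income': np.random.lognormal(mean=10, sigma=1, size=1000)})
binned = binner_df(df, 'income', new_col='income_bin', fill_nan=None,
                   max_levels=8, cuts='log')
print(binned['income_bin'].value_counts().sort_index())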
import numpy import gzip import os import config substitution_rate_directory = '%s/substitution_rates/' % (config.data_directory) intermediate_filename_template = '%s/%s/%s.txt.gz' def load_substitution_rate_map(species_name, prev_cohort='all'): intermediate_filename = intermediate_filename_template % (substitution_rate_directory, prev_cohort, species_name) substitution_rate_map = {} if not os.path.isfile(intermediate_filename): return substitution_rate_map file = gzip.open(intermediate_filename,"r") file.readline() # header for line in file: items = line.split(",") if items[0].strip()!=species_name: continue record_strs = [", ".join(['Species', 'Sample1', 'Sample2', 'Type', 'Num_muts', 'Num_revs', 'Num_mut_opportunities', 'Num_rev_opportunities'])] sample_1 = items[1].strip() sample_2 = items[2].strip() type = items[3].strip() num_muts = float(items[4]) num_revs = float(items[5]) num_mut_opportunities = float(items[6]) num_rev_opportunities = float(items[7]) num_changes = num_muts+num_revs num_opportunities = num_mut_opportunities+num_rev_opportunities sample_pair = (sample_1, sample_2) if type not in substitution_rate_map: substitution_rate_map[type] = {} substitution_rate_map[type][sample_pair] = (num_muts, num_revs, num_mut_opportunities, num_rev_opportunities) return substitution_rate_map def calculate_mutrev_matrices_from_substitution_rate_map(substitution_rate_map, type, allowed_samples=[]): # Rewritten to preserve order of allowed samples # If allowed samples contains things that are not in DB, it returns zero opportunities total_sample_set = set([]) for sample_1, sample_2 in substitution_rate_map[type].keys(): total_sample_set.add(sample_1) total_sample_set.add(sample_2) if len(allowed_samples)==0: allowed_samples = list(sorted(total_sample_set)) # allows us to go from sample name to idx in allowed samples (to preserve order) sample_idx_map = {allowed_samples[i]:i for i in xrange(0,len(allowed_samples))} mut_difference_matrix = numpy.zeros((len(allowed_samples), len(allowed_samples)))*1.0 rev_difference_matrix = numpy.zeros_like(mut_difference_matrix) mut_opportunity_matrix = numpy.zeros_like(mut_difference_matrix) rev_opportunity_matrix = numpy.zeros_like(mut_difference_matrix) for sample_pair in substitution_rate_map[type].keys(): sample_i = sample_pair[0] sample_j = sample_pair[1] if not ((sample_i in sample_idx_map) and (sample_j in sample_idx_map)): continue i = sample_idx_map[sample_i] j = sample_idx_map[sample_j] num_muts, num_revs, num_mut_opportunities, num_rev_opportunities = substitution_rate_map[type][sample_pair] mut_difference_matrix[i,j] = num_muts rev_difference_matrix[i,j] = num_revs mut_opportunity_matrix[i,j] = num_mut_opportunities rev_opportunity_matrix[i,j] = num_rev_opportunities return allowed_samples, mut_difference_matrix, rev_difference_matrix, mut_opportunity_matrix, rev_opportunity_matrix def calculate_matrices_from_substitution_rate_map(substitution_rate_map, type, allowed_samples=[]): # once the map is loaded, then we can compute rate matrices in this definition (so, it relies on the previous def) samples, mut_difference_matrix, rev_difference_matrix, mut_opportunity_matrix, rev_opportunity_matrix = calculate_mutrev_matrices_from_substitution_rate_map( substitution_rate_map, type, allowed_samples) difference_matrix = mut_difference_matrix+rev_difference_matrix opportunity_matrix = mut_opportunity_matrix+rev_opportunity_matrix return samples, difference_matrix, opportunity_matrix
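# Usage sketch (added for illustration): the typical flow through the loaders
# above. 'Bacteroides_vulgatus_57955' and the '4D' (fourfold-degenerate site)
# type key are placeholders; use whatever species files and type keys exist
# under substitution_rate_directory.
if __name__ == '__main__':
    sub_map = load_substitution_rate_map('Bacteroides_vulgatus_57955')
    if '4D' in sub_map:
        samples, diffs, opps = calculate_matrices_from_substitution_rate_map(sub_map, '4D')
        # per-pair divergence, guarding against zero opportunities
        divergence = diffs / (opps + (opps == 0))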
from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('default_documents', '0012_auto_20151202_1413'), ] operations = [ migrations.AddField( model_name='contractordeliverable', name='document_number', field=models.CharField(max_length=250, null=True, verbose_name='Document number'), ), migrations.AddField( model_name='correspondence', name='document_number', field=models.CharField(max_length=250, null=True, verbose_name='Document number'), ), migrations.AddField( model_name='demometadata', name='document_number', field=models.CharField(max_length=250, null=True, verbose_name='Document number'), ), migrations.AddField( model_name='minutesofmeeting', name='document_number', field=models.CharField(max_length=250, null=True, verbose_name='Document number'), ), migrations.AlterField( model_name='contractordeliverable', name='document_key', field=models.SlugField(unique=True, max_length=250, verbose_name='Document key'), ), migrations.AlterField( model_name='correspondence', name='document_key', field=models.SlugField(unique=True, max_length=250, verbose_name='Document key'), ), migrations.AlterField( model_name='demometadata', name='document_key', field=models.SlugField(unique=True, max_length=250, verbose_name='Document key'), ), migrations.AlterField( model_name='minutesofmeeting', name='document_key', field=models.SlugField(unique=True, max_length=250, verbose_name='Document key'), ), ]
import unittest import rx3 from rx3 import operators as ops from rx3.testing import TestScheduler, ReactiveTest on_next = ReactiveTest.on_next on_completed = ReactiveTest.on_completed on_error = ReactiveTest.on_error subscribe = ReactiveTest.subscribe subscribed = ReactiveTest.subscribed disposed = ReactiveTest.disposed created = ReactiveTest.created class RxException(Exception): pass # Helper function for raising exceptions within lambdas def _raise(ex): raise RxException(ex) class TestDistinctUntilChanged(unittest.TestCase): def test_distinct_until_changed_never(self): scheduler = TestScheduler() def create(): return rx3.never().pipe(ops.distinct_until_changed()) results = scheduler.start(create) assert results.messages == [] def test_distinct_until_changed_empty(self): scheduler = TestScheduler() xs = scheduler.create_hot_observable(on_next(150, 1), on_completed(250)) def create(): return xs.pipe(ops.distinct_until_changed()) results = scheduler.start(create).messages self.assertEqual(1, len(results)) assert(results[0].value.kind == 'C' and results[0].time == 250) def test_distinct_until_changed_return(self): scheduler = TestScheduler() xs = scheduler.create_hot_observable(on_next(150, 1), on_next(220, 2), on_completed(250)) def create(): return xs.pipe(ops.distinct_until_changed()) results = scheduler.start(create).messages self.assertEqual(2, len(results)) assert(results[0].value.kind == 'N' and results[0].time == 220 and results[0].value.value == 2) assert(results[1].value.kind == 'C' and results[1].time == 250) def test_distinct_until_changed_on_error(self): ex = 'ex' scheduler = TestScheduler() xs = scheduler.create_hot_observable(on_next(150, 1), on_error(250, ex)) def create(): return xs.pipe(ops.distinct_until_changed()) results = scheduler.start(create).messages self.assertEqual(1, len(results)) assert(results[0].value.kind == 'E' and results[0].time == 250 and results[0].value.exception == ex) def test_distinct_until_changed_all_changes(self): scheduler = TestScheduler() xs = scheduler.create_hot_observable( on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_completed(250)) def create(): return xs.pipe(ops.distinct_until_changed()) results = scheduler.start(create).messages self.assertEqual(5, len(results)) assert(results[0].value.kind == 'N' and results[0].time == 210 and results[0].value.value == 2) assert(results[1].value.kind == 'N' and results[1].time == 220 and results[1].value.value == 3) assert(results[2].value.kind == 'N' and results[2].time == 230 and results[2].value.value == 4) assert(results[3].value.kind == 'N' and results[3].time == 240 and results[3].value.value == 5) assert(results[4].value.kind == 'C' and results[4].time == 250) def test_distinct_until_changed_all_same(self): scheduler = TestScheduler() xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next( 220, 2), on_next(230, 2), on_next(240, 2), on_completed(250)) def create(): return xs.pipe(ops.distinct_until_changed()) results = scheduler.start(create).messages self.assertEqual(2, len(results)) assert(results[0].value.kind == 'N' and results[0].time == 210 and results[0].value.value == 2) assert(results[1].value.kind == 'C' and results[1].time == 250) def test_distinct_until_changed_some_changes(self): scheduler = TestScheduler() xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next(215, 3), on_next( 220, 3), on_next(225, 2), on_next(230, 2), on_next(230, 1), on_next(240, 2), on_completed(250)) def create(): return 
xs.pipe(ops.distinct_until_changed()) results = scheduler.start(create).messages self.assertEqual(6, len(results)) assert(results[0].value.kind == 'N' and results[0].time == 210 and results[0].value.value == 2) assert(results[1].value.kind == 'N' and results[1].time == 215 and results[1].value.value == 3) assert(results[2].value.kind == 'N' and results[2].time == 225 and results[2].value.value == 2) assert(results[3].value.kind == 'N' and results[3].time == 230 and results[3].value.value == 1) assert(results[4].value.kind == 'N' and results[4].time == 240 and results[4].value.value == 2) assert(results[5].value.kind == 'C' and results[5].time == 250) def test_distinct_until_changed_comparer_all_equal(self): scheduler = TestScheduler() xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next( 220, 3), on_next(230, 4), on_next(240, 5), on_completed(250)) def create(): return xs.pipe( ops.distinct_until_changed(comparer=lambda x, y: True) ) results = scheduler.start(create).messages self.assertEqual(2, len(results)) assert(results[0].value.kind == 'N' and results[0].time == 210 and results[0].value.value == 2) assert(results[1].value.kind == 'C' and results[1].time == 250) def test_distinct_until_changed_comparer_all_different(self): scheduler = TestScheduler() xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next( 220, 2), on_next(230, 2), on_next(240, 2), on_completed(250)) def create(): return xs.pipe( ops.distinct_until_changed(comparer=lambda x, y: False) ) results = scheduler.start(create).messages self.assertEqual(5, len(results)) assert(results[0].value.kind == 'N' and results[0].time == 210 and results[0].value.value == 2) assert(results[1].value.kind == 'N' and results[1].time == 220 and results[1].value.value == 2) assert(results[2].value.kind == 'N' and results[2].time == 230 and results[2].value.value == 2) assert(results[3].value.kind == 'N' and results[3].time == 240 and results[3].value.value == 2) assert(results[4].value.kind == 'C' and results[4].time == 250) def test_distinct_until_changed_key_mapper_div2(self): scheduler = TestScheduler() xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next( 220, 4), on_next(230, 3), on_next(240, 5), on_completed(250)) def create(): return xs.pipe(ops.distinct_until_changed(lambda x: x % 2)) results = scheduler.start(create).messages self.assertEqual(3, len(results)) assert(results[0].value.kind == 'N' and results[0].time == 210 and results[0].value.value == 2) assert(results[1].value.kind == 'N' and results[1].time == 230 and results[1].value.value == 3) assert(results[2].value.kind == 'C' and results[2].time == 250) def test_distinct_until_changed_key_mapper_throws(self): ex = 'ex' scheduler = TestScheduler() xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_completed(250)) def create(): return xs.pipe(ops.distinct_until_changed(lambda x: _raise(ex))) results = scheduler.start(create) assert results.messages == [on_error(210, ex)] def test_distinct_until_changed_comparer_throws(self): ex = 'ex' scheduler = TestScheduler() xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next(220, 3), on_completed(250)) def create(): return xs.pipe( ops.distinct_until_changed(comparer=lambda x, y: _raise(ex)), ) results = scheduler.start(create) assert results.messages == [on_next(210, 2), on_error(220, ex)]
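# A minimal non-test sketch of the operator exercised above, assuming rx3
# mirrors RxPY 3's creation API (rx.of):
import rx3
from rx3 import operators as ops

rx3.of(1, 1, 2, 2, 3, 1).pipe(
    ops.distinct_until_changed()   # drops only *consecutive* duplicates
).subscribe(print)                 # prints: 1 2 3 1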
#!/usr/bin/env python # T. Carman, Jan 20 2021 (Biden inauguration!) # A quick stab at setting up an ensemble of runs. import os import argparse import textwrap import sys import subprocess import json import numpy as np import netCDF4 as nc def setup_for_driver_adjust(exe_path, input_data_path, N=5): ''' Work in progress... ''' # Build the ensemble member directories for i in range(N): run_dir = 'ens_{:06d}'.format(i) # Note the --copy-inputs argument! Might want to verify that it is # single site inputs or space consumption might be a problem... # If space is a problem, could get fancier and only copy the input file # that is going to be modified.... s = "{}/setup_working_directory.py --copy-inputs --input-data-path {} {}".format(exe_path, input_data_path, run_dir) result = subprocess.run(s.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE) #capture_output=True) if len(result.stderr) > 0: print(result) # Now loop over the directories and modify the driver(s) in each for i in range(N): run_dir = 'ens_{:06d}'.format(i) #ds = nc.Dataset('{}/inputs/{}/historic-climate.nc'.format(run_dir, os.path.basename(input_data_path))) ds = nc.Dataset('{}/inputs/{}/historic-climate.nc'.format(run_dir, os.path.basename(input_data_path)), 'a') air_temp_timeseries = ds.variables['tair'][:,0,0] print(type(air_temp_timeseries)) d=1 variation = np.random.normal(0, d, len(air_temp_timeseries)) air_temp_mod = (air_temp_timeseries + variation) print(type(air_temp_mod)) ds.variables['tair'][:,0,0] = air_temp_mod # Now add some variation here.... #air_temp_mod = ????? # # This sorta works but would be horribly inefficient... # for i, value in enumerate(air_temp_timeseries): # variation = np.random.normal(value, .1, 1)[0] # print(i, value, variation, value + variation) # # And write it back to the file here... #ds.variables['tair'][:,0,0] = air_temp_mod ds.close() def setup_for_parameter_adjust_ensemble(exe_path, input_data_path, PFT='pft0', N=5, PARAM='albvisnir'): ''' Work in progress...bunch of hard coded stuff, not very flexible at the moment. Parameters ---------- exe_path : str, the path to the directory where this (ensemble_setup.py) script is. Assumes that other dvmdostem supporting scripts are in the same directory as this script. N : integer, number of members of the ensemble. PARAM : str, which parameter to adjust, must exist in one of the parameter files. PFT : str, which pft to adjust parameter for, e.g. 'pft0' ''' # draw samples from distribution PARAM_VALS = np.random.normal(loc=.5,scale=.1,size=N) # see what the samples look like #import matplotlib.pyplot as plt #plt.scatter(np.arange(0,N),np.random.normal(loc=.5,scale=.1,size=N)) #plt.show() for i, pv in enumerate(PARAM_VALS): # add leading zeros, so like this: ens_000000, ens_000001, etc run_dir = 'ens_{:06d}'.format(i) # 1. Setup the run directory s = "{}/setup_working_directory.py --input-data-path {} {}".format(exe_path, input_data_path, run_dir) result = subprocess.run(s.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE) #capture_output=True) if len(result.stderr) > 0: print(result) # Note that we could avoid the subprocess by importing the setup-working-directory.py into # this script and using the appropriate functions... # 2. Modify the appropriate value in the parameter files. 
This is somewhat # obtuse, these are the steps: # a) convert "block" of parameter data to json (using param_util.py) # b) modify value in json datastructure # c) write json data structure to temporary file # d) convert json file to "block" of data formatted as required for # our parameter files, again using param_util.py # e) capture output of previous step and overwrite the parameter file s = "{}/param_util.py --dump-block-to-json {}/parameters/cmt_envcanopy.txt 4".format(exe_path, run_dir) result = subprocess.run(s.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE) #, capture_output=True) if len(result.stderr) > 0: print(result) jd = json.loads(result.stdout.decode('utf-8')) jd[PFT][PARAM] = pv with open('/tmp/data.json', 'w') as f: f.write(json.dumps(jd)) s = "{}/param_util.py --fmt-block-from-json /tmp/data.json {}/parameters/cmt_envcanopy.txt".format(exe_path, run_dir) result = subprocess.run(s.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE) #, capture_output=True) with open("{}/parameters/cmt_envcanopy.txt".format(run_dir), 'w') as f: f.write(result.stdout.decode('utf-8')) if __name__ == '__main__': parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent('''\ Helper script for setting up an ensemble of dvmdostem runs. Work in progress. For now assumes that the working directory from which you run this script will be where you want your ensemble sub folders to be built. So you frequently will end up running this script with a relative path, i.e.: ../dvm-dos-tem/scripts/ensemble_setup.py --param-adjust --input-data ../some/path '''), epilog=textwrap.dedent('''\ epilog text...''') ) parser.add_argument('--param-adjust', action='store_true', help=textwrap.dedent('''\ Setup for a series of runs where parameter(s) are adjusted between runs. ''')) parser.add_argument('--input-data', help="Path to the driving data (i.e. something in the input data catalog...") parser.add_argument('--driver-adjust', action='store_true', help=textwrap.dedent('''\ Setup for a series of runs where the drivers are adjusted between runs. ''')) args = parser.parse_args() exe_path = os.path.dirname(os.path.abspath(sys.argv[0])) if args.param_adjust: print("setup for parameter adjust") setup_for_parameter_adjust_ensemble(exe_path, args.input_data) sys.exit(0) if args.driver_adjust: print("setup for driver adjust") setup_for_driver_adjust(exe_path, input_data_path=args.input_data) sys.exit(0) if not (args.driver_adjust or args.param_adjust): print("Error: must provide one of the options.") parser.print_help()
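# The driver-adjust routine above perturbs the `tair` series with zero-mean
# Gaussian noise before writing it back to the NetCDF file; the same pattern
# in isolation (synthetic stand-in data, names hypothetical):
import numpy as np

tair = np.linspace(-20.0, 15.0, 120)                 # fake monthly series
noisy = tair + np.random.normal(0, 1.0, len(tair))   # sigma d=1, as above
assert noisy.shape == tair.shape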
import asyncio from asyncio.streams import StreamReader, StreamWriter from concurrent.futures import TimeoutError from os import urandom from hashlib import sha1 from base64 import b64decode from io import BytesIO from struct import pack, unpack from Auth.Constants.AuthStep import AuthStep from Auth.Handlers.LoginChallenge import LoginChallenge from Auth.Handlers.LoginProof import LoginProof from Auth.Handlers.Realmlist import Realmlist from Auth.Constants.LoginOpCode import LoginOpCode from Auth.Crypto.SRP import SRP from World.WorldPacket.WorldPacketManager import WorldPacketManager from World.WorldPacket.Constants.WorldOpCode import WorldOpCode from Auth.Constants.WorldServerAuthResponseCodes import ResponseCodes from Account.AccountManager import AccountManager from Auth.Crypto.HeaderCrypt import HeaderCrypt from Server.Redis.RedisConnection import RedisConnection from Utils.Debug.Logger import Logger class AuthManager(object): AUTH_HANDLERS = { LoginOpCode.LOGIN_CHALL: LoginChallenge, LoginOpCode.LOGIN_PROOF: LoginProof, LoginOpCode.RECON_CHALL: 'ReconChallenge', LoginOpCode.RECON_PROOF: 'ReconProof', LoginOpCode.REALMLIST: Realmlist } def __init__(self, reader: StreamReader, writer: StreamWriter, **kwargs): self.reader = reader self.writer = writer # uses on first step self.srp = SRP() # uses on second step self.world_packet_manager = kwargs.pop('world_packet_manager', None) self.data = bytes() self.build = 0 self.unk = 0 self.account_name = None self.client_seed = 0 self.auth_seed = bytes() self.client_hash = bytes() self.session_key = bytes() self.server_hash = bytes() # uses in both cases self.temp_ref = kwargs.pop('temp_ref', None) async def process(self, step: AuthStep): if step == AuthStep.FIRST: await self.authenticate_on_login_server() elif step == AuthStep.SECOND: await self.authenticate_on_world_server() else: return None async def authenticate_on_login_server(self): while True: try: request = await asyncio.wait_for(self.reader.read(1024), timeout=1.0) if request: opcode, packet = request[0], request[1:] try: handler = AuthManager.AUTH_HANDLERS[LoginOpCode(opcode)] except ValueError: Logger.error('[AuthManager]: Incorrect request, check the opcode') pass else: response = await handler(packet=packet, srp=self.srp, temp_ref=self.temp_ref).process() if response: self.writer.write(response) except TimeoutError: continue async def authenticate_on_world_server(self): self.send_auth_challenge() try: await self._parse_data() await self._check_session_key() self._generate_server_hash() # after this step next packets will be encrypted self._setup_encryption() if self.server_hash != self.client_hash: raise Exception('[Auth Manager]: Server hash is differs from client hash') else: self._send_addon_info() self._send_auth_response() except TimeoutError: Logger.error('[Auth Manager]: Timeout on step2') def send_auth_challenge(self): # auth seed need to generate header_crypt Logger.info('[Auth Manager]: sending auth challenge') self.auth_seed = int.from_bytes(urandom(4), 'little') auth_seed_bytes = pack('<I', self.auth_seed) # TODO: make it like standard request handler response = WorldPacketManager.generate_packet(WorldOpCode.SMSG_AUTH_CHALLENGE, auth_seed_bytes) self.writer.write(response) async def _parse_data(self): data = await asyncio.wait_for(self.reader.read(1024), timeout=1.0) # omit first 6 bytes, cause 01-02 = packet size, 03-04 = opcode (0x1ED), 05-06 - unknown null-bytes tmp_buf = BytesIO(data[6:]) self.build = unpack('<H', tmp_buf.read(2))[0] # remove next 6 unknown 
null-bytes (\x00) tmp_buf.read(6) self.account_name = self._parse_account_name(tmp_buf) # set account for using in world packet handlers with AccountManager() as account_mgr: self.temp_ref.account = account_mgr.get(name=self.account_name).account self.client_seed = tmp_buf.read(4) self.client_hash = tmp_buf.read(20) def _parse_account_name(self, buffer: BytesIO): Logger.info('[Auth Session Manager]: parsing account name') result = bytes() while True: char = buffer.read(1) if char and char != b'\x00': result += char else: break try: result = result.decode('utf-8') except UnicodeDecodeError: Logger.error('[Auth Session Manager]: decode error, wrong name = {}'.format(result)) else: return result async def _check_session_key(self): Logger.info('[Auth Session Manager]: checking session key') session_key = await RedisConnection.create().get('#{}-session-key'.format(self.account_name)) if not session_key: raise Exception('Session key does not exists') self.session_key = b64decode(session_key) def _generate_server_hash(self): Logger.info('[Auth Session Manager]: generating server hash, acc={}, seed={}'.format(self.account_name, self.auth_seed)) to_hash = ( self.account_name.encode('ascii') + bytes(4) + self.client_seed + int.to_bytes(self.auth_seed, 4, 'little') + self.session_key ) self.server_hash = sha1(to_hash).digest() def _setup_encryption(self): Logger.info('[Auth Manager]: setup encryption') try: header_crypt = HeaderCrypt(self.session_key) except Exception as e: raise Exception('[Auth Manager]: error on setup encryption = {}'.format(e)) else: self.world_packet_manager.set_header_crypt(header_crypt) def _send_auth_response(self): # updating session request response = pack('<BIBIB', ResponseCodes.AUTH_OK.value, 0x00, # BillingTimeRemainingSMSG_AUTH_RESPONSE 0x00, # BillingPlanFlags 0x00, # BillingTimeRested 0x01 # Expansion, 0 - normal, 1 - TBC, must be set manually for each account ) response = WorldPacketManager.generate_packet( opcode=WorldOpCode.SMSG_AUTH_RESPONSE, data=response, header_crypt=self.world_packet_manager.header_crypt ) self.writer.write(response) def _send_addon_info(self): # TODO parse actual addon list from CMSG_AUTH_SESSION and check response = b'\x02\x01\x00\x00\x00\x00\x00\x00' * 16 response = WorldPacketManager.generate_packet( opcode=WorldOpCode.SMSG_ADDON_INFO, data=response, header_crypt=self.world_packet_manager.header_crypt ) # send this packet to show 'addons' button on Characters screen self.writer.write(response)
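# The SMSG_AUTH_RESPONSE payload built above is a fixed little-endian
# struct; the same layout in isolation (0x0C as AUTH_OK is an assumption
# from vanilla/TBC-era cores; the project reads it from ResponseCodes):
from struct import calcsize, pack, unpack

payload = pack('<BIBIB', 0x0C, 0, 0, 0, 1)  # result, billing time, flags, rested, expansion
assert calcsize('<BIBIB') == len(payload) == 11
assert unpack('<BIBIB', payload) == (0x0C, 0, 0, 0, 1)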
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))

# -- Project information -----------------------------------------------------

project = 'pyfair'
copyright = '2020, Theo Naunheim'
author = 'Theo Naunheim'

# The short X.Y version
version = '0.1.12'
# The full version, including alpha/beta/rc tags
release = '0.1-alpha.12'

# -- General configuration ---------------------------------------------------

# User defined HTML Logo
html_logo = '_static/logo.PNG'

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'pyfairdoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'pyfair.tex', 'pyfair Documentation', 'Theo Naunheim', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'pyfair', 'pyfair Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'pyfair', 'pyfair Documentation', author, 'pyfair', 'One line description of project.', 'Miscellaneous'), ] # -- Options for Epub output ------------------------------------------------- # Bibliographic Dublin Core info. epub_title = project # The unique identifier of the text. This can be a ISBN number # or the project homepage. # # epub_identifier = '' # A unique identification for the text. # # epub_uid = '' # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html'] # -- Extension configuration -------------------------------------------------
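# The commented-out html_theme_options above can be used to tune the RTD
# theme; for example (option names from the sphinx_rtd_theme docs, values
# arbitrary):
html_theme_options = {
    'collapse_navigation': False,
    'navigation_depth': 3,
}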
from nanome._internal._util._serializers import _StringSerializer from nanome.util import FileError from nanome._internal._util._serializers import _TypeSerializer class _Get(_TypeSerializer): def __init__(self): self.__string = _StringSerializer() def version(self): return 0 def name(self): return "get" def serialize(self, version, value, context): context.write_using_serializer(self.__string, value) def deserialize(self, version, context): error_code = FileError(context.read_int()) length = context.read_uint() file = context.read_bytes(length) return (error_code, file)
#!/usr/bin/env python

########
# NOTE: this may not be used at all at this point.
#
# The code in here is now in the py_gnome repository anyway
#######

"""
hazmatPy module

This module contains assorted functions and classes that are useful
for NOAA/HAZMAT stuff.

It currently contains:

read_bna(filename, polytype="list"):
    returns the data in a bna file, given in filename, in two possible forms:

    If polytype is set to "list" (the default) what is returned is a list
    of two-element tuples. The first element is the polygon type, taken
    from the second field in the header of each polygon in the bna. The
    second element is the polygon. Each polygon is a Nx2 NumPy array of
    floats, such that:

    polygons[n][m, 0] is the latitude of the mth point of the nth polygon.
    polygons[n][m, 1] is the longitude (in decimal degrees)

    If polytype is set to "PolygonSet", a polygon set, as defined in the
    Geometry module, is returned. A polygonset is a set of N polygons,
    such that P[n] returns a Nx2 NumPy array of floats, as defined above.

sort_by_other_list(list_to_sort, list_to_sort_by):
    returns a list of the elements of "list_to_sort", sorted by the
    elements of "list_to_sort_by".

    Example:
    >>> hazmat.sort_by_other_list(['a,', 'b', 'c', 'd'], [4, 1, 3, 2])
    ['b', 'd', 'c', 'a,']
"""

import numpy as np

print("about to define read_bna")


def read_bna(filename, polytype="list"):
    with open(filename, 'rt') as file:
        if polytype == "list":
            polygons = []
            line = file.readline()
            while line:
                fields = line.split(',')
                num_points = int(fields[2])
                polygon_type = fields[1].replace('"', '')
                polygon = np.zeros((num_points, 2), dtype=float)
                for i in range(num_points):
                    polygon[i, :] = [float(v) for v in file.readline().split(',')]
                polygons.append((polygon_type, polygon))
                line = file.readline().strip()
        elif polytype == "PolygonSet":
            import Geometry
            polygons = Geometry.PolygonSet()
            while True:
                line = file.readline()
                if not line:
                    break
                num_points = int(line.split(',')[2])
                polygon = np.zeros((num_points, 2), dtype=float)
                for i in range(num_points):
                    polygon[i, :] = [float(v) for v in file.readline().split(',')]
                polygons.append(polygon)
        else:
            raise ValueError('polytype must be either "list" or "PolygonSet"')
    return polygons


# Sorting routine:
def sort_by_other_list(list_to_sort, list_to_sort_by):
    """
    sort_by_other_list(list_to_sort, list_to_sort_by)

    function that sorts one list by the contents of another list.
    the list that is being sorted does not have to be sortable
    """
    pairs = sorted(zip(list_to_sort_by, range(len(list_to_sort_by))))
    return [list_to_sort[i] for _, i in pairs]
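# Typical use per the docstring above ("coastline.bna" is a hypothetical
# file name):
if __name__ == "__main__":
    for polygon_type, polygon in read_bna("coastline.bna"):
        print(polygon_type, polygon.shape)   # each polygon is an (N, 2) array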
import choraconfig, glob batch = choraconfig.clone_batch("nrec-new") batch["toolIDs"] = ["chorafull","icra2019","ua","utaipan","viap"]
import pandas as pd def result1(print_table=True): # building table for result 1 shs27_bfs = pd.read_csv('save_test_results/SHS27k_bfs', sep=',', header=None) shs27_dfs = pd.read_csv('save_test_results/SHS27k_dfs', sep=',', header=None) shs27_random = pd.read_csv('save_test_results/SHS27k_random', sep=',', header=None) shs148_bfs = pd.read_csv('save_test_results/SHS148k_bfs', sep=',', header=None) shs148_dfs = pd.read_csv('save_test_results/SHS148k_dfs', sep=',', header=None) shs148_random = pd.read_csv('save_test_results/SHS148k_random', sep=',', header=None) cols = ['X_all', 'X_bs', 'X_es', 'X_ns', 'bs', 'es', 'ns'] means = pd.DataFrame(columns=cols) stds = pd.DataFrame(columns=cols) for table in [shs27_random, shs27_bfs, shs27_dfs, shs148_random, shs148_bfs, shs148_dfs]: table[1] = table[1].replace(' None', -1) means = means.append({k: v for k, v in zip(cols, table.mean())}, ignore_index=True) stds = stds.append({k: v for k, v in zip(cols, table.std())}, ignore_index=True) means.index = ['random', 'bfs', 'dfs','random', 'bfs', 'dfs'] stds.index = ['random', 'bfs', 'dfs','random', 'bfs', 'dfs'] combined = means.round(2).astype(str) + ' pm ' + stds.round(2).astype(str) if print_table: print(combined.to_latex()) return combined def result2(): gen_shs27_bfs = pd.read_csv('save_test_results/generalize_SHS27k_bfs', sep=',', header=None) gen_shs27_dfs = pd.read_csv('save_test_results/generalize_SHS27k_dfs', sep=',', header=None) gen_shs27_random = pd.read_csv('save_test_results/generalize_SHS27k_random', sep=',', header=None) gen_shs148_bfs = pd.read_csv('save_test_results/generalize_SHS148k_bfs', sep=',', header=None) gen_shs148_dfs = pd.read_csv('save_test_results/generalize_SHS148k_dfs', sep=',', header=None) gen_shs148_random = pd.read_csv('save_test_results/generalize_SHS148k_random', sep=',', header=None) cols = ['random', 'BFS', 'DFS'] gen_means = pd.DataFrame(columns=cols) gen_stds = pd.DataFrame(columns=cols) gen_means = gen_means.append( {'random': gen_shs27_random.mean().values[0], 'BFS': gen_shs27_bfs.mean().values[0], 'DFS': gen_shs27_dfs.mean().values[0]}, ignore_index=True) gen_means = gen_means.append( {'random': gen_shs148_random.mean().values[0], 'BFS': gen_shs148_bfs.mean().values[0], 'DFS': gen_shs148_dfs.mean().values[0]}, ignore_index=True) gen_stds = gen_stds.append({'random': gen_shs27_random.std().values[0], 'BFS': gen_shs27_bfs.std().values[0], 'DFS': gen_shs27_dfs.std().values[0]}, ignore_index=True) gen_stds = gen_stds.append({'random': gen_shs148_random.std().values[0], 'BFS': gen_shs148_bfs.std().values[0], 'DFS': gen_shs148_dfs.std().values[0]}, ignore_index=True) gen_means.index = ['SHS27k-Train', 'SHS148k-Train'] gen_stds.index = ['SHS27k-Train', 'SHS148k-Train'] gen_combined = gen_means.round(2).astype(str) + ' pm ' + gen_stds.round(2).astype(str) print(gen_combined.to_latex()) def result3(): gct_bfs_27 = pd.read_csv('save_test_results/all_GCT_SHS27k_bfs', sep=',', header=None) gct_dfs_27 = pd.read_csv('save_test_results/all_GCT_SHS27k_dfs', sep=',', header=None) gct_bfs_148 = pd.read_csv('save_test_results/all_GCT_SHS148k_bfs', sep=',', header=None) gct_dfs_148 = pd.read_csv('save_test_results/all_GCT_SHS148k_dfs', sep=',', header=None) bfs = [str(df.mean().round(2)[0]) + ' pm ' + str(df.std().round(2)[0]) for df in [gct_bfs_27, gct_bfs_148]] dfs = [str(df.mean().round(2)[0]) + ' pm ' + str(df.std().round(2)[0]) for df in [gct_dfs_27, gct_dfs_148]] cols = ['SHS27k', 'SHS148k'] res3 = pd.DataFrame(columns=cols) combined = result1(False) rows = 
[combined.loc['bfs', 'X_all'].values, bfs, combined.loc['dfs', 'X_all'].values, dfs] for row in rows: res3 = res3.append({k: v for k, v in zip(cols, row)}, ignore_index=True) print(res3.to_latex()) def results_pipr(): shs27_bfs = pd.read_csv('save_test_results/PIPR_SHS27k_bfs', sep=',', header=None).iloc[:, 3:] shs27_dfs = pd.read_csv('save_test_results/PIPR_SHS27k_dfs', sep=',', header=None).iloc[:, 3:] shs27_random = pd.read_csv('save_test_results/PIPR_SHS27k_random', sep=',', header=None).iloc[:, 3:] shs148_bfs = pd.read_csv('save_test_results/PIPR_SHS148k_bfs', sep=',', header=None).iloc[:, 3:] shs148_dfs = pd.read_csv('save_test_results/PIPR_SHS148k_dfs', sep=',', header=None).iloc[:, 3:] shs148_random = pd.read_csv('save_test_results/PIPR_SHS148k_random', sep=',', header=None).iloc[:, 3:] cols = ['X_all', 'X_bs', 'X_es', 'X_ns'] means = pd.DataFrame(columns=cols) stds = pd.DataFrame(columns=cols) for table in [shs27_random, shs27_bfs, shs27_dfs, shs148_random, shs148_bfs, shs148_dfs]: table[3] = table[3].apply(lambda x: float(x.split(': ')[1])) table[4] = table[4].replace(' None', -1) means = means.append({k: v for k, v in zip(cols, table.mean())}, ignore_index=True) stds = stds.append({k: v for k, v in zip(cols, table.std())}, ignore_index=True) means.index = ['random', 'bfs', 'dfs', 'random', 'bfs', 'dfs'] stds.index = ['random', 'bfs', 'dfs', 'random', 'bfs', 'dfs'] combined = means.round(2).astype(str) + ' pm ' + stds.round(2).astype(str) print(combined.to_latex()) if __name__ == '__main__': # call functions from above, depending on which results you would like to combine # functions print skeletons for the latex tables used in the report results_pipr()
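# DataFrame.append as used above is deprecated in recent pandas releases;
# the same row accumulation can be done with from_records (synthetic
# stand-in tables shown so the snippet runs on its own):
import numpy as np
import pandas as pd

cols = ['X_all', 'X_bs', 'X_es', 'X_ns', 'bs', 'es', 'ns']
tables = [pd.DataFrame(np.random.rand(5, len(cols))) for _ in range(3)]
means = pd.DataFrame.from_records(
    [dict(zip(cols, t.mean())) for t in tables])[cols]
print(means)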
import time


def pot(x, n):
    # fast (binary) exponentiation: O(log n) multiplications
    if n == 0:
        return 1
    if n == 1:
        return x
    if n % 2 == 0:
        half = pot(x, n // 2)
        return half * half
    else:
        half = pot(x, (n - 1) // 2)
        return half * half * x


x, n = map(int, input().split())
print(pot(x, n))


def compare(x, n):
    start = time.time()
    print(x ** n)
    default = time.time() - start
    print("Python default: ", default, "sec")

    start = time.time()
    print(pot(x, n))
    fast = time.time() - start
    print("Fast Exponentiation: ", fast, "sec")

    print("\nFast Exponentiation is: ", default - fast, "sec faster (",
          ((default - fast) / default) * 100, "%)")
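# pot() halves the exponent on every recursive call, so it performs
# O(log2 n) multiplications instead of n - 1; a few quick checks:
assert pot(2, 10) == 1024
assert pot(3, 0) == 1
assert pot(5, 7) == 5 ** 7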
""" Copyright (C) Microsoft Corporation. All rights reserved.​ ​ Microsoft Corporation (“Microsoft”) grants you a nonexclusive, perpetual, royalty-free right to use, copy, and modify the software code provided by us ("Software Code"). You may not sublicense the Software Code or any use of it (except to your affiliates and to vendors to perform work on your behalf) through distribution, network access, service agreement, lease, rental, or otherwise. This license does not purport to express any claim of ownership over data you may have shared with Microsoft in the creation of the Software Code. Unless applicable law gives you more rights, Microsoft reserves all other rights not expressly granted herein, whether by implication, estoppel or otherwise. ​ ​ THE SOFTWARE CODE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL MICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import os from azureml.pipeline.steps import ParallelRunConfig, ParallelRunStep from ml_service.util.manage_environment import get_environment from ml_service.pipelines.load_sample_data import create_sample_data_csv from ml_service.util.env_variables import Env from ml_service.util.attach_compute import get_compute from azureml.core import ( Workspace, Dataset, Datastore, RunConfiguration, ) from azureml.pipeline.core import Pipeline, PipelineData, PipelineParameter from azureml.core.compute import ComputeTarget from azureml.data.datapath import DataPath from azureml.pipeline.steps import PythonScriptStep from typing import Tuple def get_or_create_datastore( datastorename: str, ws: Workspace, env: Env, input: bool = True ) -> Datastore: """ Obtains a datastore with matching name. Creates it if none exists. :param datastorename: Name of the datastore :param ws: Current AML Workspace :param env: Environment variables :param input: Datastore points to the input container if this is True(default) or the output storage container otherwise :returns: Datastore :raises: ValueError """ if datastorename is None: raise ValueError("Datastore name is required.") containername = ( env.scoring_datastore_input_container if input else env.scoring_datastore_output_container ) if datastorename in ws.datastores: datastore = ws.datastores[datastorename] # the datastore is not registered but we have all details to register it elif ( env.scoring_datastore_access_key is not None and containername is not None # NOQA: E501 ): # NOQA:E501 datastore = Datastore.register_azure_blob_container( workspace=ws, datastore_name=datastorename, account_name=env.scoring_datastore_storage_name, account_key=env.scoring_datastore_access_key, container_name=containername, ) else: raise ValueError( "No existing datastore named {} nor was enough information supplied to create one.".format( # NOQA: E501 datastorename ) ) return datastore def get_input_dataset(ws: Workspace, ds: Datastore, env: Env) -> Dataset: """ Gets an input dataset wrapped around an input data file. 
    The input data file is assumed to exist in the supplied datastore.

    :param ws: AML Workspace
    :param ds: Datastore containing the data file
    :param env: Environment variables

    :returns: Input Dataset
    """
    scoringinputds = Dataset.Tabular.from_delimited_files(
        path=DataPath(ds, env.scoring_datastore_input_filename)
    )

    scoringinputds = scoringinputds.register(
        ws,
        name=env.scoring_dataset_name,
        tags={"purpose": "scoring input", "format": "csv"},
        create_new_version=True,
    ).as_named_input(env.scoring_dataset_name)

    return scoringinputds


def get_fallback_input_dataset(ws: Workspace, env: Env) -> Dataset:
    """
    Called when an input datastore does not exist or no input data file
    exists at that location. Create a sample dataset using the SPOT dataset
    from scikit-learn. Useful when debugging this code in the absence of
    the input data location Azure blob.

    :param ws: AML Workspace
    :param env: Environment Variables

    :returns: Fallback input dataset

    :raises: FileNotFoundError
    """
    # This call creates an example CSV from sklearn sample data. If you
    # have already bootstrapped your project, you can comment this line
    # out and use your own CSV.
    create_sample_data_csv(
        file_name=env.scoring_datastore_input_filename, for_scoring=True
    )

    if not os.path.exists(env.scoring_datastore_input_filename):
        error_message = (
            "Could not find CSV dataset for scoring at {}. "
            "No alternate data store location was provided either."
        ).format(env.scoring_datastore_input_filename)  # NOQA: E501
        raise FileNotFoundError(error_message)

    # upload the input data to the workspace default datastore
    default_datastore = ws.get_default_datastore()
    scoreinputdataref = default_datastore.upload_files(
        [env.scoring_datastore_input_filename],
        target_path="scoringinput",
        overwrite=False,
    )

    scoringinputds = (
        Dataset.Tabular.from_delimited_files(scoreinputdataref)
        .register(ws, env.scoring_dataset_name, create_new_version=True)
        .as_named_input(env.scoring_dataset_name)
    )

    return scoringinputds


def get_output_location(
    ws: Workspace, env: Env, outputdatastore: Datastore = None
) -> PipelineData:
    """
    Returns a Datastore wrapped as a PipelineData instance suitable for
    passing into a pipeline step. Represents the location where the
    scoring output should be written.

    Uses the default workspace blob store if no output datastore is
    supplied.

    :param ws: AML Workspace
    :param env: Environment Variables
    :param outputdatastore: AML Datastore, optional, default is None

    :returns: PipelineData wrapping the output datastore
    """
    if outputdatastore is None:
        output_loc = PipelineData(
            name="defaultoutput", datastore=ws.get_default_datastore()
        )
    else:
        output_loc = PipelineData(
            name=outputdatastore.name, datastore=outputdatastore
        )  # NOQA: E501

    return output_loc


def get_inputds_outputloc(
    ws: Workspace, env: Env
) -> Tuple[Dataset, PipelineData]:  # NOQA: E501
    """
    Prepare the input and output for the scoring step.

    Input is a tabular dataset wrapped around the scoring data.
    Output is PipelineData representing a location to write the scores down.
:param ws: AML Workspace :param env: Environment Variables :returns: Input dataset and output location """ if env.scoring_datastore_storage_name is None: # fall back to default scoringinputds = get_fallback_input_dataset(ws, env) output_loc = get_output_location(ws, env) else: inputdatastore = get_or_create_datastore( "{}_in".format(env.scoring_datastore_storage_name), ws, env ) outputdatastore = get_or_create_datastore( "{}_out".format(env.scoring_datastore_storage_name), ws, env, input=False, # NOQA: E501 ) scoringinputds = get_input_dataset(ws, inputdatastore, env) output_loc = get_output_location(ws, env, outputdatastore) return (scoringinputds, output_loc) def get_run_configs( ws: Workspace, computetarget: ComputeTarget, env: Env ) -> Tuple[ParallelRunConfig, RunConfiguration]: """ Creates the necessary run configurations required by the pipeline to enable parallelized scoring. :param ws: AML Workspace :param computetarget: AML Compute target :param env: Environment Variables :returns: Tuple[Scoring Run configuration, Score copy run configuration] """ # get a conda environment for scoring environment = get_environment( ws, env.aml_env_name_scoring, conda_dependencies_file=env.aml_env_score_conda_dep_file, enable_docker=True, use_gpu=env.use_gpu_for_scoring, create_new=env.rebuild_env_scoring, ) score_run_config = ParallelRunConfig( entry_script=env.batchscore_script_path, source_directory=env.sources_directory_train, error_threshold=10, output_action="append_row", compute_target=computetarget, node_count=env.max_nodes_scoring, environment=environment, run_invocation_timeout=300, ) copy_run_config = RunConfiguration() copy_run_config.environment = get_environment( ws, env.aml_env_name_score_copy, conda_dependencies_file=env.aml_env_scorecopy_conda_dep_file, enable_docker=True, use_gpu=env.use_gpu_for_scoring, create_new=env.rebuild_env_scoring, ) return (score_run_config, copy_run_config) def get_scoring_pipeline( scoring_dataset: Dataset, output_loc: PipelineData, score_run_config: ParallelRunConfig, copy_run_config: RunConfiguration, computetarget: ComputeTarget, ws: Workspace, env: Env, ) -> Pipeline: """ Creates the scoring pipeline. :param scoring_dataset: Data to score :param output_loc: Location to save the scoring results :param score_run_config: Parallel Run configuration to support parallelized scoring :param copy_run_config: Script Run configuration to support score copying :param computetarget: AML Compute target :param ws: AML Workspace :param env: Environment Variables :returns: Scoring pipeline instance """ # To help filter the model make the model name, model version and a # tag/value pair bindable parameters so that they can be passed to # the pipeline when invoked either over REST or via the AML SDK. 
model_name_param = PipelineParameter( "model_name", default_value=" " ) # NOQA: E501 model_version_param = PipelineParameter( "model_version", default_value=" " ) # NOQA: E501 model_tag_name_param = PipelineParameter( "model_tag_name", default_value=" " ) # NOQA: E501 model_tag_value_param = PipelineParameter( "model_tag_value", default_value=" " ) # NOQA: E501 scoring_step = ParallelRunStep( name="scoringstep", inputs=[scoring_dataset], output=output_loc, arguments=[ "--model_name", model_name_param, "--model_version", model_version_param, "--model_tag_name", model_tag_name_param, "--model_tag_value", model_tag_value_param, ], parallel_run_config=score_run_config, allow_reuse=False, ) copying_step = PythonScriptStep( name="scorecopystep", script_name=env.batchscore_copy_script_path, source_directory=env.sources_directory_train, arguments=[ "--output_path", output_loc, "--scoring_output_filename", env.scoring_datastore_output_filename if env.scoring_datastore_output_filename is not None else "", "--scoring_datastore", env.scoring_datastore_storage_name if env.scoring_datastore_storage_name is not None else "", "--score_container", env.scoring_datastore_output_container if env.scoring_datastore_output_container is not None else "", "--scoring_datastore_key", env.scoring_datastore_access_key if env.scoring_datastore_access_key is not None else "", ], inputs=[output_loc], allow_reuse=False, compute_target=computetarget, runconfig=copy_run_config, ) return Pipeline(workspace=ws, steps=[scoring_step, copying_step]) def build_batchscore_pipeline(): """ Main method that builds and publishes a scoring pipeline. """ try: env = Env() # Get Azure machine learning workspace aml_workspace = Workspace.get( name=env.workspace_name, subscription_id=env.subscription_id, resource_group=env.resource_group, ) # Get Azure machine learning cluster aml_compute_score = get_compute( aml_workspace, env.compute_name_scoring, env.vm_size_scoring, for_batch_scoring=True, ) input_dataset, output_location = get_inputds_outputloc( aml_workspace, env ) # NOQA: E501 scoring_runconfig, score_copy_runconfig = get_run_configs( aml_workspace, aml_compute_score, env ) scoring_pipeline = get_scoring_pipeline( input_dataset, output_location, scoring_runconfig, score_copy_runconfig, aml_compute_score, aml_workspace, env, ) published_pipeline = scoring_pipeline.publish( name=env.scoring_pipeline_name, description="Diabetes Batch Scoring Pipeline", ) pipeline_id_string = "##vso[task.setvariable variable=pipeline_id;isOutput=true]{}".format( # NOQA: E501 published_pipeline.id ) print(pipeline_id_string) except Exception as e: print(e) exit(1) if __name__ == "__main__": build_batchscore_pipeline()
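# Once published, the pipeline can be triggered with the bindable model
# parameters defined in get_scoring_pipeline(); a sketch via the SDK (the
# experiment name, model values and pipeline id are assumptions):
from azureml.core import Experiment, Workspace
from azureml.pipeline.core import PublishedPipeline

ws = Workspace.from_config()
pipeline = PublishedPipeline.get(ws, id="<published-pipeline-id>")
Experiment(ws, "batch-scoring").submit(
    pipeline,
    pipeline_parameters={"model_name": "diabetes_model", "model_version": "1"},
)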
class ConfigurationReloadInfo(object, IDisposable):
    """
    This object contains information returned by a reload of the fabrication
    configuration.

    ConfigurationReloadInfo()
    """

    def Dispose(self):
        """ Dispose(self: ConfigurationReloadInfo) """
        pass

    def GetConnectivityValidation(self):
        """
        GetConnectivityValidation(self: ConfigurationReloadInfo) -> ConnectionValidationInfo

        Returns information about the post-reload connectivity validation.

        Returns: Information about the post-reload connectivity validation.
        """
        pass

    def ReleaseUnmanagedResources(self, *args):
        """ ReleaseUnmanagedResources(self: ConfigurationReloadInfo, disposing: bool) """
        pass

    def __enter__(self, *args):
        """ __enter__(self: IDisposable) -> object """
        pass

    def __exit__(self, *args):
        """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
        pass

    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass

    def __repr__(self, *args):
        """ __repr__(self: object) -> str """
        pass

    Disconnects = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """The number of disconnections caused by the reload.

    Get: Disconnects(self: ConfigurationReloadInfo) -> int
    """

    IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Specifies whether the .NET object represents a valid Revit entity.

    Get: IsValidObject(self: ConfigurationReloadInfo) -> bool
    """

    ProfileNotAvailable = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """The current profile is not available in the disk configuration.

    Get: ProfileNotAvailable(self: ConfigurationReloadInfo) -> bool
    """
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 import boto3 from botocore import config import tarfile import os import json from io import BytesIO from MediaInsightsEngineLambdaHelper import MediaInsightsOperationHelper from MediaInsightsEngineLambdaHelper import MasExecutionError from MediaInsightsEngineLambdaHelper import DataPlane mie_config = json.loads(os.environ['botoConfig']) config = config.Config(**mie_config) comprehend = boto3.client('comprehend', config=config) s3_client = boto3.client('s3') headers = {"Content-Type": "application/json"} def read_from_s3(bucket, key): try: obj = s3_client.get_object( Bucket=bucket, Key=key ) except Exception as e: print("Exception occurred while reading asset metadata from s3") return {"Status": "Error", "Message": e} else: results = obj['Body'].read() return {"Status": "Success", "Object": results} def lambda_handler(event, context): print("We got this event:\n", event) operator_object = MediaInsightsOperationHelper(event) try: job_id = operator_object.metadata["comprehend_phrases_job_id"] asset_id = operator_object.asset_id workflow_id = operator_object.workflow_execution_id # If Comprehend wasn't run due to empty text input, then we're done if job_id == "Empty input --> empty output.": operator_object.update_workflow_status("Complete") return operator_object.return_output_object() except KeyError: operator_object.update_workflow_status("Error") operator_object.add_workflow_metadata(comprehend_error="No valid job id") raise MasExecutionError(operator_object.return_output_object()) try: response = comprehend.list_key_phrases_detection_jobs( Filter={ 'JobName': job_id, }, ) except Exception as e: operator_object.update_workflow_status("Error") operator_object.add_workflow_metadata(comprehend_error="Unable to get response from comprehend: {e}".format(e=e)) raise MasExecutionError(operator_object.return_output_object()) else: print(response) comprehend_status = response["KeyPhrasesDetectionJobPropertiesList"][0]["JobStatus"] if comprehend_status == "SUBMITTED" or comprehend_status == "IN_PROGRESS": operator_object.add_workflow_metadata(comprehend_phrases_job_id=job_id) operator_object.update_workflow_status("Executing") return operator_object.return_output_object() elif comprehend_status == "COMPLETED": output_uri = response["KeyPhrasesDetectionJobPropertiesList"][0]["OutputDataConfig"]["S3Uri"] delimeter = '/' bucket = delimeter.join(output_uri.split(delimeter)[2:3]) file_name = output_uri.split(delimeter)[-1] key = delimeter.join(output_uri.split(delimeter)[3:-1]) + '/' + file_name comprehend_tarball = read_from_s3(bucket, key) comprehend_data = {"LanguageCode": response['KeyPhrasesDetectionJobPropertiesList'][0]['LanguageCode'], "Results": []} if comprehend_tarball["Status"] == "Success": input_bytes = comprehend_tarball["Object"] with tarfile.open(fileobj=BytesIO(input_bytes)) as tf: for member in tf: if member.isfile(): comprehend_data["Results"].append(tf.extractfile(member).read().decode('utf-8')) dataplane = DataPlane() metadata_upload = dataplane.store_asset_metadata(asset_id, "key_phrases", workflow_id, comprehend_data) if "Status" not in metadata_upload: operator_object.update_workflow_status("Error") operator_object.add_workflow_metadata( comprehend_error="Unable to store key phrases data {e}".format(e=metadata_upload)) raise MasExecutionError(operator_object.return_output_object()) else: if metadata_upload["Status"] == "Success": 
                        operator_object.add_workflow_metadata(comprehend_phrases_job_id=job_id, output_uri=output_uri)
                        operator_object.update_workflow_status("Complete")
                        return operator_object.return_output_object()
                    else:
                        operator_object.update_workflow_status("Error")
                        operator_object.add_workflow_metadata(comprehend_error="Unable to store key phrases data {e}".format(e=metadata_upload))
                        raise MasExecutionError(operator_object.return_output_object())
            else:
                operator_object.update_workflow_status("Error")
                operator_object.add_workflow_metadata(comprehend_phrases_job_id=job_id,
                                                      comprehend_error="could not retrieve output from s3: {e}".format(e=comprehend_tarball["Message"]))
                raise MasExecutionError(operator_object.return_output_object())
        else:
            operator_object.update_workflow_status("Error")
            operator_object.add_workflow_metadata(comprehend_phrases_job_id=job_id,
                                                  comprehend_error="comprehend returned as failed: {e}".format(e=response["KeyPhrasesDetectionJobPropertiesList"][0]["Message"]))
            raise MasExecutionError(operator_object.return_output_object())
# Non-parametric optimization.<br>
# Find interesting bits. Combine them. Repeat.<br>
# [home](http://menzies.us/bnbab2) :: [lib](http://menzies.us/bnbad2/lib.html) ::
# [cols](http://menzies.us/bnbad2/tab.html) :: [tbl](http://menzies.us/bnbad2/grow.html)<br>
# <hr>
# <a href="http://github.com/timm/bnbad2"><img src="https://github.com/timm/bnbad2/raw/main/etc/img/banner.png" align=left></a>
# <p><a href="https://zenodo.org/badge/latestdoi/326061406"><img src="https://zenodo.org/badge/326061406.svg"></a>
# <br><img src="https://img.shields.io/badge/language-python3,bash-blue">
# <br><a href="https://badge.fury.io/py/bnbad2"><img src="https://badge.fury.io/py/bnbad2.svg" alt="PyPI version" height="18"></a>
# <br><img src="https://img.shields.io/badge/purpose-ai%20,%20se-blueviolet">
# <br><a href="https://travis-ci.com/timm/bnbad2"><img src="https://travis-ci.com/timm/bnbad2.svg?branch=main"></a>
# <br><img src="https://img.shields.io/badge/license-mit-lightgrey"></p><hr>

import sys


def rows(x=None):
  # Generator that dispatches on the source type: string, bytes, csv file,
  # or stdin. (The enclosing function name `rows` is an assumption; `prep`
  # is the row-preparation helper defined elsewhere in this library.)
  def bytedata():
    for y in x.decode("utf-8").splitlines():
      yield prep(y)

  def strings():
    for y in x.splitlines():
      yield prep(y)

  def csv():
    with open(x) as fp:
      for y in fp:
        yield prep(y)

  def stdin():
    for y in sys.stdin:
      yield prep(y)

  f = strings
  print("xx", x)
  if x is None:
    f = stdin
  elif type(x) is bytes:
    f = bytedata
  elif x[-4:] == ".csv":
    f = csv
  for y in f():
    yield y
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os import subprocess import numpy as np import pytest from jina import Document, DocumentArray, Flow from jina.executors.metas import get_default_metas from ...faiss_searcher import FaissSearcher def _get_docs_from_vecs(queries): docs = DocumentArray() for q in queries: doc = Document(embedding=q) docs.append(doc) return docs @pytest.fixture(scope='function', autouse=True) def metas(tmpdir): os.environ['TEST_WORKSPACE'] = str(tmpdir) metas = get_default_metas() metas['workspace'] = os.environ['TEST_WORKSPACE'] metas['name'] = 'faiss_idx' yield metas del os.environ['TEST_WORKSPACE'] def test_train_and_index(metas, tmpdir): query = np.array(np.random.random([10, 10]), dtype=np.float32) query_docs = _get_docs_from_vecs(query) trained_index_file = os.path.join(tmpdir, 'faiss.index') train_data = np.array(np.random.random([512, 10]), dtype=np.float32) index_docs = _get_docs_from_vecs(train_data) f = Flow().add( uses=FaissSearcher, timeout_ready=-1, uses_with={ 'index_key': 'IVF6,PQ2', 'trained_index_file': trained_index_file, }, uses_meta=metas, ) with f: import faiss faiss_index = faiss.index_factory(10, 'IVF6,PQ2', faiss.METRIC_INNER_PRODUCT) faiss.normalize_L2(train_data) faiss_index.train(train_data) faiss.write_index(faiss_index, trained_index_file) # train and index docs first f.post(on='/index', data=index_docs) result = f.post( on='/search', data=query_docs, return_results=True, parameters={'limit': 4} )[0].docs assert len(result[0].matches) == 4 for d in result: assert ( d.matches[0].scores['cosine'].value <= d.matches[1].scores['cosine'].value ) @pytest.mark.gpu @pytest.mark.docker def test_docker_runtime_gpu(build_docker_image_gpu: str): with pytest.raises(subprocess.TimeoutExpired): subprocess.run( [ 'jina', 'executor', f'--uses=docker://{build_docker_image_gpu}', '--gpus', 'all', ], timeout=30, check=True, )
"""Tests for `vivit.extensions.firstorder.batch_grad`."""
# Generated by Django 3.2 on 2021-05-08 08:06 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('example', '0001_initial'), ] operations = [ migrations.AddField( model_name='calendar', name='public', field=models.BooleanField(default=False), ), ]
from .construct_workloads import *
from locust import HttpLocust, TaskSet, between, task, seq_task from locust.events import request_failure import requests import json import random class WebsiteTasks(TaskSet): @task(1) def get_data_1(self): self.client.get("http://localhost:8000/api/v1/projectsss/") @task(1) def get_data_2(self): self.client.get("http://localhost:8000/api/v1/testing/") @task(1) def post_data_3(self): self.client.post("http://localhost:8000/api/v1/testing/",{'name': 'LoadTest', 'test_type': 'POST'}) class WebsiteUser(HttpLocust): task_set = WebsiteTasks min_wait = 5000 max_wait = 15000
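# HttpLocust with min_wait/max_wait is the pre-1.0 Locust API; under the
# current API the same user would be written with HttpUser and wait_time
# (a sketch, not a drop-in replacement for the class above):
from locust import HttpUser, between, task

class ModernWebsiteUser(HttpUser):
    wait_time = between(5, 15)

    @task
    def get_tests(self):
        self.client.get("/api/v1/testing/")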
def notas(*num, sit=False):
    """
    Returns a dictionary with the number of grades, the highest grade, the
    lowest grade, the average and, optionally, the overall standing.
    :param num: the students' grades
    :param sit: standing flag. If True, reports the standing based on the
                average. If False, the standing is omitted.
    :return: dictionary with the number of grades, the highest, the lowest,
             the average and the standing (optional).
    """
    total = len(num)
    maior = max(num)
    menor = min(num)
    media = sum(num) / total
    avaliacao = ''
    if sit:
        if media >= 7:
            avaliacao = 'Boa'
        if 5 < media < 7:
            avaliacao = 'Razoável'
        if media <= 5:
            avaliacao = 'Ruim'
    # end-if
    if sit:
        return {'total': total, 'maior': maior, 'menor': menor, 'média': media, 'situação': avaliacao}
    else:
        return {'total': total, 'maior': maior, 'menor': menor, 'média': media}
    # end-if-else
# end-def notas


print(notas(4.5, 5.5, 7, 8, 2.5, 4, sit=True))
print(notas(4.5, 5.5, 7, 8, 2.5, 4))
help(notas)
from __future__ import annotations import logging import typing as t import typing_extensions as tx from prestring.python import Module, Symbol, FromStatement InputData = t.Dict[str, t.Any] if t.TYPE_CHECKING: from swagger_marshmallow_codegen.resolver import Resolver logger = logging.getLogger(__name__) class Context: def __init__( self, *, name: str = "", m: t.Optional[Module] = None, im: t.Optional[Module] = None, rim: t.Optional[Module] = None, relative_imported: t.Optional[t.Dict[str, Symbol]] = None, separated: bool = False, ): self.name = name self.separated = separated self.m: Module = m or Module() self.im: Module = im or self.m.submodule() self.rim: Module = rim or self.m.submodule() self._relative_imported = relative_imported if relative_imported is None: self._relative_imported = {} def from_(self, module: str, name: str) -> FromStatement: logger.debug(" import: module=%s, name=%s", module, name) return self.im.from_(module, name) def import_(self, module: str) -> Symbol: logger.debug(" import: module=%s", module) return self.im.import_(module) def relative_import_from_lazy(self, name: str) -> None: if not self.separated: return imported = self._relative_imported.get(name) if imported is not None: return None logger.debug(" relative import lazy: module=.%s symbol:%s", name, name) self._relative_imported[name] = self.rim.from_("._lazy", f"_use{name}") return None def relative_import(self, name: str) -> None: if not self.separated: return logger.debug(" relative import: module=.%s symbol:%s", name, name) self.rim.from_(f".{name}", name) return None def use_relative(self, name: str) -> t.Any: if not self.separated: return f"lambda: {name}()" elif self.name == name: # self recursion return f"lambda: {name}()" elif name not in self._relative_imported: # inline definition return f"lambda: {name}()" else: return f"_use{name}" def new_child(self) -> Context: return self.__class__( name=self.name, m=self.m.submodule(newline=False), im=self.im, rim=self.rim, relative_imported=self._relative_imported, separated=self.separated, ) @tx.runtime_checkable class ContextFactory(tx.Protocol): setup: t.Optional[t.Callable[[Context], None]] teardown: t.Optional[t.Callable[[Resolver], None]] def __call__(self, name: str, *, part: t.Optional[str] = None) -> Context: ... @tx.runtime_checkable class OutputData(tx.Protocol): @property def files(self) -> t.Iterator[t.Tuple[str, t.Any]]: ... 
class SeparatedFilesContextFactory: def __init__(self, ctx: Context, *, setup: t.Callable[[Context], None]) -> None: self._files: t.Dict[str, Context] = {} self._parts: t.Dict[t.Tuple[str, t.Optional[str]], Context] = {} self._root = ctx self.setup = setup self.teardown = self._teardown def __call__(self, name: str, *, part: t.Optional[str] = None) -> Context: ctx = self._files.get(name) if ctx is None: ctx = self._files[name] = Context(name=name, separated=True) ctx._relative_imported[name] = Symbol(name) self.setup(ctx) sctx = self._parts.get((name, part)) if sctx is None: sctx = self._parts[(name, part)] = ctx.new_child() return sctx @property def files(self) -> t.Iterator[t.Tuple[str, t.Any]]: return sorted([(name, ctx.m) for name, ctx in self._files.items()]) def _teardown(self, resolver: Resolver) -> None: names = [name for name, _ in self.files] c = self._files["__init__"] = Context(name="__init__", separated=True) for name in names: c.from_(f".{name}", resolver.resolve_schema_name(name)) c = self._files["_lazy"] = Context(name="_lazy", separated=True) for name in names: cls_name = resolver.resolve_schema_name(name) with c.m.def_(f"_use{cls_name}"): c.m.from_(f".{name}", cls_name) c.m.return_(cls_name) class OneFileContextFactory: def __init__(self, ctx: Context, *, setup: t.Callable[[Context], None]) -> None: self._parts: t.Dict[t.Optional[str], Context] = {} self._root = ctx self.setup = setup self.setup(ctx) self.teardown = None def __call__(self, name: str, *, part: t.Optional[str] = None) -> Context: ctx = self._parts.get(part) if ctx is not None: return ctx ctx = self._parts[part] = self._root.new_child() return ctx @property def files(self) -> t.Iterator[t.Tuple[str, t.Any]]: yield ("", self._root.m)
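# A minimal sketch of driving these factories (the setup hook and the
# schema name are arbitrary examples):
factory = OneFileContextFactory(
    Context(), setup=lambda ctx: ctx.from_("marshmallow", "Schema"))
pet_ctx = factory("Pet")                  # sub-context for one schema
pet_ctx.m.stmt("class Pet(Schema): pass")
for name, module in factory.files:
    print(name, str(module))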
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: pkuyouth_server/reply.py

from lxml import etree
import time

__all__ = ['TextMsg', 'ImageMsg', 'SystemMsg', 'ArticleMsg']


class Message(object):
    def __init__(self, toUser, fromUser):
        self.tree = etree.Element('xml')
        self._update({
            'ToUserName': toUser,
            'FromUserName': fromUser,
            'CreateTime': (int(time.time()), False),
            'MsgType': self.msgType,
        })

    @property
    def xml(self):
        return etree.tostring(self.tree, encoding='UTF-8').decode('utf-8')

    def __str__(self):
        return self.xml

    def __repr__(self):
        return self.xml

    def _update(self, dataDict):
        self.__form_xml(self.tree, dataDict)

    def __form_xml(self, element, dataDict):
        for key, value in dataDict.items():
            subElement = etree.SubElement(element, key)
            if isinstance(value, (str, int, float)):
                subElement.text = etree.CDATA(str(value))
            elif isinstance(value, tuple):  # second element says whether to wrap in CDATA
                value, isCData = value
                subElement.text = etree.CDATA(str(value)) if isCData else str(value)
            elif isinstance(value, list):  # sibling elements
                for subDataDict in value:
                    self.__form_xml(subElement, subDataDict)
            elif isinstance(value, dict):  # parent-child relationship
                self.__form_xml(subElement, value)

    def send(self):
        if not issubclass(self.__class__, Message):
            raise NotImplementedError
        else:
            return etree.tostring(self.tree, encoding='UTF-8').decode('utf-8')


class TextMsg(Message):
    msgType = 'text'

    def __init__(self, toUser, fromUser, content):
        Message.__init__(self, toUser, fromUser)
        self._update({'Content': content})


class SystemMsg(TextMsg):
    def __init__(self, toUser, fromUser, content, admin):
        Message.__init__(self, toUser, fromUser)
        self._update({'Content': self.template % {"admin": admin[toUser], "content": content}})

    @property
    def timestamp(self):
        return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

    @property
    def template(self):
        # str.format fills the timestamp; the %-style placeholders are filled in __init__
        return "[System] {timestamp} 管理员 %(admin)s %(content)s".format(timestamp=self.timestamp)

    def __str__(self):
        return self.tree.find('Content').text


class ImageMsg(Message):
    msgType = 'image'

    def __init__(self, toUser, fromUser, mediaId):
        Message.__init__(self, toUser, fromUser)
        self._update({'Image': {'MediaId': mediaId}})


class ArticleMsg(Message):
    msgType = 'news'

    def __init__(self, toUser, fromUser, newsInfo):
        Message.__init__(self, toUser, fromUser)
        self._update({
            "ArticleCount": (len(newsInfo), False),
            "Articles": [{
                "item": {
                    "Title": news['title'],
                    "Description": news['digest'],
                    "PicUrl": news['cover_url'],
                    "Url": news['news_url'],
                }
            } for news in newsInfo],
        })
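# Illustrative use of the reply classes above; the OpenIDs, media id, and URLs
# are placeholder values, not real WeChat identifiers.
reply = TextMsg(toUser='o_subscriber_openid', fromUser='gh_account_id',
                content='hello from the server')
print(reply.xml)  # serialized <xml> payload ready to return to WeChat

articles = ArticleMsg('o_subscriber_openid', 'gh_account_id', [{
    'title': 'Sample article',
    'digest': 'One-line summary',
    'cover_url': 'https://example.com/cover.png',
    'news_url': 'https://example.com/article',
}])
print(articles)  # __repr__/__str__ also render the XML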
# coding: utf-8

"""
    syntropy-controller

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)  # noqa: E501

    OpenAPI spec version: 0.1.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401

import six


class WgCreateInterfaceMetadata(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    swagger_types = {"user_id": "float"}

    attribute_map = {"user_id": "user_id"}

    def __init__(self, user_id=None):  # noqa: E501
        """WgCreateInterfaceMetadata - a model defined in Swagger"""  # noqa: E501
        self._user_id = None
        self.discriminator = None
        self.user_id = user_id

    @property
    def user_id(self):
        """Gets the user_id of this WgCreateInterfaceMetadata.  # noqa: E501

        :return: The user_id of this WgCreateInterfaceMetadata.  # noqa: E501
        :rtype: float
        """
        return self._user_id

    @user_id.setter
    def user_id(self, user_id):
        """Sets the user_id of this WgCreateInterfaceMetadata.

        :param user_id: The user_id of this WgCreateInterfaceMetadata.  # noqa: E501
        :type: float
        """
        if user_id is None:
            raise ValueError(
                "Invalid value for `user_id`, must not be `None`"
            )  # noqa: E501

        self._user_id = user_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(
                    map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
                )
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict")
                        else item,
                        value.items(),
                    )
                )
            else:
                result[attr] = value
        if issubclass(WgCreateInterfaceMetadata, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, WgCreateInterfaceMetadata):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
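# Quick illustration of the generated model's helpers; the user id is a
# made-up value.
meta = WgCreateInterfaceMetadata(user_id=42.0)
assert meta.to_dict() == {"user_id": 42.0}
assert meta == WgCreateInterfaceMetadata(user_id=42.0)

try:
    WgCreateInterfaceMetadata(user_id=None)  # the setter rejects None
except ValueError as exc:
    print(exc)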
# _*_coding:utf-8_*_
# @author: FelixFu
# @Date: 2021.4.16
# @github: https://github.com/felixfu520

import torch
from torchvision.models.densenet import densenet121
from torchvision.models.densenet import densenet161


class Densenet121(torch.nn.Module):
    def __init__(self, num_class=1000, in_channels=3, pretrained=False, **kwargs):
        super(Densenet121, self).__init__()
        self.model = densenet121(pretrained=pretrained)
        # Swap the stem conv so the network accepts an arbitrary number of input channels.
        self.model.features[0] = torch.nn.Conv2d(in_channels=in_channels, out_channels=64,
                                                 kernel_size=7, stride=2, padding=3, bias=False)
        self.model.classifier = torch.nn.Linear(in_features=1024, out_features=num_class, bias=True)

    def forward(self, x):
        return self.model(x)


class Densenet161(torch.nn.Module):
    def __init__(self, num_class=1000, in_channels=3, pretrained=False, **kwargs):
        super(Densenet161, self).__init__()
        self.model = densenet161(pretrained=pretrained)
        self.model.features[0] = torch.nn.Conv2d(in_channels=in_channels, out_channels=96,
                                                 kernel_size=7, stride=2, padding=3, bias=False)
        self.model.classifier = torch.nn.Linear(in_features=2208, out_features=num_class, bias=True)

    def forward(self, x):
        return self.model(x)
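# Sanity-check sketch: single-channel input, 10-way head; pretrained=False so
# nothing is downloaded. Note that replacing features[0] re-initializes the
# stem, so even with pretrained=True that layer's weights are lost.
import torch

model = Densenet121(num_class=10, in_channels=1, pretrained=False)
dummy = torch.randn(2, 1, 224, 224)  # (batch, channels, H, W)
out = model(dummy)
assert out.shape == (2, 10)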
from django.apps import AppConfig


class ChatConfig(AppConfig):
    name = 'chat'

    def ready(self):
        # Imported for its side effects: connects the app's signal handlers at startup.
        import chat.signals  # noqa: F401
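# A minimal sketch of what chat/signals.py might contain; the Message model
# and the handler below are hypothetical, not taken from this project.
from django.db.models.signals import post_save
from django.dispatch import receiver

from .models import Message  # hypothetical model


@receiver(post_save, sender=Message)
def announce_message(sender, instance, created, **kwargs):
    # Runs after every Message save; `created` distinguishes inserts from updates.
    if created:
        print(f"new message: {instance.pk}")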
#!python3

"""
Implementation of the classic cut-and-choose protocol for fair cake-cutting among two agents.

References:

    Abram, Genesis 13:8-9.

Programmer: Erel Segal-Halevi
Since: 2019-11
"""

from fairpy.agents import *
from fairpy import Allocation

from typing import *
import logging
logger = logging.getLogger(__name__)


def asymmetric_protocol(agents: List[Agent]) -> Allocation:
    """
    Asymmetric cut-and-choose protocol: one cuts and the other chooses.

    :param agents: a list that must contain exactly 2 Agent objects.
    :return: a proportional and envy-free allocation.

    >>> Alice = PiecewiseConstantAgent([33,33], "Alice")
    >>> George = PiecewiseConstantAgent([11,55], "George")
    >>> asymmetric_protocol([Alice, George])
    Alice gets {(0, 1.0)} with value 33.
    George gets {(1.0, 2)} with value 55.
    <BLANKLINE>
    >>> asymmetric_protocol([George, Alice])
    George gets {(1.4, 2)} with value 33.
    Alice gets {(0, 1.4)} with value 46.2.
    <BLANKLINE>
    >>> Alice = PiecewiseConstantAgent([33,33,33], "Alice")
    >>> asymmetric_protocol([Alice, George])
    Alice gets {(1.5, 3)} with value 49.5.
    George gets {(0, 1.5)} with value 38.5.
    <BLANKLINE>
    >>> asymmetric_protocol([George, Alice])
    George gets {(0, 1.4)} with value 33.
    Alice gets {(1.4, 3)} with value 52.8.
    <BLANKLINE>
    """
    num_of_agents = len(agents)
    if num_of_agents != 2:
        raise ValueError("Cut and choose works only for two agents")
    pieces = num_of_agents * [None]
    (cutter, chooser) = agents  # equivalent to: cutter=agents[0]; chooser=agents[1]
    cut = cutter.mark(0, cutter.total_value() / 2)
    logger.info("The cutter (%s) cuts at %.2f.", cutter.name(), cut)
    if chooser.eval(0, cut) > chooser.total_value() / 2:
        logger.info("The chooser (%s) chooses the leftmost piece.", chooser.name())
        pieces[1] = [(0, cut)]
        pieces[0] = [(cut, cutter.cake_length())]
    else:
        logger.info("The chooser (%s) chooses the rightmost piece.", chooser.name())
        pieces[1] = [(cut, chooser.cake_length())]
        pieces[0] = [(0, cut)]
    return Allocation(agents, pieces)


def symmetric_protocol(agents: List[Agent]) -> Allocation:
    """
    Symmetric cut-and-choose protocol: both agents cut, the manager chooses who gets what.

    :param agents: a list that must contain exactly 2 Agent objects.
    :return: a proportional and envy-free allocation.

    >>> Alice = PiecewiseConstantAgent([33,33], "Alice")
    >>> George = PiecewiseConstantAgent([11,55], "George")
    >>> symmetric_protocol([Alice, George])
    Alice gets {(0, 1.2)} with value 39.6.
    George gets {(1.2, 2)} with value 44.
    <BLANKLINE>
    >>> symmetric_protocol([George, Alice])
    George gets {(1.2, 2)} with value 44.
    Alice gets {(0, 1.2)} with value 39.6.
    <BLANKLINE>
    >>> Alice = PiecewiseConstantAgent([33,33,33], "Alice")
    >>> symmetric_protocol([Alice, George])
    Alice gets {(1.45, 3)} with value 51.2.
    George gets {(0, 1.45)} with value 35.8.
    <BLANKLINE>
    >>> symmetric_protocol([George, Alice])
    George gets {(0, 1.45)} with value 35.8.
    Alice gets {(1.45, 3)} with value 51.2.
    <BLANKLINE>
    """
    num_of_agents = len(agents)
    if num_of_agents != 2:
        raise ValueError("Cut and choose works only for two agents")
    pieces = num_of_agents * [None]
    marks = [agent.mark(0, agent.total_value() / 2) for agent in agents]
    logger.info("The agents mark at %f, %f", marks[0], marks[1])
    cut = sum(marks) / 2
    logger.info("The cake is cut at %f.", cut)
    if marks[0] < marks[1]:
        logger.info("%s's mark is to the left of %s's mark.", agents[0].name(), agents[1].name())
        pieces[0] = [(0, cut)]
        pieces[1] = [(cut, agents[1].cake_length())]
    else:
        logger.info("%s's mark is to the left of %s's mark.", agents[1].name(), agents[0].name())
        pieces[1] = [(0, cut)]
        pieces[0] = [(cut, agents[0].cake_length())]
    return Allocation(agents, pieces)


if __name__ == "__main__":
    import doctest
    (failures, tests) = doctest.testmod(report=True)
    print("{} failures, {} tests".format(failures, tests))
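# Usage sketch: the protocols log each step, so enabling INFO logging traces
# the cut point and the chooser's decision. Agents mirror the doctest values;
# the log lines shown in the comments are approximate.
import logging
from fairpy.agents import PiecewiseConstantAgent

logging.basicConfig(level=logging.INFO)

Alice = PiecewiseConstantAgent([33, 33], "Alice")
George = PiecewiseConstantAgent([11, 55], "George")
print(asymmetric_protocol([Alice, George]))
# INFO ... The cutter (Alice) cuts at 1.00.
# INFO ... The chooser (George) chooses the rightmost piece.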
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..types import XMLScalar


class Dissipation(XMLScalar):
    _NAME = 'dissipation'
    _TYPE = 'sdf'

    def __init__(self, default=100):
        XMLScalar.__init__(self, default)

    def _set_value(self, value):
        assert value > 0, 'Dissipation must be positive'
        XMLScalar._set_value(self, value)
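# The same three-line pattern covers any positive SDF scalar; "Stiffness" below
# is shown only to illustrate the recipe and is a hypothetical sibling of the
# class above, not necessarily how this package defines it.
class Stiffness(XMLScalar):
    _NAME = 'stiffness'
    _TYPE = 'sdf'

    def __init__(self, default=1e8):
        XMLScalar.__init__(self, default)

    def _set_value(self, value):
        assert value > 0, 'Stiffness must be positive'
        XMLScalar._set_value(self, value)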
#!/usr/bin/env python3

import math

import torch

from .kernel import Kernel
from ..functions import add_jitter
from ..lazy import DiagLazyTensor, LazyTensor, MatmulLazyTensor, PsdSumLazyTensor, RootLazyTensor
from ..distributions import MultivariateNormal
from ..mlls import InducingPointKernelAddedLossTerm


class InducingPointKernel(Kernel):
    def __init__(self, base_kernel, inducing_points, likelihood, active_dims=None):
        super(InducingPointKernel, self).__init__(active_dims=active_dims)
        self.base_kernel = base_kernel
        self.likelihood = likelihood

        if inducing_points.ndimension() == 1:
            inducing_points = inducing_points.unsqueeze(-1)
        if inducing_points.ndimension() != 2:
            raise RuntimeError("Inducing points should be 2 dimensional")
        self.register_parameter(name="inducing_points", parameter=torch.nn.Parameter(inducing_points.unsqueeze(0)))
        self.register_added_loss_term("inducing_point_loss_term")

    def train(self, mode=True):
        # Invalidate the cached kernel matrix whenever the training mode changes.
        if hasattr(self, "_cached_kernel_mat"):
            del self._cached_kernel_mat
        return super(InducingPointKernel, self).train(mode)

    @property
    def _inducing_mat(self):
        if not self.training and hasattr(self, "_cached_kernel_mat"):
            return self._cached_kernel_mat
        else:
            res = self.base_kernel(self.inducing_points, self.inducing_points).evaluate()
            if not self.training:
                self._cached_kernel_mat = res
            return res

    @property
    def _inducing_inv_root(self):
        if not self.training and hasattr(self, "_cached_kernel_inv_root"):
            return self._cached_kernel_inv_root
        else:
            inv_roots_list = []
            for i in range(self._inducing_mat.size(0)):
                jitter_mat = add_jitter(self._inducing_mat[i])
                chol = torch.cholesky(jitter_mat, upper=True)
                eye = torch.eye(chol.size(-1), device=chol.device)
                inv_roots_list.append(torch.trtrs(eye, chol)[0])
            res = torch.cat(inv_roots_list, 0)
            if not self.training:
                self._cached_kernel_inv_root = res
            return res

    def _get_covariance(self, x1, x2):
        k_ux1 = self.base_kernel(x1, self.inducing_points).evaluate()
        if torch.equal(x1, x2):
            covar = RootLazyTensor(k_ux1.matmul(self._inducing_inv_root))

            # Diagonal correction for predictive posterior
            correction = (self.base_kernel(x1, x2).diag() - covar.diag()).clamp(0, math.inf)
            covar = PsdSumLazyTensor(DiagLazyTensor(correction) + covar)
        else:
            k_ux2 = self.base_kernel(x2, self.inducing_points).evaluate()
            covar = MatmulLazyTensor(
                k_ux1.matmul(self._inducing_inv_root), k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2)
            )

        return covar

    def _covar_diag(self, inputs):
        if inputs.ndimension() == 1:
            inputs = inputs.unsqueeze(1)
        orig_size = list(inputs.size())

        # Resize inputs so that everything is batch
        inputs = inputs.unsqueeze(-2).view(-1, 1, inputs.size(-1))

        # Get diagonal of covar
        covar_diag = self.base_kernel(inputs)
        if isinstance(covar_diag, LazyTensor):
            covar_diag = covar_diag.evaluate()
        covar_diag = covar_diag.view(orig_size[:-1])

        return DiagLazyTensor(covar_diag)

    def forward(self, x1, x2, **kwargs):
        covar = self._get_covariance(x1, x2)

        if self.training:
            if not torch.equal(x1, x2):
                raise RuntimeError("x1 should equal x2 in training mode")
            zero_mean = torch.zeros_like(x1.select(-1, 0))
            new_added_loss_term = InducingPointKernelAddedLossTerm(
                MultivariateNormal(zero_mean, self._covar_diag(x1)),
                MultivariateNormal(zero_mean, covar),
                self.likelihood,
            )
            self.update_added_loss_term("inducing_point_loss_term", new_added_loss_term)

        return covar
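# Construction sketch, assuming a gpytorch release contemporary with this file
# (the torch.cholesky/torch.trtrs era, where lazy kernel tensors expose
# .evaluate()); the point counts and data are arbitrary.
import torch
import gpytorch

base = gpytorch.kernels.RBFKernel()
likelihood = gpytorch.likelihoods.GaussianLikelihood()
inducing = torch.linspace(0, 1, 10).unsqueeze(-1)  # 10 inducing points in 1D

kernel = InducingPointKernel(base, inducing_points=inducing, likelihood=likelihood)
kernel.eval()  # eval mode enables the cached K_uu and its inverse root

x = torch.linspace(0, 1, 25).unsqueeze(-1)
covar = kernel(x, x)           # low-rank Nystrom term plus diagonal correction
print(covar.evaluate().shape)  # a (possibly batched) 25 x 25 covariance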
import datetime
import json
import locale
import logging
import os
import random
import shutil
from typing import Dict, List

import numpy as np
import pandas as pd
import pytest
from freezegun import freeze_time
from ruamel.yaml import YAML

import great_expectations as ge
from great_expectations import DataContext
from great_expectations.core import ExpectationConfiguration, expectationSuiteSchema
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.core.expectation_validation_result import (
    ExpectationValidationResult,
)
from great_expectations.core.util import get_or_create_spark_application
from great_expectations.data_context.types.base import CheckpointConfig
from great_expectations.data_context.types.resource_identifiers import (
    ConfigurationIdentifier,
    ExpectationSuiteIdentifier,
)
from great_expectations.data_context.util import (
    file_relative_path,
    instantiate_class_from_config,
)
from great_expectations.dataset.pandas_dataset import PandasDataset
from great_expectations.datasource import SqlAlchemyDatasource
from great_expectations.datasource.new_datasource import BaseDatasource, Datasource
from great_expectations.execution_engine import SqlAlchemyExecutionEngine
from great_expectations.self_check.util import (
    build_test_backends_list as build_test_backends_list_v3,
)
from great_expectations.self_check.util import (
    expectationSuiteSchema,
    expectationSuiteValidationResultSchema,
    get_dataset,
)
from great_expectations.util import is_library_loadable
from tests.test_utils import create_files_in_directory

yaml = YAML()

###
#
# NOTE: THESE TESTS ARE WRITTEN WITH THE en_US.UTF-8 LOCALE AS DEFAULT FOR STRING FORMATTING
#
###

locale.setlocale(locale.LC_ALL, "en_US.UTF-8")

logger = logging.getLogger(__name__)


def pytest_configure(config):
    config.addinivalue_line(
        "markers",
        "smoketest: mark test as smoketest--it does not have useful assertions but may produce side effects "
        "that require manual inspection.",
    )
    config.addinivalue_line(
        "markers",
        "rendered_output: produces rendered output that should be manually reviewed.",
    )
    config.addinivalue_line(
        "markers",
        "aws_integration: runs aws integration test that may be very slow and requires credentials",
    )


def pytest_addoption(parser):
    parser.addoption(
        "--no-spark",
        action="store_true",
        help="If set, suppress all tests against the spark test suite",
    )
    parser.addoption(
        "--no-sqlalchemy",
        action="store_true",
        help="If set, suppress all tests using sqlalchemy",
    )
    parser.addoption(
        "--no-postgresql",
        action="store_true",
        help="If set, suppress all tests against postgresql",
    )
    parser.addoption(
        "--mysql",
        action="store_true",
        help="If set, execute tests against mysql",
    )
    parser.addoption(
        "--mssql",
        action="store_true",
        help="If set, execute tests against mssql",
    )
    parser.addoption(
        "--aws-integration",
        action="store_true",
        help="If set, run aws integration tests",
    )


def build_test_backends_list(metafunc):
    test_backend_names: List[str] = build_test_backends_list_cfe(metafunc)
    backend_name_class_name_map: Dict[str, str] = {
        "pandas": "PandasDataset",
        "spark": "SparkDFDataset",
    }
    backend_name: str
    return [
        (backend_name_class_name_map.get(backend_name) or backend_name)
        for backend_name in test_backend_names
    ]


def build_test_backends_list_cfe(metafunc):
    include_pandas: bool = True
    include_spark: bool = not metafunc.config.getoption("--no-spark")
    include_sqlalchemy: bool = not metafunc.config.getoption("--no-sqlalchemy")
    include_postgresql = not metafunc.config.getoption("--no-postgresql")
    include_mysql: bool = metafunc.config.getoption("--mysql")
    include_mssql: bool = metafunc.config.getoption("--mssql")
    test_backend_names: List[str] = build_test_backends_list_v3(
        include_pandas=include_pandas,
        include_spark=include_spark,
        include_sqlalchemy=include_sqlalchemy,
        include_postgresql=include_postgresql,
        include_mysql=include_mysql,
        include_mssql=include_mssql,
    )
    return test_backend_names


def pytest_generate_tests(metafunc):
    test_backends = build_test_backends_list(metafunc)
    if "test_backend" in metafunc.fixturenames:
        metafunc.parametrize("test_backend", test_backends, scope="module")
    if "test_backends" in metafunc.fixturenames:
        metafunc.parametrize("test_backends", [test_backends], scope="module")


def pytest_collection_modifyitems(config, items):
    if config.getoption("--aws-integration"):
        # --aws-integration given in cli: do not skip aws-integration tests
        return
    skip_aws_integration = pytest.mark.skip(
        reason="need --aws-integration option to run"
    )
    for item in items:
        if "aws_integration" in item.keywords:
            item.add_marker(skip_aws_integration)


@pytest.fixture(autouse=True)
def no_usage_stats(monkeypatch):
    # Do not generate usage stats from test runs
    monkeypatch.setenv("GE_USAGE_STATS", "False")


@pytest.fixture
def sa(test_backends):
    if not any(
        [dbms in test_backends for dbms in ["postgresql", "sqlite", "mysql", "mssql"]]
    ):
        pytest.skip("No recognized sqlalchemy backend selected.")
    else:
        try:
            import sqlalchemy as sa

            return sa
        except ImportError:
            raise ValueError("SQL Database tests require sqlalchemy to be installed.")


@pytest.mark.order(index=2)
@pytest.fixture
def spark_session(test_backends):
    if "SparkDFDataset" not in test_backends:
        pytest.skip("No spark backend selected.")

    try:
        import pyspark
        from pyspark.sql import SparkSession

        return get_or_create_spark_application(
            spark_config={
                "spark.sql.catalogImplementation": "hive",
                "spark.executor.memory": "450m",
                # "spark.driver.allowMultipleContexts": "true",
                # This directive does not appear to have any effect.
            }
        )
    except ImportError:
        raise ValueError("spark tests are requested, but pyspark is not installed")


@pytest.fixture
def basic_spark_df_execution_engine(spark_session):
    from great_expectations.execution_engine import SparkDFExecutionEngine

    conf: List[tuple] = spark_session.sparkContext.getConf().getAll()
    spark_config: Dict[str, str] = dict(conf)
    execution_engine: SparkDFExecutionEngine = SparkDFExecutionEngine(
        spark_config=spark_config,
    )
    return execution_engine


@pytest.mark.order(index=3)
@pytest.fixture
def spark_session_v012(test_backends):
    if "SparkDFDataset" not in test_backends:
        pytest.skip("No spark backend selected.")

    try:
        import pyspark
        from pyspark.sql import SparkSession

        return get_or_create_spark_application(
            spark_config={
                "spark.sql.catalogImplementation": "hive",
                "spark.executor.memory": "450m",
                # "spark.driver.allowMultipleContexts": "true",
                # This directive does not appear to have any effect.
} ) except ImportError: raise ValueError("spark tests are requested, but pyspark is not installed") @pytest.fixture def empty_expectation_suite(): expectation_suite = { "expectation_suite_name": "default", "meta": {}, "expectations": [], } return expectation_suite @pytest.fixture def basic_expectation_suite(): expectation_suite = ExpectationSuite( expectation_suite_name="default", meta={}, expectations=[ ExpectationConfiguration( expectation_type="expect_column_to_exist", kwargs={"column": "infinities"}, ), ExpectationConfiguration( expectation_type="expect_column_to_exist", kwargs={"column": "nulls"} ), ExpectationConfiguration( expectation_type="expect_column_to_exist", kwargs={"column": "naturals"} ), ExpectationConfiguration( expectation_type="expect_column_values_to_be_unique", kwargs={"column": "naturals"}, ), ], ) return expectation_suite @pytest.fixture def file_data_asset(tmp_path): tmp_path = str(tmp_path) path = os.path.join(tmp_path, "file_data_asset.txt") with open(path, "w+") as file: file.write(json.dumps([0, 1, 2, 3, 4])) return ge.data_asset.FileDataAsset(file_path=path) @pytest.fixture def numeric_high_card_dict(): data = { "norm_0_1": [ 0.7225866251125405, -0.5951819764073379, -0.2679313226299394, -0.22503289285616823, 0.1432092195399402, 1.1874676802669433, 1.2766412196640815, 0.15197071140718296, -0.08787273509474242, -0.14524643717509128, -1.236408169492396, -0.1595432263317598, 1.0856768114741797, 0.5082788229519655, 0.26419244684748955, -0.2532308428977167, -0.6362679196021943, -3.134120304969242, -1.8990888524318292, 0.15701781863102648, -0.775788419966582, -0.7400872167978756, -0.10578357492485335, 0.30287010067847436, -1.2127058770179304, -0.6750567678010801, 0.3341434318919877, 1.8336516507046157, 1.105410842250908, -0.7711783703442725, -0.20834347267477862, -0.06315849766945486, 0.003016997583954831, -1.0500016329150343, -0.9168020284223636, 0.306128397266698, 1.0980602112281863, -0.10465519493772572, 0.4557797534454941, -0.2524452955086468, -1.6176089110359837, 0.46251282530754667, 0.45751208998354903, 0.4222844954971609, 0.9651098606162691, -0.1364401431697167, -0.4988616288584964, -0.29549238375582904, 0.6950204582392359, 0.2975369992016046, -1.0159498719807218, 1.3704532401348395, 1.1210419577766673, 1.2051869452003332, 0.10749349867353084, -3.1876892257116562, 1.316240976262548, -1.3777452919511493, -1.0666211985935259, 1.605446695828751, -0.39682821266996865, -0.2828059717857655, 1.30488698803017, -2.116606225467923, -0.2026680301462151, -0.05504008273574069, -0.028520163428411835, 0.4424105678123449, -0.3427628263418371, 0.23805293411919937, -0.7515414823259695, -0.1272505897548366, 1.803348436304099, -2.0178252709022124, 0.4860300090112474, 1.2304054166426217, 0.7228668982068365, 1.7400607500575112, 0.3480274098246697, -0.3887978895385282, -1.6511926233909175, 0.14517929503564567, -1.1599010576123796, -0.016133552438119002, 0.47157644883706273, 0.27657785075518254, 1.4464286976282463, -1.2605489185634533, -1.2548765025615338, 0.0755319579826929, 1.0476733637516833, -0.7038690219524807, -0.9580696842862921, -0.18135657098008018, -0.18163993379314564, 0.4092798531146971, -2.049808182546896, -1.2447062617916826, -1.6681140306283337, 1.0709944517933483, -0.7059385234342846, -0.8033587669003331, -1.8152275905903312, 0.11729996097670137, 2.2994900038012376, -0.1291192451734159, -0.6731565869164164, -0.06690994571366346, -0.40330072968473235, -0.23927186025094221, 2.7756216937096676, 0.06441299443146056, -0.5095247173507204, -0.5228853558871007, 
0.806629654091097, -2.110096084114651, -0.1233374136509439, -1.021178519845751, 0.058906278340351045, -0.26316852406211017, -1.2990807244026237, -0.1937986598084067, 0.3909222793445317, 0.578027315076297, -0.11837271520846208, -1.134297652720464, 0.496915417153268, -0.5315184110418045, 0.5284176849952198, -1.6810338988102331, 0.41220454054009154, 1.0554031136792, -1.4222775023918832, -1.1664353586956209, 0.018952180522661358, -0.04620616876577671, -0.8446292647938418, -0.6889432180332509, -0.16012081070647954, 0.5680940644754282, -1.9792941921407943, 0.35441842206114726, 0.12433268557499534, 0.25366905921805377, 0.6262297786892028, 1.327981424671081, 1.774834324890265, -0.9725604763128438, 0.42824027889428, 0.19725541390327114, 1.4640606982992412, 1.6484993842838995, 0.009848260786412894, -2.318740403198263, -0.4125245127403577, -0.15500831770388285, 1.010740123094443, 0.7509498708766653, -0.021415407776108144, 0.6466776546788641, -1.421096837521404, 0.5632248951325018, -1.230539161899903, -0.26766333435961503, -1.7208241092827994, -1.068122926814994, -1.6339248620455546, 0.07225436117508208, -1.2018233250224348, -0.07213000691963527, -1.0080992229563746, -1.151378048476321, -0.2660104149809121, 1.6307779136408695, 0.8394822016824073, -0.23362802143120032, -0.36799502320054384, 0.35359852278856263, 0.5830948999779656, -0.730683771776052, 1.4715728371820667, -1.0668090648998136, -1.025762014881618, 0.21056106958224155, -0.5141254207774576, -0.1592942838690149, 0.7688711617969363, -2.464535892598544, -0.33306989349452987, 0.9457207224940593, 0.36108072442574435, -0.6490066877470516, -0.8714147266896871, 0.6567118414749348, -0.18543305444915045, 0.11156511615955596, 0.7299392157186994, -0.9902398239693843, -1.3231344439063761, -1.1402773433114928, 0.3696183719476138, -1.0512718152423168, -0.6093518314203102, 0.0010622538704462257, -0.17676306948277776, -0.6291120128576891, 1.6390197341434742, -0.8105788162716191, -2.0105672384392204, -0.7909143328024505, -0.10510684692203587, -0.013384480496840259, 0.37683659744804815, -0.15123337965442354, 1.8427651248902048, 1.0371006855495906, 0.29198928612503655, -1.7455852392709181, 1.0854545339796853, 1.8156620972829793, 1.2399563224061596, 1.1196530775769857, 0.4349954478175989, 0.11093680938321168, 0.9945934589378227, -0.5779739742428905, 1.0398502505219054, -0.09401160691650227, 0.22793239636661505, -1.8664992140331715, -0.16104499274010126, -0.8497511318264537, -0.005035074822415585, -1.7956896952184151, 1.8304783101189757, 0.19094408763231646, 1.3353023874309002, 0.5889134606052353, -0.48487660139277866, 0.4817014755127622, 1.5981632863770983, 2.1416849775567943, -0.5524061711669017, 0.3364804821524787, -0.8609687548167294, 0.24548635047971906, -0.1281468603588133, -0.03871410517044196, -0.2678174852638268, 0.41800607312114096, -0.2503930647517959, 0.8432391494945226, -0.5684563173706987, -0.6737077809046504, 2.0559579098493606, -0.29098826888414253, -0.08572747304559661, -0.301857666880195, -0.3446199959065524, 0.7391340848217359, -0.3087136212446006, 0.5245553707204758, -3.063281336805349, 0.47471623010413705, 0.3733427291759615, -0.26216851429591426, -0.5433523111756248, 0.3305385199964823, -1.4866150542941634, -0.4699911958560942, 0.7312367186673805, -0.22346998944216903, -0.4102860865811592, -0.3003478250288424, -0.3436168605845268, 0.9456524589400904, -0.03710285453384255, 0.10330609878001526, 0.6919858329179392, 0.8673477607085118, 0.380742577915601, 0.5785785515837437, -0.011421905830097267, 0.587187810965595, -1.172536467775141, 
-0.532086162097372, -0.34440413367820183, -1.404900386188497, -0.1916375229779241, 1.6910999461291834, -0.6070351182769795, -0.8371447893868493, 0.8853944070432224, 1.4062946075925473, -0.4575973141608374, 1.1458755768004445, 0.2619874618238163, 1.7105876844856704, -1.3938976454537522, -0.11403217166441704, -1.0354305240085717, -0.4285770475062154, 0.10326635421187867, 0.6911853442971228, 0.6293835213179542, -0.819693698713199, -0.7378190403744175, -1.495947672573938, -1.2406693914431872, -1.0486341638186725, -1.3715759883075953, 3.585407817418151, -0.8007079372574223, -1.527336776754733, -0.4716571043072485, -0.6967311271405545, 1.0003347462169225, -0.30569565002022697, 0.3646134876772732, 0.49083033603832493, 0.07754580794955847, -0.13467337850920083, 0.02134473458605164, 0.5025183900540823, -0.940929087894874, 1.441600637127558, -0.0857298131221344, -0.575175243519591, 0.42622029657630595, -0.3239674701415489, 0.22648849821602596, -0.6636465305318631, 0.30415000329164754, -0.6170241274574016, 0.07578674772163065, 0.2952841441615124, 0.8120317689468056, -0.46861353019671337, 0.04718559572470416, -0.3105660017232523, -0.28898463203535724, 0.9575298065734561, -0.1977556031830993, 0.009658232624257272, 1.1432743259603295, -1.8989396918936858, 0.20787070770386357, 1.4256750543782999, -0.03838329973778874, -0.9051229357470373, -1.2002277085489457, 2.405569956130733, 1.895817948326675, -0.8260858325924574, 0.5759061866255807, 2.7022875569683342, 1.0591327405967745, 0.21449833798124354, 0.19970388388081273, 0.018242139911433558, -0.630960146999549, -2.389646042147776, 0.5424304992480339, -1.2159551561948718, -1.6851632640204128, -0.4812221268109694, 0.6217652794219579, -0.380139431677482, -0.2643524783321051, 0.5106648694993016, -0.895602157034141, -0.20559568725141816, 1.5449271875734911, 1.544075783565114, 0.17877619857826843, 1.9729717339967108, 0.8302033109816261, -0.39118561199170965, -0.4428357598297098, -0.02550407946753186, -1.0202977138210447, 2.6604654314300835, 1.9163029269361842, 0.34697436596877657, -0.8078124769022497, -1.3876596649099957, 0.44707250163663864, -0.6752837232272447, -0.851291770954755, 0.7599767868730256, 0.8134109401706875, -1.6766750539980289, -0.06051832829232975, -0.4652931327216134, -0.9249124398287735, 1.9022739762222731, 1.7632300613807597, 1.675335012283785, 0.47529854476887495, -0.7892463423254658, 0.3910120652706098, 0.5812432547936405, 0.2693084649672777, -0.08138564925779349, 0.9150619269526952, -0.8637356349272142, -0.14137853834901817, -0.20192754829896423, 0.04718228147088756, -0.9743600144318, -0.9936290943927825, 0.3544612180477054, 0.6839546770735121, 1.5089070357620178, 1.301167565172228, -1.5396145667672985, 0.42854366341485456, -1.5876582617301032, -0.0316985879141714, 0.3144220016570915, -0.05054766725644431, 0.2934139006870167, 0.11396170275994542, -0.6472140129693643, 1.6556030742445431, 1.0319410208453506, 0.3292217603989991, -0.058758121958605435, -0.19917171648476298, -0.5192866115874029, 0.1997510689920335, -1.3675686656161756, -1.7761517497832053, -0.11260276070167097, 0.9717892642758689, 0.0840815981843948, -0.40211265381258554, 0.27384496844034517, -1.0403875081272367, 1.2884781173493884, -1.8066239592554476, 1.1136979156298865, -0.06223155785690416, 1.3930381289015936, 0.4586305673655182, 1.3159249757827194, -0.5369892835955705, 0.17827408233621184, 0.22693934439969682, 0.8216240002114816, -1.0422409752281838, 0.3329686606709231, -1.5128804353968217, 1.0323052869815534, 1.1640486934424354, 1.6450118078345612, -0.6717687395070293, 
-0.08135119186406627, 1.2746921873544188, -0.8255794145095643, 0.7123504776564864, 0.6953336934741682, 2.191382322698439, 1.4155790749261592, 2.4681081786912866, -2.2904357033803815, -0.8375155191566624, 1.1040106662196736, 0.7084133268872015, -3.401968681942055, 0.23237090512844757, 1.1199436238058174, 0.6333916486592628, -0.6012340913121055, -0.3693951838866523, -1.7742670566875682, -0.36431378282545124, -0.4042586409194551, -0.04648644034604476, 1.5138191613743486, -0.2053670782251071, 1.8679122383251414, 0.8355881018692999, -0.5369705129279005, -0.7909355080370954, 2.1080036780007987, 0.019537331188020687, -1.4672982688640615, -1.486842866467901, -1.1036839537574874, 1.0800858540685894, -0.2313974176207594, 0.47763272078271807, -1.9196070490691473, -0.8193535127855751, -0.6853651905832031, -0.18272370464882973, -0.33413577684633056, 2.2261342671906106, 1.6853726343573683, 0.8563421109235769, 1.0468799885096596, 0.12189082561416206, -1.3596466927672854, -0.7607432068282968, 0.7061728288620306, -0.4384478018639071, 0.8620104661898899, 1.04258758121448, -1.1464159128515612, 0.9617945424413628, 0.04987102831355013, -0.8472878887606543, 0.32986774370339184, 1.278319839581162, -0.4040926804592034, -0.6691567800662129, 0.9415431940597389, 0.3974846022291844, -0.8425204662387112, -1.506166868030291, -0.04248497940038203, 0.26434168799067986, -1.5698380163561454, -0.6651727917714935, 1.2400220571204048, -0.1251830593977037, 0.6156254221302833, 0.43585628657139575, -1.6014619037611209, 1.9152323656075512, -0.8847911114213622, 1.359854519784993, -0.5554989575409871, 0.25064804193232354, 0.7976616257678464, 0.37834567410982123, -0.6300374359617635, -1.0613465068052854, -0.866474302027355, 1.2458556977164312, 0.577814049080149, 2.069400463823993, 0.9068690176961165, -0.5031387968484738, -0.3640749863516844, -1.041502465417534, 0.6732994659644133, -0.006355018868252906, -0.3650517541386253, 1.0975063446734974, -2.203726812834859, 1.060685913143899, -0.4618706570892267, 0.06475263817517128, -0.19326357638969882, -0.01812119454736379, 0.1337618009668529, 1.1838276997792907, 0.4273677345455913, -0.4912341608307858, 0.2349993979417651, 0.9566260826411601, -0.7948243131958422, -0.6168334352331588, 0.3369425926447926, 0.8547756445246633, 0.2666330662219728, 2.431868771129661, 1.0089732701876513, -0.1162341515974066, -1.1746306816795218, -0.08227639025627424, 0.794676385688044, 0.15005011094018297, -0.8763821573601055, -1.0811684990769739, 0.6311588092267179, 0.026124278982220386, 0.8306502001533514, 1.0856487813261877, -0.018702855899823106, -0.07338137135247896, -0.8435746484744243, -0.18091216366556986, 0.2295807891528797, -1.0689295774443397, -1.5621175533013612, 1.3314045672598216, 0.6211561903553582, 1.0479302317100871, -1.1509436982013124, 0.447985084931758, 0.19917261474342404, 0.3582887259341301, 0.9953552868908098, 0.8948165434511316, 0.4949033431999123, -0.23004847985703908, 0.6411581535557106, -1.1589671573242186, -0.13691519182560624, -0.8849560872785238, 0.6629182075027006, 2.2608150731789696, 2.2823614453180294, -1.2291376923498247, -0.9267975556981378, 0.2597417839242135, -0.7667310491821938, 0.10503294084132372, 2.960320355577672, -1.0645098483081497, -1.2888339889815872, -0.6564570556444346, 0.4742489396354781, 0.8879606773334898, -0.6477585196839569, -0.7309497810668936, 1.7025953934976548, 0.1789174966941155, -0.4839093362740933, -0.8917713440107442, 1.4521776747175792, -0.1676974219641624, -0.500672037099228, -0.2947747621553442, 0.929636971325952, -0.7614935150071248, 
1.6886298813725842, -0.8136217834373227, 1.2030997228178093, 1.382267485738376, 2.594387458306705, -0.7703668776292266, -0.7642584795112598, 1.3356598324609947, -0.5745269784148925, -2.212092904499444, -1.727975556661197, -0.18543087256023608, -0.10167435635752538, 1.3480966068787303, 0.0142803272337873, -0.480077631815393, -0.32270216749876185, -1.7884435311074431, -0.5695640948971382, -0.22859087912027687, -0.08783386938029487, -0.18151955278624396, 0.2031493507095467, 0.06444304447669409, -0.4339138073294572, 0.236563959074551, -0.2937958719187449, 0.1611232843821199, -0.6574871644742827, 1.3141902865107886, 0.6093649138398077, 0.056674985715912514, -1.828714441504608, -0.46768482587669535, 0.6489735384886999, 0.5035677725398181, -0.887590772676158, -0.3222316759913631, -0.35172770495027483, -0.4329205472963193, -0.8449916868048998, 0.38282765028957993, 1.3171924061732359, 0.2956667124648384, 0.5390909497681301, -0.7591989862253667, -1.1520792974885883, -0.39344757869384944, 0.6192677330177175, -0.05578834574542242, 0.593015990282657, 0.9374465229256678, 0.647772562443425, 1.1071167572595217, -1.3015016617832518, 1.267300472456379, -0.5807673178649629, 0.9343468385348384, -0.28554893036513673, 0.4487573993840033, 0.6749018890520516, -1.20482985206765, 0.17291806504654686, -0.4124576407610529, -0.9203236505429044, -0.7461342369802754, -0.19694162321688435, 0.46556512963300906, 0.5198366004764268, -1.7222561645076129, -0.7078891617994071, -1.1653209054214695, 1.5560964971092122, 0.3335520152642012, 0.008390825910327906, 0.11336719644324977, 0.3158913817073965, 0.4704483453862008, -0.5700583482495889, -1.276634964816531, -1.7880560933777756, -0.26514994709973827, 0.6194447367446946, -0.654762456435761, 1.0621929196158544, 0.4454719444987052, -0.9323145612076791, 1.3197357985874438, -0.8792938558447049, -0.2470423905508279, 0.5128954444799875, -0.09202044992462606, -1.3082892596744382, -0.34428948138804927, 0.012422196356164879, 1.4626152292162142, 0.34678216997159833, 0.409462409138861, 0.32838364873801185, 1.8776849459782967, 1.6816627852133539, -0.24894138693568296, 0.7150105850753732, 0.22929306929129853, -0.21434910504054566, 1.3339497173912471, -1.2497042452057836, -0.04487255356399775, -0.6486304639082145, -0.8048044333264733, -1.8090170501469942, 1.481689285694336, -1.4772553200884717, -0.36792462539303805, -1.103508260812736, -0.2135236993720317, 0.40889179796540165, 1.993585196733386, 0.43879096427562897, -0.44512875171982147, -1.1780830020629518, -1.666001035275436, -0.2977294957665528, 1.7299614542270356, 0.9882265798853356, 2.2412430815464597, 0.5801434875813244, -0.739190619909163, -1.2663490594895201, 0.5735521649879137, 1.2105709455012765, 1.9112159951415644, -2.259218931706201, -0.563310876529377, -2.4119185903750493, 0.9662624485722368, -0.22788851242764951, 0.9198283887420099, 0.7855927065251492, -0.7459868094792474, 0.10543289218409971, 0.6401750224618271, -0.0077375118689326705, -0.11647036625911977, -0.4722391874001602, -0.2718425102733572, -0.8796746964457087, 0.6112903638894259, 0.5347851929096421, -0.4749419210717794, 1.0633720764557604, -0.2590556665572949, 2.590182301241823, 1.4524061372706638, -0.8503733047335056, 0.5609357391481067, -1.5661825434426477, 0.8019667474525984, 1.2716795425969496, 0.20011166646917924, -0.7105405282282679, -0.5593129072748189, -1.2401371010520867, -0.7002520937780202, -2.236596391787529, -1.8130090502823886, -0.23990633860801777, 1.7428780878151378, 1.4661206538178901, -0.8678567353744017, 0.2957423562639015, 0.13935419069962593, 
1.399598845123674, 0.059729544605779575, -0.9607778026198247, 0.18474907798482051, 1.0117193651915666, -0.9173540069396245, 0.8934765521365161, -0.665655291396948, -0.32955768273493324, 0.3062873812209283, 0.177342106982554, 0.3595522704599547, -1.5964209653110262, 0.6705899137346863, -1.1034642863469553, -1.0029562484065524, 0.10622956543479244, 0.4261871936541378, 0.7777501694354336, -0.806235923997437, -0.8272801398172428, -1.2783440745845536, 0.5982979227669168, -0.28214494859284556, 1.101560367699546, -0.14008021262664466, -0.38717961692054237, 0.9962925044431369, -0.7391490127960976, -0.06294945881724459, 0.7283671247384875, -0.8458895297768138, 0.22808829204347086, 0.43685668023014523, 0.9204095286935638, -0.028241645704951284, 0.15951784765135396, 0.8068984900818966, -0.34387965576978663, 0.573828962760762, -0.13374515460012618, -0.5552788325377814, 0.5644705833909952, -0.7500532220469983, 0.33436674493862256, -0.8595435026628129, -0.38943898244735853, 0.6401502590131951, -1.2968645995363652, 0.5861622311675501, 0.2311759458689689, 0.10962292708600496, -0.26025023584932205, -0.5398478003611565, -1.0514168636922954, 1.2689172189127857, 1.7029909647408918, -0.02325431623491577, -0.3064675950620902, -1.5816446841009473, 0.6874254059433739, 0.7755967316475798, 1.4119333324396597, 0.14198739135512406, 0.2927714469848192, -0.7239793888399496, 0.3506448783535265, -0.7568480706640158, -1.2158508387501554, 0.22197589131086445, -0.5621415304506887, -1.2381112050191665, -1.917208333033256, -0.3321665793941188, -0.5916951886991071, -1.244826507645294, -0.29767661008214463, 0.8590635852032509, -1.8579290298421591, -1.0470546224962876, -2.540080936704841, 0.5458326769958273, 0.042222128206941614, 0.6080450228346708, 0.6542717901662132, -1.7292955132690793, -0.4793123354077725, 0.7341767020417185, -1.3322222208234826, -0.5076389542432337, 0.684399163420284, 0.3948487980667425, -1.7919279627150193, 1.582925890933478, 0.8341846456063038, 0.11776890377042544, 1.7471239793853526, 1.2269451783893597, 0.4235463733287474, 1.5908284320029056, -1.635191535538596, 0.04419903330064594, -1.264385360373252, 0.5370192519783876, 1.2368603501240771, -0.9241079150337286, -0.3428051342915208, 0.0882286441353256, -2.210824604513402, -1.9000343283757128, 0.4633735273417207, -0.32534396967175094, 0.026187836765356437, 0.18253601230609245, 0.8519745761039671, -0.028225375482784816, -0.5114197447067229, -1.2428743809444227, 0.2879711400745508, 1.2857130031108321, 0.5296743558975853, -0.8440551904275335, -1.3776032491368861, 1.8164028526343798, -1.1422045767986222, -1.8675179752970443, 0.6969635320800454, 0.9444010906414336, -1.28197913481747, -0.06259132322304235, -0.4518754825442558, 0.9183188639099813, -0.2916931407869574, -1.1464007469977915, -0.4475136941593681, 0.44385573868752803, 2.1606711638680762, -1.4813603018181851, -0.5647618024870872, -1.474746204557383, -2.9067748098220485, 0.06132111635940877, -0.09663310829361334, -1.087053744976143, -1.774855117659402, 0.8130120568830074, -0.5179279676199186, -0.32549430825787784, -1.1995838271705979, 0.8587480835176114, -0.02095126282663596, 0.6677898019388228, -1.1891003375304232, -2.1125937754631305, -0.047765192715672734, 0.09812525010300294, -1.034992359189106, 1.0213451864081846, 1.0788796513160641, -1.444469239557739, 0.28341828947950637, -2.4556013891966737, 1.7126080715698266, -0.5943068899412715, 1.0897594994215383, -0.16345461884651272, 0.7027032523865234, 2.2851158088542562, 0.5038100496225458, -0.16724173993999966, -0.6747457076421414, 0.42254684460738184, 
1.277203836895222, -0.34438446183574595, 0.38956738377878264, -0.26884968654334923, -0.02148772950361766, 0.02044885235644607, -1.3873669828232345, 0.19995968746809226, -1.5826859815811556, -0.20385119370067947, 0.5724329589281247, -1.330307658319185, 0.7756101314358208, -0.4989071461473931, 0.5388161769427321, -0.9811085284266614, 2.335331094403556, -0.5588657325211347, -1.2850853695283377, 0.40092993245913744, -1.9675685522110529, 0.9378938542456674, -0.18645815013912917, -0.6828273180353106, -1.840122530632185, -1.2581798109361761, 0.2867275394896832, ], } return data @pytest.fixture def numeric_high_card_dataset(test_backend, numeric_high_card_dict): schemas = { "pandas": { "norm_0_1": "float64", }, "postgresql": { # "norm_0_1": "DOUBLE_PRECISION", "norm_0_1": "NUMERIC", }, "sqlite": { "norm_0_1": "FLOAT", }, "mysql": { "norm_0_1": "DOUBLE", }, "mssql": { "norm_0_1": "FLOAT", }, "spark": { "norm_0_1": "FloatType", }, } return get_dataset(test_backend, numeric_high_card_dict, schemas=schemas) @pytest.fixture def datetime_dataset(test_backend): data = { "datetime": [ str(datetime.datetime(2020, 2, 4, 22, 12, 5, 943152)), str(datetime.datetime(2020, 2, 5, 22, 12, 5, 943152)), str(datetime.datetime(2020, 2, 6, 22, 12, 5, 943152)), str(datetime.datetime(2020, 2, 7, 22, 12, 5, 943152)), str(datetime.datetime(2020, 2, 8, 22, 12, 5, 943152)), str(datetime.datetime(2020, 2, 9, 22, 12, 5, 943152)), str(datetime.datetime(2020, 2, 10, 22, 12, 5, 943152)), str(datetime.datetime(2020, 2, 11, 22, 12, 5, 943152)), str(datetime.datetime(2020, 2, 12, 22, 12, 5, 943152)), str(datetime.datetime(2020, 2, 13, 22, 12, 5, 943152)), ] } schemas = { "pandas": { "datetime": "datetime64", }, "postgresql": { "datetime": "TIMESTAMP", }, "sqlite": { "datetime": "TIMESTAMP", }, "mysql": { "datetime": "TIMESTAMP", }, "mssql": { "datetime": "DATETIME", }, "spark": { "datetime": "TimestampType", }, } return get_dataset(test_backend, data, schemas=schemas) @pytest.fixture def non_numeric_low_card_dataset(test_backend): """Provide dataset fixtures that have special values and/or are otherwise useful outside the standard json testing framework""" data = { "lowcardnonnum": [ "a", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", ] } schemas = { "pandas": { "lowcardnonnum": "str", }, "postgresql": { "lowcardnonnum": "TEXT", }, "sqlite": { "lowcardnonnum": "VARCHAR", }, "mysql": { "lowcardnonnum": "TEXT", }, "mssql": { "lowcardnonnum": "VARCHAR", }, "spark": { "lowcardnonnum": "StringType", }, } return get_dataset(test_backend, data, schemas=schemas) @pytest.fixture def non_numeric_high_card_dataset(test_backend): """Provide dataset fixtures that have special values and/or are otherwise useful outside the standard json testing framework""" data = { "highcardnonnum": [ "CZVYSnQhHhoti8mQ66XbDuIjE5FMeIHb", "cPWAg2MJjh8fkRRU1B9aD8vWq3P8KTxJ", "4tehKwWiCDpuOmTPRYYqTqM7TvEa8Zi7", "ZvlAnCGiGfkKgQoNrhnnyrjmU7sLsUZz", "AaqMhdYukVdexTk6LlWvzXYXTp5upPuf", "ZSKmXUB35K14khHGyjYtuCHuI8yeM7yR", "F1cwKp4HsCN2s2kXQGR5RUa3WAcibCq2", "coaX8bSHoVZ8FP8SuQ57SFbrvpRHcibq", 
"3IzmbSJF525qtn7O4AvfKONnz7eFgnyU", "gLCtw7435gaR532PNFVCtvk14lNJpZXv", "hNyjMYZkVlOKRjhg8cKymU5Bvnh0MK5R", "IqKC2auGTNehP8y24HzDQOdt9oysgFyx", "TePy034aBKlNeAmcJmKJ4p1yF7EUYEOg", "cIfDv6ieTAobe84P84InzDKrJrccmqbq", "m1979gfI6lVF9ijJA245bchYFd1EaMap", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "7wcR161jyKYhFLEZkhFqSXLwXW46I5x8", "IpmNsUFgbbVnL0ljJZOBHnTV0FKARwSn", "hsA4btHJg6Gq1jwOuOc3pl2UPB5QUwZg", "vwZyG0jGUys3HQdUiOocIbzhUdUugwKX", "rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6", "p1f20s14ZJGUTIBUNeBmJEkWKlwoyqjA", "VzgAIYNKHA0APN0oZtzMAfmbCzJenswy", "IO7BqR3iS136YMlLCEo6W3jKNOVJIlLG", "eTEyhiRuyEcTnHThi1W6yi1mxUjq8TEp", "4OHPKQgk3sPPYpKWcEWUtNZ0jv00UuPU", "ZJCstyyUvTR2gwSM6FLgkXYDwG54qo8u", "nGQsvDAzuL5Yc2XpqoG5P7RhpiTpJp8H", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "CP22IFHDX1maoSjTEdtBfrMHWQKACGDB", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hGwZQW7ao9HqNV2xAovuMBdyafNDE8q6", "OJmDHbqP1wzarsaSwCphsqvdy5SnTQMT", "JQbXIcgwUhttfPIGB7VGGfL2KiElabrO", "eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57", "GW2JuUJmuCebia7RUiCNl2BTjukIzZWj", "oVFAvQEKmRTLBqdCuPoJNvzPvQ7UArWC", "zeMHFFKLr5j4DIFxRQ7jHWCMClrP3LmJ", "eECcArV5TZRftL6ZWaUDO6D2l3HiZj1Y", "xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77", "1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01", "uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG", "agIk8H2nFa0K27IFr0VM2RNp6saihYI3", "cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N", "fM1IzD5USx4lMYi6bqPCEZjd2aP7G9vv", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "i65d8jqET5FsVw9t5BwAvBjkEJI6eUMj", "HbT1b7DQL7n7ZEt2FsKHIggycT1XIYd8", "938eC0iGMSqZNlqgDNG9YVE7t4izO2Ev", "PyZetp4izgE4ymPcUXyImF5mm7I6zbta", "FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs", "PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd", "eSQIxFqyYVf55UMzMEZrotPO74i3Sh03", "2b74DhJ6YFHrAkrjK4tvvKkYUKll44bR", "3svDRnrELyAsC69Phpnl2Os89856tFBJ", "ZcSGN9YYNHnHjUp0SktWoZI7JDmvRTTN", "m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1", "wZTwJmMX5Q58DhDdmScdigTSyUUC04sO", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "KAuFgcmRKQPIIqGMAQQPfjyC1VXt40vs", "0S4iueoqKNjvS55O57BdY3DbfwhIDwKc", "ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF", "Mayxk8JkV3Z6aROtnsKyqwVK5exiJa8i", "pXqIRP5fQzbDtj1xFqgJey6dyFOJ1YiU", "6Ba6RSM56x4MIaJ2wChQ3trBVOw1SWGM", "puqzOpRJyNVAwH2vLjVCL3uuggxO5aoB", "jOI4E43wA3lYBWbV0nMxqix885Tye1Pf", "YgTTYpRDrxU1dMKZeVHYzYNovH2mWGB7", "24yYfUg1ATvfI1PW79ytsEqHWJHI69wQ", "mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ItvI4l02oAIZEd5cPtDf4OnyBazji0PL", "DW4oLNP49MNNENFoFf7jDTI04xdvCiWg", "vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "R74JT4EEhh3Xeu5tbx8bZFkXZRhx6HUn", "bd9yxS6b1QrKXuT4irY4kpjSyLmKZmx6", "UMdFQNSiJZtLK3jxBETZrINDKcRqRd0c", "He7xIY2BMNZ7vSO47KfKoYskVJeeedI7", "G8PqO0ADoKfDPsMT1K0uOrYf1AtwlTSR", "hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF", "mlYdlfei13P6JrT7ZbSZdsudhE24aPYr", "gUTUoH9LycaItbwLZkK9qf0xbRDgOMN4", "xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo", "kbfzRyRqGZ9WvmTdYKDjyds6EK4fYCyx", "7AOZ3o2egl6aU1zOrS8CVwXYZMI8NTPg", "Wkh43H7t95kRb9oOMjTSqC7163SrI4rU", "x586wCHsLsOaXl3F9cYeaROwdFc2pbU1", "oOd7GdoPn4qqfAeFj2Z3ddyFdmkuPznh", "suns0vGgaMzasYpwDEEof2Ktovy0o4os", "of6W1csCTCBMBXli4a6cEmGZ9EFIOFRC", "mmTiWVje9SotwPgmRxrGrNeI9DssAaCj", "pIX0vhOzql5c6Z6NpLbzc8MvYiONyT54", "nvyCo3MkIK4tS6rkuL4Yw1RgGKwhm4c2", "prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "mty9rQJBeTsBQ7ra8vWRbBaWulzhWRSG", "JL38Vw7yERPC4gBplBaixlbpDg8V7gC6", "MylTvGl5L1tzosEcgGCQPjIRN6bCUwtI", "hmr0LNyYObqe5sURs408IhRb50Lnek5K", "CZVYSnQhHhoti8mQ66XbDuIjE5FMeIHb", "cPWAg2MJjh8fkRRU1B9aD8vWq3P8KTxJ", "4tehKwWiCDpuOmTPRYYqTqM7TvEa8Zi7", "ZvlAnCGiGfkKgQoNrhnnyrjmU7sLsUZz", "AaqMhdYukVdexTk6LlWvzXYXTp5upPuf", "ZSKmXUB35K14khHGyjYtuCHuI8yeM7yR", 
"F1cwKp4HsCN2s2kXQGR5RUa3WAcibCq2", "coaX8bSHoVZ8FP8SuQ57SFbrvpRHcibq", "3IzmbSJF525qtn7O4AvfKONnz7eFgnyU", "gLCtw7435gaR532PNFVCtvk14lNJpZXv", "hNyjMYZkVlOKRjhg8cKymU5Bvnh0MK5R", "IqKC2auGTNehP8y24HzDQOdt9oysgFyx", "TePy034aBKlNeAmcJmKJ4p1yF7EUYEOg", "cIfDv6ieTAobe84P84InzDKrJrccmqbq", "m1979gfI6lVF9ijJA245bchYFd1EaMap", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "7wcR161jyKYhFLEZkhFqSXLwXW46I5x8", "IpmNsUFgbbVnL0ljJZOBHnTV0FKARwSn", "hsA4btHJg6Gq1jwOuOc3pl2UPB5QUwZg", "vwZyG0jGUys3HQdUiOocIbzhUdUugwKX", "rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6", "p1f20s14ZJGUTIBUNeBmJEkWKlwoyqjA", "VzgAIYNKHA0APN0oZtzMAfmbCzJenswy", "IO7BqR3iS136YMlLCEo6W3jKNOVJIlLG", "eTEyhiRuyEcTnHThi1W6yi1mxUjq8TEp", "4OHPKQgk3sPPYpKWcEWUtNZ0jv00UuPU", "ZJCstyyUvTR2gwSM6FLgkXYDwG54qo8u", "nGQsvDAzuL5Yc2XpqoG5P7RhpiTpJp8H", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "CP22IFHDX1maoSjTEdtBfrMHWQKACGDB", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hGwZQW7ao9HqNV2xAovuMBdyafNDE8q6", "OJmDHbqP1wzarsaSwCphsqvdy5SnTQMT", "JQbXIcgwUhttfPIGB7VGGfL2KiElabrO", "eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57", "GW2JuUJmuCebia7RUiCNl2BTjukIzZWj", "oVFAvQEKmRTLBqdCuPoJNvzPvQ7UArWC", "zeMHFFKLr5j4DIFxRQ7jHWCMClrP3LmJ", "eECcArV5TZRftL6ZWaUDO6D2l3HiZj1Y", "xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77", "1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01", "uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG", "agIk8H2nFa0K27IFr0VM2RNp6saihYI3", "cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N", "fM1IzD5USx4lMYi6bqPCEZjd2aP7G9vv", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "i65d8jqET5FsVw9t5BwAvBjkEJI6eUMj", "HbT1b7DQL7n7ZEt2FsKHIggycT1XIYd8", "938eC0iGMSqZNlqgDNG9YVE7t4izO2Ev", "PyZetp4izgE4ymPcUXyImF5mm7I6zbta", "FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs", "PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd", "eSQIxFqyYVf55UMzMEZrotPO74i3Sh03", "2b74DhJ6YFHrAkrjK4tvvKkYUKll44bR", "3svDRnrELyAsC69Phpnl2Os89856tFBJ", "ZcSGN9YYNHnHjUp0SktWoZI7JDmvRTTN", "m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1", "wZTwJmMX5Q58DhDdmScdigTSyUUC04sO", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "KAuFgcmRKQPIIqGMAQQPfjyC1VXt40vs", "0S4iueoqKNjvS55O57BdY3DbfwhIDwKc", "ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF", "Mayxk8JkV3Z6aROtnsKyqwVK5exiJa8i", "pXqIRP5fQzbDtj1xFqgJey6dyFOJ1YiU", "6Ba6RSM56x4MIaJ2wChQ3trBVOw1SWGM", "puqzOpRJyNVAwH2vLjVCL3uuggxO5aoB", "jOI4E43wA3lYBWbV0nMxqix885Tye1Pf", "YgTTYpRDrxU1dMKZeVHYzYNovH2mWGB7", "24yYfUg1ATvfI1PW79ytsEqHWJHI69wQ", "mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ItvI4l02oAIZEd5cPtDf4OnyBazji0PL", "DW4oLNP49MNNENFoFf7jDTI04xdvCiWg", "vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "R74JT4EEhh3Xeu5tbx8bZFkXZRhx6HUn", "bd9yxS6b1QrKXuT4irY4kpjSyLmKZmx6", "UMdFQNSiJZtLK3jxBETZrINDKcRqRd0c", "He7xIY2BMNZ7vSO47KfKoYskVJeeedI7", "G8PqO0ADoKfDPsMT1K0uOrYf1AtwlTSR", "hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF", "mlYdlfei13P6JrT7ZbSZdsudhE24aPYr", "gUTUoH9LycaItbwLZkK9qf0xbRDgOMN4", "xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo", "kbfzRyRqGZ9WvmTdYKDjyds6EK4fYCyx", "7AOZ3o2egl6aU1zOrS8CVwXYZMI8NTPg", "Wkh43H7t95kRb9oOMjTSqC7163SrI4rU", "x586wCHsLsOaXl3F9cYeaROwdFc2pbU1", "oOd7GdoPn4qqfAeFj2Z3ddyFdmkuPznh", "suns0vGgaMzasYpwDEEof2Ktovy0o4os", "of6W1csCTCBMBXli4a6cEmGZ9EFIOFRC", "mmTiWVje9SotwPgmRxrGrNeI9DssAaCj", "pIX0vhOzql5c6Z6NpLbzc8MvYiONyT54", "nvyCo3MkIK4tS6rkuL4Yw1RgGKwhm4c2", "prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "mty9rQJBeTsBQ7ra8vWRbBaWulzhWRSG", "JL38Vw7yERPC4gBplBaixlbpDg8V7gC6", "MylTvGl5L1tzosEcgGCQPjIRN6bCUwtI", "hmr0LNyYObqe5sURs408IhRb50Lnek5K", ], # Built from highcardnonnum using the following: # vals = pd.Series(data["highcardnonnum"]) # sample_vals = vals.sample(n=10, random_state=42) # weights = 
np.random.RandomState(42).rand(10) # weights = weights / np.sum(weights) # new_vals = sample_vals.sample(n=200, weights=weights, replace=True, random_state=11) "medcardnonnum": [ "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", 
"NhTsracusfp5V6zVeWqLZnychDl7jjO4", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", 
"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", ], } schemas = { "pandas": { "highcardnonnum": "str", "medcardnonnum": "str", }, "postgresql": { "highcardnonnum": "TEXT", "medcardnonnum": "TEXT", }, "sqlite": { "highcardnonnum": "VARCHAR", "medcardnonnum": "VARCHAR", }, "mysql": { "highcardnonnum": "TEXT", "medcardnonnum": "TEXT", }, "mssql": { "highcardnonnum": "VARCHAR", "medcardnonnum": "VARCHAR", }, "spark": { "highcardnonnum": "StringType", "medcardnonnum": "StringType", }, } return get_dataset(test_backend, data, schemas=schemas) @pytest.fixture def periodic_table_of_elements(): data = [ "Hydrogen", "Helium", "Lithium", "Beryllium", "Boron", "Carbon", "Nitrogen", "Oxygen", "Fluorine", "Neon", "Sodium", "Magnesium", "Aluminum", "Silicon", "Phosphorus", "Sulfur", "Chlorine", "Argon", "Potassium", "Calcium", "Scandium", "Titanium", "Vanadium", "Chromium", "Manganese", "Iron", "Cobalt", "Nickel", "Copper", "Zinc", "Gallium", "Germanium", "Arsenic", "Selenium", "Bromine", "Krypton", "Rubidium", "Strontium", "Yttrium", "Zirconium", "Niobium", "Molybdenum", "Technetium", "Ruthenium", "Rhodium", "Palladium", "Silver", "Cadmium", "Indium", "Tin", "Antimony", "Tellurium", "Iodine", "Xenon", "Cesium", "Barium", "Lanthanum", "Cerium", "Praseodymium", "Neodymium", "Promethium", "Samarium", "Europium", "Gadolinium", "Terbium", "Dysprosium", "Holmium", "Erbium", "Thulium", "Ytterbium", "Lutetium", "Hafnium", "Tantalum", "Tungsten", "Rhenium", "Osmium", "Iridium", "Platinum", "Gold", "Mercury", "Thallium", "Lead", "Bismuth", "Polonium", "Astatine", "Radon", "Francium", "Radium", "Actinium", "Thorium", "Protactinium", "Uranium", "Neptunium", "Plutonium", "Americium", "Curium", "Berkelium", "Californium", "Einsteinium", "Fermium", "Mendelevium", "Nobelium", "Lawrencium", "Rutherfordium", "Dubnium", "Seaborgium", "Bohrium", "Hassium", "Meitnerium", "Darmstadtium", "Roentgenium", "Copernicium", "Nihomium", "Flerovium", "Moscovium", "Livermorium", "Tennessine", "Oganesson", ] return data def dataset_sample_data(test_backend): # No infinities for mysql if test_backend == "mysql": data = { # "infinities": [-np.inf, -10, -np.pi, 0, np.pi, 10/2.2, np.inf], "nulls": [np.nan, None, 0, 1.1, 2.2, 3.3, None], "naturals": [1, 2, 3, 4, 5, 6, 7], } else: data = { "infinities": [-np.inf, -10, -np.pi, 0, np.pi, 10 / 2.2, np.inf], "nulls": [np.nan, None, 0, 1.1, 2.2, 3.3, None], "naturals": [1, 2, 3, 4, 5, 6, 7], } schemas = { "pandas": {"infinities": "float64", "nulls": "float64", "naturals": "float64"}, "postgresql": { "infinities": "DOUBLE_PRECISION", "nulls": "DOUBLE_PRECISION", "naturals": "NUMERIC", }, "sqlite": {"infinities": "FLOAT", "nulls": "FLOAT", "naturals": "FLOAT"}, "mysql": {"nulls": "DOUBLE", "naturals": "DOUBLE"}, "mssql": {"infinities": "FLOAT", "nulls": "FLOAT", "naturals": "FLOAT"}, "spark": { "infinities": "FloatType", "nulls": "FloatType", "naturals": "FloatType", }, } return data, schemas @pytest.fixture def dataset(test_backend): """Provide dataset fixtures that have special values and/or are otherwise useful outside the standard json testing framework""" data, schemas = dataset_sample_data(test_backend) return get_dataset(test_backend, data, schemas=schemas) @pytest.fixture def pandas_dataset(): test_backend = 
"PandasDataset" data, schemas = dataset_sample_data(test_backend) return get_dataset(test_backend, data, schemas=schemas) @pytest.fixture def sqlalchemy_dataset(test_backends): """Provide dataset fixtures that have special values and/or are otherwise useful outside the standard json testing framework""" if "postgresql" in test_backends: backend = "postgresql" elif "sqlite" in test_backends: backend = "sqlite" else: return data = { "infinities": [-np.inf, -10, -np.pi, 0, np.pi, 10 / 2.2, np.inf], "nulls": [np.nan, None, 0, 1.1, 2.2, 3.3, None], "naturals": [1, 2, 3, 4, 5, 6, 7], } schemas = { "postgresql": { "infinities": "DOUBLE_PRECISION", "nulls": "DOUBLE_PRECISION", "naturals": "DOUBLE_PRECISION", }, "sqlite": {"infinities": "FLOAT", "nulls": "FLOAT", "naturals": "FLOAT"}, } return get_dataset(backend, data, schemas=schemas, profiler=None) @pytest.fixture def sqlitedb_engine(test_backend): if test_backend == "sqlite": try: import sqlalchemy as sa return sa.create_engine("sqlite://") except ImportError: raise ValueError("sqlite tests require sqlalchemy to be installed") else: pytest.skip("Skipping test designed for sqlite on non-sqlite backend.") @pytest.fixture def postgresql_engine(test_backend): if test_backend == "postgresql": try: import sqlalchemy as sa db_hostname = os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost") engine = sa.create_engine( f"postgresql://postgres@{db_hostname}/test_ci" ).connect() yield engine engine.close() except ImportError: raise ValueError("SQL Database tests require sqlalchemy to be installed.") else: pytest.skip("Skipping test designed for postgresql on non-postgresql backend.") @pytest.fixture(scope="function") def empty_data_context(tmp_path) -> DataContext: project_path = tmp_path / "empty_data_context" project_path.mkdir() project_path = str(project_path) context = ge.data_context.DataContext.create(project_path) context_path = os.path.join(project_path, "great_expectations") asset_config_path = os.path.join(context_path, "expectations") os.makedirs(asset_config_path, exist_ok=True) assert context.list_datasources() == [] return context @pytest.fixture def titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled( tmp_path_factory, monkeypatch, ): # Reenable GE_USAGE_STATS monkeypatch.delenv("GE_USAGE_STATS") project_path: str = str(tmp_path_factory.mktemp("titanic_data_context")) context_path: str = os.path.join(project_path, "great_expectations") os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True) data_path: str = os.path.join(context_path, "..", "data", "titanic") os.makedirs(os.path.join(data_path), exist_ok=True) shutil.copy( file_relative_path( __file__, os.path.join( "test_fixtures", "great_expectations_v013_no_datasource_stats_enabled.yml", ), ), str(os.path.join(context_path, "great_expectations.yml")), ) shutil.copy( file_relative_path(__file__, os.path.join("test_sets", "Titanic.csv")), str( os.path.join( context_path, "..", "data", "titanic", "Titanic_19120414_1313.csv" ) ), ) shutil.copy( file_relative_path(__file__, os.path.join("test_sets", "Titanic.csv")), str( os.path.join(context_path, "..", "data", "titanic", "Titanic_19120414_1313") ), ) shutil.copy( file_relative_path(__file__, os.path.join("test_sets", "Titanic.csv")), str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1911.csv")), ) shutil.copy( file_relative_path(__file__, os.path.join("test_sets", "Titanic.csv")), str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1912.csv")), ) context: 
DataContext = DataContext(context_root_dir=context_path) assert context.root_directory == context_path datasource_config: str = f""" class_name: Datasource execution_engine: class_name: PandasExecutionEngine data_connectors: my_basic_data_connector: class_name: InferredAssetFilesystemDataConnector base_directory: {data_path} default_regex: pattern: (.*)\\.csv group_names: - data_asset_name my_special_data_connector: class_name: ConfiguredAssetFilesystemDataConnector base_directory: {data_path} glob_directive: "*.csv" default_regex: pattern: (.+)\\.csv group_names: - name assets: users: base_directory: {data_path} pattern: (.+)_(\\d+)_(\\d+)\\.csv group_names: - name - timestamp - size my_other_data_connector: class_name: ConfiguredAssetFilesystemDataConnector base_directory: {data_path} glob_directive: "*.csv" default_regex: pattern: (.+)\\.csv group_names: - name assets: users: {{}} my_runtime_data_connector: module_name: great_expectations.datasource.data_connector class_name: RuntimeDataConnector batch_identifiers: - pipeline_stage_name - airflow_run_id """ # noinspection PyUnusedLocal datasource: Datasource = context.test_yaml_config( name="my_datasource", yaml_config=datasource_config, pretty_print=False ) # noinspection PyProtectedMember context._save_project_config() return context @pytest.fixture def titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled( titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, tmp_path_factory, monkeypatch, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled project_dir: str = context.root_directory data_path: str = os.path.join(project_dir, "..", "data", "titanic") datasource_config: str = f""" class_name: Datasource execution_engine: class_name: PandasExecutionEngine data_connectors: my_additional_data_connector: class_name: InferredAssetFilesystemDataConnector base_directory: {data_path} default_regex: pattern: (.*)\\.csv group_names: - data_asset_name """ # noinspection PyUnusedLocal datasource: BaseDatasource = context.add_datasource( "my_additional_datasource", **yaml.load(datasource_config) ) return context @pytest.fixture def titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled( sa, spark_session, titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, tmp_path_factory, test_backends, monkeypatch, ): context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled project_dir: str = context.root_directory data_path: str = os.path.join(project_dir, "..", "data", "titanic") if ( any( [ dbms in test_backends for dbms in ["postgresql", "sqlite", "mysql", "mssql"] ] ) and (sa is not None) and is_library_loadable(library_name="sqlalchemy") ): db_fixture_file_path: str = file_relative_path( __file__, os.path.join("test_sets", "titanic_sql_test_cases.db"), ) db_file_path: str = os.path.join( data_path, "titanic_sql_test_cases.db", ) shutil.copy( db_fixture_file_path, db_file_path, ) datasource_config: str = f""" class_name: SimpleSqlalchemyDatasource connection_string: sqlite:///{db_file_path} introspection: whole_table: {{}} """ # noinspection PyUnusedLocal datasource: BaseDatasource = context.add_datasource( "my_sqlite_db_datasource", **yaml.load(datasource_config) ) return context @pytest.fixture def 
assetless_dataconnector_context( tmp_path_factory, monkeypatch, ): # Reenable GE_USAGE_STATS monkeypatch.delenv("GE_USAGE_STATS") project_path = str(tmp_path_factory.mktemp("titanic_data_context")) context_path = os.path.join(project_path, "great_expectations") os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True) data_path = os.path.join(context_path, "..", "data", "titanic") os.makedirs(os.path.join(data_path), exist_ok=True) shutil.copy( file_relative_path( __file__, "./test_fixtures/great_expectations_v013_no_datasource_stats_enabled.yml", ), str(os.path.join(context_path, "great_expectations.yml")), ) context = ge.data_context.DataContext(context_path) assert context.root_directory == context_path datasource_config = f""" class_name: Datasource execution_engine: class_name: PandasExecutionEngine data_connectors: my_other_data_connector: class_name: ConfiguredAssetFilesystemDataConnector base_directory: {data_path} glob_directive: "*.csv" default_regex: pattern: (.+)\\.csv group_names: - name assets: {{}} """ context.test_yaml_config( name="my_datasource", yaml_config=datasource_config, pretty_print=False ) # noinspection PyProtectedMember context._save_project_config() return context @pytest.fixture def deterministic_asset_dataconnector_context( tmp_path_factory, monkeypatch, ): # Reenable GE_USAGE_STATS monkeypatch.delenv("GE_USAGE_STATS") project_path = str(tmp_path_factory.mktemp("titanic_data_context")) context_path = os.path.join(project_path, "great_expectations") os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True) data_path = os.path.join(context_path, "..", "data", "titanic") os.makedirs(os.path.join(data_path), exist_ok=True) shutil.copy( file_relative_path( __file__, "./test_fixtures/great_expectations_v013_no_datasource_stats_enabled.yml", ), str(os.path.join(context_path, "great_expectations.yml")), ) shutil.copy( file_relative_path(__file__, "./test_sets/Titanic.csv"), str( os.path.join( context_path, "..", "data", "titanic", "Titanic_19120414_1313.csv" ) ), ) shutil.copy( file_relative_path(__file__, "./test_sets/Titanic.csv"), str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1911.csv")), ) shutil.copy( file_relative_path(__file__, "./test_sets/Titanic.csv"), str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1912.csv")), ) context = ge.data_context.DataContext(context_path) assert context.root_directory == context_path datasource_config = f""" class_name: Datasource execution_engine: class_name: PandasExecutionEngine data_connectors: my_other_data_connector: class_name: ConfiguredAssetFilesystemDataConnector base_directory: {data_path} glob_directive: "*.csv" default_regex: pattern: (.+)\\.csv group_names: - name assets: users: {{}} """ context.test_yaml_config( name="my_datasource", yaml_config=datasource_config, pretty_print=False ) # noinspection PyProtectedMember context._save_project_config() return context @pytest.fixture def titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates( titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled # add simple template config simple_checkpoint_template_config: CheckpointConfig = CheckpointConfig( name="my_simple_template_checkpoint", config_version=1, run_name_template="%Y-%M-foo-bar-template-$VAR", action_list=[ { "name": "store_validation_result", 
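                # Each action entry pairs a human-readable "name" with an
                # "action" dict; the class_name is resolved to an Action
                # class when the checkpoint runs.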
"action": { "class_name": "StoreValidationResultAction", }, }, { "name": "store_evaluation_params", "action": { "class_name": "StoreEvaluationParametersAction", }, }, { "name": "update_data_docs", "action": { "class_name": "UpdateDataDocsAction", }, }, ], evaluation_parameters={ "environment": "$GE_ENVIRONMENT", "tolerance": 1.0e-2, "aux_param_0": "$MY_PARAM", "aux_param_1": "1 + $MY_PARAM", }, runtime_configuration={ "result_format": { "result_format": "BASIC", "partial_unexpected_count": 20, } }, ) simple_checkpoint_template_config_key: ConfigurationIdentifier = ( ConfigurationIdentifier( configuration_key=simple_checkpoint_template_config.name ) ) context.checkpoint_store.set( key=simple_checkpoint_template_config_key, value=simple_checkpoint_template_config, ) # add nested template configs nested_checkpoint_template_config_1: CheckpointConfig = CheckpointConfig( name="my_nested_checkpoint_template_1", config_version=1, run_name_template="%Y-%M-foo-bar-template-$VAR", expectation_suite_name="suite_from_template_1", action_list=[ { "name": "store_validation_result", "action": { "class_name": "StoreValidationResultAction", }, }, { "name": "store_evaluation_params", "action": { "class_name": "StoreEvaluationParametersAction", }, }, { "name": "update_data_docs", "action": { "class_name": "UpdateDataDocsAction", }, }, ], evaluation_parameters={ "environment": "FOO", "tolerance": "FOOBOO", "aux_param_0": "FOOBARBOO", "aux_param_1": "FOOBARBOO", "template_1_key": 456, }, runtime_configuration={ "result_format": "FOOBARBOO", "partial_unexpected_count": "FOOBARBOO", "template_1_key": 123, }, validations=[ { "batch_request": { "datasource_name": "my_datasource_template_1", "data_connector_name": "my_special_data_connector_template_1", "data_asset_name": "users_from_template_1", "data_connector_query": {"partition_index": -999}, } } ], ) nested_checkpoint_template_config_1_key: ConfigurationIdentifier = ( ConfigurationIdentifier( configuration_key=nested_checkpoint_template_config_1.name ) ) context.checkpoint_store.set( key=nested_checkpoint_template_config_1_key, value=nested_checkpoint_template_config_1, ) nested_checkpoint_template_config_2: CheckpointConfig = CheckpointConfig( name="my_nested_checkpoint_template_2", config_version=1, template_name="my_nested_checkpoint_template_1", run_name_template="%Y-%M-foo-bar-template-$VAR-template-2", action_list=[ { "name": "store_validation_result", "action": { "class_name": "StoreValidationResultAction", }, }, { "name": "store_evaluation_params", "action": { "class_name": "MyCustomStoreEvaluationParametersActionTemplate2", }, }, { "name": "update_data_docs", "action": { "class_name": "UpdateDataDocsAction", }, }, { "name": "new_action_from_template_2", "action": {"class_name": "Template2SpecialAction"}, }, ], evaluation_parameters={ "environment": "$GE_ENVIRONMENT", "tolerance": 1.0e-2, "aux_param_0": "$MY_PARAM", "aux_param_1": "1 + $MY_PARAM", }, runtime_configuration={ "result_format": "BASIC", "partial_unexpected_count": 20, }, ) nested_checkpoint_template_config_2_key: ConfigurationIdentifier = ( ConfigurationIdentifier( configuration_key=nested_checkpoint_template_config_2.name ) ) context.checkpoint_store.set( key=nested_checkpoint_template_config_2_key, value=nested_checkpoint_template_config_2, ) nested_checkpoint_template_config_3: CheckpointConfig = CheckpointConfig( name="my_nested_checkpoint_template_3", config_version=1, template_name="my_nested_checkpoint_template_2", run_name_template="%Y-%M-foo-bar-template-$VAR-template-3", 
action_list=[ { "name": "store_validation_result", "action": { "class_name": "StoreValidationResultAction", }, }, { "name": "store_evaluation_params", "action": { "class_name": "MyCustomStoreEvaluationParametersActionTemplate3", }, }, { "name": "update_data_docs", "action": { "class_name": "UpdateDataDocsAction", }, }, { "name": "new_action_from_template_3", "action": {"class_name": "Template3SpecialAction"}, }, ], evaluation_parameters={ "environment": "$GE_ENVIRONMENT", "tolerance": 1.0e-2, "aux_param_0": "$MY_PARAM", "aux_param_1": "1 + $MY_PARAM", "template_3_key": 123, }, runtime_configuration={ "result_format": "BASIC", "partial_unexpected_count": 20, "template_3_key": "bloopy!", }, ) nested_checkpoint_template_config_3_key: ConfigurationIdentifier = ( ConfigurationIdentifier( configuration_key=nested_checkpoint_template_config_3.name ) ) context.checkpoint_store.set( key=nested_checkpoint_template_config_3_key, value=nested_checkpoint_template_config_3, ) # add minimal SimpleCheckpoint simple_checkpoint_config: CheckpointConfig = CheckpointConfig( name="my_minimal_simple_checkpoint", class_name="SimpleCheckpoint", config_version=1, ) simple_checkpoint_config_key: ConfigurationIdentifier = ConfigurationIdentifier( configuration_key=simple_checkpoint_config.name ) context.checkpoint_store.set( key=simple_checkpoint_config_key, value=simple_checkpoint_config, ) # add SimpleCheckpoint with slack webhook simple_checkpoint_with_slack_webhook_config: CheckpointConfig = CheckpointConfig( name="my_simple_checkpoint_with_slack", class_name="SimpleCheckpoint", config_version=1, slack_webhook="https://hooks.slack.com/foo/bar", ) simple_checkpoint_with_slack_webhook_config_key: ConfigurationIdentifier = ( ConfigurationIdentifier( configuration_key=simple_checkpoint_with_slack_webhook_config.name ) ) context.checkpoint_store.set( key=simple_checkpoint_with_slack_webhook_config_key, value=simple_checkpoint_with_slack_webhook_config, ) # add SimpleCheckpoint with slack webhook and notify_with simple_checkpoint_with_slack_webhook_and_notify_with_all_config: CheckpointConfig = CheckpointConfig( name="my_simple_checkpoint_with_slack_and_notify_with_all", class_name="SimpleCheckpoint", config_version=1, slack_webhook="https://hooks.slack.com/foo/bar", notify_with="all", ) simple_checkpoint_with_slack_webhook_and_notify_with_all_config_key: ConfigurationIdentifier = ConfigurationIdentifier( configuration_key=simple_checkpoint_with_slack_webhook_and_notify_with_all_config.name ) context.checkpoint_store.set( key=simple_checkpoint_with_slack_webhook_and_notify_with_all_config_key, value=simple_checkpoint_with_slack_webhook_and_notify_with_all_config, ) # add SimpleCheckpoint with site_names simple_checkpoint_with_site_names_config: CheckpointConfig = CheckpointConfig( name="my_simple_checkpoint_with_site_names", class_name="SimpleCheckpoint", config_version=1, site_names=["local_site"], ) simple_checkpoint_with_site_names_config_key: ConfigurationIdentifier = ( ConfigurationIdentifier( configuration_key=simple_checkpoint_with_site_names_config.name ) ) context.checkpoint_store.set( key=simple_checkpoint_with_site_names_config_key, value=simple_checkpoint_with_site_names_config, ) # noinspection PyProtectedMember context._save_project_config() return context @pytest.fixture def empty_data_context_with_config_variables(monkeypatch, empty_data_context): monkeypatch.setenv("FOO", "BAR") monkeypatch.setenv("REPLACE_ME_ESCAPED_ENV", "ive_been_$--replaced") root_dir = empty_data_context.root_directory 
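    # The config fixtures copied below reference variables such as ${FOO};
    # at load time the DataContext substitutes them from
    # uncommitted/config_variables.yml and from environment variables
    # (e.g. the values monkeypatched above).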
ge_config_path = file_relative_path( __file__, "./test_fixtures/great_expectations_basic_with_variables.yml", ) shutil.copy(ge_config_path, os.path.join(root_dir, "great_expectations.yml")) config_variables_path = file_relative_path( __file__, "./test_fixtures/config_variables.yml", ) shutil.copy(config_variables_path, os.path.join(root_dir, "uncommitted")) return DataContext(context_root_dir=root_dir) @pytest.fixture def empty_context_with_checkpoint(empty_data_context): context = empty_data_context root_dir = empty_data_context.root_directory fixture_name = "my_checkpoint.yml" fixture_path = file_relative_path( __file__, f"./data_context/fixtures/contexts/{fixture_name}" ) checkpoints_file = os.path.join(root_dir, "checkpoints", fixture_name) shutil.copy(fixture_path, checkpoints_file) assert os.path.isfile(checkpoints_file) return context @pytest.fixture def empty_context_with_checkpoint_stats_enabled(empty_data_context_stats_enabled): context = empty_data_context_stats_enabled root_dir = context.root_directory fixture_name = "my_checkpoint.yml" fixture_path = file_relative_path( __file__, f"./data_context/fixtures/contexts/{fixture_name}" ) checkpoints_file = os.path.join(root_dir, "checkpoints", fixture_name) shutil.copy(fixture_path, checkpoints_file) return context @pytest.fixture def empty_data_context_stats_enabled(tmp_path_factory, monkeypatch): # Reenable GE_USAGE_STATS monkeypatch.delenv("GE_USAGE_STATS") project_path = str(tmp_path_factory.mktemp("empty_data_context")) context = ge.data_context.DataContext.create(project_path) context_path = os.path.join(project_path, "great_expectations") asset_config_path = os.path.join(context_path, "expectations") os.makedirs(asset_config_path, exist_ok=True) return context @pytest.fixture def empty_context_with_checkpoint_v1_stats_enabled( empty_data_context_stats_enabled, monkeypatch ): try: monkeypatch.delenv("VAR") monkeypatch.delenv("MY_PARAM") monkeypatch.delenv("OLD_PARAM") except: pass monkeypatch.setenv("VAR", "test") monkeypatch.setenv("MY_PARAM", "1") monkeypatch.setenv("OLD_PARAM", "2") context = empty_data_context_stats_enabled root_dir = context.root_directory fixture_name = "my_v1_checkpoint.yml" fixture_path = file_relative_path( __file__, f"./data_context/fixtures/contexts/{fixture_name}" ) checkpoints_file = os.path.join(root_dir, "checkpoints", fixture_name) shutil.copy(fixture_path, checkpoints_file) # # noinspection PyProtectedMember context._save_project_config() return context @pytest.fixture def titanic_data_context(tmp_path_factory): project_path = str(tmp_path_factory.mktemp("titanic_data_context")) context_path = os.path.join(project_path, "great_expectations") os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True) os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True) data_path = os.path.join(context_path, "..", "data") os.makedirs(os.path.join(data_path), exist_ok=True) titanic_yml_path = file_relative_path( __file__, "./test_fixtures/great_expectations_v013_titanic.yml" ) shutil.copy( titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml")) ) titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv") shutil.copy( titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv")) ) return ge.data_context.DataContext(context_path) @pytest.fixture def titanic_data_context_no_data_docs_no_checkpoint_store(tmp_path_factory): project_path = str(tmp_path_factory.mktemp("titanic_data_context")) context_path = os.path.join(project_path, 
"great_expectations") os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True) os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True) data_path = os.path.join(context_path, "..", "data") os.makedirs(os.path.join(data_path), exist_ok=True) titanic_yml_path = file_relative_path( __file__, "./test_fixtures/great_expectations_titanic_pre_v013_no_data_docs.yml" ) shutil.copy( titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml")) ) titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv") shutil.copy( titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv")) ) return ge.data_context.DataContext(context_path) @pytest.fixture def titanic_data_context_no_data_docs(tmp_path_factory): project_path = str(tmp_path_factory.mktemp("titanic_data_context")) context_path = os.path.join(project_path, "great_expectations") os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True) os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True) data_path = os.path.join(context_path, "..", "data") os.makedirs(os.path.join(data_path), exist_ok=True) titanic_yml_path = file_relative_path( __file__, "./test_fixtures/great_expectations_titanic_no_data_docs.yml" ) shutil.copy( titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml")) ) titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv") shutil.copy( titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv")) ) return ge.data_context.DataContext(context_path) @pytest.fixture def titanic_data_context_stats_enabled_no_config_store(tmp_path_factory, monkeypatch): # Reenable GE_USAGE_STATS monkeypatch.delenv("GE_USAGE_STATS") project_path = str(tmp_path_factory.mktemp("titanic_data_context")) context_path = os.path.join(project_path, "great_expectations") os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True) os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True) data_path = os.path.join(context_path, "..", "data") os.makedirs(os.path.join(data_path), exist_ok=True) titanic_yml_path = file_relative_path( __file__, "./test_fixtures/great_expectations_titanic.yml" ) shutil.copy( titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml")) ) titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv") shutil.copy( titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv")) ) return ge.data_context.DataContext(context_path) @pytest.fixture def titanic_data_context_stats_enabled(tmp_path_factory, monkeypatch): # Reenable GE_USAGE_STATS monkeypatch.delenv("GE_USAGE_STATS") project_path = str(tmp_path_factory.mktemp("titanic_data_context")) context_path = os.path.join(project_path, "great_expectations") os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True) os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True) data_path = os.path.join(context_path, "..", "data") os.makedirs(os.path.join(data_path), exist_ok=True) titanic_yml_path = file_relative_path( __file__, "./test_fixtures/great_expectations_v013_titanic.yml" ) shutil.copy( titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml")) ) titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv") shutil.copy( titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv")) ) return ge.data_context.DataContext(context_path) @pytest.fixture def 
titanic_data_context_stats_enabled_config_version_2(tmp_path_factory, monkeypatch): # Reenable GE_USAGE_STATS monkeypatch.delenv("GE_USAGE_STATS") project_path = str(tmp_path_factory.mktemp("titanic_data_context")) context_path = os.path.join(project_path, "great_expectations") os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True) os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True) data_path = os.path.join(context_path, "..", "data") os.makedirs(os.path.join(data_path), exist_ok=True) titanic_yml_path = file_relative_path( __file__, "./test_fixtures/great_expectations_titanic.yml" ) shutil.copy( titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml")) ) titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv") shutil.copy( titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv")) ) return ge.data_context.DataContext(context_path) @pytest.fixture def titanic_data_context_stats_enabled_config_version_3(tmp_path_factory, monkeypatch): # Reenable GE_USAGE_STATS monkeypatch.delenv("GE_USAGE_STATS") project_path = str(tmp_path_factory.mktemp("titanic_data_context")) context_path = os.path.join(project_path, "great_expectations") os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True) os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True) data_path = os.path.join(context_path, "..", "data") os.makedirs(os.path.join(data_path), exist_ok=True) titanic_yml_path = file_relative_path( __file__, "./test_fixtures/great_expectations_v013_upgraded_titanic.yml" ) shutil.copy( titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml")) ) titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv") shutil.copy( titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv")) ) return ge.data_context.DataContext(context_path) @pytest.fixture def titanic_data_context_stats_enabled_config_version_2_with_checkpoint( tmp_path_factory, monkeypatch, titanic_data_context_stats_enabled_config_version_2 ): context = titanic_data_context_stats_enabled_config_version_2 root_dir = context.root_directory fixture_name = "my_checkpoint.yml" fixture_path = file_relative_path( __file__, f"./data_context/fixtures/contexts/{fixture_name}" ) checkpoints_file = os.path.join(root_dir, "checkpoints", fixture_name) shutil.copy(fixture_path, checkpoints_file) return context @pytest.fixture def titanic_sqlite_db(sa): try: import sqlalchemy as sa from sqlalchemy import create_engine titanic_db_path = file_relative_path(__file__, "./test_sets/titanic.db") engine = create_engine("sqlite:///{}".format(titanic_db_path)) assert engine.execute("select count(*) from titanic").fetchall()[0] == (1313,) return engine except ImportError: raise ValueError("sqlite tests require sqlalchemy to be installed") @pytest.fixture def titanic_expectation_suite(): return ExpectationSuite( expectation_suite_name="Titanic.warning", meta={}, data_asset_type="Dataset", expectations=[ ExpectationConfiguration( expectation_type="expect_column_to_exist", kwargs={"column": "PClass"} ), ExpectationConfiguration( expectation_type="expect_column_values_to_not_be_null", kwargs={"column": "Name"}, ), ExpectationConfiguration( expectation_type="expect_table_row_count_to_equal", kwargs={"value": 1313}, ), ], ) @pytest.fixture def empty_sqlite_db(sa): """An empty in-memory sqlite db that always gets run.""" try: import sqlalchemy as sa from sqlalchemy import create_engine engine = create_engine("sqlite://") 
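        # "sqlite://" with no file path yields a transient in-memory
        # database; the assert below just sanity-checks that the engine
        # is usable.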
assert engine.execute("select 1").fetchall()[0] == (1,) return engine except ImportError: raise ValueError("sqlite tests require sqlalchemy to be installed") @pytest.fixture @freeze_time("09/26/2019 13:42:41") def site_builder_data_context_with_html_store_titanic_random( tmp_path_factory, filesystem_csv_3 ): base_dir = str(tmp_path_factory.mktemp("project_dir")) project_dir = os.path.join(base_dir, "project_path") os.mkdir(project_dir) os.makedirs(os.path.join(project_dir, "data")) os.makedirs(os.path.join(project_dir, "data/titanic")) shutil.copy( file_relative_path(__file__, "./test_sets/Titanic.csv"), str(os.path.join(project_dir, "data", "titanic", "Titanic.csv")), ) os.makedirs(os.path.join(project_dir, "data", "random")) shutil.copy( os.path.join(filesystem_csv_3, "f1.csv"), str(os.path.join(project_dir, "data", "random", "f1.csv")), ) shutil.copy( os.path.join(filesystem_csv_3, "f2.csv"), str(os.path.join(project_dir, "data", "random", "f2.csv")), ) ge.data_context.DataContext.create(project_dir) shutil.copy( file_relative_path( __file__, "./test_fixtures/great_expectations_site_builder.yml" ), str(os.path.join(project_dir, "great_expectations", "great_expectations.yml")), ) context = ge.data_context.DataContext( context_root_dir=os.path.join(project_dir, "great_expectations") ) context.add_datasource( "titanic", class_name="PandasDatasource", batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": os.path.join(project_dir, "data", "titanic"), } }, ) context.add_datasource( "random", class_name="PandasDatasource", batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": os.path.join(project_dir, "data", "random"), } }, ) context.profile_datasource("titanic") context.profile_datasource("random") context.profile_datasource(context.list_datasources()[0]["name"]) context._project_config.anonymous_usage_statistics = { "enabled": True, "data_context_id": "f43d4897-385f-4366-82b0-1a8eda2bf79c", } return context @pytest.fixture(scope="function") @freeze_time("09/26/2019 13:42:41") def site_builder_data_context_v013_with_html_store_titanic_random( tmp_path, filesystem_csv_3 ): base_dir = tmp_path / "project_dir" base_dir.mkdir() base_dir = str(base_dir) project_dir = os.path.join(base_dir, "project_path") os.mkdir(project_dir) os.makedirs(os.path.join(project_dir, "data")) os.makedirs(os.path.join(project_dir, "data", "titanic")) shutil.copy( file_relative_path(__file__, "./test_sets/Titanic.csv"), str(os.path.join(project_dir, "data", "titanic", "Titanic.csv")), ) os.makedirs(os.path.join(project_dir, "data", "random")) shutil.copy( os.path.join(filesystem_csv_3, "f1.csv"), str(os.path.join(project_dir, "data", "random", "f1.csv")), ) shutil.copy( os.path.join(filesystem_csv_3, "f2.csv"), str(os.path.join(project_dir, "data", "random", "f2.csv")), ) ge.data_context.DataContext.create(project_dir) shutil.copy( file_relative_path( __file__, "./test_fixtures/great_expectations_v013_site_builder.yml" ), str(os.path.join(project_dir, "great_expectations", "great_expectations.yml")), ) context = ge.data_context.DataContext( context_root_dir=os.path.join(project_dir, "great_expectations") ) context.add_datasource( "titanic", class_name="PandasDatasource", batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": os.path.join(project_dir, "data", "titanic"), } }, ) context.add_datasource( "random", class_name="PandasDatasource", 
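        # The SubdirReaderBatchKwargsGenerator configured below treats the
        # files under base_directory as data assets and emits batch_kwargs
        # pointing at the individual CSV files.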
batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": os.path.join(project_dir, "data", "random"), } }, ) context.profile_datasource("titanic") context.profile_datasource("random") context.profile_datasource(context.list_datasources()[0]["name"]) context._project_config.anonymous_usage_statistics = { "enabled": True, "data_context_id": "f43d4897-385f-4366-82b0-1a8eda2bf79c", } return context @pytest.fixture(scope="function") def titanic_multibatch_data_context(tmp_path): """ Based on titanic_data_context, but with 2 identical batches of data asset "titanic" """ project_path = tmp_path / "titanic_data_context" project_path.mkdir() project_path = str(project_path) context_path = os.path.join(project_path, "great_expectations") os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True) data_path = os.path.join(context_path, "..", "data", "titanic") os.makedirs(os.path.join(data_path), exist_ok=True) shutil.copy( file_relative_path(__file__, "./test_fixtures/great_expectations_titanic.yml"), str(os.path.join(context_path, "great_expectations.yml")), ) shutil.copy( file_relative_path(__file__, "./test_sets/Titanic.csv"), str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1911.csv")), ) shutil.copy( file_relative_path(__file__, "./test_sets/Titanic.csv"), str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1912.csv")), ) return ge.data_context.DataContext(context_path) @pytest.fixture def v10_project_directory(tmp_path_factory): """ GE 0.10.x project for testing upgrade helper """ project_path = str(tmp_path_factory.mktemp("v10_project")) context_root_dir = os.path.join(project_path, "great_expectations") shutil.copytree( file_relative_path( __file__, "./test_fixtures/upgrade_helper/great_expectations_v10_project/" ), context_root_dir, ) shutil.copy( file_relative_path( __file__, "./test_fixtures/upgrade_helper/great_expectations_v1_basic.yml" ), os.path.join(context_root_dir, "great_expectations.yml"), ) return context_root_dir @pytest.fixture def v20_project_directory(tmp_path_factory): """ GE config_version: 2 project for testing upgrade helper """ project_path = str(tmp_path_factory.mktemp("v20_project")) context_root_dir = os.path.join(project_path, "great_expectations") shutil.copytree( file_relative_path( __file__, "./test_fixtures/upgrade_helper/great_expectations_v20_project/" ), context_root_dir, ) shutil.copy( file_relative_path( __file__, "./test_fixtures/upgrade_helper/great_expectations_v2.yml" ), os.path.join(context_root_dir, "great_expectations.yml"), ) return context_root_dir @pytest.fixture def data_context_parameterized_expectation_suite_no_checkpoint_store(tmp_path_factory): """ This data_context is *manually* created to have the config we want, vs created with DataContext.create() """ project_path = str(tmp_path_factory.mktemp("data_context")) context_path = os.path.join(project_path, "great_expectations") asset_config_path = os.path.join(context_path, "expectations") fixture_dir = file_relative_path(__file__, "./test_fixtures") os.makedirs( os.path.join(asset_config_path, "my_dag_node"), exist_ok=True, ) shutil.copy( os.path.join(fixture_dir, "great_expectations_basic.yml"), str(os.path.join(context_path, "great_expectations.yml")), ) shutil.copy( os.path.join( fixture_dir, "expectation_suites/parameterized_expectation_suite_fixture.json", ), os.path.join(asset_config_path, "my_dag_node", "default.json"), ) os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True) 
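    # The custom dataset modules copied below land in plugins/, which the
    # DataContext puts on the import path (plugins_directory in
    # great_expectations.yml), so fixture configs can reference them by
    # class name.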
shutil.copy( os.path.join(fixture_dir, "custom_pandas_dataset.py"), str(os.path.join(context_path, "plugins", "custom_pandas_dataset.py")), ) shutil.copy( os.path.join(fixture_dir, "custom_sqlalchemy_dataset.py"), str(os.path.join(context_path, "plugins", "custom_sqlalchemy_dataset.py")), ) shutil.copy( os.path.join(fixture_dir, "custom_sparkdf_dataset.py"), str(os.path.join(context_path, "plugins", "custom_sparkdf_dataset.py")), ) return ge.data_context.DataContext(context_path) @pytest.fixture def data_context_with_bad_datasource(tmp_path_factory): """ This data_context is *manually* created to have the config we want, vs created with DataContext.create() This DataContext has a connection to a datasource named my_postgres_db which is not a valid datasource. It is used by test_get_batch_multiple_datasources_do_not_scan_all() """ project_path = str(tmp_path_factory.mktemp("data_context")) context_path = os.path.join(project_path, "great_expectations") asset_config_path = os.path.join(context_path, "expectations") fixture_dir = file_relative_path(__file__, "./test_fixtures") os.makedirs( os.path.join(asset_config_path, "my_dag_node"), exist_ok=True, ) shutil.copy( os.path.join(fixture_dir, "great_expectations_bad_datasource.yml"), str(os.path.join(context_path, "great_expectations.yml")), ) return ge.data_context.DataContext(context_path) @pytest.fixture def data_context_parameterized_expectation_suite_no_checkpoint_store_with_usage_statistics_enabled( tmp_path_factory, ): """ This data_context is *manually* created to have the config we want, vs created with DataContext.create() """ project_path = str(tmp_path_factory.mktemp("data_context")) context_path = os.path.join(project_path, "great_expectations") asset_config_path = os.path.join(context_path, "expectations") fixture_dir = file_relative_path(__file__, "./test_fixtures") os.makedirs( os.path.join(asset_config_path, "my_dag_node"), exist_ok=True, ) shutil.copy( os.path.join( fixture_dir, "great_expectations_basic_with_usage_stats_enabled.yml" ), str(os.path.join(context_path, "great_expectations.yml")), ) shutil.copy( os.path.join( fixture_dir, "expectation_suites/parameterized_expectation_suite_fixture.json", ), os.path.join(asset_config_path, "my_dag_node", "default.json"), ) os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True) shutil.copy( os.path.join(fixture_dir, "custom_pandas_dataset.py"), str(os.path.join(context_path, "plugins", "custom_pandas_dataset.py")), ) shutil.copy( os.path.join(fixture_dir, "custom_sqlalchemy_dataset.py"), str(os.path.join(context_path, "plugins", "custom_sqlalchemy_dataset.py")), ) shutil.copy( os.path.join(fixture_dir, "custom_sparkdf_dataset.py"), str(os.path.join(context_path, "plugins", "custom_sparkdf_dataset.py")), ) return ge.data_context.DataContext(context_path) @pytest.fixture def data_context_parameterized_expectation_suite(tmp_path_factory): """ This data_context is *manually* created to have the config we want, vs created with DataContext.create() """ project_path = str(tmp_path_factory.mktemp("data_context")) context_path = os.path.join(project_path, "great_expectations") asset_config_path = os.path.join(context_path, "expectations") fixture_dir = file_relative_path(__file__, "./test_fixtures") os.makedirs( os.path.join(asset_config_path, "my_dag_node"), exist_ok=True, ) shutil.copy( os.path.join(fixture_dir, "great_expectations_v013_basic.yml"), str(os.path.join(context_path, "great_expectations.yml")), ) shutil.copy( os.path.join( fixture_dir, 
"expectation_suites/parameterized_expectation_suite_fixture.json", ), os.path.join(asset_config_path, "my_dag_node", "default.json"), ) os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True) shutil.copy( os.path.join(fixture_dir, "custom_pandas_dataset.py"), str(os.path.join(context_path, "plugins", "custom_pandas_dataset.py")), ) shutil.copy( os.path.join(fixture_dir, "custom_sqlalchemy_dataset.py"), str(os.path.join(context_path, "plugins", "custom_sqlalchemy_dataset.py")), ) shutil.copy( os.path.join(fixture_dir, "custom_sparkdf_dataset.py"), str(os.path.join(context_path, "plugins", "custom_sparkdf_dataset.py")), ) return ge.data_context.DataContext(context_path) @pytest.fixture def data_context_parameterized_expectation_suite_with_usage_statistics_enabled( tmp_path_factory, ): """ This data_context is *manually* created to have the config we want, vs created with DataContext.create() """ project_path = str(tmp_path_factory.mktemp("data_context")) context_path = os.path.join(project_path, "great_expectations") asset_config_path = os.path.join(context_path, "expectations") fixture_dir = file_relative_path(__file__, "./test_fixtures") os.makedirs( os.path.join(asset_config_path, "my_dag_node"), exist_ok=True, ) shutil.copy( os.path.join( fixture_dir, "great_expectations_v013_basic_with_usage_stats_enabled.yml" ), str(os.path.join(context_path, "great_expectations.yml")), ) shutil.copy( os.path.join( fixture_dir, "expectation_suites/parameterized_expectation_suite_fixture.json", ), os.path.join(asset_config_path, "my_dag_node", "default.json"), ) os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True) shutil.copy( os.path.join(fixture_dir, "custom_pandas_dataset.py"), str(os.path.join(context_path, "plugins", "custom_pandas_dataset.py")), ) shutil.copy( os.path.join(fixture_dir, "custom_sqlalchemy_dataset.py"), str(os.path.join(context_path, "plugins", "custom_sqlalchemy_dataset.py")), ) shutil.copy( os.path.join(fixture_dir, "custom_sparkdf_dataset.py"), str(os.path.join(context_path, "plugins", "custom_sparkdf_dataset.py")), ) return ge.data_context.DataContext(context_path) @pytest.fixture def data_context_with_bad_notebooks(tmp_path_factory): """ This data_context is *manually* created to have the config we want, vs created with DataContext.create() """ project_path = str(tmp_path_factory.mktemp("data_context")) context_path = os.path.join(project_path, "great_expectations") asset_config_path = os.path.join(context_path, "expectations") fixture_dir = file_relative_path(__file__, "./test_fixtures") custom_notebook_assets_dir = "notebook_assets" os.makedirs( os.path.join(asset_config_path, "my_dag_node"), exist_ok=True, ) shutil.copy( os.path.join(fixture_dir, "great_expectations_basic_with_bad_notebooks.yml"), str(os.path.join(context_path, "great_expectations.yml")), ) shutil.copy( os.path.join( fixture_dir, "expectation_suites/parameterized_expectation_suite_fixture.json", ), os.path.join(asset_config_path, "my_dag_node", "default.json"), ) os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True) shutil.copytree( os.path.join(fixture_dir, custom_notebook_assets_dir), str(os.path.join(context_path, "plugins", custom_notebook_assets_dir)), ) return ge.data_context.DataContext(context_path) @pytest.fixture def data_context_custom_notebooks(tmp_path_factory): """ This data_context is *manually* created to have the config we want, vs created with DataContext.create() """ project_path = str(tmp_path_factory.mktemp("data_context")) context_path = 
os.path.join(project_path, "great_expectations") asset_config_path = os.path.join(context_path, "expectations") fixture_dir = file_relative_path(__file__, "./test_fixtures") os.makedirs( os.path.join(asset_config_path, "my_dag_node"), exist_ok=True, ) shutil.copy( os.path.join(fixture_dir, "great_expectations_custom_notebooks.yml"), str(os.path.join(context_path, "great_expectations.yml")), ) shutil.copy( os.path.join( fixture_dir, "expectation_suites/parameterized_expectation_suite_fixture.json", ), os.path.join(asset_config_path, "my_dag_node", "default.json"), ) os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True) return ge.data_context.DataContext(context_path) @pytest.fixture def data_context_v3_custom_notebooks(tmp_path): """ This data_context is *manually* created to have the config we want, vs created with DataContext.create() """ project_path = tmp_path context_path = os.path.join(project_path, "great_expectations") expectations_dir = os.path.join(context_path, "expectations") fixture_dir = file_relative_path(__file__, "./test_fixtures") custom_notebook_assets_dir = os.path.join("v3", "notebook_assets") os.makedirs( os.path.join(expectations_dir, "my_dag_node"), exist_ok=True, ) shutil.copy( os.path.join(fixture_dir, "great_expectations_v013_custom_notebooks.yml"), str(os.path.join(context_path, "great_expectations.yml")), ) shutil.copy( os.path.join( fixture_dir, "expectation_suites/parameterized_expectation_suite_fixture.json", ), os.path.join(expectations_dir, "my_dag_node", "default.json"), ) os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True) shutil.copytree( os.path.join(fixture_dir, custom_notebook_assets_dir), str(os.path.join(context_path, "plugins", custom_notebook_assets_dir)), ) return ge.data_context.DataContext(context_path) @pytest.fixture def data_context_v3_custom_bad_notebooks(tmp_path): """ This data_context is *manually* created to have the config we want, vs created with DataContext.create() """ project_path = tmp_path context_path = os.path.join(project_path, "great_expectations") expectations_dir = os.path.join(context_path, "expectations") fixture_dir = file_relative_path(__file__, "./test_fixtures") custom_notebook_assets_dir = os.path.join("v3", "notebook_assets") os.makedirs( os.path.join(expectations_dir, "my_dag_node"), exist_ok=True, ) shutil.copy( os.path.join(fixture_dir, "great_expectations_v013_bad_notebooks.yml"), str(os.path.join(context_path, "great_expectations.yml")), ) shutil.copy( os.path.join( fixture_dir, "expectation_suites/parameterized_expectation_suite_fixture.json", ), os.path.join(expectations_dir, "my_dag_node", "default.json"), ) os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True) shutil.copytree( os.path.join(fixture_dir, custom_notebook_assets_dir), str(os.path.join(context_path, "plugins", custom_notebook_assets_dir)), ) return ge.data_context.DataContext(context_path) @pytest.fixture def data_context_simple_expectation_suite(tmp_path_factory): """ This data_context is *manually* created to have the config we want, vs created with DataContext.create() """ project_path = str(tmp_path_factory.mktemp("data_context")) context_path = os.path.join(project_path, "great_expectations") asset_config_path = os.path.join(context_path, "expectations") fixture_dir = file_relative_path(__file__, "./test_fixtures") os.makedirs( os.path.join(asset_config_path, "my_dag_node"), exist_ok=True, ) shutil.copy( os.path.join(fixture_dir, "great_expectations_basic.yml"), str(os.path.join(context_path, 
"great_expectations.yml")), ) shutil.copy( os.path.join( fixture_dir, "rendering_fixtures/expectations_suite_1.json", ), os.path.join(asset_config_path, "default.json"), ) os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True) shutil.copy( os.path.join(fixture_dir, "custom_pandas_dataset.py"), str(os.path.join(context_path, "plugins", "custom_pandas_dataset.py")), ) shutil.copy( os.path.join(fixture_dir, "custom_sqlalchemy_dataset.py"), str(os.path.join(context_path, "plugins", "custom_sqlalchemy_dataset.py")), ) shutil.copy( os.path.join(fixture_dir, "custom_sparkdf_dataset.py"), str(os.path.join(context_path, "plugins", "custom_sparkdf_dataset.py")), ) return ge.data_context.DataContext(context_path) @pytest.fixture def data_context_simple_expectation_suite_with_custom_pandas_dataset(tmp_path_factory): """ This data_context is *manually* created to have the config we want, vs created with DataContext.create() """ project_path = str(tmp_path_factory.mktemp("data_context")) context_path = os.path.join(project_path, "great_expectations") asset_config_path = os.path.join(context_path, "expectations") fixture_dir = file_relative_path(__file__, "./test_fixtures") os.makedirs( os.path.join(asset_config_path, "my_dag_node"), exist_ok=True, ) shutil.copy( os.path.join( fixture_dir, "great_expectations_basic_with_custom_pandas_dataset.yml" ), str(os.path.join(context_path, "great_expectations.yml")), ) shutil.copy( os.path.join( fixture_dir, "rendering_fixtures/expectations_suite_1.json", ), os.path.join(asset_config_path, "default.json"), ) os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True) shutil.copy( os.path.join(fixture_dir, "custom_pandas_dataset.py"), str(os.path.join(context_path, "plugins", "custom_pandas_dataset.py")), ) shutil.copy( os.path.join(fixture_dir, "custom_sqlalchemy_dataset.py"), str(os.path.join(context_path, "plugins", "custom_sqlalchemy_dataset.py")), ) shutil.copy( os.path.join(fixture_dir, "custom_sparkdf_dataset.py"), str(os.path.join(context_path, "plugins", "custom_sparkdf_dataset.py")), ) return ge.data_context.DataContext(context_path) @pytest.fixture() def filesystem_csv_data_context_with_validation_operators( titanic_data_context_stats_enabled, filesystem_csv_2 ): titanic_data_context_stats_enabled.add_datasource( "rad_datasource", module_name="great_expectations.datasource", class_name="PandasDatasource", batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": str(filesystem_csv_2), } }, ) return titanic_data_context_stats_enabled @pytest.fixture() def filesystem_csv_data_context(empty_data_context, filesystem_csv_2): empty_data_context.add_datasource( "rad_datasource", module_name="great_expectations.datasource", class_name="PandasDatasource", batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": str(filesystem_csv_2), } }, ) return empty_data_context @pytest.fixture def filesystem_csv(tmp_path_factory): base_dir = tmp_path_factory.mktemp("filesystem_csv") base_dir = str(base_dir) # Put a few files in the directory with open(os.path.join(base_dir, "f1.csv"), "w") as outfile: outfile.writelines(["a,b,c\n"]) with open(os.path.join(base_dir, "f2.csv"), "w") as outfile: outfile.writelines(["a,b,c\n"]) os.makedirs(os.path.join(base_dir, "f3"), exist_ok=True) with open(os.path.join(base_dir, "f3", "f3_20190101.csv"), "w") as outfile: outfile.writelines(["a,b,c\n"]) with open(os.path.join(base_dir, "f3", "f3_20190102.csv"), "w") 
as outfile: outfile.writelines(["a,b,c\n"]) return base_dir @pytest.fixture(scope="function") def filesystem_csv_2(tmp_path): base_dir = tmp_path / "filesystem_csv_2" base_dir.mkdir() base_dir = str(base_dir) # Put a file in the directory toy_dataset = PandasDataset({"x": [1, 2, 3]}) toy_dataset.to_csv(os.path.join(base_dir, "f1.csv"), index=False) assert os.path.isabs(base_dir) assert os.path.isfile(os.path.join(base_dir, "f1.csv")) return base_dir @pytest.fixture(scope="function") def filesystem_csv_3(tmp_path): base_dir = tmp_path / "filesystem_csv_3" base_dir.mkdir() base_dir = str(base_dir) # Put a file in the directory toy_dataset = PandasDataset({"x": [1, 2, 3]}) toy_dataset.to_csv(os.path.join(base_dir, "f1.csv"), index=False) toy_dataset_2 = PandasDataset({"y": [1, 2, 3]}) toy_dataset_2.to_csv(os.path.join(base_dir, "f2.csv"), index=False) return base_dir @pytest.fixture(scope="function") def filesystem_csv_4(tmp_path): base_dir = tmp_path / "filesystem_csv_4" base_dir.mkdir() base_dir = str(base_dir) # Put a file in the directory toy_dataset = PandasDataset( { "x": [1, 2, 3], "y": [1, 2, 3], } ) toy_dataset.to_csv(os.path.join(base_dir, "f1.csv"), index=None) return base_dir @pytest.fixture def titanic_profiled_evrs_1(): with open( file_relative_path( __file__, "./render/fixtures/BasicDatasetProfiler_evrs.json" ), ) as infile: return expectationSuiteValidationResultSchema.loads(infile.read()) @pytest.fixture def titanic_profiled_name_column_evrs(): # This is a janky way to fetch expectations matching a specific name from an EVR suite. # TODO: It will no longer be necessary once we implement ValidationResultSuite._group_evrs_by_column from great_expectations.render.renderer.renderer import Renderer with open( file_relative_path( __file__, "./render/fixtures/BasicDatasetProfiler_evrs.json" ), ) as infile: titanic_profiled_evrs_1 = expectationSuiteValidationResultSchema.load( json.load(infile) ) evrs_by_column = Renderer()._group_evrs_by_column(titanic_profiled_evrs_1) name_column_evrs = evrs_by_column["Name"] return name_column_evrs @pytest.fixture def titanic_profiled_expectations_1(): with open( file_relative_path( __file__, "./render/fixtures/BasicDatasetProfiler_expectations.json" ), ) as infile: return expectationSuiteSchema.load(json.load(infile)) @pytest.fixture def titanic_profiled_name_column_expectations(): from great_expectations.render.renderer.renderer import Renderer with open( file_relative_path( __file__, "./render/fixtures/BasicDatasetProfiler_expectations.json" ), ) as infile: titanic_profiled_expectations = expectationSuiteSchema.load(json.load(infile)) columns, ordered_columns = Renderer()._group_and_order_expectations_by_column( titanic_profiled_expectations ) name_column_expectations = columns["Name"] return name_column_expectations @pytest.fixture def titanic_validation_results(): with open( file_relative_path(__file__, "./test_sets/expected_cli_results_default.json"), ) as infile: return expectationSuiteValidationResultSchema.load(json.load(infile)) # various types of evr @pytest.fixture def evr_failed(): return ExpectationValidationResult( success=False, result={ "element_count": 1313, "missing_count": 0, "missing_percent": 0.0, "unexpected_count": 3, "unexpected_percent": 0.2284843869002285, "unexpected_percent_nonmissing": 0.2284843869002285, "partial_unexpected_list": [ "Daly, Mr Peter Denis ", "Barber, Ms ", "Geiger, Miss Emily ", ], "partial_unexpected_index_list": [77, 289, 303], "partial_unexpected_counts": [ {"value": "Barber, Ms ", "count": 1}, 
{"value": "Daly, Mr Peter Denis ", "count": 1}, {"value": "Geiger, Miss Emily ", "count": 1}, ], }, exception_info={ "raised_exception": False, "exception_message": None, "exception_traceback": None, }, expectation_config=ExpectationConfiguration( expectation_type="expect_column_values_to_not_match_regex", kwargs={ "column": "Name", "regex": "^\\s+|\\s+$", "result_format": "SUMMARY", }, ), ) @pytest.fixture def evr_failed_with_exception(): return ExpectationValidationResult( success=False, exception_info={ "raised_exception": True, "exception_message": "Invalid partition object.", "exception_traceback": 'Traceback (most recent call last):\n File "/great_expectations/great_expectations/data_asset/data_asset.py", line 216, in wrapper\n return_obj = func(self, **evaluation_args)\n File "/great_expectations/great_expectations/dataset/dataset.py", line 106, in inner_wrapper\n evaluation_result = func(self, column, *args, **kwargs)\n File "/great_expectations/great_expectations/dataset/dataset.py", line 3381, in expect_column_kl_divergence_to_be_less_than\n raise ValueError("Invalid partition object.")\nValueError: Invalid partition object.\n', }, expectation_config=ExpectationConfiguration( expectation_type="expect_column_kl_divergence_to_be_less_than", kwargs={ "column": "live", "partition_object": None, "threshold": None, "result_format": "SUMMARY", }, meta={"BasicDatasetProfiler": {"confidence": "very low"}}, ), ) @pytest.fixture def evr_success(): return ExpectationValidationResult( success=True, result={"observed_value": 1313}, exception_info={ "raised_exception": False, "exception_message": None, "exception_traceback": None, }, expectation_config=ExpectationConfiguration( expectation_type="expect_table_row_count_to_be_between", kwargs={"min_value": 0, "max_value": None, "result_format": "SUMMARY"}, ), ) @pytest.fixture def sqlite_view_engine(test_backends): # Create a small in-memory engine with two views, one of which is temporary if "sqlite" in test_backends: try: import sqlalchemy as sa sqlite_engine = sa.create_engine("sqlite://") df = pd.DataFrame({"a": [1, 2, 3, 4, 5]}) df.to_sql(name="test_table", con=sqlite_engine, index=True) sqlite_engine.execute( "CREATE TEMP VIEW test_temp_view AS SELECT * FROM test_table where a < 4;" ) sqlite_engine.execute( "CREATE VIEW test_view AS SELECT * FROM test_table where a > 4;" ) return sqlite_engine except ImportError: sa = None else: pytest.skip("SqlAlchemy tests disabled; not testing views") @pytest.fixture def expectation_suite_identifier(): return ExpectationSuiteIdentifier("my.expectation.suite.name") @pytest.fixture def basic_sqlalchemy_datasource(sqlitedb_engine): return SqlAlchemyDatasource("basic_sqlalchemy_datasource", engine=sqlitedb_engine) @pytest.fixture def test_cases_for_sql_data_connector_sqlite_execution_engine(sa): if sa is None: raise ValueError("SQL Database tests require sqlalchemy to be installed.") db_file_path: str = file_relative_path( __file__, os.path.join("test_sets", "test_cases_for_sql_data_connector.db"), ) engine: sa.engine.Engine = sa.create_engine(f"sqlite:////{db_file_path}") conn: sa.engine.Connection = engine.connect() # Build a SqlAlchemyDataset using that database return SqlAlchemyExecutionEngine( name="test_sql_execution_engine", engine=conn, ) @pytest.fixture def test_folder_connection_path_csv(tmp_path_factory): df1 = pd.DataFrame({"col_1": [1, 2, 3, 4, 5], "col_2": ["a", "b", "c", "d", "e"]}) path = str(tmp_path_factory.mktemp("test_folder_connection_path_csv")) df1.to_csv(path_or_buf=os.path.join(path, 
"test.csv"), index=False) return str(path) @pytest.fixture def test_folder_connection_path_tsv(tmp_path_factory): df1 = pd.DataFrame({"col_1": [1, 2, 3, 4, 5], "col_2": ["a", "b", "c", "d", "e"]}) path = str(tmp_path_factory.mktemp("test_folder_connection_path_tsv")) df1.to_csv(path_or_buf=os.path.join(path, "test.tsv"), sep="\t", index=False) return str(path) @pytest.fixture def test_folder_connection_path_parquet(tmp_path_factory): df1 = pd.DataFrame({"col_1": [1, 2, 3, 4, 5], "col_2": ["a", "b", "c", "d", "e"]}) path = str(tmp_path_factory.mktemp("test_folder_connection_path_parquet")) df1.to_parquet(path=os.path.join(path, "test.parquet")) return str(path) @pytest.fixture def test_db_connection_string(tmp_path_factory, test_backends): if "sqlite" not in test_backends: pytest.skip("skipping fixture because sqlite not selected") df1 = pd.DataFrame({"col_1": [1, 2, 3, 4, 5], "col_2": ["a", "b", "c", "d", "e"]}) df2 = pd.DataFrame({"col_1": [0, 1, 2, 3, 4], "col_2": ["b", "c", "d", "e", "f"]}) try: import sqlalchemy as sa basepath = str(tmp_path_factory.mktemp("db_context")) path = os.path.join(basepath, "test.db") engine = sa.create_engine("sqlite:///" + str(path)) df1.to_sql(name="table_1", con=engine, index=True) df2.to_sql(name="table_2", con=engine, index=True, schema="main") # Return a connection string to this newly-created db return "sqlite:///" + str(path) except ImportError: raise ValueError("SQL Database tests require sqlalchemy to be installed.") @pytest.fixture def test_df(tmp_path_factory): def generate_ascending_list_of_datetimes( k, start_date=datetime.date(2020, 1, 1), end_date=datetime.date(2020, 12, 31) ): start_time = datetime.datetime( start_date.year, start_date.month, start_date.day ) days_between_dates = (end_date - start_date).total_seconds() datetime_list = [ start_time + datetime.timedelta(seconds=random.randrange(days_between_dates)) for i in range(k) ] datetime_list.sort() return datetime_list k = 120 random.seed(1) timestamp_list = generate_ascending_list_of_datetimes( k, end_date=datetime.date(2020, 1, 31) ) date_list = [datetime.date(ts.year, ts.month, ts.day) for ts in timestamp_list] batch_ids = [random.randint(0, 10) for i in range(k)] batch_ids.sort() session_ids = [random.randint(2, 60) for i in range(k)] session_ids.sort() session_ids = [i - random.randint(0, 2) for i in session_ids] events_df = pd.DataFrame( { "id": range(k), "batch_id": batch_ids, "date": date_list, "y": [d.year for d in date_list], "m": [d.month for d in date_list], "d": [d.day for d in date_list], "timestamp": timestamp_list, "session_ids": session_ids, "event_type": [ random.choice(["start", "stop", "continue"]) for i in range(k) ], "favorite_color": [ "#" + "".join([random.choice(list("0123456789ABCDEF")) for j in range(6)]) for i in range(k) ], } ) return events_df @pytest.fixture def test_connectable_postgresql_db(sa, test_backends, test_df): """Populates a postgres DB with a `test_df` table in the `connection_test` schema to test DataConnectors against""" if "postgresql" not in test_backends: pytest.skip("skipping fixture because postgresql not selected") import sqlalchemy as sa url = sa.engine.url.URL( drivername="postgresql", username="postgres", password="", host=os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"), port="5432", database="test_ci", ) engine = sa.create_engine(url) schema_check_results = engine.execute( "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'connection_test';" ).fetchall() if len(schema_check_results) == 0: 
engine.execute("CREATE SCHEMA connection_test;") table_check_results = engine.execute( """ SELECT EXISTS ( SELECT FROM information_schema.tables WHERE table_schema = 'connection_test' AND table_name = 'test_df' ); """ ).fetchall() if table_check_results != [(True,)]: test_df.to_sql(name="test_df", con=engine, index=True, schema="connection_test") # Return a connection string to this newly-created db return engine @pytest.fixture def data_context_with_runtime_sql_datasource_for_testing_get_batch( sa, empty_data_context ): context: DataContext = empty_data_context db_file_path: str = file_relative_path( __file__, os.path.join("test_sets", "test_cases_for_sql_data_connector.db"), ) datasource_config: str = f""" class_name: Datasource execution_engine: class_name: SqlAlchemyExecutionEngine connection_string: sqlite:///{db_file_path} data_connectors: my_runtime_data_connector: module_name: great_expectations.datasource.data_connector class_name: RuntimeDataConnector batch_identifiers: - pipeline_stage_name - airflow_run_id """ context.test_yaml_config( name="my_runtime_sql_datasource", yaml_config=datasource_config ) # noinspection PyProtectedMember context._save_project_config() return context @pytest.fixture def data_context_with_simple_sql_datasource_for_testing_get_batch( sa, empty_data_context ): context: DataContext = empty_data_context db_file_path: str = file_relative_path( __file__, os.path.join("test_sets", "test_cases_for_sql_data_connector.db"), ) datasource_config: str = f""" class_name: SimpleSqlalchemyDatasource connection_string: sqlite:///{db_file_path} introspection: whole_table: {{}} daily: splitter_method: _split_on_converted_datetime splitter_kwargs: column_name: date date_format_string: "%Y-%m-%d" weekly: splitter_method: _split_on_converted_datetime splitter_kwargs: column_name: date date_format_string: "%Y-%W" by_id_dozens: splitter_method: _split_on_divided_integer splitter_kwargs: column_name: id divisor: 12 """ try: context.add_datasource("my_sqlite_db", **yaml.load(datasource_config)) except AttributeError: pytest.skip("SQL Database tests require sqlalchemy to be installed.") return context @pytest.fixture def data_context_with_pandas_datasource_for_testing_get_batch( empty_data_context_v3, tmp_path_factory ): context = empty_data_context_v3 base_directory: str = str( tmp_path_factory.mktemp( "data_context_with_pandas_datasource_for_testing_get_batch" ) ) sample_file_names: List[str] = [ "test_dir_charlie/A/A-1.csv", "test_dir_charlie/A/A-2.csv", "test_dir_charlie/A/A-3.csv", "test_dir_charlie/B/B-1.csv", "test_dir_charlie/B/B-2.csv", "test_dir_charlie/B/B-3.csv", "test_dir_charlie/C/C-1.csv", "test_dir_charlie/C/C-2.csv", "test_dir_charlie/C/C-3.csv", "test_dir_charlie/D/D-1.csv", "test_dir_charlie/D/D-2.csv", "test_dir_charlie/D/D-3.csv", ] create_files_in_directory( directory=base_directory, file_name_list=sample_file_names ) config = yaml.load( f""" class_name: Datasource execution_engine: class_name: PandasExecutionEngine data_connectors: my_filesystem_data_connector: class_name: InferredAssetFilesystemDataConnector base_directory: {base_directory}/test_dir_charlie glob_directive: "*/*.csv" default_regex: pattern: (.+)/(.+)-(\\d+)\\.csv group_names: - subdirectory - data_asset_name - number """, ) context.add_datasource("my_pandas_datasource", **config) return context @pytest.fixture def basic_datasource(tmp_path_factory): base_directory: str = str( tmp_path_factory.mktemp("basic_datasource_runtime_data_connector") ) basic_datasource: Datasource = 
instantiate_class_from_config( config=yaml.load( f""" class_name: Datasource data_connectors: test_runtime_data_connector: module_name: great_expectations.datasource.data_connector class_name: RuntimeDataConnector batch_identifiers: - pipeline_stage_name - airflow_run_id - custom_key_0 execution_engine: class_name: PandasExecutionEngine """, ), runtime_environment={ "name": "my_datasource", }, config_defaults={ "module_name": "great_expectations.datasource", }, ) return basic_datasource @pytest.fixture(scope="function") def misc_directory(tmp_path): misc_dir = tmp_path / "random" misc_dir.mkdir() assert os.path.isabs(misc_dir) return misc_dir @pytest.fixture() def yellow_trip_pandas_data_context( tmp_path_factory, monkeypatch, ): """ Provides a data context with a data_connector for a pandas datasource which can connect to three months of yellow trip taxi data in csv form. This data connector enables access to all three months through a BatchRequest where the "year" in batch_filter_parameters is set to "2019", or to individual months if the "month" in batch_filter_parameters is set to "01", "02", or "03" """ # Reenable GE_USAGE_STATS monkeypatch.delenv("GE_USAGE_STATS") project_path: str = str(tmp_path_factory.mktemp("taxi_data_context")) context_path: str = os.path.join(project_path, "great_expectations") os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True) data_path: str = os.path.join(context_path, "..", "test_data") os.makedirs(os.path.join(data_path), exist_ok=True) shutil.copy( file_relative_path( __file__, os.path.join( "integration", "fixtures", "yellow_trip_data_pandas_fixture", "great_expectations", "great_expectations.yml", ), ), str(os.path.join(context_path, "great_expectations.yml")), ) shutil.copy( file_relative_path( __file__, os.path.join( "test_sets", "taxi_yellow_trip_data_samples", "yellow_trip_data_sample_2019-01.csv", ), ), str( os.path.join( context_path, "..", "test_data", "yellow_trip_data_sample_2019-01.csv" ) ), ) shutil.copy( file_relative_path( __file__, os.path.join( "test_sets", "taxi_yellow_trip_data_samples", "yellow_trip_data_sample_2019-02.csv", ), ), str( os.path.join( context_path, "..", "test_data", "yellow_trip_data_sample_2019-02.csv" ) ), ) shutil.copy( file_relative_path( __file__, os.path.join( "test_sets", "taxi_yellow_trip_data_samples", "yellow_trip_data_sample_2019-03.csv", ), ), str( os.path.join( context_path, "..", "test_data", "yellow_trip_data_sample_2019-03.csv" ) ), ) context: DataContext = DataContext(context_root_dir=context_path) assert context.root_directory == context_path return context
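# A minimal sketch of how a test module elsewhere in the suite might consume
# two of the fixtures defined above. The test names are assumptions; the
# fixture names and the values they return come from the fixtures themselves.
import os

def test_evr_success_fixture(evr_success):
    # evr_success is constructed above with success=True and observed_value=1313.
    assert evr_success.success is True
    assert evr_success.result["observed_value"] == 1313

def test_folder_connection_path_csv_fixture(test_folder_connection_path_csv):
    # The fixture writes a single "test.csv" into a fresh temporary directory.
    assert os.path.isfile(os.path.join(test_folder_connection_path_csv, "test.csv"))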
ROOT_SCOPE_METHOD( MD( 'FlatmapObjective', 'FLATMAP_FACTORY_single()' ) ) REGISTER_FLAG( 'map_evaluation', 'evaluation uses something similar to a buffered map' ) FUNCTION( 'void nom_phrase_flatten( ANY context, ANY phrase, ANY scope )', """ $ENABLED( map_evaluation, JUMP__return_ANY( context, context, nom_mill_new( phrase, $CA(FLATMAP_OBJECTIVE_new( scope )) ) ) ; ) $DISABLED( map_evaluation, nom_list_eager_copy( context, $CA(ELEMENT_MAP_new( phrase, $CA(FLATMAP_OBJECTIVE_new( scope )) )) ) ; ) """ ) OBJECT( 'FLATMAP_FACTORY', methods = [ MS( ARG( CW( '@' ), CG( 'ANY', 'value' ) ), """ JUMP__return_ANY( CONTEXT, CONTEXT, $CA(FLATMAP_OBJECTIVE_new( PARAM_value )) ) ; """ ), ] ) OBJECTIVE( 'FLATMAP_OBJECTIVE', attributes = [ A( 'ANY', 'scope' ), ], objective = """ $OPT( $IFLET( o, WORD, THAT ) ; JUMP__return_ANY( CONTEXT, CONTEXT, THAT ) ; ) $OPT( $IFLET( expression, EXPRESSION, THAT ) ; nom_expression_produce_for_evaluation( CONTEXT, expression->phrase, ACTION->scope ) ; ) $OPT( $IFLET( o, INTEGER, THAT ) ; JUMP__return_ANY( CONTEXT, CONTEXT, THAT ) ; ) $OPT( $IFLET( o, STRING, THAT ) ; JUMP__return_ANY( CONTEXT, CONTEXT, THAT ) ; ) $OPT( $IFLET_SUBSTRUCT( o, LIST, THAT ) ; JUMP__return_ANY( CONTEXT, CONTEXT, THAT ) ; ) JUMP__produceForEvaluation_ANY( CONTEXT, THAT, ACTION->scope ) ; """ )
# -*- coding: utf-8 -*-
from django.contrib import admin


class TipoImpostoAdmin(admin.ModelAdmin):
    list_display = ('codigo', 'nome')
    list_display_links = ('codigo', 'nome')
    search_fields = ['nome', 'codigo']
    fieldsets = (
        ('', {
            'fields': (('nome',),)
        }),
    )
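# A minimal sketch of wiring the ModelAdmin above to the admin site. The
# import path of the TipoImposto model is an assumption (it is not shown in
# this file); only admin.site.register itself is standard Django API.
from .models import TipoImposto  # assumed location of the model

admin.site.register(TipoImposto, TipoImpostoAdmin)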
""" Copyright (c) 2022 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. You can use this software according to the terms and conditions of the Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. """ """ Case Type : security-auditing Case Name : 开启数据库对象TABLESPACE的CREATE、DROP、ALTER操作审计功能, audit_system_object=511 Description : 1.设置gs_guc reload -N all -I all -c "audit_system_object=511" 2.登录数据库,创建TABLESPACE对象,CREATE TABLESPACE ds_location1 RELATIVE LOCATION 'tablespace/tablespace_1'; 3.修改TABLESPACE对象,ALTER TABLESPACE ds_location1 RENAME TO new_ds_location1; 4.删除TABLESPACE对象,DROP TABLESPACE new_ds_location1; 5.登录数据库,查看审计日志SELECT * FROM pg_query_audit('$start_time', '$end_time');时间设在最接近登录数据库的时间 Expect : 1.设置成功 2.创建成功 3.修改成功 4.删除成功 5.查询到创建、删除TABLESPACE的信息 History : """ import unittest from yat.test import Node from yat.test import macro from testcase.utils.Common import Common from testcase.utils.CommonSH import * from testcase.utils.Logger import Logger class Auditing(unittest.TestCase): def setUp(self): self.logger = Logger() self.logger.info( '------Opengauss_Function_Security_Auditing_Case0049 start-----') self.sh_primy = CommonSH('PrimaryDbUser') self.userNode = Node('PrimaryDbUser') self.common = Common() self.DB_ENV_PATH = macro.DB_ENV_PATH def test_security(self): excute_cmd = f'source {self.DB_ENV_PATH};' \ f'gs_guc reload -N all -I all -c ' \ f'"audit_system_object=511"' msg2 = self.userNode.sh(excute_cmd).result() self.logger.info(msg2) start_time_msg = self.sh_primy.execut_db_sql('SELECT sysdate;') start_time = start_time_msg.splitlines()[2].strip() time.sleep(5) sql_cmd2 = 'CREATE TABLESPACE ds_location1 RELATIVE LOCATION ' \ '\'tablespace/tablespace_1\';' \ 'ALTER TABLESPACE ds_location1 RENAME TO ' \ 'new_ds_location1;DROP TABLESPACE new_ds_location1;' msg2 = self.sh_primy.execut_db_sql(sql_cmd2) self.logger.info(msg2) time.sleep(5) end_time_msg = self.sh_primy.execut_db_sql('SELECT sysdate;') end_time = end_time_msg.splitlines()[2].strip() sql_cmd3 = f'select * from pg_query_audit(\'{start_time}\',\ \'{end_time}\');' excute_cmd3 = f'source {self.DB_ENV_PATH};' \ f'gsql -d {self.userNode.db_name} -p ' \ f'{self.userNode.db_port} -c "{sql_cmd3}"' msg3 = self.userNode.sh(excute_cmd3).result() self.logger.info(msg3) self.assertTrue(msg3.find( 'CREATE TABLESPACE ds_location1 RELATIVE LOCATION ' '\'tablespace/tablespace_1\'') > -1) self.assertTrue(msg3.find( 'ALTER TABLESPACE ds_location1 RENAME TO new_ds_location1') > -1) self.assertTrue(msg3.find('DROP TABLESPACE new_ds_location1') > -1) def tearDown(self): excute_cmd1 = f'source {self.DB_ENV_PATH};' \ f'gs_guc reload -N all -I all -c ' \ f'"audit_system_object=12295"' msg1 = self.userNode.sh(excute_cmd1).result() self.logger.info(msg1) self.logger.info( '----Opengauss_Function_Security_Auditing_Case0049 end-----')
# @copyright@ # Copyright (c) 2006 - 2018 Teradata # All rights reserved. Stacki(r) v5.x stacki.com # https://github.com/Teradata/stacki/blob/master/LICENSE.txt # @copyright@ import os import stack.commands class Plugin(stack.commands.Plugin): """ Generate a UEFI specific configuration file """ def provides(self): return 'uefi' def run(self, ha): for host in ha: if 'interfaces' not in ha[host]: continue for interface in ha[host]['interfaces']: filename = os.path.join(os.path.sep, 'tftpboot', 'pxelinux', 'uefi', 'grub.cfg-%s' % interface['ip']) self.owner.addOutput(host, """ <stack:file stack:name="%s" stack:owner="root:apache" stack:perms="0664" stack:rcs="off"><![CDATA[""" % filename) self.owner.runImplementation("%s_uefi" % ha[host]['os'], (ha[host], interface)) self.owner.addOutput(host, ']]>\n</stack:file>')
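# A minimal sketch (with a hypothetical IP value) of the per-interface path the
# plugin above builds: passing os.path.sep as the first component to
# os.path.join yields an absolute path under /tftpboot.
import os

ip = '10.1.1.254'  # hypothetical interface IP
filename = os.path.join(os.path.sep, 'tftpboot', 'pxelinux', 'uefi', 'grub.cfg-%s' % ip)
assert filename == '/tftpboot/pxelinux/uefi/grub.cfg-10.1.1.254'  # on POSIX systems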
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys

from ..engine.topology import ZooKerasLayer

if sys.version >= '3':
    long = int
    unicode = str


class SimpleRNN(ZooKerasLayer):
    """
    A fully-connected recurrent neural network cell. The output is fed back to the input.
    The input of this layer should be 3D, i.e. (batch, time steps, input dim).

    When you use this layer as the first layer of a model, you need to provide the argument
    input_shape (a shape tuple, does not include the batch dimension).

    # Arguments
    output_dim: Hidden unit size. Dimension of internal projections and final output.
    activation: String representation of the activation function to use
                (such as 'relu' or 'sigmoid'). Default is 'tanh'.
    return_sequences: Whether to return the full sequence or only return the last output
                      in the output sequence. Default is False.
    go_backwards: Whether the input sequence will be processed backwards. Default is False.
    W_regularizer: An instance of [[Regularizer]] (e.g. L1 or L2 regularization),
                   applied to the input weight matrices. Default is None.
    U_regularizer: An instance of [[Regularizer]], applied to the recurrent weight matrices.
                   Default is None.
    b_regularizer: An instance of [[Regularizer]], applied to the bias. Default is None.
    input_shape: A shape tuple, not including batch.
    name: String to set the name of the layer. If not specified, its name will by default
          be a generated string.

    >>> simplernn = SimpleRNN(16, input_shape=(3, 32))
    creating: createZooKerasSimpleRNN
    """
    def __init__(self, output_dim, activation="tanh", return_sequences=False,
                 go_backwards=False, W_regularizer=None, U_regularizer=None,
                 b_regularizer=None, input_shape=None, **kwargs):
        super(SimpleRNN, self).__init__(None,
                                        output_dim,
                                        activation,
                                        return_sequences,
                                        go_backwards,
                                        W_regularizer,
                                        U_regularizer,
                                        b_regularizer,
                                        list(input_shape) if input_shape else None,
                                        **kwargs)


class GRU(ZooKerasLayer):
    """
    Gated Recurrent Unit architecture.
    The input of this layer should be 3D, i.e. (batch, time steps, input dim).

    When you use this layer as the first layer of a model, you need to provide the argument
    input_shape (a shape tuple, does not include the batch dimension).

    # Arguments
    output_dim: Hidden unit size. Dimension of internal projections and final output.
    activation: String representation of the activation function to use
                (such as 'relu' or 'sigmoid'). Default is 'tanh'.
    inner_activation: String representation of the activation function for inner cells.
                      Default is 'hard_sigmoid'.
    return_sequences: Whether to return the full sequence or only return the last output
                      in the output sequence. Default is False.
    go_backwards: Whether the input sequence will be processed backwards. Default is False.
    W_regularizer: An instance of [[Regularizer]] (e.g. L1 or L2 regularization),
                   applied to the input weight matrices. Default is None.
    U_regularizer: An instance of [[Regularizer]], applied to the recurrent weight matrices.
                   Default is None.
    b_regularizer: An instance of [[Regularizer]], applied to the bias. Default is None.
    input_shape: A shape tuple, not including batch.
    name: String to set the name of the layer. If not specified, its name will by default
          be a generated string.

    >>> gru = GRU(24, input_shape=(32, 32))
    creating: createZooKerasGRU
    """
    def __init__(self, output_dim, activation="tanh", inner_activation="hard_sigmoid",
                 return_sequences=False, go_backwards=False, W_regularizer=None,
                 U_regularizer=None, b_regularizer=None, input_shape=None, **kwargs):
        super(GRU, self).__init__(None,
                                  output_dim,
                                  activation,
                                  inner_activation,
                                  return_sequences,
                                  go_backwards,
                                  W_regularizer,
                                  U_regularizer,
                                  b_regularizer,
                                  list(input_shape) if input_shape else None,
                                  **kwargs)


class LSTM(ZooKerasLayer):
    """
    Long Short Term Memory unit architecture.
    The input of this layer should be 3D, i.e. (batch, time steps, input dim).

    When you use this layer as the first layer of a model, you need to provide the argument
    input_shape (a shape tuple, does not include the batch dimension).

    # Arguments
    output_dim: Hidden unit size. Dimension of internal projections and final output.
    activation: String representation of the activation function to use
                (such as 'relu' or 'sigmoid'). Default is 'tanh'.
    inner_activation: String representation of the activation function for inner cells.
                      Default is 'hard_sigmoid'.
    return_sequences: Whether to return the full sequence or only return the last output
                      in the output sequence. Default is False.
    go_backwards: Whether the input sequence will be processed backwards. Default is False.
    W_regularizer: An instance of [[Regularizer]] (e.g. L1 or L2 regularization),
                   applied to the input weight matrices. Default is None.
    U_regularizer: An instance of [[Regularizer]], applied to the recurrent weight matrices.
                   Default is None.
    b_regularizer: An instance of [[Regularizer]], applied to the bias. Default is None.
    input_shape: A shape tuple, not including batch.
    name: String to set the name of the layer. If not specified, its name will by default
          be a generated string.

    >>> lstm = LSTM(32, input_shape=(8, 16), name="lstm1")
    creating: createZooKerasLSTM
    """
    def __init__(self, output_dim, activation="tanh", inner_activation="hard_sigmoid",
                 return_sequences=False, go_backwards=False, W_regularizer=None,
                 U_regularizer=None, b_regularizer=None, input_shape=None, **kwargs):
        super(LSTM, self).__init__(None,
                                   output_dim,
                                   activation,
                                   inner_activation,
                                   return_sequences,
                                   go_backwards,
                                   W_regularizer,
                                   U_regularizer,
                                   b_regularizer,
                                   list(input_shape) if input_shape else None,
                                   **kwargs)
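# A minimal usage sketch mirroring the doctests above. Constructing a layer
# triggers creation of the backing Zoo Keras layer; output_dim comes first and
# input_shape is only needed on the first layer of a model.
if __name__ == "__main__":
    rnn = SimpleRNN(16, input_shape=(3, 32))             # creating: createZooKerasSimpleRNN
    gru = GRU(24, input_shape=(32, 32))                  # creating: createZooKerasGRU
    lstm = LSTM(32, input_shape=(8, 16), name="lstm1")   # creating: createZooKerasLSTM
    seq_lstm = LSTM(32, return_sequences=True, input_shape=(8, 16))  # full sequence output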
# # Copyright (c) 2018-2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np import sys sys.path.append(".") from conftest import infer, get_model_metadata, model_metadata_response, \ ERROR_SHAPE # noqa class TestSingleModelInferenceS3(): def test_run_inference(self, input_data_downloader_v1_224, start_server_single_model_from_s3, create_channel_for_port_single_server): """ <b>Description</b> Submit request to gRPC interface serving a single resnet model <b>input data</b> - directory with the model in IR format - docker image with ie-serving-py service - input data in numpy format <b>fixtures used</b> - model downloader - input data downloader - service launching <b>Expected results</b> - response contains proper numpy shape """ # Connect to grpc service stub = create_channel_for_port_single_server imgs_v1_224 = np.array(input_data_downloader_v1_224) out_name = 'resnet_v1_50/predictions/Reshape_1' for x in range(0, 10): output = infer(imgs_v1_224, slice_number=x, input_tensor='input', grpc_stub=stub, model_spec_name='resnet', model_spec_version=None, output_tensors=[out_name]) print("output shape", output[out_name].shape) assert output[out_name].shape == (1, 1000), ERROR_SHAPE def test_get_model_metadata(self, start_server_single_model_from_s3, create_channel_for_port_single_server): stub = create_channel_for_port_single_server model_name = 'resnet' out_name = 'resnet_v1_50/predictions/Reshape_1' expected_input_metadata = {'input': {'dtype': 1, 'shape': [1, 3, 224, 224]}} expected_output_metadata = {out_name: {'dtype': 1, 'shape': [1, 1000]}} request = get_model_metadata(model_name='resnet') response = stub.GetModelMetadata(request, 10) input_metadata, output_metadata = model_metadata_response( response=response) print(output_metadata) assert model_name == response.model_spec.name assert expected_input_metadata == input_metadata assert expected_output_metadata == output_metadata
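# A minimal sketch (assuming the tensorflow-serving-api protos used by these
# tests are installed) of the request that the get_model_metadata() helper is
# presumed to build; 'signature_def' is the only metadata field TF Serving
# currently supports.
from tensorflow_serving.apis import get_model_metadata_pb2

def build_get_model_metadata_request(model_name='resnet'):
    request = get_model_metadata_pb2.GetModelMetadataRequest()
    request.model_spec.name = model_name
    request.metadata_field.append('signature_def')
    return request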
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import numpy as np import discretize # from SimPEG import Maps # from SimPEG.EM import FDEM from scipy.constants import mu_0, epsilon_0 from geoana.em import fdem from discretize.utils import ndgrid, asArray_N_x_Dim def H_from_MagneticDipoleWholeSpace( XYZ, srcLoc, sig, f, current=1., loopArea=1., orientation='X', kappa=0, epsr=1., t=0. ): assert current == 1 assert loopArea == 1 assert np.all(srcLoc == np.r_[0., 0., 0.]) assert kappa == 0 mu = mu_0 * (1+kappa) epsilon = epsilon_0 * epsr m = current * loopArea assert m == 1 assert mu == mu_0 assert epsilon == epsilon_0 omega = lambda f: 2*np.pi*f XYZ = discretize.utils.asArray_N_x_Dim(XYZ, 3) # Check dx = XYZ[:, 0]-srcLoc[0] dy = XYZ[:, 1]-srcLoc[1] dz = XYZ[:, 2]-srcLoc[2] r = np.sqrt(dx**2. + dy**2. + dz**2.) # k = np.sqrt( -1j*2.*np.pi*f*mu*sig ) k = np.sqrt(omega(f)**2. * mu * epsilon - 1j*omega(f)*mu*sig) front = m / (4. * np.pi * r**3) * np.exp(-1j*k*r) mid = - k**2 * r**2 + 3*1j*k*r + 3 if orientation.upper() == 'X': Hx = front*((dx**2 / r**2)*mid + (k**2 * r**2 - 1j*k*r - 1.)) Hy = front*(dx*dy / r**2)*mid Hz = front*(dx*dz / r**2)*mid elif orientation.upper() == 'Y': # x--> y, y--> z, z-->x Hy = front * ((dy**2 / r**2)*mid + (k**2 * r**2 - 1j*k*r - 1.)) Hz = front * (dy*dz / r**2)*mid Hx = front * (dy*dx / r**2)*mid elif orientation.upper() == 'Z': # x --> z, y --> x, z --> y Hz = front*((dz**2 / r**2)*mid + (k**2 * r**2 - 1j*k*r - 1.)) Hx = front*(dz*dx / r**2)*mid Hy = front*(dz*dy / r**2)*mid return Hx, Hy, Hz def B_from_MagneticDipoleWholeSpace( XYZ, srcLoc, sig, f, current=1., loopArea=1., orientation='X', kappa=0, epsr=1., t=0. ): mu = mu_0 * (1+kappa) Hx, Hy, Hz = H_from_MagneticDipoleWholeSpace( XYZ, srcLoc, sig, f, current=current, loopArea=loopArea, orientation=orientation, kappa=kappa, epsr=epsr ) Bx = mu * Hx By = mu * Hy Bz = mu * Hz return Bx, By, Bz def E_from_MagneticDipoleWholeSpace( XYZ, srcLoc, sig, f, current=1., loopArea=1., orientation='X', kappa=0., epsr=1., t=0. ): mu = mu_0 * (1+kappa) epsilon = epsilon_0 * epsr m = current * loopArea omega = lambda f: 2 * np.pi * f XYZ = discretize.utils.asArray_N_x_Dim(XYZ, 3) dx = XYZ[:, 0]-srcLoc[0] dy = XYZ[:, 1]-srcLoc[1] dz = XYZ[:, 2]-srcLoc[2] r = np.sqrt( dx**2. + dy**2. + dz**2.) # k = np.sqrt( -1j*2.*np.pi*f*mu*sig ) k = np.sqrt( omega(f)**2. *mu*epsilon -1j*omega(f)*mu*sig ) front = ( ((1j * omega(f) * mu * m) / (4.* np.pi * r**2)) * (1j * k * r + 1) * np.exp(-1j*k*r) ) if orientation.upper() == 'X': Ey = front * (dz / r) Ez = front * (-dy / r) Ex = np.zeros_like(Ey) elif orientation.upper() == 'Y': Ex = front * (-dz / r) Ez = front * (dx / r) Ey = np.zeros_like(Ex) elif orientation.upper() == 'Z': Ex = front * (dy / r) Ey = front * (-dx / r) Ez = np.zeros_like(Ex) return Ex, Ey, Ez class TestFDEMdipole(unittest.TestCase): def test_defaults(self): TOL = 1e-15 mdws = fdem.MagneticDipoleWholeSpace() assert(mdws.sigma == 1.) assert(mdws.mu == mu_0) assert(mdws.epsilon == epsilon_0) assert(np.all(mdws.orientation == np.r_[1., 0., 0.])) assert(mdws.moment == 1.) assert(np.all(mdws.location == np.r_[0., 0., 0.])) assert(mdws.frequency == 1.) assert(mdws.omega == 2.*np.pi*1.) assert(mdws.quasistatic is False) assert np.linalg.norm( mdws.wavenumber - np.sqrt( mu_0 * epsilon_0 * (2*np.pi)**2 - 1j * mu_0 * 1. 
* 2*np.pi ) ) <= TOL assert np.linalg.norm( mdws.wavenumber**2 - ( mu_0 * epsilon_0 * (2*np.pi)**2 - 1j * mu_0 * 1. * 2*np.pi ) ) <= TOL def compare_fields(name, field, ftest): def check_component(name, f, ftest): geoana_norm = np.linalg.norm(f) test_norm = np.linalg.norm(ftest) diff = np.linalg.norm(f-ftest) passed = np.allclose(f, ftest) print( "Testing {} ... geoana: {:1.4e}, compare: {:1.4e}, " "diff: {:1.4e}, passed?: {}".format( name, geoana_norm, test_norm, diff, passed ) ) return passed passed = [] for i, orientation in enumerate(['x', 'y', 'z']): for component in ['real', 'imag']: passed.append(check_component( orientation + '_' + component, getattr(field[:, i], component), getattr(ftest[:, i], component) )) return all(passed) def magnetic_dipole_b(self, orientation): sigma = 1 frequency = 1. mdws = fdem.MagneticDipoleWholeSpace( orientation=orientation, sigma=sigma, frequency=frequency ) x = np.linspace(-20., 20., 50) y = np.linspace(-30., 30., 50) z = np.linspace(-40., 40., 50) xyz = discretize.utils.ndgrid([x, y, z]) # srcLoc, obsLoc, component, orientation='Z', moment=1., mu=mu_0 # btest = [MagneticDipoleFields( # mdws.location, xyz, rx_orientation, # orientation=orientation.upper() # ) for rx_orientation in ["x", "y", "z"]] bxtest, bytest, bztest = B_from_MagneticDipoleWholeSpace( xyz, mdws.location, mdws.sigma, mdws.frequency, orientation=orientation ) b = mdws.magnetic_flux_density(xyz) print( "\n\nTesting Magnetic Dipole B: {} orientation\n".format(orientation) ) passed = self.compare_fields(b, np.vstack([bxtest, bytest, bztest]).T) self.assertTrue(passed) def magnetic_dipole_e(self, orientation): sigma = 1e-2 frequency = 1 mdws = fdem.MagneticDipoleWholeSpace( orientation=orientation, sigma=sigma, frequency=frequency ) x = np.linspace(-20., 20., 50) y = np.linspace(-30., 30., 50) z = np.linspace(-40., 40., 50) xyz = discretize.utils.ndgrid([x, y, z]) extest, eytest, eztest = E_from_MagneticDipoleWholeSpace( xyz, mdws.location, mdws.sigma, mdws.frequency, orientation=orientation ) e = mdws.electric_field(xyz) print( "\n\nTesting Magnetic Dipole E: {} orientation\n".format(orientation) ) passed = self.compare_fields(e, np.vstack([extest, eytest, eztest]).T) self.assertTrue(passed) def test_magnetic_dipole_x_b(self): self.magnetic_dipole_b("x") def test_magnetic_dipole_y_b(self): self.magnetic_dipole_b("y") def test_magnetic_dipole_z_b(self): self.magnetic_dipole_b("z") def test_magnetic_dipole_tilted_b(self): orientation = np.random.rand(3) orientation = orientation / np.linalg.norm(orientation) mdws = fdem.MagneticDipoleWholeSpace( orientation=orientation ) x = np.linspace(-20., 20., 50) y = np.linspace(-30., 30., 50) z = np.linspace(-40., 40., 50) xyz = discretize.utils.ndgrid([x, y, z]) bxtest0, bytest0, bztest0 = B_from_MagneticDipoleWholeSpace( xyz, mdws.location, mdws.sigma, mdws.frequency, orientation='X' ) bxtest1, bytest1, bztest1 = B_from_MagneticDipoleWholeSpace( xyz, mdws.location, mdws.sigma, mdws.frequency, orientation='Y' ) bxtest2, bytest2, bztest2 = B_from_MagneticDipoleWholeSpace( xyz, mdws.location, mdws.sigma, mdws.frequency, orientation='Z' ) bxtest = ( orientation[0]*bxtest0 + orientation[1]*bxtest1 + orientation[2]*bxtest2 ) bytest = ( orientation[0]*bytest0 + orientation[1]*bytest1 + orientation[2]*bytest2 ) bztest = ( orientation[0]*bztest0 + orientation[1]*bztest1 + orientation[2]*bztest2 ) b = mdws.magnetic_flux_density(xyz) print( "\n\nTesting Magnetic Dipole B: {} orientation\n".format("45 degree") ) self.compare_fields(b, np.vstack([bxtest, 
bytest, bztest]).T) def test_magnetic_dipole_x_e(self): self.magnetic_dipole_e("x") def test_magnetic_dipole_y_e(self): self.magnetic_dipole_e("y") def test_magnetic_dipole_z_e(self): self.magnetic_dipole_e("z") def test_magnetic_dipole_tilted_e(self): orientation = np.random.rand(3) orientation = orientation / np.linalg.norm(orientation) mdws = fdem.MagneticDipoleWholeSpace( orientation=orientation ) x = np.linspace(-20., 20., 50) y = np.linspace(-30., 30., 50) z = np.linspace(-40., 40., 50) xyz = discretize.utils.ndgrid([x, y, z]) extest0, eytest0, eztest0 = E_from_MagneticDipoleWholeSpace( xyz, mdws.location, mdws.sigma, mdws.frequency, orientation='X' ) extest1, eytest1, eztest1 = E_from_MagneticDipoleWholeSpace( xyz, mdws.location, mdws.sigma, mdws.frequency, orientation='Y' ) extest2, eytest2, eztest2 = E_from_MagneticDipoleWholeSpace( xyz, mdws.location, mdws.sigma, mdws.frequency, orientation='Z' ) extest = ( orientation[0]*extest0 + orientation[1]*extest1 + orientation[2]*extest2 ) eytest = ( orientation[0]*eytest0 + orientation[1]*eytest1 + orientation[2]*eytest2 ) eztest = ( orientation[0]*eztest0 + orientation[1]*eztest1 + orientation[2]*eztest2 ) e = mdws.electric_field(xyz) print( "\n\nTesting Magnetic Dipole E: {} orientation\n".format("45 degree") ) self.compare_fields(e, np.vstack([extest, eytest, eztest]).T) # class TestFDEMdipole_SimPEG(unittest.TestCase): # # tol = 1e-1 # error must be an order of magnitude smaller than results # # def getProjections(self, mesh): # ignore_inside_radius = 10*mesh.hx.min() # ignore_outside_radius = 40*mesh.hx.min() # # def ignoredGridLocs(grid): # return ( # ( # grid[:, 0]**2 + grid[:, 1]**2 + grid[:, 2]**2 < # ignore_inside_radius**2 # ) | ( # grid[:, 0]**2 + grid[:, 1]**2 + grid[:, 2]**2 > # ignore_outside_radius**2 # ) # ) # # # Faces # ignore_me_Fx = ignoredGridLocs(mesh.gridFx) # ignore_me_Fz = ignoredGridLocs(mesh.gridFz) # ignore_me = np.hstack([ignore_me_Fx, ignore_me_Fz]) # keep_me = np.array(~ignore_me, dtype=float) # Pf = discretize.utils.sdiag(keep_me) # # # Edges # ignore_me_Ey = ignoredGridLocs(mesh.gridEy) # keep_me_Ey = np.array(~ignore_me_Ey, dtype=float) # Pe = discretize.utils.sdiag(keep_me_Ey) # # return Pf, Pe # # def test_b_dipole_v_SimPEG(self): # # def compare_w_SimPEG(name, geoana, simpeg): # # norm_geoana = np.linalg.norm(geoana) # norm_simpeg = np.linalg.norm(simpeg) # diff = np.linalg.norm(geoana - simpeg) # passed = diff < self.tol * 0.5 * (norm_geoana + norm_simpeg) # print( # " {} ... geoana: {:1.4e}, SimPEG: {:1.4e}, diff: {:1.4e}, " # "passed?: {}".format( # name, norm_geoana, norm_simpeg, diff, passed # ) # ) # # return passed # # print("\n\nComparing Magnetic dipole with SimPEG") # # sigma_back = 1. # freqs = np.r_[10., 100.] # # csx, ncx, npadx = 1, 50, 50 # ncy = 1 # csz, ncz, npadz = 1, 50, 50 # # hx = discretize.utils.meshTensor( # [(csx, ncx), (csx, npadx, 1.3)] # ) # hy = 2*np.pi / ncy * np.ones(ncy) # hz = discretize.utils.meshTensor( # [(csz, npadz, -1.3), (csz, ncz), (csz, npadz, 1.3)] # ) # # mesh = discretize.CylMesh([hx, hy, hz], x0='00C') # # prob = FDEM.Problem3D_e(mesh, sigmaMap=Maps.IdentityMap(mesh)) # srcList = [FDEM.Src.MagDipole([], loc=np.r_[0., 0., 0.], freq=f) for f in freqs] # survey = FDEM.Survey(srcList) # # prob.pair(survey) # # fields = prob.fields(sigma_back*np.ones(mesh.nC)) # # moment = 1. 
# mdws = fdem.MagneticDipoleWholeSpace( # sigma=sigma_back, moment=moment, orientation="z" # ) # # Pf, Pe = self.getProjections(mesh) # # e_passed = [] # b_passed = [] # for i, f in enumerate(freqs): # mdws.frequency = f # # b_xz = [] # for b, component in zip([0, 2], ['x', 'z']): # grid = getattr(mesh, "gridF{}".format(component)) # b_xz.append( # mdws.magnetic_flux_density(grid)[:, b] # ) # b_geoana = np.hstack(b_xz) # e_geoana = mdws.electric_field(mesh.gridEy)[:, 1] # # P_e_geoana = Pe*e_geoana # P_e_simpeg = Pe*discretize.utils.mkvc(fields[srcList[i], 'e']) # # P_b_geoana = Pf*b_geoana # P_b_simpeg = Pf*discretize.utils.mkvc(fields[srcList[i], 'b']) # # print("Testing {} Hz".format(f)) # # for comp in ['real', 'imag']: # e_passed.append(compare_w_SimPEG( # 'E {}'.format(comp), # getattr(P_e_geoana, comp), # getattr(P_e_simpeg, comp) # )) # b_passed.append(compare_w_SimPEG( # 'B {}'.format(comp), # getattr(P_b_geoana, comp), # getattr(P_b_simpeg, comp) # )) # assert(all(e_passed)) # assert(all(b_passed)) if __name__ == '__main__': unittest.main()
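# A minimal numpy sketch of the whole-space wavenumber formula these tests use
# throughout: k = sqrt(omega^2 * mu * epsilon - 1j * omega * mu * sigma), which
# reduces to sqrt(-1j * omega * mu * sigma) in the quasistatic limit.
import numpy as np
from scipy.constants import mu_0, epsilon_0

def wholespace_wavenumber(frequency, sigma, mu=mu_0, epsilon=epsilon_0):
    omega = 2.0 * np.pi * frequency
    return np.sqrt(omega**2 * mu * epsilon - 1j * omega * mu * sigma)

# Matches the default value checked in test_defaults (sigma=1 S/m, f=1 Hz).
k_default = wholespace_wavenumber(1.0, 1.0)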
import os
import re
import io
import wx
from html import escape
import sys
import math
from math import radians, degrees, sin, cos, asin, sqrt, atan2, exp, modf, pi
import random
import bisect
import datetime
import getpass
import socket
from Version import AppVerName
from Animation import GetLapRatio
import Utils
from Utils import fld
from Utils import floatFormatLocale as ff

import xml.etree.ElementTree
import xml.etree.cElementTree
import xml.dom
import xml.dom.minidom
from GpxParse import GpxParse
import collections
import zipfile

def LineNormal( x1, y1, x2, y2, normLen ):
	''' Returns the coords of a normal line passing through x1, y1 of length normLen. '''
	dx, dy = x2 - x1, y2 - y1
	scale = (normLen / 2.0) / sqrt( dx**2 + dy**2 )
	dx *= scale
	dy *= scale
	return x1 + dy, y1 - dx, x1 - dy, y1 + dx

def GreatCircleDistance( lat1, lon1, lat2, lon2 ):
	"""
	Calculate the great circle distance between two points
	on the earth (specified in decimal degrees) in meters.
	"""
	# convert decimal degrees to radians
	lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
	# haversine formula
	dlon = lon2 - lon1
	dlat = lat2 - lat1
	a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
	c = 2.0 * asin(sqrt(max(a, 0.0)))
	m = 6371000.0 * c
	return m

def GreatCircleDistance3D( lat1, lon1, ele1, lat2, lon2, ele2 ):
	d = GreatCircleDistance( lat1, lon1, lat2, lon2 )
	return sqrt( d ** 2 + (ele2 - ele1) ** 2 )

def GradeAdjustedDistance( lat1, lon1, ele1, lat2, lon2, ele2 ):
	d = GreatCircleDistance( lat1, lon1, lat2, lon2 )
	if not d:
		return 0.0
	a = atan2( ele2 - ele1, d )
	m = 2.0 / (1.0 + exp(-a * 2.5))	# Use a sigmoid curve to approximate the effect of grade on speed.
	return m * d

LatLonEle = collections.namedtuple('LatLonEle', ['lat','lon','ele', 't'] )
GpsPoint = collections.namedtuple('GpsPoint', ['lat','lon','ele','x','y','d','dCum'] )

def triangle( t, a ):
	''' Triangle wave of period a.  The parentheses around -1 matter: (-1)**n alternates sign; -1**n is always -1. '''
	a = float(a)
	return (2 / a) * (t - a * int(t / a + 0.5)) * ((-1) ** int(t / a + 0.5))

def CompassBearing(lat1, lon1, lat2, lon2):
	"""
	Calculates the bearing between two points.
	"""
	lat1 = radians(lat1)
	lat2 = radians(lat2)

	diffLong = math.radians(lon2 - lon1)

	x = sin(diffLong) * cos(lat2)
	y = cos(lat1) * sin(lat2) - (sin(lat1) * cos(lat2) * cos(diffLong))

	initial_bearing = atan2(x, y)

	# Now we have the initial bearing but math.atan2 returns values
	# from -180 to +180, which is not what we want for a compass bearing.
	# The solution is to normalize the initial bearing as shown below.
	initial_bearing = degrees(initial_bearing)
	compass_bearing = (initial_bearing + 360) % 360

	return compass_bearing

reGpxTime = re.compile( '[^0-9+]' )

def GpxHasTimes( fname ):
	''' Check that the gpx file contains valid times. '''
	points = GpxParse( fname )
	tLast = None
	for p in points:
		try:
			t = p['time']
		except KeyError:
			return False
		if tLast is not None and tLast > t:
			return False
		tLast = t
	return True

def LatLonElesToGpsPoints( latLonEles, useTimes = False, isPointToPoint = False ):
	hasTimes = useTimes
	latMin, lonMin = 1000.0, 1000.0
	for latLonEle in latLonEles:
		if latLonEle.t is None:
			hasTimes = False
		if latLonEle.lat < latMin:
			latMin = latLonEle.lat
		if latLonEle.lon < lonMin:
			lonMin = latLonEle.lon

	gpsPoints = []
	dCum = 0.0
	for i in range(len(latLonEles) - (1 if isPointToPoint else 0)):
		p, pNext = latLonEles[i], latLonEles[(i+1) % len(latLonEles)]
		if hasTimes:
			if pNext.t > p.t:
				gad = (pNext.t - p.t).total_seconds()
			else:
				# Estimate the last time difference based on the speed of the last segment.
pPrev = latLonEles[(i+len(latLonEles)-1)%len(latLonEles)] d = GreatCircleDistance( pPrev.lat, pPrev.lon, p.lat, p.lon ) t = (p.t - pPrev.t).total_seconds() if t > 0: s = d / t gad = GreatCircleDistance( p.lat, p.lon, pNext.lat, pNext.lon ) / s else: gad = 0.0 else: gad = GradeAdjustedDistance( p.lat, p.lon, p.ele, pNext.lat, pNext.lon, pNext.ele ) x = GreatCircleDistance( latMin, lonMin, latMin, p.lon ) y = GreatCircleDistance( latMin, lonMin, p.lat, lonMin ) if gad > 0.0: gpsPoints.append( GpsPoint(p.lat, p.lon, p.ele, x, y, gad, dCum) ) dCum += gad return gpsPoints def ParseGpxFile( fname, useTimes = False, isPointToPoint = False ): points = GpxParse( fname ) latLonEles = [] for p in points: lat, lon, ele, t = p['lat'], p['lon'], p.get('ele',0.0), p.get('time', None) # Skip consecutive duplicate points. try: if latLonEles[-1].lat == lat and latLonEles[-1].lon == lon: continue except IndexError: pass latLonEles.append( LatLonEle(lat, lon, ele, t) ) return latLonEles def createAppendChild( doc, parent, name, textAttr={} ): child = doc.createElement( name ) parent.appendChild( child ) for k, v in textAttr.items(): attr = doc.createElement( k ) if isinstance(v, float) and modf(v)[0] == 0.0: v = int(v) attr.appendChild( doc.createTextNode( '{:.6f}'.format(v) if isinstance(v, float) else '{}'.format(v) ) ) child.appendChild( attr ) return child def createAppendTextChild( doc, parent, text ): child = doc.createTextNode( text ) parent.appendChild( child ) return child def CreateGPX( courseName, gpsPoints ): ''' Create a GPX file from the gpsPoints list. ''' doc = xml.dom.minidom.Document() gpx = createAppendChild( doc, doc, 'gpx' ) gpx.attributes['creator'] = AppVerName + ' http://sites.google.com/site/crossmgrsoftware/' gpx.attributes['xmlns'] ="http://www.topografix.com/GPX/1/0" gpx.attributes['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance" gpx.attributes['xsi:schemaLocation'] = "http://www.topografix.com/GPX/1/0 http://www.topografix.com/GPX/1/0/gpx.xsd" gpx.appendChild( doc.createComment( '\n'.join( [ '', 'DO NOT EDIT!', '', 'This file was created automatically by {}.'.format(AppVerName), '', 'For more information, see http://sites.google.com/site/crossmgrsoftware', '', 'Created: {}'.format(datetime.datetime.now().strftime( '%Y-%m-%d %H:%M:%S' )), 'User: {}'.format(escape(getpass.getuser())), 'Computer: {}'.format(escape(socket.gethostname())), '', ] ) ) ) trk = createAppendChild( doc, gpx, 'trk', { 'name': courseName, } ) trkseg = createAppendChild( doc, trk, 'trkseg' ) def fmt( v ): return '{:.7f}'.format(v).rstrip('0').rstrip('.') for p in gpsPoints: trkpt = createAppendChild( doc, trkseg, 'trkpt' ) trkpt.attributes['lat'] = fmt(p.lat) trkpt.attributes['lon'] = fmt(p.lon) if p.ele: ele = createAppendChild( doc, trkpt, 'ele' ) createAppendTextChild( doc, ele, fmt(p.ele) ) return doc class GeoTrack: def __init__( self ): self.gpsPoints = [] self.distanceTotal = 0.0 self.cumDistance = [] self.x = 0 self.xMax = self.yMax = 0.0 self.yBottom = 0 self.mult = 1.0 self.length = 0.0 self.totalElevationGain = 0.0 self.isPointToPoint = False self.cache = {} def computeSummary( self ): lenGpsPoints = len(self.gpsPoints) length = 0.0 totalElevationGain = 0.0 self.isPointToPoint = getattr( self, 'isPointToPoint', False ) for i in range(lenGpsPoints - (1 if self.isPointToPoint else 0)): pCur, pNext = self.gpsPoints[i], self.gpsPoints[(i + 1) % lenGpsPoints] length += GreatCircleDistance3D( pCur.lat, pCur.lon, pCur.ele, pNext.lat, pNext.lon, pNext.ele ) totalElevationGain += max(0.0, pNext.ele 
- pCur.ele) self.length = length self.totalElevationGain = totalElevationGain def setPoints( self, gpsPoints, isPointToPoint = False ): self.gpsPoints = gpsPoints self.isPointToPoint = isPointToPoint self.xMax = max( p.x for p in self.gpsPoints ) self.yMax = max( p.y for p in self.gpsPoints ) dCum = 0.0 self.cumDistance = [] for p in self.gpsPoints: self.cumDistance.append( dCum ) dCum += p.d self.distanceTotal = dCum self.computeSummary() def read( self, fname, useTimes = False, isPointToPoint = False ): self.isPointToPoint = isPointToPoint latLonEles = ParseGpxFile( fname, useTimes=useTimes, isPointToPoint=isPointToPoint ) gpsPoints = LatLonElesToGpsPoints( latLonEles, useTimes=useTimes, isPointToPoint=isPointToPoint ) self.setPoints( gpsPoints, isPointToPoint=isPointToPoint ) def getGPX( self, courseName ): return CreateGPX( courseName, self.gpsPoints ) def writeGPXFile( self, fname ): with io.open( fname, 'w') as fp: self.getGPX( os.path.splitext(os.path.basename(fname))[0] ).writexml(fp, indent="", addindent=" ", newl="\n", encoding='utf-8') def readElevation( self, fname ): header = None distance, elevation = [], [] iDistance, iElevation = None, None with io.open(fname, 'r') as fp: for line in fp: fields = [f.strip() for f in line.split(',')] if not header: header = fields for i, h in enumerate(header): h = h.lower() if h.startswith('distance'): iDistance = i elif h.startswith('elevation'): iElevation = i assert iDistance is not None and iElevation is not None, 'Invalid header in file.' else: distance.append( float(fields[iDistance]) ) elevation.append( float(fields[iElevation]) ) if len(elevation) < 2: return lenGpsPoints = len(self.gpsPoints) length = 0.0 for i in range(lenGpsPoints-1): pCur, pNext = self.gpsPoints[i], self.gpsPoints[(i + 1) % lenGpsPoints] length += GreatCircleDistance( pCur.lat, pCur.lon, pNext.lat, pNext.lon ) distanceMult = distance[-1] / length # Update the known GPS points with the proportional elevation. length = 0.0 iSearch = 0 for i in range(lenGpsPoints): pCur, pNext = self.gpsPoints[i], self.gpsPoints[(i + 1) % lenGpsPoints] d = min( length * distanceMult, distance[-1] ) for iSearch in range(iSearch, len(elevation) - 2): if distance[iSearch] <= d < distance[iSearch+1]: break deltaDistance = max( distance[iSearch+1] - distance[iSearch], 0.000001 ) ele = elevation[iSearch] + (elevation[iSearch+1] - elevation[iSearch]) * \ (d - distance[iSearch]) / deltaDistance self.gpsPoints[i] = pCur._replace( ele = ele ) length += GreatCircleDistance( pCur.lat, pCur.lon, pNext.lat, pNext.lon ) self.computeSummary() def getXYTrack( self ): x, yBottom, mult = self.x, self.yBottom, self.mult return [(p.x * mult + x, yBottom - p.y * mult) for p in self.gpsPoints] def asExportJson( self ): return [ [int(getattr(p, a)*10.0) for a in ('x', 'y', 'd')] for p in self.gpsPoints ] def getAltigraph( self ): if not self.gpsPoints or all( p.ele == 0.0 for p in self.gpsPoints ): return [] altigraph = [(0.0, self.gpsPoints[0].ele)] p = self.gpsPoints for i in range(1, len(p)): altigraph.append( (altigraph[-1][0] + GreatCircleDistance(p[i-1].lat, p[i-1].lon, p[i].lat, p[i].lon), p[i].ele) ) altigraph.append( (altigraph[-1][0] + GreatCircleDistance(p[-1].lat, p[-1].lon, p[0].lat, p[0].lon), p[0].ele) ) return altigraph def isClockwise( self ): if not self.gpsPoints: return False p = self.gpsPoints return sum( (p[j].x - p[j-1].x) * (p[j].y + p[j-1].y) for j in range(len(self.gpsPoints)) ) > 0.0 def reverse( self ): ''' Reverse the points in the track. 
Make sure the distance to the next point and cumDistance is correct. ''' self.cumDistance = [] gpsPointsReversed = [] dCum = 0.0 for i in range(len(self.gpsPoints)-1, -1, -1): p = self.gpsPoints[i] pPrev = self.gpsPoints[i-1 if i > 0 else len(self.gpsPoints)-1] gpsPointsReversed.append( GpsPoint(p.lat, p.lon, p.ele, p.x, p.y, pPrev.d, dCum) ) self.cumDistance.append( dCum ) dCum += pPrev.d self.gpsPoints = gpsPointsReversed def setClockwise( self, clockwise = True ): if self.isClockwise() != clockwise: self.reverse() def asCoordinates( self ): coordinates = [] for p in self.gpsPoints: coordinates.append( p.lon ) coordinates.append( p.lat ) return coordinates def asKmlTour( self, raceName=None, speedKMH=None ): race = raceName or 'Race' speed = (speedKMH or 35.0) * (1000.0 / (60.0*60.0)) doc = xml.dom.minidom.Document() kml = createAppendChild( doc, doc, 'kml' ) kml.attributes['xmlns'] = "http://www.opengis.net/kml/2.2" kml.attributes['xmlns:gx'] = "http://www.google.com/kml/ext/2.2" kml.appendChild( doc.createComment( '\n'.join( [ '', 'DO NOT EDIT!', '', 'This file was created automatically by CrossMgr.', '', 'Is shows a fly-through of the actual bicycle race course.', 'For more information, see http://sites.google.com/site/crossmgrsoftware', '', 'Created: {}'.format(datetime.datetime.now().strftime( '%Y/%m/%d %H:%M:%S' )), 'User: {}'.format(escape(getpass.getuser())), 'Computer: {}'.format(escape(socket.gethostname())), '', ] ) ) ) Document = createAppendChild( doc, kml, 'Document', { 'open': 1, 'name': raceName, } ) # Define some styles. Style = createAppendChild( doc, Document, 'Style' ) Style.attributes['id'] = 'thickBlueLine' LineStyle = createAppendChild( doc, Style, 'LineStyle', { 'width': 5, 'color': '#7fff0000', # aabbggrr } ) # Define an flying tour around the course. Tour = createAppendChild( doc, Document, 'gx:Tour', { 'name': '{}: Tour'.format(race), } ) Playlist = createAppendChild( doc, Tour, 'gx:Playlist' ) def fly( doc, PlayList, p, mode, duration, heading ): FlyTo = createAppendChild( doc, Playlist, 'gx:FlyTo', { 'gx:duration': duration, 'gx:flyToMode': mode, } ) Camera = createAppendChild( doc, FlyTo, 'Camera', { 'latitude': p.lat, 'longitude': p.lon, 'altitude': 2, 'altitudeMode': 'relativeToGround', 'heading': heading, 'tilt': 80, } ) # Fly to the starting point. p, pNext = self.gpsPoints[:2] fly( doc, Playlist, p, 'bounce', 3, CompassBearing(p.lat, p.lon, pNext.lat, pNext.lon) ) # Follow the path through all the points. lenGpsPoints = len(self.gpsPoints) for i in range(1, lenGpsPoints + (0 if self.isPointToPoint else 1)): pPrev, p = self.gpsPoints[i-1], self.gpsPoints[i%lenGpsPoints] fly(doc, Playlist, p, 'smooth', GreatCircleDistance(pPrev.lat, pPrev.lon, p.lat, p.lon) / speed, CompassBearing(pPrev.lat, pPrev.lon, p.lat, p.lon) ) if self.isPointToPoint: # Marker for the start line. Placemark = createAppendChild( doc, Document, 'Placemark', {'name': '{}: Start Line'.format(race)} ) createAppendChild( doc, Placemark, 'Point', { 'coordinates': '{},{}'.format(self.gpsPoints[0].lon, self.gpsPoints[0].lat) } ) pFinish = self.gpsPoints[-1] else: pFinish = self.gpsPoints[0] # Marker for the finish line. Placemark = createAppendChild( doc, Document, 'Placemark', {'name': '{}: Finish Line'.format(race)} ) createAppendChild( doc, Placemark, 'Point', {'coordinates': '{},{}'.format(pFinish.lon, pFinish.lat)} ) # Path for the course. 
Placemark = createAppendChild( doc, Document, 'Placemark', { 'name': '{}: Course'.format(race), 'styleUrl': '#thickBlueLine', } ) coords = [''] for i in range(lenGpsPoints + (0 if self.isPointToPoint else 1)): p = self.gpsPoints[i % lenGpsPoints] coords.append( '{},{}'.format(p.lon, p.lat) ) coords.append('') LineString = createAppendChild( doc, Placemark, 'LineString', { 'tessellate': 1, 'altitudeMode': 'clampToGround', 'coordinates': '\n'.join(coords), } ) ret = doc.toprettyxml( indent=' ' ) doc.unlink() return ret def getXY( self, lap, id = None ): # Find the segment at this distance in the lap. lap = modf(lap)[0] # Get fraction of lap. lapDistance = lap * self.distanceTotal # Get distance traveled in the lap. # Avoid the cost of the binary search by checking if the id request is still on the last segment. lenGpsPoints = len(self.gpsPoints) try: i = self.cache[id] pCur = self.gpsPoints[i] if not (pCur.dCum <= lapDistance <= pCur.dCum + pCur.d): i = (i + 1) % lenGpsPoints pCur = self.gpsPoints[i] if not (pCur.dCum <= lapDistance <= pCur.dCum + pCur.d): i = None except (IndexError, KeyError): i = None if i is None: # Find the closest point LT the lap distance. i = bisect.bisect_right( self.cumDistance, lapDistance ) i %= lenGpsPoints if self.cumDistance[i] > lapDistance: i -= 1 self.cache[id] = i pCur, pNext = self.gpsPoints[i], self.gpsPoints[(i + 1) % lenGpsPoints] segDistance = lapDistance - self.cumDistance[i] segRatio = 0.0 if pCur.d <= 0.0 else segDistance / pCur.d x, y = pCur.x + (pNext.x - pCur.x) * segRatio, pCur.y + (pNext.y - pCur.y) * segRatio return x * self.mult + self.x, self.yBottom - y * self.mult @property def lengthKm( self ): return self.length / 1000.0 @property def lengthMiles( self ): return self.length * 0.621371/1000.0 @property def totalElevationGainM( self ): try: return self.totalElevationGain except AttributeError: self.totalElevationGain = sum( max(0.0, self.gpsPoints[i].ele - self.gpsPoints[i-1].ele) for i in range(len(self.gpsPoints)) ) return self.totalElevationGain @property def totalElevationGainFt( self ): return self.totalElevationGainM * 3.28084 @property def numPoints( self ): return len(self.gpsPoints) def setDisplayRect( self, x, y, width, height ): if width <= 0 or height <= 0: self.mult = 1.0 self.x = x self.yBottom = y + height return mult = min( width / self.xMax, height / self.yMax ) w, h = self.xMax * mult, self.yMax * mult xBorder = (width - w) / 2.0 yBorder = (height - h) / 2.0 self.mult = mult self.x = xBorder + x self.yBottom = y + height shapes = [ [(cos(a), -sin(a)) \ for a in (q*(2.0*pi/i)+pi/2.0+(2.0*pi/(i*2.0) if i % 2 == 0 else 0)\ for q in range(i))] for i in range(3,9)] def DrawShape( dc, num, x, y, radius ): dc.DrawPolygon( [ wx.Point(int(p*radius+x), int(q*radius+y)) for p,q in shapes[num % len(shapes)] ] ) class GeoAnimation(wx.Control): topFewCount = 5 infoLines = 1 def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.NO_BORDER, validator=wx.DefaultValidator, name="GeoAnimation"): super().__init__(parent, id, pos, size, style, validator, name) self.SetBackgroundColour('white') self.data = {} self.categoryDetails = {} self.t = 0 self.tMax = None self.tDelta = 1 self.r = 100 # Radius of the turns of the fictional track. 
self.laneMax = 8 self.geoTrack = None self.compassLocation = '' self.widthLast = -1 self.heightLast = -1 self.xBanner = 300 self.tBannerLast = None self.course = 'geo' self.units = 'km' self.framesPerSecond = 32 self.lapCur = 0 self.iLapDistance = 0 self.tLast = datetime.datetime.now() self.speedup = 1.0 self.suspendGeoAnimation = False self.numsToWatch = set() self.checkeredFlag = wx.Bitmap(os.path.join(Utils.getImageFolder(), 'CheckeredFlag.png'), wx.BITMAP_TYPE_PNG) trackRGB = [int('7FE57F'[i:i+2],16) for i in range(0, 6, 2)] self.trackColour = wx.Colour( *trackRGB ) self.colours = [] k = [0,32,64,128,128+32,128+64,255] for r in k: for g in k: for b in k: if sum( abs(c - t) for c, t in zip([r,g,b],trackRGB) ) > 80 and \ sum( c for c in [r,g,b] ) > 64: self.colours.append( wx.Colour(r, g, b) ) random.seed( 1234 ) random.shuffle( self.colours ) self.topFewColours = [ wx.Colour(255,215,0), wx.Colour(230,230,230), wx.Colour(205,133,63) ] while len(self.topFewColours) < self.topFewCount: self.topFewColours.append( wx.Colour(200,200,200) ) self.trackColour = wx.Colour( *[int('7FE57F'[i:i+2],16) for i in range(0, 6, 2)] ) # Cache the fonts if the size does not change. self.numberFont = None self.timeFont = None self.highlightFont = None self.rLast = -1 self.timer = wx.Timer( self, id=wx.ID_ANY ) self.Bind( wx.EVT_TIMER, self.NextFrame, self.timer ) # Bind the events related to our control: first of all, we use a # combination of wx.BufferedPaintDC and an empty handler for # wx.EVT_ERASE_BACKGROUND (see later) to reduce flicker self.Bind(wx.EVT_PAINT, self.OnPaint) self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground) self.Bind(wx.EVT_SIZE, self.OnSize) def SetGeoTrack( self, geoTrack ): if self.geoTrack != geoTrack: self.geoTrack = geoTrack def SetOptions( self, *argc, **kwargs ): pass def DoGetBestSize(self): return wx.Size(400, 200) def _initGeoAnimation( self ): self.tLast = datetime.datetime.now() self.suspendGeoAnimation = False def Animate( self, tRunning, tMax = None, tCur = 0.001 ): self.StopAnimate(); self._initGeoAnimation() self.t = tCur if not self.data: return if tMax is None: tMax = 0 for num, info in self.data.items(): try: tMax = max(tMax, info['raceTimes'][-1]) except IndexError: pass self.speedup = float(tMax) / float(tRunning) self.tMax = tMax self.timer.Start( int(1000.0/self.framesPerSecond), False ) def StartAnimateRealtime( self ): self.StopAnimate(); self._initGeoAnimation() self.speedup = 1.0 self.tMax = 999999 self.timer.Start( int(1000.0/self.framesPerSecond), False ) def StopAnimate( self ): if self.timer.IsRunning(): self.timer.Stop() self.tBannerLast = None def SetNumsToWatch( self, numsToWatch ): self.numsToWatch = numsToWatch self.Refresh() def SuspendAnimate( self ): self.suspendGeoAnimation = True def IsAnimating( self ): return not self.suspendGeoAnimation and self.timer.IsRunning() def SetTime( self, t ): self.t = t self.Refresh() def NextFrame( self, event ): if event.GetId() == self.timer.GetId(): tNow = datetime.datetime.now() tDelta = tNow - self.tLast self.tLast = tNow secsDelta = tDelta.seconds + tDelta.microseconds / 1000000.0 self.SetTime( self.t + secsDelta * self.speedup ) if self.suspendGeoAnimation or self.t >= self.tMax: self.StopAnimate() def SetForegroundColour(self, colour): wx.Control.SetForegroundColour(self, colour) self.Refresh() def SetBackgroundColour(self, colour): wx.Control.SetBackgroundColour(self, colour) self.Refresh() def GetDefaultAttributes(self): """ Overridden base class virtual. 
By default we should use the same font/colour attributes as the native wx.StaticText. """ return wx.StaticText.GetClassDefaultAttributes() def ShouldInheritColours(self): """ Overridden base class virtual. If the parent has non-default colours then we want this control to inherit them. """ return True def SetData( self, data, tCur = None, categoryDetails = None ): """ * data is a rider information indexed by number. Info includes lap times and lastTime times. * lap times should include the start offset. Example: data = { 101: { raceTimes: [xx, yy, zz], lastTime: None }, 102 { raceTimes: [aa, bb], lastTime: cc} } """ self.data = data if data else {} self.categoryDetails = categoryDetails if categoryDetails else {} for num, info in self.data.items(): info['iLast'] = 1 if info['status'] == 'Finisher' and info['raceTimes']: info['finishTime'] = info['raceTimes'][-1] else: info['finishTime'] = info['lastTime'] # Get the units. for num, info in self.data.items(): if info['status'] == 'Finisher': try: self.units = 'miles' if 'mph' in info['speed'] else 'km' except KeyError: self.units = 'km' break if tCur is not None: self.t = tCur; self.tBannerLast = None self.Refresh() def getShortName( self, num ): try: info = self.data[num] except KeyError: return '' lastName = info.get('LastName','') firstName = info.get('FirstName','') if lastName: if firstName: return '%s, %s.' % (lastName, firstName[:1]) else: return lastName return firstName def OnPaint(self, event): dc = wx.BufferedPaintDC(self) self.Draw(dc) def OnSize(self, event): self.Refresh() event.Skip() def getRiderPositionTime( self, num ): """ Returns the fraction of the lap covered by the rider and the time. """ if num not in self.data: return (None, None) info = self.data[num] raceTimes = info['raceTimes'] if not raceTimes or self.t < raceTimes[0] or len(raceTimes) < 2: return (None, None) tSearch = self.t finishTime = info['finishTime'] if finishTime is not None and finishTime < self.t: if finishTime == raceTimes[-1]: return (len(raceTimes), finishTime) tSearch = finishTime if tSearch >= raceTimes[-1]: p = len(raceTimes) + float(tSearch - raceTimes[-1]) / float(raceTimes[-1] - raceTimes[-2]) else: i = info['iLast'] if not (raceTimes[i-1] < tSearch <= raceTimes[i]): i += 1 if not (raceTimes[i-1] < tSearch <= raceTimes[i]): i = bisect.bisect_left( raceTimes, tSearch ) info['iLast'] = i if i == 1: firstLapRatio = info['flr'] p = float(tSearch - raceTimes[i-1]) / float(raceTimes[i] - raceTimes[i-1]) p = 1.0 - firstLapRatio + p * firstLapRatio p -= math.floor(p) - 1.0 else: p = i + float(tSearch - raceTimes[i-1]) / float(raceTimes[i] - raceTimes[i-1]) return (p, tSearch) def getRiderXYPT( self, num ): positionTime = self.getRiderPositionTime( num ) if positionTime[0] is None: return None, None, None, None if self.data[num]['finishTime'] is not None and self.t >= self.data[num]['finishTime']: self.lapCur = max(self.lapCur, len(self.data[num]['raceTimes'])) return (None, None, positionTime[0], positionTime[1]) self.lapCur = max(self.lapCur, int(positionTime[0])) xy = self.geoTrack.getXY( positionTime[0], num ) return xy[0], xy[1], positionTime[0], positionTime[1] def drawBanner( self, dc, width, height, tHeight, bannerItems ): blue = wx.Colour(0, 0, 200) dc.SetPen( wx.Pen(blue) ) dc.SetBrush( wx.Brush(blue, wx.SOLID) ) dc.DrawRectangle( 0, 0, int(width), int(tHeight*1.1) ) if not bannerItems: return y = tHeight * 0.1 tHeight *= 0.85 x = self.xBanner while x < width: for bi in bannerItems: if x >= width: break position, num, name = 
'{}'.format(bi[1]), '{}'.format(bi[0]), self.getShortName(bi[0]) if position == '1': x += tHeight / 2 tWidth = self.checkeredFlag.Width if x + tWidth > 0 and x < width: dc.DrawBitmap( self.checkeredFlag, int(x), int(y), False ) x += tWidth + tHeight / 2 dc.SetFont( self.positionFont ) tWidth = dc.GetTextExtent( position )[0] if x + tWidth > 0 and x < width: dc.SetTextForeground( wx.WHITE ) dc.DrawText( position, int(x), int(y) ) x += tWidth + tHeight / 4 dc.SetFont( self.bibFont ) tWidth = dc.GetTextExtent(num)[0] if x + tWidth > 0 and x < width: dc.SetTextForeground( 'YELLOW' ) dc.DrawText(num, int(x), int(y) ) x += tWidth + tHeight / 3 dc.SetFont( self.nameFont ) tWidth = dc.GetTextExtent(name)[0] if x + tWidth > 0 and x < width: dc.SetTextForeground( wx.WHITE ) dc.DrawText(name, int(x), int(y) ) x += tWidth + tHeight if x < 0: self.xBanner = x tBanner = datetime.datetime.now() if self.tBannerLast is None: self.tBannerLast = tBanner self.xBanner -= 64.0 * (tBanner - self.tBannerLast).total_seconds() self.tBannerLast = tBanner def Draw(self, dc): size = self.GetClientSize() width = size.width height = size.height backColour = self.GetBackgroundColour() backBrush = wx.Brush(backColour, wx.SOLID) dc.SetBackground(backBrush) dc.Clear() if width < 80 or height < 80 or not self.geoTrack: return avePoints = 1 isPointToPoint = getattr(self.geoTrack, 'isPointToPoint', False) self.r = int(width / 4) if self.r * 2 > height: self.r = int(height / 2) self.r -= (self.r & 1) # Make sure that r is an even number. r = self.r # Get the fonts if needed. if self.rLast != r: tHeight = int(r / 8.0) self.numberFont = wx.Font( (0,int(tHeight)), wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL ) self.timeFont = self.numberFont self.highlightFont = wx.Font( (0,int(tHeight * 1.6)), wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL ) self.positionFont = wx.Font( (0,int(tHeight*0.85*0.7)), wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL ) self.bibFont = wx.Font( (0,int(tHeight*0.85)), wx.FONTFAMILY_SWISS, wx.FONTSTYLE_ITALIC, wx.FONTWEIGHT_BOLD ) self.nameFont = wx.Font((0,int(tHeight*0.85)), wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD ) self.rLast = r tHeight = int(r / 8.0) textVSpace = tHeight*0.2 laneWidth = (r/2) / self.laneMax border = laneWidth * 1.5 / 2 trackWidth = width - border * 2 topMargin = border + tHeight + textVSpace trackHeight = height - topMargin - border self.geoTrack.setDisplayRect( int(border), int(topMargin), int(trackWidth), int(trackHeight) ) # Draw the course. 
dc.SetBrush( wx.TRANSPARENT_BRUSH ) drawPoints = self.geoTrack.getXYTrack() drawPointsInt = [(int(p[0]), int(p[1])) for p in drawPoints] if width != self.widthLast or height != self.heightLast: self.widthLast, self.heightLast = width, height locations = ['NE', 'SE', 'NW', 'SW', ] compassWidth, compassHeight = width * 0.25, height * 0.25 inCountBest = len(drawPoints) + 1 self.compassLocation = locations[0] for loc in locations: xCompass = 0 if 'W' in loc else width - compassWidth yCompass = 0 if 'S' in loc else height - compassHeight inCount = sum( 1 for x, y in drawPoints if xCompass <= x < xCompass + compassWidth and yCompass <= y < yCompass + compassHeight ) if inCount < inCountBest: inCountBest = inCount self.compassLocation = loc if inCount == 0: break dc.SetPen( wx.Pen(wx.Colour(128,128,128), int(laneWidth * 1.25 + 2), wx.SOLID) ) if isPointToPoint: dc.DrawLines( drawPointsInt ) else: dc.DrawPolygon( drawPointsInt ) dc.SetPen( wx.Pen(self.trackColour, int(laneWidth * 1.25), wx.SOLID) ) if isPointToPoint: dc.DrawLines( drawPointsInt ) else: dc.DrawPolygon( drawPointsInt ) # Draw a centerline to show all the curves in the course. dc.SetPen( wx.Pen(wx.Colour(80,80,80), 1, wx.SOLID) ) if isPointToPoint: dc.DrawLines( drawPointsInt ) else: dc.DrawPolygon( drawPointsInt ) # Draw a finish line. finishLineLength = laneWidth * 2 if isPointToPoint: x1, y1, x2, y2 = LineNormal( drawPoints[-1][0], drawPoints[-1][1], drawPoints[-2][0], drawPoints[-2][1], laneWidth * 2 ) else: x1, y1, x2, y2 = LineNormal( drawPoints[0][0], drawPoints[0][1], drawPoints[1][0], drawPoints[1][1], laneWidth * 2 ) dc.SetPen( wx.Pen(wx.WHITE, int(laneWidth / 1.5), wx.SOLID) ) dc.DrawLine( int(x1), int(y1), int(x2), int(y2) ) dc.SetPen( wx.Pen(wx.BLACK, int(laneWidth / 5), wx.SOLID) ) dc.DrawLine( int(x1), int(y1), int(x2), int(y2) ) if isPointToPoint: x1, y1, x2, y2 = LineNormal( drawPoints[-1][0], drawPoints[-1][1], drawPoints[-2][0], drawPoints[-2][1], laneWidth * 4 ) else: x1, y1, x2, y2 = LineNormal( drawPoints[0][0], drawPoints[0][1], drawPoints[1][0], drawPoints[1][1], laneWidth * 4 ) dc.DrawBitmap( self.checkeredFlag, int(x2 - self.checkeredFlag.Width/2), int(y2 - self.checkeredFlag.Height/2), False ) # Draw starting arrow showing direction. if not self.data and not isPointToPoint and len(drawPoints) > avePoints: x1, y1 = drawPoints[0][0], drawPoints[0][1] x2, y2 = drawPoints[1][0], drawPoints[1][1] a = atan2( y2-y1, x2-x1 ) x2 = int(x1 + cos(a) * laneWidth*4) y2 = int(y1 + sin(a) * laneWidth*4) dc.SetPen( wx.Pen(wx.BLACK, int(laneWidth / 4), wx.SOLID) ) dc.DrawLine( int(x1), int(y1), int(x2), int(y2) ) a = atan2( y1-y2, x1-x2 ) x1, y1 = x2, y2 arrowLength = 1.25 arrowAngle = 3.14159/8.0 x2 = int(x1 + cos(a+arrowAngle) * laneWidth*arrowLength) y2 = int(y1 + sin(a+arrowAngle) * laneWidth*arrowLength) dc.DrawLine( x1, y1, x2, y2 ) x2 = int(x1 + cos(a-arrowAngle) * laneWidth*arrowLength) y2 = int(y1 + sin(a-arrowAngle) * laneWidth*arrowLength) dc.DrawLine( x1, y1, x2, y2 ) # Draw the riders dc.SetFont( self.numberFont ) dc.SetPen( wx.BLACK_PEN ) numSize = (r/2)/self.laneMax self.lapCur = 0 topFew = {} riderRadius = laneWidth * 0.5 thickLine = r / 32 highlightPen = wx.Pen( wx.WHITE, int(thickLine * 1.0) ) riderPosition = {} if self.data: riderXYPT = [] for num, d in self.data.items(): xypt = list(self.getRiderXYPT(num)) xypt.insert( 0, num ) riderXYPT.append( xypt ) # Sort by reverse greatest distance, then by shortest time. # Do this so the leaders are drawn last. 
riderXYPT.sort( key=lambda x : ( x[3] if x[3] is not None else 0.0, -x[4] if x[4] is not None else 0.0) ) topFew = {} for j, i in enumerate(range(len(riderXYPT) - 1, max(-1,len(riderXYPT)-self.topFewCount-1), -1)): topFew[riderXYPT[i][0]] = j numRiders = len(riderXYPT) for j, (num, x, y, position, time) in enumerate(riderXYPT): riderPosition[num] = numRiders - j if x is None: continue dc.SetBrush( wx.Brush(self.colours[num % len(self.colours)], wx.SOLID) ) try: i = topFew[num] dc.SetPen( wx.Pen(self.topFewColours[i], int(thickLine)) ) if num in self.numsToWatch: dc.SetFont( self.highlightFont ) except KeyError: if num in self.numsToWatch: dc.SetFont( self.highlightFont ) dc.SetPen( highlightPen ) i = 9999 else: i = None DrawShape( dc, num, x, y, riderRadius ) if i is not None: if not self.numsToWatch or num in self.numsToWatch: dc.DrawLabel('{}'.format(num), wx.Rect(int(x+numSize), int(y-numSize), int(numSize*2), int(numSize*2)) ) if i is not None: dc.SetPen( wx.BLACK_PEN ) dc.SetFont( self.numberFont ) # Convert topFew from dict to list. leaders = [0] * len(topFew) for num, position in topFew.items(): leaders[position] = num yTop = height - self.infoLines * tHeight tWidth, tHeight = dc.GetTextExtent( '999' ) yCur = tHeight+textVSpace*1.6 # Draw the race time secs = int( self.t ) if secs < 60*60: tStr = '%d:%02d ' % ((secs // 60)%60, secs % 60 ) else: tStr = '%d:%02d:%02d ' % (secs // (60*60), (secs // 60)%60, secs % 60 ) tWidth = dc.GetTextExtent( tStr )[0] dc.DrawText( tStr, int(width - tWidth), int(yCur) ) yCur += tHeight # Draw the current lap dc.SetFont( self.timeFont ) if self.lapCur and leaders: leaderRaceTimes = self.data[leaders[0]]['raceTimes'] if leaderRaceTimes and leaderRaceTimes[0] < leaderRaceTimes[-1]: maxLaps = len(leaderRaceTimes) - 1 self.iLapDistance, lapRatio = GetLapRatio( leaderRaceTimes, self.t, self.iLapDistance ) lapRatio = int(lapRatio * 10.0) / 10.0 # Always round down, not to nearest decimal. 
text = ['{} {} {} '.format(ff(self.iLapDistance + lapRatio,5,1), _('Laps of'), maxLaps), '{} {}'.format(ff(maxLaps - self.iLapDistance - lapRatio,5,1), _('Laps to go'))] cat = self.categoryDetails.get( self.data[leaders[0]].get('raceCat', None) ) if cat: distanceCur, distanceRace = None, None if cat.get('lapDistance', None) is not None: text.append( '' ) flr = self.data[leaders[0]].get('flr', 1.0) distanceLap = cat['lapDistance'] distanceRace = distanceLap * (flr + maxLaps-1) if self.iLapDistance == 0: distanceCur = lapRatio * (distanceLap * flr) else: distanceCur = distanceLap * (flr + self.iLapDistance - 1 + lapRatio) elif cat.get('raceDistance', None) is not None and leaderRaceTimes[0] != leaderRaceTimes[-1]: distanceRace = cat['raceDistance'] distanceCur = (self.t - leaderRaceTimes[0]) / (leaderRaceTimes[-1] - leaderRaceTimes[0]) * distanceRace distanceCur = max( 0.0, min(distanceCur, distanceRace) ) if distanceCur is not None: if distanceCur != distanceRace: distanceCur = int( distanceCur * 10.0 ) / 10.0 text.extend( [ '{} {} {} {}'.format(ff(distanceCur,5,1), self.units, _('of'), fld(distanceRace,1)), '{} {} {}'.format(ff(distanceRace - distanceCur,5,1), self.units, _('to go'))] ) widthMax = max( dc.GetTextExtent(t)[0] for t in text ) if 'N' in self.compassLocation: yCur = height - tHeight * (len(text) + 1) if 'E' in self.compassLocation: xCur = width - widthMax else: xCur = tHeight * 0.5 for row, t in enumerate(text): yCur += tHeight if not t: continue tShow = t.lstrip('0') if tShow.startswith('.'): tShow = '0' + tShow dc.DrawText( tShow, xCur + dc.GetTextExtent('0' * (len(t) - len(tShow)))[0], yCur ) # Draw the leader board. bannerItems = [] for i, leader in enumerate(leaders): bannerItems.append( (leaders[i], i+1) ) if self.numsToWatch: rp = [] for n in self.numsToWatch: try: p = riderPosition[n] rp.append( (p, n) ) except KeyError: pass rp.sort() for w in rp: bannerItems.append( (w[1], w[0]) ) if self.data: self.drawBanner( dc, width, height, tHeight, bannerItems ) def OnEraseBackground(self, event): # This is intentionally empty, because we are using the combination # of wx.BufferedPaintDC + an empty OnEraseBackground event to # reduce flicker pass if __name__ == '__main__': #fname = r'C:\Projects\CrossMgr\bugs\Stuart\20160419-glenlyon\2016-04-19-WTNC Glenlyon 710-r2-Course.gpx' fname = 'GPX/circuit-violet-100-km.gpx' print( GpxHasTimes(fname) ) data = {} for num in range(100,200): mean = random.normalvariate(6.0, 0.3) raceTimes = [0] for lap in range( 4 ): raceTimes.append( raceTimes[-1] + random.normalvariate(mean, mean/20)*60.0 ) data[num] = { 'raceTimes': raceTimes, 'lastTime': raceTimes[-1], 'flr': 1.0, 'status':'Finisher', 'speed':'32.7 km/h' } app = wx.App(False) mainWin = wx.Frame(None,title="GeoAnimation", size=(800,700)) animation = GeoAnimation(mainWin) geoTrack = GeoTrack() geoTrack.read( fname ) geoTrack.writeGPXFile( 'geotrack.gpx' ) #sys.exit() #geoTrack.read( 'St._John__039_s_Cyclocross_course_v2.gpx' ) #geoTrack.read( 'Camp Arrowhead mtb GPS course.gpx' ) #geoTrack.read( 'Races/Midweek/Midweek_Learn_to_Race_and_Elite_Series_course.gpx' ) #geoTrack.reverse() print( 'Clockwise:', geoTrack.isClockwise() ) zf = zipfile.ZipFile( 'track.kmz', 'w', zipfile.ZIP_DEFLATED ) zf.writestr( 'track.kml', geoTrack.asKmlTour('Race Track') ) zf.close() with open('track.kml', 'w') as f: f.write( geoTrack.asKmlTour('Race Track') ) #sys.exit() animation.SetGeoTrack( geoTrack ) animation.SetData( data ) animation.Animate( 2*60, 60*60 ) mainWin.Show() app.MainLoop()
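The rider-positioning logic above (getRiderPositionTime) reduces to a bisect plus linear interpolation over a rider's cumulative lap times. A stripped-down, standalone sketch of that core idea — this is an illustration of the technique, not the class method itself:

import bisect

def lap_fraction(race_times, t):
    """Return fractional laps completed at time t, given sorted cumulative
    lap-finish times (race_times[0] is the start offset)."""
    if t <= race_times[0]:
        return 0.0
    if t >= race_times[-1]:
        return float(len(race_times) - 1)
    # race_times[i-1] < t <= race_times[i]
    i = bisect.bisect_left(race_times, t)
    frac = (t - race_times[i - 1]) / (race_times[i] - race_times[i - 1])
    return (i - 1) + frac

assert lap_fraction([0, 60, 120], 90) == 1.5  # halfway through lap 2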
from collections import deque
import random as rand
import math
import time

# the default weight is 1 if not assigned, but the whole implementation is weighted


class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is given the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph.keys())[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for __ in self.graph[s]:
                    if visited.count(__[1]) < 1:
                        if __[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(__[1])
                            visited.append(__[1])
                            ss = __[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if you omit it or pass -1,
    # the count will be random from 10 to 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = math.floor(rand.random() * 10000) + 10
        for _ in range(c):
            # every vertex has max 100 edges
            e = math.floor(rand.random() * 102) + 1
            for __ in range(e):
                n = math.floor(rand.random() * c) + 1
                if n == _:
                    continue
                self.add_pair(_, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph.keys())[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for __ in self.graph[s]:
                    if visited.count(__[1]) < 1:
                        d.append(__[1])
                        visited.append(__[1])
        return visited

    def in_degree(self, u):
        count = 0
        for _ in self.graph:
            for __ in self.graph[_]:
                if __[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph.keys())[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for __ in self.graph[s]:
                    if visited.count(__[1]) < 1:
                        stack.append(__[1])
                        visited.append(__[1])
                        ss = __[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph.keys())[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for __ in self.graph[s]:
                    if (
                        visited.count(__[1]) > 0
                        and __[1] != parent
                        and indirect_parents.count(__[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == __[1]:
                                anticipating_nodes.add(__[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(__[1]) < 1:
                        stack.append(__[1])
                        visited.append(__[1])
                        ss = __[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph.keys())[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for __ in self.graph[s]:
                    if (
                        visited.count(__[1]) > 0
                        and __[1] != parent
                        and indirect_parents.count(__[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == __[1]:
                                anticipating_nodes.add(__[1])
                                break
                            # any other node on the stack closes a back
                            # edge, i.e. a cycle exists
                            return True
                    if visited.count(__[1]) < 1:
                        stack.append(__[1])
                        visited.append(__[1])
                        ss = __[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time.time()
        self.dfs(s, e)
        end = time.time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time.time()
        self.bfs(s)
        end = time.time()
        return end - begin


class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    # if no destination is given the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph.keys())[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for __ in self.graph[s]:
                    if visited.count(__[1]) < 1:
                        if __[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(__[1])
                            visited.append(__[1])
                            ss = __[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if you omit it or pass -1,
    # the count will be random from 10 to 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = math.floor(rand.random() * 10000) + 10
        for _ in range(c):
            # every vertex has max 100 edges
            e = math.floor(rand.random() * 102) + 1
            for __ in range(e):
                n = math.floor(rand.random() * c) + 1
                if n == _:
                    continue
                self.add_pair(_, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph.keys())[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for __ in self.graph[s]:
                    if visited.count(__[1]) < 1:
                        d.append(__[1])
                        visited.append(__[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph.keys())[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for __ in self.graph[s]:
                    if (
                        visited.count(__[1]) > 0
                        and __[1] != parent
                        and indirect_parents.count(__[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == __[1]:
                                anticipating_nodes.add(__[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(__[1]) < 1:
                        stack.append(__[1])
                        visited.append(__[1])
                        ss = __[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph.keys())[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for __ in self.graph[s]:
                    if (
                        visited.count(__[1]) > 0
                        and __[1] != parent
                        and indirect_parents.count(__[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == __[1]:
                                anticipating_nodes.add(__[1])
                                break
                            # any other node on the stack closes a back
                            # edge, i.e. a cycle exists
                            return True
                    if visited.count(__[1]) < 1:
                        stack.append(__[1])
                        visited.append(__[1])
                        ss = __[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time.time()
        self.dfs(s, e)
        end = time.time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time.time()
        self.bfs(s)
        end = time.time()
        return end - begin
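A small smoke test for the two classes above; node labels are arbitrary, and the expected outputs in the comments were traced by hand against the implementation:

dg = DirectedGraph()
dg.add_pair(1, 2)
dg.add_pair(2, 3, w=5)         # weighted directed edge
dg.add_pair(1, 3)
print(dg.all_nodes())          # [1, 2, 3]
print(dg.dfs(1))               # [1, 2, 3]
print(dg.in_degree(3))         # 2
print(dg.topological_sort(1))  # [3, 2, 1] (reverse finish order)

g = Graph()
g.add_pair('a', 'b')
g.add_pair('b', 'c')
print(g.bfs('a'))              # ['a', 'b', 'c']
print(g.degree('b'))           # 2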
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class Resource(Model):
    """The core properties of ARM resources.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource Id for the resource. Example -
     '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateDnsZoneName}'.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. Example -
     'Microsoft.Network/privateDnsZones'.
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs) -> None:
        super(Resource, self).__init__(**kwargs)
        self.id = None
        self.name = None
        self.type = None
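Generated msrest models like this one are normally extended by subclassing and widening _attribute_map. The sketch below is illustrative only: TrackedResource and its location key are invented for the example and are not part of the generated SDK.

class TrackedResource(Resource):
    """Hypothetical subclass adding one writable field to Resource."""

    _validation = dict(Resource._validation)  # id/name/type stay readonly

    _attribute_map = dict(Resource._attribute_map,
                          location={'key': 'location', 'type': 'str'})

    def __init__(self, **kwargs) -> None:
        super(TrackedResource, self).__init__(**kwargs)
        self.location = kwargs.get('location', None)


# msrest skips readonly attributes on serialization by default,
# so only 'location' appears on the wire:
print(TrackedResource(location='westus2').serialize())  # {'location': 'westus2'}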
from setuptools import setup, find_namespace_packages

setup(
    name='embit',
    version='0.1.1',
    license='MIT license',
    url='https://github.com/diybitcoinhardware/embit',
    description='yet another bitcoin library',
    long_description="A minimal bitcoin library for MicroPython and Python3 "
                     "with a focus on embedded systems.",
    author='Stepan Snigirev',
    author_email='snigirev.stepan@gmail.com',
    packages=find_namespace_packages("src", include=["*"]),
    package_dir={"": "src"},
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
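The package_dir={"": "src"} / find_namespace_packages("src") pair implies a conventional src layout. A sketch of the tree this setup expects — the file names below are illustrative assumptions, not taken from the repository:

# .
# ├── setup.py              <- the file above
# └── src/
#     └── embit/
#         ├── __init__.py
#         └── ...           <- library modules
#
# Typical local install, after which the package imports as usual:
#   pip install .
import embit  # assumes the install above succeeded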
import cv2
import numpy as np
import random

face_cascade = cv2.CascadeClassifier(
    '/usr/share/opencv4/haarcascades/haarcascade_frontalface_default.xml')


class FaceDetect:
    clahe = cv2.createCLAHE(clipLimit=1.5, tileGridSize=(4, 4))
    min_count = 10
    interval = 10

    def __init__(self):
        self.x = 0
        self.y = 0
        self.w = 0
        self.h = 0
        self.count = 0
        self.sumx = 0
        self.sumy = 0
        self.sumw = 0
        self.sumh = 0
        self.miss = 0
        self.skip = random.randrange(self.interval)
        self.precomputed = False

    def update_average(self, x, y, w, h, reset=False):
        if reset:
            self.count = 1
            self.sumx = x
            self.sumy = y
            self.sumw = w
            self.sumh = h
        else:
            self.count += 1
            self.sumx += x
            self.sumy += y
            self.sumw += w
            self.sumh += h
        self.x = self.sumx / self.count
        self.y = self.sumy / self.count
        self.w = self.sumw / self.count
        self.h = self.sumh / self.count

    def get_face(self, mode="average"):
        return (int(round(self.x)), int(round(self.y)),
                int(round(self.w)), int(round(self.h)))

    def find_face(self, raw_frame, time):
        if raw_frame is None:
            return None

        # draw filtered face box
        if self.count > 0:
            (x, y, w, h) = self.get_face()
            #print("  ", self.fx, self.fy, self.fw, self.fh)

        self.skip += 1
        if self.skip % self.interval != 0:
            return None

        if self.count < self.min_count:
            print("haven't found %d matches yet ..." % self.min_count)

        gray = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
        gray = self.clahe.apply(gray)
        cv2.imshow('gray', gray)
        faces = face_cascade.detectMultiScale(gray, 1.2, 5, minSize=(30, 30))
        #faces = face_cascade.detectMultiScale(gray, 1.2, 3)

        biggest_index = -1
        #biggest = 0.02 * raw_frame.shape[0] * raw_frame.shape[1]
        biggest = 0
        avg_size = self.w * self.h
        raw_frame = cv2.rectangle(np.array(raw_frame),
                                  (int(self.x), int(self.y)),
                                  (int(self.x + self.w), int(self.y + self.h)),
                                  (255, 255, 255), 2)
        for i, (x, y, w, h) in enumerate(faces):
            if w * h > biggest:
                biggest_index = i
                biggest = w * h
        if biggest_index >= 0:
            # current
            (x, y, w, h) = faces[biggest_index]
            raw_frame = cv2.rectangle(np.array(raw_frame), (x, y),
                                      (x + w, y + h), (255, 0, 0), 2)
            print(x, y, w, h)
            self.update_average(x, y, w, h)
            #self.update_filter(x, y, w, h)
        else:
            self.miss += 1
        return raw_frame

    def no_face(self, raw_frame):
        if raw_frame is not None:
            self.x = 0
            self.y = 0
            self.h = raw_frame.shape[0]
            self.w = raw_frame.shape[1]
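A minimal capture loop wired to the class above. The webcam index and the quit-key handling are assumptions for this sketch, not part of the original file:

import cv2

detector = FaceDetect()        # class defined above
cap = cv2.VideoCapture(0)      # assumption: default local webcam
while True:
    ok, frame = cap.read()
    if not ok:
        break
    # find_face returns None on skipped frames; fall back to the raw frame
    annotated = detector.find_face(frame, time=None)
    cv2.imshow('faces', annotated if annotated is not None else frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()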
class Solution:
    def brokenCalc(self, X: int, Y: int) -> int:
        num = 0
        base = 1
        while X * base < Y:
            num += 1
            base <<= 1
        diff = X * base - Y
        while diff:
            num += diff // base
            diff %= base
            base >>= 1
        return num


class Solution2:
    def brokenCalc(self, X: int, Y: int) -> int:
        num = 0
        while X < Y:
            if Y & 1:
                Y += 1
            else:
                Y >>= 1
            num += 1
        return num + X - Y
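Both classes solve the same task (it appears to be LeetCode 991, "Broken Calculator": starting from X, each step either doubles the value or decrements it, and you want the fewest steps to reach Y). A quick hand-checked sanity test:

for solver in (Solution(), Solution2()):
    assert solver.brokenCalc(2, 3) == 2       # 2 -> 4 -> 3
    assert solver.brokenCalc(5, 8) == 2       # 5 -> 4 -> 8
    assert solver.brokenCalc(3, 10) == 3      # 3 -> 6 -> 5 -> 10
    assert solver.brokenCalc(1024, 1) == 1023 # decrement only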
# Copyright (c) 2020
# Author: xiaoweixiang

import glob
import os
import subprocess
import sys
from distutils import log
from distutils.errors import DistutilsError

import pkg_resources
from setuptools.command.easy_install import easy_install
from setuptools.extern import six
from setuptools.wheel import Wheel

from .py31compat import TemporaryDirectory


def _fixup_find_links(find_links):
    """Ensure find-links option end-up being a list of strings."""
    if isinstance(find_links, six.string_types):
        return find_links.split()
    assert isinstance(find_links, (tuple, list))
    return find_links


def _legacy_fetch_build_egg(dist, req):
    """Fetch an egg needed for building.

    Legacy path using EasyInstall.
    """
    tmp_dist = dist.__class__({'script_args': ['easy_install']})
    opts = tmp_dist.get_option_dict('easy_install')
    opts.clear()
    opts.update(
        (k, v)
        for k, v in dist.get_option_dict('easy_install').items()
        if k in (
            # don't use any other settings
            'find_links', 'site_dirs', 'index_url',
            'optimize', 'site_dirs', 'allow_hosts',
        ))
    if dist.dependency_links:
        links = dist.dependency_links[:]
        if 'find_links' in opts:
            links = _fixup_find_links(opts['find_links'][1]) + links
        opts['find_links'] = ('setup', links)
    install_dir = dist.get_egg_cache_dir()
    cmd = easy_install(
        tmp_dist, args=["x"], install_dir=install_dir,
        exclude_scripts=True,
        always_copy=False, build_directory=None, editable=False,
        upgrade=False, multi_version=True, no_report=True, user=False
    )
    cmd.ensure_finalized()
    return cmd.easy_install(req)


def fetch_build_egg(dist, req):
    """Fetch an egg needed for building.

    Use pip/wheel to fetch/build a wheel.
    """
    # Check pip is available.
    try:
        pkg_resources.get_distribution('pip')
    except pkg_resources.DistributionNotFound:
        dist.announce(
            'WARNING: The pip package is not available, falling back '
            'to EasyInstall for handling setup_requires/test_requires; '
            'this is deprecated and will be removed in a future version.',
            log.WARN
        )
        return _legacy_fetch_build_egg(dist, req)
    # Warn if wheel is not.
    try:
        pkg_resources.get_distribution('wheel')
    except pkg_resources.DistributionNotFound:
        dist.announce('WARNING: The wheel package is not available.', log.WARN)
    # Ignore environment markers; if supplied, it is required.
    req = strip_marker(req)
    # Take easy_install options into account, but do not override relevant
    # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll
    # take precedence.
    opts = dist.get_option_dict('easy_install')
    if 'allow_hosts' in opts:
        raise DistutilsError('the `allow-hosts` option is not supported '
                             'when using pip to install requirements.')
    if 'PIP_QUIET' in os.environ or 'PIP_VERBOSE' in os.environ:
        quiet = False
    else:
        quiet = True
    if 'PIP_INDEX_URL' in os.environ:
        index_url = None
    elif 'index_url' in opts:
        index_url = opts['index_url'][1]
    else:
        index_url = None
    if 'find_links' in opts:
        find_links = _fixup_find_links(opts['find_links'][1])[:]
    else:
        find_links = []
    if dist.dependency_links:
        find_links.extend(dist.dependency_links)
    eggs_dir = os.path.realpath(dist.get_egg_cache_dir())
    environment = pkg_resources.Environment()
    for egg_dist in pkg_resources.find_distributions(eggs_dir):
        if egg_dist in req and environment.can_add(egg_dist):
            return egg_dist
    with TemporaryDirectory() as tmpdir:
        cmd = [
            sys.executable, '-m', 'pip',
            '--disable-pip-version-check',
            'wheel', '--no-deps',
            '-w', tmpdir,
        ]
        if quiet:
            cmd.append('--quiet')
        if index_url is not None:
            cmd.extend(('--index-url', index_url))
        if find_links is not None:
            for link in find_links:
                cmd.extend(('--find-links', link))
        # If requirement is a PEP 508 direct URL, directly pass
        # the URL to pip, as `req @ url` does not work on the
        # command line.
        if req.url:
            cmd.append(req.url)
        else:
            cmd.append(str(req))
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError as e:
            raise DistutilsError(str(e))
        wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])
        dist_location = os.path.join(eggs_dir, wheel.egg_name())
        wheel.install_as_egg(dist_location)
        dist_metadata = pkg_resources.PathMetadata(
            dist_location, os.path.join(dist_location, 'EGG-INFO'))
        dist = pkg_resources.Distribution.from_filename(
            dist_location, metadata=dist_metadata)
        return dist


def strip_marker(req):
    """
    Return a new requirement without the environment marker to avoid
    calling pip with something like `babel; extra == "i18n"`, which
    would always be ignored.
    """
    # create a copy to avoid mutating the input
    req = pkg_resources.Requirement.parse(str(req))
    req.marker = None
    return req
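As a concrete illustration of strip_marker — a hand-run sketch assuming a modern pkg_resources whose Requirement exposes a marker attribute:

import pkg_resources

req = pkg_resources.Requirement.parse('babel; extra == "i18n"')
print(req.marker)             # extra == "i18n"
bare = strip_marker(req)      # function defined above
print(str(bare))              # babel
assert req.marker is not None # the input requirement is left untouched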
""" Ory Kratos API Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501 The version of the OpenAPI document: v0.8.2-alpha.1 Contact: hi@ory.sh Generated by: https://openapi-generator.tech """ import sys import unittest import ory_kratos_client from ory_kratos_client.model.ui_text import UiText globals()['UiText'] = UiText from ory_kratos_client.model.ui_node_input_attributes import UiNodeInputAttributes class TestUiNodeInputAttributes(unittest.TestCase): """UiNodeInputAttributes unit test stubs""" def setUp(self): pass def tearDown(self): pass def testUiNodeInputAttributes(self): """Test UiNodeInputAttributes""" # FIXME: construct object with mandatory attributes with example values # model = UiNodeInputAttributes() # noqa: E501 pass if __name__ == '__main__': unittest.main()
# pylint: disable=W0231,E1101 import collections from datetime import timedelta import functools import gc import json import operator from textwrap import dedent import warnings import weakref import numpy as np from pandas._libs import Timestamp, iNaT, properties import pandas.compat as compat from pandas.compat import ( cPickle as pkl, isidentifier, lrange, lzip, map, set_function_name, string_types, to_str, zip) from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import ( Appender, Substitution, rewrite_axis_style_signature) from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs from pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask from pandas.core.dtypes.common import ( ensure_int64, ensure_object, is_bool, is_bool_dtype, is_datetime64_any_dtype, is_datetime64tz_dtype, is_dict_like, is_extension_array_dtype, is_integer, is_list_like, is_number, is_numeric_dtype, is_object_dtype, is_period_arraylike, is_re_compilable, is_scalar, is_timedelta64_dtype, pandas_dtype) from pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries from pandas.core.dtypes.inference import is_hashable from pandas.core.dtypes.missing import isna, notna import pandas as pd from pandas.core import config, missing, nanops import pandas.core.algorithms as algos from pandas.core.base import PandasObject, SelectionMixin import pandas.core.common as com from pandas.core.index import ( Index, InvalidIndexError, MultiIndex, RangeIndex, ensure_index) from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.period import Period, PeriodIndex import pandas.core.indexing as indexing from pandas.core.internals import BlockManager from pandas.core.ops import _align_method_FRAME from pandas.io.formats.format import DataFrameFormatter, format_percentiles from pandas.io.formats.printing import pprint_thing from pandas.tseries.frequencies import to_offset # goal is to be able to define the docs close to function, while still being # able to share _shared_docs = dict() _shared_doc_kwargs = dict( axes='keywords for axes', klass='NDFrame', axes_single_arg='int or labels for object', args_transpose='axes to permute (int or label for object)', optional_by=""" by : str or list of str Name or list of names to sort by""") # sentinel value to use as kwarg in place of None when None has special meaning # and needs to be distinguished from a user explicitly passing None. sentinel = object() def _single_replace(self, to_replace, method, inplace, limit): """ Replaces values in a Series using the fill method specified when no replacement value is given in the replace method """ if self.ndim != 1: raise TypeError('cannot replace {0} with method {1} on a {2}' .format(to_replace, method, type(self).__name__)) orig_dtype = self.dtype result = self if inplace else self.copy() fill_f = missing.get_fill_func(method) mask = missing.mask_missing(result.values, to_replace) values = fill_f(result.values, limit=limit, mask=mask) if values.dtype == orig_dtype and inplace: return result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self) if inplace: self._update_inplace(result._data) return return result class NDFrame(PandasObject, SelectionMixin): """ N-dimensional analogue of DataFrame. 
Store multi-dimensional in a size-mutable, labeled data structure Parameters ---------- data : BlockManager axes : list copy : boolean, default False """ _internal_names = ['_data', '_cacher', '_item_cache', '_cache', '_is_copy', '_subtyp', '_name', '_index', '_default_kind', '_default_fill_value', '_metadata', '__array_struct__', '__array_interface__'] _internal_names_set = set(_internal_names) _accessors = frozenset() _deprecations = frozenset(['as_blocks', 'blocks', 'convert_objects', 'is_copy']) _metadata = [] _is_copy = None # dummy attribute so that datetime.__eq__(Series/DataFrame) defers # by returning NotImplemented timetuple = None # ---------------------------------------------------------------------- # Constructors def __init__(self, data, axes=None, copy=False, dtype=None, fastpath=False): if not fastpath: if dtype is not None: data = data.astype(dtype) elif copy: data = data.copy() if axes is not None: for i, ax in enumerate(axes): data = data.reindex_axis(ax, axis=i) object.__setattr__(self, '_is_copy', None) object.__setattr__(self, '_data', data) object.__setattr__(self, '_item_cache', {}) def _init_mgr(self, mgr, axes=None, dtype=None, copy=False): """ passed a manager and a axes dict """ for a, axe in axes.items(): if axe is not None: mgr = mgr.reindex_axis(axe, axis=self._get_block_manager_axis(a), copy=False) # make a copy if explicitly requested if copy: mgr = mgr.copy() if dtype is not None: # avoid further copies if we can if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype: mgr = mgr.astype(dtype=dtype) return mgr # ---------------------------------------------------------------------- @property def is_copy(self): """ Return the copy. """ warnings.warn("Attribute 'is_copy' is deprecated and will be removed " "in a future version.", FutureWarning, stacklevel=2) return self._is_copy @is_copy.setter def is_copy(self, msg): warnings.warn("Attribute 'is_copy' is deprecated and will be removed " "in a future version.", FutureWarning, stacklevel=2) self._is_copy = msg def _validate_dtype(self, dtype): """ validate the passed dtype """ if dtype is not None: dtype = pandas_dtype(dtype) # a compound dtype if dtype.kind == 'V': raise NotImplementedError("compound dtypes are not implemented" " in the {0} constructor" .format(self.__class__.__name__)) return dtype # ---------------------------------------------------------------------- # Construction @property def _constructor(self): """Used when a manipulation result has the same dimensions as the original. """ raise AbstractMethodError(self) @property def _constructor_sliced(self): """Used when a manipulation result has one lower dimension(s) as the original, such as DataFrame single columns slicing. """ raise AbstractMethodError(self) @property def _constructor_expanddim(self): """Used when a manipulation result has one higher dimension as the original, such as Series.to_frame() and DataFrame.to_panel() """ raise NotImplementedError # ---------------------------------------------------------------------- # Axis @classmethod def _setup_axes(cls, axes, info_axis=None, stat_axis=None, aliases=None, slicers=None, axes_are_reversed=False, build_axes=True, ns=None, docs=None): """Provide axes setup for the major PandasObjects. 
Parameters ---------- axes : the names of the axes in order (lowest to highest) info_axis_num : the axis of the selector dimension (int) stat_axis_num : the number of axis for the default stats (int) aliases : other names for a single axis (dict) slicers : how axes slice to others (dict) axes_are_reversed : boolean whether to treat passed axes as reversed (DataFrame) build_axes : setup the axis properties (default True) """ cls._AXIS_ORDERS = axes cls._AXIS_NUMBERS = {a: i for i, a in enumerate(axes)} cls._AXIS_LEN = len(axes) cls._AXIS_ALIASES = aliases or dict() cls._AXIS_IALIASES = {v: k for k, v in cls._AXIS_ALIASES.items()} cls._AXIS_NAMES = dict(enumerate(axes)) cls._AXIS_SLICEMAP = slicers or None cls._AXIS_REVERSED = axes_are_reversed # typ setattr(cls, '_typ', cls.__name__.lower()) # indexing support cls._ix = None if info_axis is not None: cls._info_axis_number = info_axis cls._info_axis_name = axes[info_axis] if stat_axis is not None: cls._stat_axis_number = stat_axis cls._stat_axis_name = axes[stat_axis] # setup the actual axis if build_axes: def set_axis(a, i): setattr(cls, a, properties.AxisProperty(i, docs.get(a, a))) cls._internal_names_set.add(a) if axes_are_reversed: m = cls._AXIS_LEN - 1 for i, a in cls._AXIS_NAMES.items(): set_axis(a, m - i) else: for i, a in cls._AXIS_NAMES.items(): set_axis(a, i) assert not isinstance(ns, dict) def _construct_axes_dict(self, axes=None, **kwargs): """Return an axes dictionary for myself.""" d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)} d.update(kwargs) return d @staticmethod def _construct_axes_dict_from(self, axes, **kwargs): """Return an axes dictionary for the passed axes.""" d = {a: ax for a, ax in zip(self._AXIS_ORDERS, axes)} d.update(kwargs) return d def _construct_axes_dict_for_slice(self, axes=None, **kwargs): """Return an axes dictionary for myself.""" d = {self._AXIS_SLICEMAP[a]: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)} d.update(kwargs) return d def _construct_axes_from_arguments( self, args, kwargs, require_all=False, sentinel=None): """Construct and returns axes if supplied in args/kwargs. If require_all, raise if all axis arguments are not supplied return a tuple of (axes, kwargs). sentinel specifies the default parameter when an axis is not supplied; useful to distinguish when a user explicitly passes None in scenarios where None has special meaning. 
""" # construct the args args = list(args) for a in self._AXIS_ORDERS: # if we have an alias for this axis alias = self._AXIS_IALIASES.get(a) if alias is not None: if a in kwargs: if alias in kwargs: raise TypeError("arguments are mutually exclusive " "for [%s,%s]" % (a, alias)) continue if alias in kwargs: kwargs[a] = kwargs.pop(alias) continue # look for a argument by position if a not in kwargs: try: kwargs[a] = args.pop(0) except IndexError: if require_all: raise TypeError("not enough/duplicate arguments " "specified!") axes = {a: kwargs.pop(a, sentinel) for a in self._AXIS_ORDERS} return axes, kwargs @classmethod def _from_axes(cls, data, axes, **kwargs): # for construction from BlockManager if isinstance(data, BlockManager): return cls(data, **kwargs) else: if cls._AXIS_REVERSED: axes = axes[::-1] d = cls._construct_axes_dict_from(cls, axes, copy=False) d.update(kwargs) return cls(data, **d) @classmethod def _get_axis_number(cls, axis): axis = cls._AXIS_ALIASES.get(axis, axis) if is_integer(axis): if axis in cls._AXIS_NAMES: return axis else: try: return cls._AXIS_NUMBERS[axis] except KeyError: pass raise ValueError('No axis named {0} for object type {1}' .format(axis, type(cls))) @classmethod def _get_axis_name(cls, axis): axis = cls._AXIS_ALIASES.get(axis, axis) if isinstance(axis, string_types): if axis in cls._AXIS_NUMBERS: return axis else: try: return cls._AXIS_NAMES[axis] except KeyError: pass raise ValueError('No axis named {0} for object type {1}' .format(axis, type(cls))) def _get_axis(self, axis): name = self._get_axis_name(axis) return getattr(self, name) @classmethod def _get_block_manager_axis(cls, axis): """Map the axis to the block_manager axis.""" axis = cls._get_axis_number(axis) if cls._AXIS_REVERSED: m = cls._AXIS_LEN - 1 return m - axis return axis def _get_axis_resolvers(self, axis): # index or columns axis_index = getattr(self, axis) d = dict() prefix = axis[0] for i, name in enumerate(axis_index.names): if name is not None: key = level = name else: # prefix with 'i' or 'c' depending on the input axis # e.g., you must do ilevel_0 for the 0th level of an unnamed # multiiindex key = '{prefix}level_{i}'.format(prefix=prefix, i=i) level = i level_values = axis_index.get_level_values(level) s = level_values.to_series() s.index = axis_index d[key] = s # put the index/columns itself in the dict if isinstance(axis_index, MultiIndex): dindex = axis_index else: dindex = axis_index.to_series() d[axis] = dindex return d def _get_index_resolvers(self): d = {} for axis_name in self._AXIS_ORDERS: d.update(self._get_axis_resolvers(axis_name)) return d @property def _info_axis(self): return getattr(self, self._info_axis_name) @property def _stat_axis(self): return getattr(self, self._stat_axis_name) @property def shape(self): """ Return a tuple of axis dimensions """ return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) @property def axes(self): """ Return index label(s) of the internal NDFrame """ # we do it this way because if we have reversed axes, then # the block manager shows then reversed return [self._get_axis(a) for a in self._AXIS_ORDERS] @property def ndim(self): """ Return an int representing the number of axes / array dimensions. Return 1 if Series. Otherwise return 2 if DataFrame. See Also -------- ndarray.ndim : Number of array dimensions. 
Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.ndim 1 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.ndim 2 """ return self._data.ndim @property def size(self): """ Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. See Also -------- ndarray.size : Number of elements in the array. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.size 3 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.size 4 """ return np.prod(self.shape) @property def _selected_obj(self): """ internal compat with SelectionMixin """ return self @property def _obj_with_exclusions(self): """ internal compat with SelectionMixin """ return self def _expand_axes(self, key): new_axes = [] for k, ax in zip(key, self.axes): if k not in ax: if type(k) != ax.dtype.type: ax = ax.astype('O') new_axes.append(ax.insert(len(ax), k)) else: new_axes.append(ax) return new_axes def set_axis(self, labels, axis=0, inplace=None): """ Assign desired index to given axis. Indexes for column or row labels can be changed by assigning a list-like or Index. .. versionchanged:: 0.21.0 The signature is now `labels` and `axis`, consistent with the rest of pandas API. Previously, the `axis` and `labels` arguments were respectively the first and second positional arguments. Parameters ---------- labels : list-like, Index The values for the new index. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to update. The value 0 identifies the rows, and 1 identifies the columns. inplace : bool, default None Whether to return a new %(klass)s instance. .. warning:: ``inplace=None`` currently falls back to to True, but in a future version, will default to False. Use inplace=True explicitly rather than relying on the default. Returns ------- renamed : %(klass)s or None An object of same type as caller if inplace=False, None otherwise. See Also -------- DataFrame.rename_axis : Alter the name of the index or columns. Examples -------- **Series** >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.set_axis(['a', 'b', 'c'], axis=0, inplace=False) a 1 b 2 c 3 dtype: int64 The original object is not modified. >>> s 0 1 1 2 2 3 dtype: int64 **DataFrame** >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index', inplace=False) A B a 1 4 b 2 5 c 3 6 Change the column labels. >>> df.set_axis(['I', 'II'], axis='columns', inplace=False) I II 0 1 4 1 2 5 2 3 6 Now, update the labels inplace. >>> df.set_axis(['i', 'ii'], axis='columns', inplace=True) >>> df i ii 0 1 4 1 2 5 2 3 6 """ if is_scalar(labels): warnings.warn( 'set_axis now takes "labels" as first argument, and ' '"axis" as named parameter. 
The old form, with "axis" as ' 'first parameter and \"labels\" as second, is still supported ' 'but will be deprecated in a future version of pandas.', FutureWarning, stacklevel=2) labels, axis = axis, labels if inplace is None: warnings.warn( 'set_axis currently defaults to operating inplace.\nThis ' 'will change in a future version of pandas, use ' 'inplace=True to avoid this warning.', FutureWarning, stacklevel=2) inplace = True if inplace: setattr(self, self._get_axis_name(axis), labels) else: obj = self.copy() obj.set_axis(labels, axis=axis, inplace=True) return obj def _set_axis(self, axis, labels): self._data.set_axis(axis, labels) self._clear_item_cache() def transpose(self, *args, **kwargs): """ Permute the dimensions of the %(klass)s Parameters ---------- args : %(args_transpose)s copy : boolean, default False Make a copy of the underlying data. Mixed-dtype data will always result in a copy Returns ------- y : same as input Examples -------- >>> p.transpose(2, 0, 1) >>> p.transpose(2, 0, 1, copy=True) """ # construct the args axes, kwargs = self._construct_axes_from_arguments(args, kwargs, require_all=True) axes_names = tuple(self._get_axis_name(axes[a]) for a in self._AXIS_ORDERS) axes_numbers = tuple(self._get_axis_number(axes[a]) for a in self._AXIS_ORDERS) # we must have unique axes if len(axes) != len(set(axes)): raise ValueError('Must specify %s unique axes' % self._AXIS_LEN) new_axes = self._construct_axes_dict_from(self, [self._get_axis(x) for x in axes_names]) new_values = self.values.transpose(axes_numbers) if kwargs.pop('copy', None) or (len(args) and args[-1]): new_values = new_values.copy() nv.validate_transpose_for_generic(self, kwargs) return self._constructor(new_values, **new_axes).__finalize__(self) def swapaxes(self, axis1, axis2, copy=True): """ Interchange axes and swap values axes appropriately. Returns ------- y : same as input """ i = self._get_axis_number(axis1) j = self._get_axis_number(axis2) if i == j: if copy: return self.copy() return self mapping = {i: j, j: i} new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN)) new_values = self.values.swapaxes(i, j) if copy: new_values = new_values.copy() return self._constructor(new_values, *new_axes).__finalize__(self) def droplevel(self, level, axis=0): """ Return DataFrame with requested index / column level(s) removed. .. versionadded:: 0.24.0 Parameters ---------- level : int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis : {0 or 'index', 1 or 'columns'}, default 0 Returns ------- DataFrame.droplevel() Examples -------- >>> df = pd.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12] ... ]).set_index([0, 1]).rename_axis(['a', 'b']) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... ], names=['level_1', 'level_2']) >>> df level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level2', axis=1) level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ labels = self._get_axis(axis) new_labels = labels.droplevel(level) result = self.set_axis(new_labels, axis=axis, inplace=False) return result def pop(self, item): """ Return item and drop from frame. Raise KeyError if not found. Parameters ---------- item : str Column label to be popped Returns ------- popped : Series Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... 
('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=('name', 'class', 'max_speed')) >>> df name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.pop('class') 0 bird 1 bird 2 mammal 3 mammal Name: class, dtype: object >>> df name max_speed 0 falcon 389.0 1 parrot 24.0 2 lion 80.5 3 monkey NaN """ result = self[item] del self[item] try: result._reset_cacher() except AttributeError: pass return result def squeeze(self, axis=None): """ Squeeze 1 dimensional axis objects into scalars. Series or DataFrames with a single element are squeezed to a scalar. DataFrames with a single column or a single row are squeezed to a Series. Otherwise the object is unchanged. This method is most useful when you don't know if your object is a Series or DataFrame, but you do know it has just a single column. In that case you can safely call `squeeze` to ensure you have a Series. Parameters ---------- axis : {0 or 'index', 1 or 'columns', None}, default None A specific axis to squeeze. By default, all length-1 axes are squeezed. .. versionadded:: 0.20.0 Returns ------- DataFrame, Series, or scalar The projection after squeezing `axis` or all the axes. See Also -------- Series.iloc : Integer-location based indexing for selecting scalars. DataFrame.iloc : Integer-location based indexing for selecting Series. Series.to_frame : Inverse of DataFrame.squeeze for a single-column DataFrame. Examples -------- >>> primes = pd.Series([2, 3, 5, 7]) Slicing might produce a Series with a single value: >>> even_primes = primes[primes % 2 == 0] >>> even_primes 0 2 dtype: int64 >>> even_primes.squeeze() 2 Squeezing objects with more than one value in every axis does nothing: >>> odd_primes = primes[primes % 2 == 1] >>> odd_primes 1 3 2 5 3 7 dtype: int64 >>> odd_primes.squeeze() 1 3 2 5 3 7 dtype: int64 Squeezing is even more effective when used with DataFrames. >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) >>> df a b 0 1 2 1 3 4 Slicing a single column will produce a DataFrame with the columns having only one value: >>> df_a = df[['a']] >>> df_a a 0 1 1 3 So the columns can be squeezed down, resulting in a Series: >>> df_a.squeeze('columns') 0 1 1 3 Name: a, dtype: int64 Slicing a single row from a single column will produce a single scalar DataFrame: >>> df_0a = df.loc[df.index < 1, ['a']] >>> df_0a a 0 1 Squeezing the rows produces a single scalar Series: >>> df_0a.squeeze('rows') a 1 Name: 0, dtype: int64 Squeezing all axes wil project directly into a scalar: >>> df_0a.squeeze() 1 """ axis = (self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)) try: return self.iloc[ tuple(0 if i in axis and len(a) == 1 else slice(None) for i, a in enumerate(self.axes))] except Exception: return self def swaplevel(self, i=-2, j=-1, axis=0): """ Swap levels i and j in a MultiIndex on a particular axis Parameters ---------- i, j : int, string (can be mixed) Level of index to be swapped. Can pass level name as string. Returns ------- swapped : same type as caller (new object) .. versionchanged:: 0.18.1 The indexes ``i`` and ``j`` are now optional, and default to the two innermost levels of the index. """ axis = self._get_axis_number(axis) result = self.copy() labels = result._data.axes[axis] result._data.set_axis(axis, labels.swaplevel(i, j)) return result # ---------------------------------------------------------------------- # Rename def rename(self, *args, **kwargs): """ Alter axes input function or functions. 
Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value (Series only). Parameters ---------- %(axes)s : scalar, list-like, dict-like or function, optional Scalar or list-like will alter the ``Series.name`` attribute, and raise on DataFrame or Panel. dict-like or functions are transformations to apply to that axis' values copy : boolean, default True Also copy underlying data inplace : boolean, default False Whether to return a new %(klass)s. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. Returns ------- renamed : %(klass)s (new object) See Also -------- pandas.NDFrame.rename_axis Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 Since ``DataFrame`` doesn't have a ``.name`` attribute, only mapping-type arguments are allowed. >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(2) Traceback (most recent call last): ... TypeError: 'int' object is not callable ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. >>> df.rename(index=str, columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 >>> df.rename(index=str, columns={"A": "a", "C": "c"}) a B 0 1 4 1 2 5 2 3 6 Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6 See the :ref:`user guide <basics.rename>` for more. """ axes, kwargs = self._construct_axes_from_arguments(args, kwargs) copy = kwargs.pop('copy', True) inplace = kwargs.pop('inplace', False) level = kwargs.pop('level', None) axis = kwargs.pop('axis', None) if axis is not None: # Validate the axis self._get_axis_number(axis) if kwargs: raise TypeError('rename() got an unexpected keyword ' 'argument "{0}"'.format(list(kwargs.keys())[0])) if com.count_not_none(*axes.values()) == 0: raise TypeError('must pass an index to rename') self._consolidate_inplace() result = self if inplace else self.copy(deep=copy) # start in the axis order to eliminate too many copies for axis in lrange(self._AXIS_LEN): v = axes.get(self._AXIS_NAMES[axis]) if v is None: continue f = com._get_rename_function(v) baxis = self._get_block_manager_axis(axis) if level is not None: level = self.axes[axis]._get_level_number(level) result._data = result._data.rename_axis(f, axis=baxis, copy=copy, level=level) result._clear_item_cache() if inplace: self._update_inplace(result._data) else: return result.__finalize__(self) @rewrite_axis_style_signature('mapper', [('copy', True), ('inplace', False)]) def rename_axis(self, mapper=sentinel, **kwargs): """ Set the name of the axis for the index or columns. Parameters ---------- mapper : scalar, list-like, optional Value to set the axis name attribute. index, columns : scalar, list-like, dict-like or function, optional A scalar, list-like, dict-like or functions transformations to apply to that axis' values. 
            Use either ``mapper`` and ``axis`` to
            specify the axis to target with ``mapper``, or ``index``
            and/or ``columns``.

            .. versionchanged:: 0.24.0

        axis : {0 or 'index', 1 or 'columns'}, default 0
            The axis to rename.
        copy : bool, default True
            Also copy underlying data.
        inplace : bool, default False
            Modifies the object directly, instead of creating a new Series
            or DataFrame.

        Returns
        -------
        Series, DataFrame, or None
            The same type as the caller or None if `inplace` is True.

        See Also
        --------
        Series.rename : Alter Series index labels or name.
        DataFrame.rename : Alter DataFrame index labels or name.
        Index.rename : Set new names on index.

        Notes
        -----
        Prior to version 0.21.0, ``rename_axis`` could also be used to change
        the axis *labels* by passing a mapping or scalar. This behavior is
        deprecated and will be removed in a future version. Use ``rename``
        instead.

        ``DataFrame.rename_axis`` supports two calling conventions

        * ``(index=index_mapper, columns=columns_mapper, ...)``
        * ``(mapper, axis={'index', 'columns'}, ...)``

        The first calling convention will only modify the names of
        the index and/or the names of the Index object that is the columns.
        In this case, the parameter ``copy`` is ignored.

        The second calling convention will modify the names of the
        corresponding index if mapper is a list or a scalar.
        However, if mapper is dict-like or a function, it will use the
        deprecated behavior of modifying the axis *labels*.

        We *highly* recommend using keyword arguments to clarify your
        intent.

        Examples
        --------
        **Series**

        >>> s = pd.Series(["dog", "cat", "monkey"])
        >>> s
        0       dog
        1       cat
        2    monkey
        dtype: object
        >>> s.rename_axis("animal")
        animal
        0       dog
        1       cat
        2    monkey
        dtype: object

        **DataFrame**

        >>> df = pd.DataFrame({"num_legs": [4, 4, 2],
        ...                    "num_arms": [0, 0, 2]},
        ...                   ["dog", "cat", "monkey"])
        >>> df
                num_legs  num_arms
        dog            4         0
        cat            4         0
        monkey         2         2
        >>> df = df.rename_axis("animal")
        >>> df
                num_legs  num_arms
        animal
        dog            4         0
        cat            4         0
        monkey         2         2
        >>> df = df.rename_axis("limbs", axis="columns")
        >>> df
        limbs   num_legs  num_arms
        animal
        dog            4         0
        cat            4         0
        monkey         2         2

        **MultiIndex**

        >>> df.index = pd.MultiIndex.from_product([['mammal'],
        ...                                        ['dog', 'cat', 'monkey']],
        ...                                       names=['type', 'name'])
        >>> df
        limbs          num_legs  num_arms
        type   name
        mammal dog            4         0
               cat            4         0
               monkey         2         2

        >>> df.rename_axis(index={'type': 'class'})
        limbs          num_legs  num_arms
        class  name
        mammal dog            4         0
               cat            4         0
               monkey         2         2

        >>> df.rename_axis(columns=str.upper)
        LIMBS          num_legs  num_arms
        type   name
        mammal dog            4         0
               cat            4         0
               monkey         2         2
        """
        axes, kwargs = self._construct_axes_from_arguments(
            (), kwargs, sentinel=sentinel)
        copy = kwargs.pop('copy', True)
        inplace = kwargs.pop('inplace', False)
        axis = kwargs.pop('axis', 0)
        if axis is not None:
            axis = self._get_axis_number(axis)

        if kwargs:
            raise TypeError('rename_axis() got an unexpected keyword '
                            'argument "{0}"'.format(list(kwargs.keys())[0]))

        inplace = validate_bool_kwarg(inplace, 'inplace')

        if (mapper is not sentinel):
            # Use v0.23 behavior if a scalar or list
            non_mapper = is_scalar(mapper) or (is_list_like(mapper) and
                                               not is_dict_like(mapper))
            if non_mapper:
                return self._set_axis_name(mapper, axis=axis, inplace=inplace)
            else:
                # Deprecated (v0.21) behavior is if mapper is specified,
                # and not a list or scalar, then call rename
                msg = ("Using 'rename_axis' to alter labels is deprecated. "
                       "Use '.rename' instead")
                warnings.warn(msg, FutureWarning, stacklevel=3)

                axis = self._get_axis_name(axis)
                d = {'copy': copy, 'inplace': inplace}
                d[axis] = mapper
                return self.rename(**d)
        else:
            # Use new behavior: index and/or columns is specified
            result = self if inplace else self.copy(deep=copy)

            for axis in lrange(self._AXIS_LEN):
                v = axes.get(self._AXIS_NAMES[axis])
                if v is sentinel:
                    continue
                non_mapper = is_scalar(v) or (is_list_like(v) and
                                              not is_dict_like(v))
                if non_mapper:
                    newnames = v
                else:
                    f = com._get_rename_function(v)
                    curnames = self._get_axis(axis).names
                    newnames = [f(name) for name in curnames]
                result._set_axis_name(newnames, axis=axis, inplace=True)
            if not inplace:
                return result

    def _set_axis_name(self, name, axis=0, inplace=False):
        """
        Set the name(s) of the axis.

        Parameters
        ----------
        name : str or list of str
            Name(s) to set.
        axis : {0 or 'index', 1 or 'columns'}, default 0
            The axis to set the label. The value 0 or 'index' specifies index,
            and the value 1 or 'columns' specifies columns.
        inplace : bool, default False
            If `True`, do operation inplace and return None.

            .. versionadded:: 0.21.0

        Returns
        -------
        Series, DataFrame, or None
            The same type as the caller or `None` if `inplace` is `True`.

        See Also
        --------
        DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
        Series.rename : Alter the index labels or set the index name
            of :class:`Series`.
        Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.

        Examples
        --------
        >>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
        ...                   ["dog", "cat", "monkey"])
        >>> df
                num_legs
        dog            4
        cat            4
        monkey         2
        >>> df._set_axis_name("animal")
                num_legs
        animal
        dog            4
        cat            4
        monkey         2
        >>> df.index = pd.MultiIndex.from_product(
        ...                [["mammal"], ['dog', 'cat', 'monkey']])
        >>> df._set_axis_name(["type", "name"])
                       num_legs
        type   name
        mammal dog            4
               cat            4
               monkey         2
        """
        axis = self._get_axis_number(axis)
        idx = self._get_axis(axis).set_names(name)

        inplace = validate_bool_kwarg(inplace, 'inplace')
        renamed = self if inplace else self.copy()
        renamed.set_axis(idx, axis=axis, inplace=True)
        if not inplace:
            return renamed

    # ----------------------------------------------------------------------
    # Comparison Methods

    def _indexed_same(self, other):
        return all(self._get_axis(a).equals(other._get_axis(a))
                   for a in self._AXIS_ORDERS)

    def equals(self, other):
        """
        Test whether two objects contain the same elements.

        This function allows two Series or DataFrames to be compared against
        each other to see if they have the same shape and elements. NaNs in
        the same location are considered equal. The column headers do not
        need to have the same type, but the elements within the columns must
        be the same dtype.

        Parameters
        ----------
        other : Series or DataFrame
            The other Series or DataFrame to be compared with the first.

        Returns
        -------
        bool
            True if all elements are the same in both objects, False
            otherwise.

        See Also
        --------
        Series.eq : Compare two Series objects of the same length
            and return a Series where each element is True if the element
            in each Series is equal, False otherwise.
        DataFrame.eq : Compare two DataFrame objects of the same shape and
            return a DataFrame where each element is True if the respective
            element in each DataFrame is equal, False otherwise.
        assert_series_equal : Return True if left and right Series are equal,
            False otherwise.
        assert_frame_equal : Return True if left and right DataFrames are
            equal, False otherwise.
        numpy.array_equal : Return True if two arrays have the same shape
            and elements, False otherwise.

        Notes
        -----
        This function requires that the elements have the same dtype as their
        respective elements in the other Series or DataFrame.
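        For example, an ``int64`` column is not considered equal to a
        ``float64`` column even when the stored values match (see the
        ``different_data_type`` example below).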
However, the column labels do not need to have the same type, as long as they are still considered equal. Examples -------- >>> df = pd.DataFrame({1: [10], 2: [20]}) >>> df 1 2 0 10 20 DataFrames df and exactly_equal have the same types and values for their elements and column labels, which will return True. >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]}) >>> exactly_equal 1 2 0 10 20 >>> df.equals(exactly_equal) True DataFrames df and different_column_type have the same element types and values, but have different types for the column labels, which will still return True. >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]}) >>> different_column_type 1.0 2.0 0 10 20 >>> df.equals(different_column_type) True DataFrames df and different_data_type have different types for the same values for their elements, and will return False even though their column labels are the same values and types. >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]}) >>> different_data_type 1 2 0 10.0 20.0 >>> df.equals(different_data_type) False """ if not isinstance(other, self._constructor): return False return self._data.equals(other._data) # ------------------------------------------------------------------------- # Unary Methods def __neg__(self): values = com.values_from_object(self) if is_bool_dtype(values): arr = operator.inv(values) elif (is_numeric_dtype(values) or is_timedelta64_dtype(values) or is_object_dtype(values)): arr = operator.neg(values) else: raise TypeError("Unary negative expects numeric dtype, not {}" .format(values.dtype)) return self.__array_wrap__(arr) def __pos__(self): values = com.values_from_object(self) if (is_bool_dtype(values) or is_period_arraylike(values)): arr = values elif (is_numeric_dtype(values) or is_timedelta64_dtype(values) or is_object_dtype(values)): arr = operator.pos(values) else: raise TypeError("Unary plus expects numeric dtype, not {}" .format(values.dtype)) return self.__array_wrap__(arr) def __invert__(self): try: arr = operator.inv(com.values_from_object(self)) return self.__array_wrap__(arr) except Exception: # inv fails with 0 len if not np.prod(self.shape): return self raise def __nonzero__(self): raise ValueError("The truth value of a {0} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." .format(self.__class__.__name__)) __bool__ = __nonzero__ def bool(self): """ Return the bool of a single element PandasObject. This must be a boolean scalar value, either True or False. Raise a ValueError if the PandasObject does not have exactly 1 element, or that element is not boolean """ v = self.squeeze() if isinstance(v, (bool, np.bool_)): return bool(v) elif is_scalar(v): raise ValueError("bool cannot act on a non-boolean single element " "{0}".format(self.__class__.__name__)) self.__nonzero__() def __abs__(self): return self.abs() def __round__(self, decimals=0): return self.round(decimals) # ------------------------------------------------------------------------- # Label or Level Combination Helpers # # A collection of helper methods for DataFrame/Series operations that # accept a combination of column/index labels and levels. All such # operations should utilize/extend these methods when possible so that we # have consistent precedence and validation logic throughout the library. def _is_level_reference(self, key, axis=0): """ Test whether a key is a level reference for a given axis. 
To be considered a level reference, `key` must be a string that: - (axis=0): Matches the name of an index level and does NOT match a column label. - (axis=1): Matches the name of a column level and does NOT match an index label. Parameters ---------- key : str Potential level name for the given axis axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- is_level : bool """ axis = self._get_axis_number(axis) if self.ndim > 2: raise NotImplementedError( "_is_level_reference is not implemented for {type}" .format(type=type(self))) return (key is not None and is_hashable(key) and key in self.axes[axis].names and not self._is_label_reference(key, axis=axis)) def _is_label_reference(self, key, axis=0): """ Test whether a key is a label reference for a given axis. To be considered a label reference, `key` must be a string that: - (axis=0): Matches a column label - (axis=1): Matches an index label Parameters ---------- key: str Potential label name axis: int, default 0 Axis perpendicular to the axis that labels are associated with (0 means search for column labels, 1 means search for index labels) Returns ------- is_label: bool """ axis = self._get_axis_number(axis) other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] if self.ndim > 2: raise NotImplementedError( "_is_label_reference is not implemented for {type}" .format(type=type(self))) return (key is not None and is_hashable(key) and any(key in self.axes[ax] for ax in other_axes)) def _is_label_or_level_reference(self, key, axis=0): """ Test whether a key is a label or level reference for a given axis. To be considered either a label or a level reference, `key` must be a string that: - (axis=0): Matches a column label or an index level - (axis=1): Matches an index label or a column level Parameters ---------- key: str Potential label or level name axis: int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- is_label_or_level: bool """ if self.ndim > 2: raise NotImplementedError( "_is_label_or_level_reference is not implemented for {type}" .format(type=type(self))) return (self._is_level_reference(key, axis=axis) or self._is_label_reference(key, axis=axis)) def _check_label_or_level_ambiguity(self, key, axis=0): """ Check whether `key` is ambiguous. By ambiguous, we mean that it matches both a level of the input `axis` and a label of the other axis. Parameters ---------- key: str or object label or level name axis: int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Raises ------ ValueError: `key` is ambiguous """ axis = self._get_axis_number(axis) other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] if self.ndim > 2: raise NotImplementedError( "_check_label_or_level_ambiguity is not implemented for {type}" .format(type=type(self))) if (key is not None and is_hashable(key) and key in self.axes[axis].names and any(key in self.axes[ax] for ax in other_axes)): # Build an informative and grammatical warning level_article, level_type = (('an', 'index') if axis == 0 else ('a', 'column')) label_article, label_type = (('a', 'column') if axis == 0 else ('an', 'index')) msg = ("'{key}' is both {level_article} {level_type} level and " "{label_article} {label_type} label, which is ambiguous." 
).format(key=key, level_article=level_article, level_type=level_type, label_article=label_article, label_type=label_type) raise ValueError(msg) def _get_label_or_level_values(self, key, axis=0): """ Return a 1-D array of values associated with `key`, a label or level from the given `axis`. Retrieval logic: - (axis=0): Return column values if `key` matches a column label. Otherwise return index level values if `key` matches an index level. - (axis=1): Return row values if `key` matches an index label. Otherwise return column level values if 'key' matches a column level Parameters ---------- key: str Label or level name. axis: int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- values: np.ndarray Raises ------ KeyError if `key` matches neither a label nor a level ValueError if `key` matches multiple labels FutureWarning if `key` is ambiguous. This will become an ambiguity error in a future version """ axis = self._get_axis_number(axis) other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] if self.ndim > 2: raise NotImplementedError( "_get_label_or_level_values is not implemented for {type}" .format(type=type(self))) if self._is_label_reference(key, axis=axis): self._check_label_or_level_ambiguity(key, axis=axis) values = self.xs(key, axis=other_axes[0])._values elif self._is_level_reference(key, axis=axis): values = self.axes[axis].get_level_values(key)._values else: raise KeyError(key) # Check for duplicates if values.ndim > 1: if other_axes and isinstance( self._get_axis(other_axes[0]), MultiIndex): multi_message = ('\n' 'For a multi-index, the label must be a ' 'tuple with elements corresponding to ' 'each level.') else: multi_message = '' label_axis_name = 'column' if axis == 0 else 'index' raise ValueError(("The {label_axis_name} label '{key}' " "is not unique.{multi_message}") .format(key=key, label_axis_name=label_axis_name, multi_message=multi_message)) return values def _drop_labels_or_levels(self, keys, axis=0): """ Drop labels and/or levels for the given `axis`. For each key in `keys`: - (axis=0): If key matches a column label then drop the column. Otherwise if key matches an index level then drop the level. - (axis=1): If key matches an index label then drop the row. Otherwise if key matches a column level then drop the level. Parameters ---------- keys: str or list of str labels or levels to drop axis: int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- dropped: DataFrame Raises ------ ValueError if any `keys` match neither a label nor a level """ axis = self._get_axis_number(axis) if self.ndim > 2: raise NotImplementedError( "_drop_labels_or_levels is not implemented for {type}" .format(type=type(self))) # Validate keys keys = com.maybe_make_list(keys) invalid_keys = [k for k in keys if not self._is_label_or_level_reference(k, axis=axis)] if invalid_keys: raise ValueError(("The following keys are not valid labels or " "levels for axis {axis}: {invalid_keys}") .format(axis=axis, invalid_keys=invalid_keys)) # Compute levels and labels to drop levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)] labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)] # Perform copy upfront and then use inplace operations below. # This ensures that we always perform exactly one copy. # ``copy`` and/or ``inplace`` options could be added in the future. 
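        # Illustrative sketch (hypothetical names): for a frame with an index
        # level 'year' and a column 'price',
        # obj._drop_labels_or_levels(['year', 'price']) drops the 'year'
        # index level and the 'price' column in a single pass.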
        dropped = self.copy()

        if axis == 0:
            # Handle dropping index levels
            if levels_to_drop:
                dropped.reset_index(levels_to_drop, drop=True, inplace=True)

            # Handle dropping columns labels
            if labels_to_drop:
                dropped.drop(labels_to_drop, axis=1, inplace=True)
        else:
            # Handle dropping column levels
            if levels_to_drop:
                if isinstance(dropped.columns, MultiIndex):
                    # Drop the specified levels from the MultiIndex
                    dropped.columns = dropped.columns.droplevel(levels_to_drop)
                else:
                    # Drop the last level of Index by replacing with
                    # a RangeIndex
                    dropped.columns = RangeIndex(dropped.columns.size)

            # Handle dropping index labels
            if labels_to_drop:
                dropped.drop(labels_to_drop, axis=0, inplace=True)

        return dropped

    # ----------------------------------------------------------------------
    # Iteration

    def __hash__(self):
        raise TypeError('{0!r} objects are mutable, thus they cannot be'
                        ' hashed'.format(self.__class__.__name__))

    def __iter__(self):
        """Iterate over the info axis."""
        return iter(self._info_axis)

    # can we get a better explanation of this?
    def keys(self):
        """Get the 'info axis' (see Indexing for more)

        This is index for Series, columns for DataFrame and major_axis for
        Panel.
        """
        return self._info_axis

    def iteritems(self):
        """Iterate over (label, values) on info axis

        This is index for Series, columns for DataFrame, major_axis for
        Panel, and so on.
        """
        for h in self._info_axis:
            yield h, self[h]

    def __len__(self):
        """Returns length of info axis"""
        return len(self._info_axis)

    def __contains__(self, key):
        """True if the key is in the info axis"""
        return key in self._info_axis

    @property
    def empty(self):
        """
        Indicator whether DataFrame is empty.

        True if DataFrame is entirely empty (no items), meaning any of the
        axes are of length 0.

        Returns
        -------
        bool
            If DataFrame is empty, return True, if not return False.

        See Also
        --------
        pandas.Series.dropna
        pandas.DataFrame.dropna

        Notes
        -----
        If DataFrame contains only NaNs, it is still not considered empty. See
        the example below.

        Examples
        --------
        An example of an actual empty DataFrame. Notice the index is empty:

        >>> df_empty = pd.DataFrame({'A' : []})
        >>> df_empty
        Empty DataFrame
        Columns: [A]
        Index: []
        >>> df_empty.empty
        True

        If we only have NaNs in our DataFrame, it is not considered empty! We
        will need to drop the NaNs to make the DataFrame empty:

        >>> df = pd.DataFrame({'A' : [np.nan]})
        >>> df
            A
        0 NaN
        >>> df.empty
        False
        >>> df.dropna().empty
        True
        """
        return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)

    # ----------------------------------------------------------------------
    # Array Interface

    # This is also set in IndexOpsMixin
    # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
    __array_priority__ = 1000

    def __array__(self, dtype=None):
        return com.values_from_object(self)

    def __array_wrap__(self, result, context=None):
        d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
        return self._constructor(result, **d).__finalize__(self)

    # ideally we would define this to avoid the getattr checks, but
    # is slower
    # @property
    # def __array_interface__(self):
    #    """ provide numpy array interface method """
    #    values = self.values
    #    return dict(typestr=values.dtype.str, shape=values.shape,
    #                data=values)

    def to_dense(self):
        """
        Return dense representation of NDFrame (as opposed to sparse).
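
        Examples
        --------
        For an already-dense object this is a compatibility no-op, so the
        input is returned unchanged (a minimal sketch using a plain Series):

        >>> pd.Series([1, 2]).to_dense()
        0    1
        1    2
        dtype: int64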
""" # compat return self # ---------------------------------------------------------------------- # Picklability def __getstate__(self): meta = {k: getattr(self, k, None) for k in self._metadata} return dict(_data=self._data, _typ=self._typ, _metadata=self._metadata, **meta) def __setstate__(self, state): if isinstance(state, BlockManager): self._data = state elif isinstance(state, dict): typ = state.get('_typ') if typ is not None: # set in the order of internal names # to avoid definitional recursion # e.g. say fill_value needing _data to be # defined meta = set(self._internal_names + self._metadata) for k in list(meta): if k in state: v = state[k] object.__setattr__(self, k, v) for k, v in state.items(): if k not in meta: object.__setattr__(self, k, v) else: self._unpickle_series_compat(state) elif isinstance(state[0], dict): if len(state) == 5: self._unpickle_sparse_frame_compat(state) else: self._unpickle_frame_compat(state) elif len(state) == 4: self._unpickle_panel_compat(state) elif len(state) == 2: self._unpickle_series_compat(state) else: # pragma: no cover # old pickling format, for compatibility self._unpickle_matrix_compat(state) self._item_cache = {} # ---------------------------------------------------------------------- # Rendering Methods def __unicode__(self): # unicode representation based upon iterating over self # (since, by definition, `PandasContainers` are iterable) prepr = '[%s]' % ','.join(map(pprint_thing, self)) return '%s(%s)' % (self.__class__.__name__, prepr) def _repr_latex_(self): """ Returns a LaTeX representation for a particular object. Mainly for use with nbconvert (jupyter notebook conversion to pdf). """ if config.get_option('display.latex.repr'): return self.to_latex() else: return None def _repr_data_resource_(self): """ Not a real Jupyter special repr method, but we use the same naming convention. """ if config.get_option("display.html.table_schema"): data = self.head(config.get_option('display.max_rows')) payload = json.loads(data.to_json(orient='table'), object_pairs_hook=collections.OrderedDict) return payload # ---------------------------------------------------------------------- # I/O Methods _shared_docs['to_excel'] = """ Write %(klass)s to an Excel sheet. To write a single %(klass)s to an Excel .xlsx file it is only necessary to specify a target file name. To write to multiple sheets it is necessary to create an `ExcelWriter` object with a target file name, and specify a sheet in the file to write to. Multiple sheets may be written to by specifying unique `sheet_name`. With all data written to the file it is necessary to save the changes. Note that creating an `ExcelWriter` object with a file name that already exists will result in the contents of the existing file being erased. Parameters ---------- excel_writer : str or ExcelWriter object File path or existing ExcelWriter. sheet_name : str, default 'Sheet1' Name of sheet which will contain DataFrame. na_rep : str, default '' Missing data representation. float_format : str, optional Format string for floating point numbers. For example ``float_format="%%.2f"`` will format 0.1234 to 0.12. columns : sequence or list of str, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of string is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, optional Column label for index column(s) if desired. 
        If not specified, and
        `header` and `index` are True, then the index names are used. A
        sequence should be given if the DataFrame uses MultiIndex.
    startrow : int, default 0
        Upper left cell row to dump data frame.
    startcol : int, default 0
        Upper left cell column to dump data frame.
    engine : str, optional
        Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
        via the options ``io.excel.xlsx.writer``,
        ``io.excel.xls.writer``, and ``io.excel.xlsm.writer``.
    merge_cells : bool, default True
        Write MultiIndex and Hierarchical Rows as merged cells.
    encoding : str, optional
        Encoding of the resulting excel file. Only necessary for xlwt,
        other writers support unicode natively.
    inf_rep : str, default 'inf'
        Representation for infinity (there is no native representation for
        infinity in Excel).
    verbose : bool, default True
        Display more information in the error logs.
    freeze_panes : tuple of int (length 2), optional
        Specifies the one-based bottommost row and rightmost column that
        is to be frozen.

        .. versionadded:: 0.20.0.

    See Also
    --------
    to_csv : Write DataFrame to a comma-separated values (csv) file.
    ExcelWriter : Class for writing DataFrame objects into excel sheets.
    read_excel : Read an Excel file into a pandas DataFrame.
    read_csv : Read a comma-separated values (csv) file into DataFrame.

    Notes
    -----
    For compatibility with :meth:`~DataFrame.to_csv`,
    to_excel serializes lists and dicts to strings before writing.

    Once a workbook has been saved it is not possible to write further
    data without rewriting the whole workbook.

    Examples
    --------

    Create, write to and save a workbook:

    >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
    ...                    index=['row 1', 'row 2'],
    ...                    columns=['col 1', 'col 2'])
    >>> df1.to_excel("output.xlsx")  # doctest: +SKIP

    To specify the sheet name:

    >>> df1.to_excel("output.xlsx",
    ...              sheet_name='Sheet_name_1')  # doctest: +SKIP

    If you wish to write to more than one sheet in the workbook, it is
    necessary to specify an ExcelWriter object:

    >>> df2 = df1.copy()
    >>> with pd.ExcelWriter('output.xlsx') as writer:  # doctest: +SKIP
    ...     df1.to_excel(writer, sheet_name='Sheet_name_1')
    ...     df2.to_excel(writer, sheet_name='Sheet_name_2')

    To set the library that is used to write the Excel file,
    you can pass the `engine` keyword (the default engine is
    automatically chosen depending on the file extension):

    >>> df1.to_excel('output1.xlsx', engine='xlsxwriter')  # doctest: +SKIP
    """

    @Appender(_shared_docs["to_excel"] % dict(klass="object"))
    def to_excel(self, excel_writer, sheet_name="Sheet1", na_rep="",
                 float_format=None, columns=None, header=True, index=True,
                 index_label=None, startrow=0, startcol=0, engine=None,
                 merge_cells=True, encoding=None, inf_rep="inf", verbose=True,
                 freeze_panes=None):
        df = self if isinstance(self, ABCDataFrame) else self.to_frame()

        from pandas.io.formats.excel import ExcelFormatter
        formatter = ExcelFormatter(df, na_rep=na_rep, cols=columns,
                                   header=header,
                                   float_format=float_format, index=index,
                                   index_label=index_label,
                                   merge_cells=merge_cells,
                                   inf_rep=inf_rep)
        formatter.write(excel_writer, sheet_name=sheet_name,
                        startrow=startrow,
                        startcol=startcol, freeze_panes=freeze_panes,
                        engine=engine)

    def to_json(self, path_or_buf=None, orient=None, date_format=None,
                double_precision=10, force_ascii=True, date_unit='ms',
                default_handler=None, lines=False, compression='infer',
                index=True):
        """
        Convert the object to a JSON string.

        Note NaN's and None will be converted to null and datetime objects
        will be converted to UNIX timestamps.
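
        For instance, missing values become ``null`` (a minimal sketch; the
        default ``orient`` for a Series is 'index'):

        >>> pd.Series([1.0, None]).to_json()
        '{"0":1.0,"1":null}'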
Parameters ---------- path_or_buf : string or file handle, optional File path or object. If not specified, the result is returned as a string. orient : string Indication of expected JSON string format. * Series - default is 'index' - allowed values are: {'split','records','index','table'} * DataFrame - default is 'columns' - allowed values are: {'split','records','index','columns','values','table'} * The format of the JSON string - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} - 'columns' : dict like {column -> {index -> value}} - 'values' : just the values array - 'table' : dict like {'schema': {schema}, 'data': {data}} describing the data, and the data component is like ``orient='records'``. .. versionchanged:: 0.20.0 date_format : {None, 'epoch', 'iso'} Type of date conversion. 'epoch' = epoch milliseconds, 'iso' = ISO8601. The default depends on the `orient`. For ``orient='table'``, the default is 'iso'. For all other orients, the default is 'epoch'. double_precision : int, default 10 The number of decimal places to use when encoding floating point values. force_ascii : bool, default True Force encoded string to be ASCII. date_unit : string, default 'ms' (milliseconds) The time unit to encode to, governs timestamp and ISO8601 precision. One of 's', 'ms', 'us', 'ns' for second, millisecond, microsecond, and nanosecond respectively. default_handler : callable, default None Handler to call if object cannot otherwise be converted to a suitable format for JSON. Should receive a single argument which is the object to convert and return a serialisable object. lines : bool, default False If 'orient' is 'records' write out line delimited json format. Will throw ValueError if incorrect 'orient' since others are not list like. .. versionadded:: 0.19.0 compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None} A string representing the compression to use in the output file, only used when the first argument is a filename. By default, the compression is inferred from the filename. .. versionadded:: 0.21.0 .. versionchanged:: 0.24.0 'infer' option added and set to default index : bool, default True Whether to include the index values in the JSON string. Not including the index (``index=False``) is only supported when orient is 'split' or 'table'. .. versionadded:: 0.23.0 See Also -------- read_json Examples -------- >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) >>> df.to_json(orient='split') '{"columns":["col 1","col 2"], "index":["row 1","row 2"], "data":[["a","b"],["c","d"]]}' Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. 
        >>> df.to_json(orient='records')
        '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'

        Encoding/decoding a Dataframe using ``'index'`` formatted JSON:

        >>> df.to_json(orient='index')
        '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'

        Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:

        >>> df.to_json(orient='columns')
        '{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'

        Encoding/decoding a Dataframe using ``'values'`` formatted JSON:

        >>> df.to_json(orient='values')
        '[["a","b"],["c","d"]]'

        Encoding with Table Schema

        >>> df.to_json(orient='table')
        '{"schema": {"fields": [{"name": "index", "type": "string"}, {"name": "col 1", "type": "string"}, {"name": "col 2", "type": "string"}], "primaryKey": "index", "pandas_version": "0.20.0"}, "data": [{"index": "row 1", "col 1": "a", "col 2": "b"}, {"index": "row 2", "col 1": "c", "col 2": "d"}]}'
        """
        from pandas.io import json
        if date_format is None and orient == 'table':
            date_format = 'iso'
        elif date_format is None:
            date_format = 'epoch'
        return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient,
                            date_format=date_format,
                            double_precision=double_precision,
                            force_ascii=force_ascii, date_unit=date_unit,
                            default_handler=default_handler,
                            lines=lines, compression=compression,
                            index=index)

    def to_hdf(self, path_or_buf, key, **kwargs):
        """
        Write the contained data to an HDF5 file using HDFStore.

        Hierarchical Data Format (HDF) is self-describing, allowing an
        application to interpret the structure and contents of a file with
        no outside information. One HDF file can hold a mix of related objects
        which can be accessed as a group or as individual objects.

        In order to add another DataFrame or Series to an existing HDF file
        please use append mode and a different key.

        For more information see the :ref:`user guide <io.hdf5>`.

        Parameters
        ----------
        path_or_buf : str or pandas.HDFStore
            File path or HDFStore object.
        key : str
            Identifier for the group in the store.
        mode : {'a', 'w', 'r+'}, default 'a'
            Mode to open file:

            - 'w': write, a new file is created (an existing file with
              the same name would be deleted).
            - 'a': append, an existing file is opened for reading and
              writing, and if the file does not exist it is created.
            - 'r+': similar to 'a', but the file must already exist.
        format : {'fixed', 'table'}, default 'fixed'
            Possible values:

            - 'fixed': Fixed format. Fast writing/reading. Not-appendable,
              nor searchable.
            - 'table': Table format. Write as a PyTables Table structure
              which may perform worse but allow more flexible operations
              like searching / selecting subsets of the data.
        append : bool, default False
            For Table formats, append the input data to the existing.
        data_columns : list of columns or True, optional
            List of columns to create as indexed data columns for on-disk
            queries, or True to use all columns. By default only the axes
            of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
            Applicable only to format='table'.
        complevel : {0-9}, optional
            Specifies a compression level for data.
            A value of 0 disables compression.
        complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
            Specifies the compression library to be used.
            As of v0.20.2 these additional compressors for Blosc are supported
            (default if no compressor specified: 'blosc:blosclz'):
            {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
            'blosc:zlib', 'blosc:zstd'}.
            Specifying a compression library which is not available issues
            a ValueError.
        fletcher32 : bool, default False
            If applying compression use the fletcher32 checksum.
        dropna : bool, default False
            If True, all NaN rows will not be written to the store.
        errors : str, default 'strict'
            Specifies how encoding and decoding errors are to be handled.
            See the errors argument for :func:`open` for a full list
            of options.

        See Also
        --------
        read_hdf : Read from HDF file.
        DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
        DataFrame.to_sql : Write to a sql table.
        DataFrame.to_feather : Write out feather-format for DataFrames.
        DataFrame.to_csv : Write out to a csv file.

        Examples
        --------
        >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
        ...                   index=['a', 'b', 'c'])
        >>> df.to_hdf('data.h5', key='df', mode='w')

        We can add another object to the same file:

        >>> s = pd.Series([1, 2, 3, 4])
        >>> s.to_hdf('data.h5', key='s')

        Reading from HDF file:

        >>> pd.read_hdf('data.h5', 'df')
           A  B
        a  1  4
        b  2  5
        c  3  6
        >>> pd.read_hdf('data.h5', 's')
        0    1
        1    2
        2    3
        3    4
        dtype: int64

        Deleting file with data:

        >>> import os
        >>> os.remove('data.h5')
        """
        from pandas.io import pytables
        return pytables.to_hdf(path_or_buf, key, self, **kwargs)

    def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
        """
        Serialize object to input file path using msgpack format.

        THIS IS AN EXPERIMENTAL LIBRARY and the storage format
        may not be stable until a future release.

        Parameters
        ----------
        path : str, buffer-like, or None
            Destination for the serialized bytes; if None, the generated
            bytes are returned.
        append : bool
            Whether to append to an existing msgpack (default is False).
        compress : str
            Type of compressor (zlib or blosc), default None (no
            compression).
        """
        from pandas.io import packers
        return packers.to_msgpack(path_or_buf, self, encoding=encoding,
                                  **kwargs)

    def to_sql(self, name, con, schema=None, if_exists='fail', index=True,
               index_label=None, chunksize=None, dtype=None, method=None):
        """
        Write records stored in a DataFrame to a SQL database.

        Any database supported by SQLAlchemy [1]_ can be used. Tables can be
        newly created, appended to, or overwritten.

        Parameters
        ----------
        name : string
            Name of SQL table.
        con : sqlalchemy.engine.Engine or sqlite3.Connection
            Using SQLAlchemy makes it possible to use any DB supported by
            that library. Legacy support is provided for sqlite3.Connection
            objects.
        schema : string, optional
            Specify the schema (if database flavor supports this). If None,
            use default schema.
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            How to behave if the table already exists.

            * fail: Raise a ValueError.
            * replace: Drop the table before inserting new values.
            * append: Insert new values to the existing table.
        index : bool, default True
            Write DataFrame index as a column. Uses `index_label` as the
            column name in the table.
        index_label : string or sequence, default None
            Column label for index column(s). If None is given (default) and
            `index` is True, then the index names are used.
            A sequence should be given if the DataFrame uses MultiIndex.
        chunksize : int, optional
            Rows will be written in batches of this size at a time. By
            default, all rows will be written at once.
        dtype : dict, optional
            Specifying the datatype for columns. The keys should be the
            column names and the values should be the SQLAlchemy types or
            strings for the sqlite3 legacy mode.
        method : {None, 'multi', callable}, default None
            Controls the SQL insertion clause used:

            * None : Uses standard SQL ``INSERT`` clause (one per row).
            * 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. .. versionadded:: 0.24.0 Raises ------ ValueError When the table already exists and `if_exists` is 'fail' (the default). See Also -------- read_sql : Read a DataFrame from a table. Notes ----- Timezone aware datetime columns will be written as ``Timestamp with timezone`` type with SQLAlchemy if supported by the database. Otherwise, the datetimes will be stored as timezone unaware timestamps local to the original timezone. .. versionadded:: 0.24.0 References ---------- .. [1] http://docs.sqlalchemy.org .. [2] https://www.python.org/dev/peps/pep-0249/ Examples -------- Create an in-memory SQLite database. >>> from sqlalchemy import create_engine >>> engine = create_engine('sqlite://', echo=False) Create a table from scratch with 3 rows. >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']}) >>> df name 0 User 1 1 User 2 2 User 3 >>> df.to_sql('users', con=engine) >>> engine.execute("SELECT * FROM users").fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')] >>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']}) >>> df1.to_sql('users', con=engine, if_exists='append') >>> engine.execute("SELECT * FROM users").fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'), (0, 'User 4'), (1, 'User 5')] Overwrite the table with just ``df1``. >>> df1.to_sql('users', con=engine, if_exists='replace', ... index_label='id') >>> engine.execute("SELECT * FROM users").fetchall() [(0, 'User 4'), (1, 'User 5')] Specify the dtype (especially useful for integers with missing values). Notice that while pandas is forced to store the data as floating point, the database supports nullable integers. When fetching the data with Python, we get back integer scalars. >>> df = pd.DataFrame({"A": [1, None, 2]}) >>> df A 0 1.0 1 NaN 2 2.0 >>> from sqlalchemy.types import Integer >>> df.to_sql('integers', con=engine, index=False, ... dtype={"A": Integer()}) >>> engine.execute("SELECT * FROM integers").fetchall() [(1,), (None,), (2,)] """ from pandas.io import sql sql.to_sql(self, name, con, schema=schema, if_exists=if_exists, index=index, index_label=index_label, chunksize=chunksize, dtype=dtype, method=method) def to_pickle(self, path, compression='infer', protocol=pkl.HIGHEST_PROTOCOL): """ Pickle (serialize) object to file. Parameters ---------- path : str File path where the pickled object will be stored. compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \ default 'infer' A string representing the compression to use in the output file. By default, infers from the file extension in specified path. .. versionadded:: 0.20.0 protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible values for this parameter depend on the version of Python. For Python 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a valid value. For Python >= 3.4, 4 is a valid value. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html .. versionadded:: 0.21.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. 
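
        Notes
        -----
        Higher protocols produce smaller, faster pickles but cannot be read
        by older Python versions; pass a lower ``protocol`` explicitly (for
        example ``protocol=2``) when the file must stay readable from
        Python 2.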
        Examples
        --------
        >>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
        >>> original_df
           foo  bar
        0    0    5
        1    1    6
        2    2    7
        3    3    8
        4    4    9
        >>> original_df.to_pickle("./dummy.pkl")

        >>> unpickled_df = pd.read_pickle("./dummy.pkl")
        >>> unpickled_df
           foo  bar
        0    0    5
        1    1    6
        2    2    7
        3    3    8
        4    4    9

        >>> import os
        >>> os.remove("./dummy.pkl")
        """
        from pandas.io.pickle import to_pickle
        return to_pickle(self, path, compression=compression,
                         protocol=protocol)

    def to_clipboard(self, excel=True, sep=None, **kwargs):
        r"""
        Copy object to the system clipboard.

        Write a text representation of object to the system clipboard.
        This can be pasted into Excel, for example.

        Parameters
        ----------
        excel : bool, default True
            - True, use the provided separator, writing in a csv format for
              allowing easy pasting into excel.
            - False, write a string representation of the object to the
              clipboard.
        sep : str, default ``'\t'``
            Field delimiter.
        **kwargs
            These parameters will be passed to DataFrame.to_csv.

        See Also
        --------
        DataFrame.to_csv : Write a DataFrame to a comma-separated values
            (csv) file.
        read_clipboard : Read text from clipboard and pass to read_table.

        Notes
        -----
        Requirements for your platform.

          - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
          - Windows : none
          - OS X : none

        Examples
        --------
        Copy the contents of a DataFrame to the clipboard.

        >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
        >>> df.to_clipboard(sep=',')
        ... # Wrote the following to the system clipboard:
        ... # ,A,B,C
        ... # 0,1,2,3
        ... # 1,4,5,6

        We can omit the index by passing the keyword `index` and setting
        it to false.

        >>> df.to_clipboard(sep=',', index=False)
        ... # Wrote the following to the system clipboard:
        ... # A,B,C
        ... # 1,2,3
        ... # 4,5,6
        """
        from pandas.io import clipboards
        clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)

    def to_xarray(self):
        """
        Return an xarray object from the pandas object.

        Returns
        -------
        xarray.DataArray or xarray.Dataset
            Data in the pandas structure converted to Dataset if the object is
            a DataFrame, or a DataArray if the object is a Series.

        See Also
        --------
        DataFrame.to_hdf : Write DataFrame to an HDF5 file.
        DataFrame.to_parquet : Write a DataFrame to the binary parquet format.

        Notes
        -----
        See the `xarray docs <http://xarray.pydata.org/en/stable/>`__

        Examples
        --------
        >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
        ...                    ('parrot', 'bird', 24.0, 2),
        ...                    ('lion', 'mammal', 80.5, 4),
        ...                    ('monkey', 'mammal', np.nan, 4)],
        ...                   columns=['name', 'class', 'max_speed',
        ...                            'num_legs'])
        >>> df
             name   class  max_speed  num_legs
        0  falcon    bird      389.0         2
        1  parrot    bird       24.0         2
        2    lion  mammal       80.5         4
        3  monkey  mammal        NaN         4

        >>> df.to_xarray()
        <xarray.Dataset>
        Dimensions:    (index: 4)
        Coordinates:
          * index      (index) int64 0 1 2 3
        Data variables:
            name       (index) object 'falcon' 'parrot' 'lion' 'monkey'
            class      (index) object 'bird' 'bird' 'mammal' 'mammal'
            max_speed  (index) float64 389.0 24.0 80.5 nan
            num_legs   (index) int64 2 2 4 4

        >>> df['max_speed'].to_xarray()
        <xarray.DataArray 'max_speed' (index: 4)>
        array([389. ,  24. ,  80.5,   nan])
        Coordinates:
          * index    (index) int64 0 1 2 3

        >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
        ...                         '2018-01-02', '2018-01-02'])
        >>> df_multiindex = pd.DataFrame({'date': dates,
        ...                               'animal': ['falcon', 'parrot',
        ...                                          'falcon', 'parrot'],
        ...                               'speed': [350, 18, 361,
        ...                                         15]}).set_index(['date',
        ...
'animal']) >>> df_multiindex speed date animal 2018-01-01 falcon 350 parrot 18 2018-01-02 falcon 361 parrot 15 >>> df_multiindex.to_xarray() <xarray.Dataset> Dimensions: (animal: 2, date: 2) Coordinates: * date (date) datetime64[ns] 2018-01-01 2018-01-02 * animal (animal) object 'falcon' 'parrot' Data variables: speed (date, animal) int64 350 18 361 15 """ try: import xarray except ImportError: # Give a nice error message raise ImportError("the xarray library is not installed\n" "you can install via conda\n" "conda install xarray\n" "or via pip\n" "pip install xarray\n") if self.ndim == 1: return xarray.DataArray.from_series(self) elif self.ndim == 2: return xarray.Dataset.from_dataframe(self) # > 2 dims coords = [(a, self._get_axis(a)) for a in self._AXIS_ORDERS] return xarray.DataArray(self, coords=coords, ) def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None, decimal='.', multicolumn=None, multicolumn_format=None, multirow=None): r""" Render an object to a LaTeX tabular environment table. Render an object to a tabular environment table. You can splice this into a LaTeX document. Requires \usepackage{booktabs}. .. versionchanged:: 0.20.2 Added to Series Parameters ---------- buf : file descriptor or None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default 'NaN' Missing data representation. formatters : list of functions or dict of {str: function}, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : str, optional Format string for floating point numbers. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in `LaTeX table format <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 columns. By default, 'l' will be used for all columns except columns of numbers, which default to 'r'. longtable : bool, optional By default, the value will be read from the pandas config module. Use a longtable environment instead of tabular. Requires adding a \usepackage{longtable} to your LaTeX preamble. escape : bool, optional By default, the value will be read from the pandas config module. When set to False prevents from escaping latex special characters in column names. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. .. versionadded:: 0.18.0 multicolumn : bool, default True Use \multicolumn to enhance MultiIndex columns. 
            The default will be read from the
            config module.

            .. versionadded:: 0.20.0

        multicolumn_format : str, default 'l'
            The alignment for multicolumns, similar to `column_format`
            The default will be read from the config module.

            .. versionadded:: 0.20.0

        multirow : bool, default False
            Use \multirow to enhance MultiIndex rows. Requires adding a
            \usepackage{multirow} to your LaTeX preamble. Will print
            centered labels (instead of top-aligned) across the contained
            rows, separating groups via clines. The default will be read
            from the pandas config module.

            .. versionadded:: 0.20.0

        Returns
        -------
        str or None
            If buf is None, returns the resulting LaTeX format as a string.
            Otherwise returns None.

        See Also
        --------
        DataFrame.to_string : Render a DataFrame to a console-friendly
            tabular output.
        DataFrame.to_html : Render a DataFrame as an HTML table.

        Examples
        --------
        >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
        ...                    'mask': ['red', 'purple'],
        ...                    'weapon': ['sai', 'bo staff']})
        >>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
        '\\begin{tabular}{lll}\n\\toprule\n      name &    mask &    weapon
        \\\\\n\\midrule\n   Raphael &     red &       sai \\\\\n Donatello &
        purple &  bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
        """
        # Get defaults from the pandas config
        if self.ndim == 1:
            self = self.to_frame()
        if longtable is None:
            longtable = config.get_option("display.latex.longtable")
        if escape is None:
            escape = config.get_option("display.latex.escape")
        if multicolumn is None:
            multicolumn = config.get_option("display.latex.multicolumn")
        if multicolumn_format is None:
            multicolumn_format = config.get_option(
                "display.latex.multicolumn_format")
        if multirow is None:
            multirow = config.get_option("display.latex.multirow")

        formatter = DataFrameFormatter(self, buf=buf, columns=columns,
                                       col_space=col_space, na_rep=na_rep,
                                       header=header, index=index,
                                       formatters=formatters,
                                       float_format=float_format,
                                       bold_rows=bold_rows,
                                       sparsify=sparsify,
                                       index_names=index_names,
                                       escape=escape, decimal=decimal)
        formatter.to_latex(column_format=column_format, longtable=longtable,
                           encoding=encoding, multicolumn=multicolumn,
                           multicolumn_format=multicolumn_format,
                           multirow=multirow)

        if buf is None:
            return formatter.buf.getvalue()

    def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
               columns=None, header=True, index=True, index_label=None,
               mode='w', encoding=None, compression='infer', quoting=None,
               quotechar='"', line_terminator=None, chunksize=None,
               tupleize_cols=None, date_format=None, doublequote=True,
               escapechar=None, decimal='.'):
        r"""
        Write object to a comma-separated values (csv) file.

        .. versionchanged:: 0.24.0
            The order of arguments for Series was changed.

        Parameters
        ----------
        path_or_buf : str or file handle, default None
            File path or object, if None is provided the result is returned
            as a string.

            .. versionchanged:: 0.24.0

               Was previously named "path" for Series.

        sep : str, default ','
            String of length 1. Field delimiter for the output file.
        na_rep : str, default ''
            Missing data representation.
        float_format : str, default None
            Format string for floating point numbers.
        columns : sequence, optional
            Columns to write.
        header : bool or list of str, default True
            Write out the column names. If a list of strings is given it is
            assumed to be aliases for the column names.

            .. versionchanged:: 0.24.0

               Previously defaulted to False for Series.

        index : bool, default True
            Write row names (index).
        index_label : str or sequence, or False, default None
            Column label for index column(s) if desired. If None is given,
            and `header` and `index` are True, then the index names are
            used. A sequence should be given if the object uses MultiIndex.
            If False do not print fields for index names. Use
            index_label=False for easier importing in R.
        mode : str
            Python write mode, default 'w'.
        encoding : str, optional
            A string representing the encoding to use in the output file,
            defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
        compression : str, default 'infer'
            Compression mode among the following possible values: {'infer',
            'gzip', 'bz2', 'zip', 'xz', None}. If 'infer' and `path_or_buf`
            is path-like, then detect compression from the following
            extensions: '.gz', '.bz2', '.zip' or '.xz'. (otherwise no
            compression).

            .. versionchanged:: 0.24.0

               'infer' option added and set to default.

        quoting : optional constant from csv module
            Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
            then floats are converted to strings and thus
            csv.QUOTE_NONNUMERIC will treat them as non-numeric.
        quotechar : str, default '\"'
            String of length 1. Character used to quote fields.
        line_terminator : string, optional
            The newline character or character sequence to use in the output
            file. Defaults to `os.linesep`, which depends on the OS in which
            this method is called (for example, '\n' for linux, '\r\n' for
            Windows).

            .. versionchanged:: 0.24.0

        chunksize : int or None
            Rows to write at a time.
        tupleize_cols : bool, default False
            Write MultiIndex columns as a list of tuples (if True) or in
            the new, expanded format, where each MultiIndex column is a row
            in the CSV (if False).

            .. deprecated:: 0.21.0
               This argument will be removed and will always write each row
               of the multi-index as a separate row in the CSV file.

        date_format : str, default None
            Format string for datetime objects.
        doublequote : bool, default True
            Control quoting of `quotechar` inside a field.
        escapechar : str, default None
            String of length 1. Character used to escape `sep` and
            `quotechar` when appropriate.
        decimal : str, default '.'
            Character recognized as decimal separator. E.g. use ',' for
            European data.

        Returns
        -------
        None or str
            If path_or_buf is None, returns the resulting csv format as a
            string. Otherwise returns None.

        See Also
        --------
        read_csv : Load a CSV file into a DataFrame.
        to_excel : Write object to an Excel file.

        Examples
        --------
        >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
        ...                    'mask': ['red', 'purple'],
        ...
'weapon': ['sai', 'bo staff']}) >>> df.to_csv(index=False) 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n' """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() if tupleize_cols is not None: warnings.warn("The 'tupleize_cols' parameter is deprecated and " "will be removed in a future version", FutureWarning, stacklevel=2) else: tupleize_cols = False from pandas.io.formats.csvs import CSVFormatter formatter = CSVFormatter(df, path_or_buf, line_terminator=line_terminator, sep=sep, encoding=encoding, compression=compression, quoting=quoting, na_rep=na_rep, float_format=float_format, cols=columns, header=header, index=index, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, tupleize_cols=tupleize_cols, date_format=date_format, doublequote=doublequote, escapechar=escapechar, decimal=decimal) formatter.save() if path_or_buf is None: return formatter.path_or_buf.getvalue() # ---------------------------------------------------------------------- # Fancy Indexing @classmethod def _create_indexer(cls, name, indexer): """Create an indexer like _name in the class.""" if getattr(cls, name, None) is None: _indexer = functools.partial(indexer, name) setattr(cls, name, property(_indexer, doc=indexer.__doc__)) def get(self, key, default=None): """ Get item from object for given key (DataFrame column, Panel slice, etc.). Returns default value if not found. Parameters ---------- key : object Returns ------- value : same type as items contained in object """ try: return self[key] except (KeyError, ValueError, IndexError): return default def __getitem__(self, item): return self._get_item_cache(item) def _get_item_cache(self, item): """Return the cached item, item represents a label indexer.""" cache = self._item_cache res = cache.get(item) if res is None: values = self._data.get(item) res = self._box_item_values(item, values) cache[item] = res res._set_as_cached(item, self) # for a chain res._is_copy = self._is_copy return res def _set_as_cached(self, item, cacher): """Set the _cacher attribute on the calling object with a weakref to cacher. """ self._cacher = (item, weakref.ref(cacher)) def _reset_cacher(self): """Reset the cacher.""" if hasattr(self, '_cacher'): del self._cacher def _iget_item_cache(self, item): """Return the cached item, item represents a positional indexer.""" ax = self._info_axis if ax.is_unique: lower = self._get_item_cache(ax[item]) else: lower = self._take(item, axis=self._info_axis_number) return lower def _box_item_values(self, key, values): raise AbstractMethodError(self) def _maybe_cache_changed(self, item, value): """The object has called back to us saying maybe it has changed. """ self._data.set(item, value) @property def _is_cached(self): """Return boolean indicating if self is cached or not.""" return getattr(self, '_cacher', None) is not None def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, '_cacher', None) if cacher is not None: cacher = cacher[1]() return cacher @property def _is_view(self): """Return boolean indicating if self is view of another array """ return self._data.is_view def _maybe_update_cacher(self, clear=False, verify_is_copy=True): """ See if we need to update our parent cacher if clear, then clear our cache. 
Parameters ---------- clear : boolean, default False clear the item cache verify_is_copy : boolean, default True provide is_copy checks """ cacher = getattr(self, '_cacher', None) if cacher is not None: ref = cacher[1]() # we are trying to reference a dead referant, hence # a copy if ref is None: del self._cacher else: try: ref._maybe_cache_changed(cacher[0], self) except Exception: pass if verify_is_copy: self._check_setitem_copy(stacklevel=5, t='referant') if clear: self._clear_item_cache() def _clear_item_cache(self, i=None): if i is not None: self._item_cache.pop(i, None) else: self._item_cache.clear() def _slice(self, slobj, axis=0, kind=None): """ Construct a slice of this container. kind parameter is maintained for compatibility with Series slicing. """ axis = self._get_block_manager_axis(axis) result = self._constructor(self._data.get_slice(slobj, axis=axis)) result = result.__finalize__(self) # this could be a view # but only in a single-dtyped view slicable case is_copy = axis != 0 or result._is_view result._set_is_copy(self, copy=is_copy) return result def _set_item(self, key, value): self._data.set(key, value) self._clear_item_cache() def _set_is_copy(self, ref=None, copy=True): if not copy: self._is_copy = None else: if ref is not None: self._is_copy = weakref.ref(ref) else: self._is_copy = None def _check_is_chained_assignment_possible(self): """ Check if we are a view, have a cacher, and are of mixed type. If so, then force a setitem_copy check. Should be called just near setting a value Will return a boolean if it we are a view and are cached, but a single-dtype meaning that the cacher should be updated following setting. """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(stacklevel=4, t='referant', force=True) return True elif self._is_copy: self._check_setitem_copy(stacklevel=4, t='referant') return False def _check_setitem_copy(self, stacklevel=4, t='setting', force=False): """ Parameters ---------- stacklevel : integer, default 4 the level to show of the stack when the error is output t : string, the type of setting error force : boolean, default False if True, then force showing an error validate if we are doing a settitem on a chained copy. If you call this function, be sure to set the stacklevel such that the user will see the error *at the level of setting* It is technically possible to figure out that we are setting on a copy even WITH a multi-dtyped pandas object. In other words, some blocks may be views while other are not. Currently _is_view will ALWAYS return False for multi-blocks to avoid having to handle this case. df = DataFrame(np.arange(0,9), columns=['count']) df['group'] = 'b' # This technically need not raise SettingWithCopy if both are view # (which is not # generally guaranteed but is usually True. However, # this is in general not a good practice and we recommend using .loc. 
df.iloc[0:5]['group'] = 'a' """ if force or self._is_copy: value = config.get_option('mode.chained_assignment') if value is None: return # see if the copy is not actually referred; if so, then dissolve # the copy weakref try: gc.collect(2) if not gc.get_referents(self._is_copy()): self._is_copy = None return except Exception: pass # we might be a false positive try: if self._is_copy().shape == self.shape: self._is_copy = None return except Exception: pass # a custom message if isinstance(self._is_copy, string_types): t = self._is_copy elif t == 'referant': t = ("\n" "A value is trying to be set on a copy of a slice from a " "DataFrame\n\n" "See the caveats in the documentation: " "http://pandas.pydata.org/pandas-docs/stable/" "indexing.html#indexing-view-versus-copy" ) else: t = ("\n" "A value is trying to be set on a copy of a slice from a " "DataFrame.\n" "Try using .loc[row_indexer,col_indexer] = value " "instead\n\nSee the caveats in the documentation: " "http://pandas.pydata.org/pandas-docs/stable/" "indexing.html#indexing-view-versus-copy" ) if value == 'raise': raise com.SettingWithCopyError(t) elif value == 'warn': warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel) def __delitem__(self, key): """ Delete item """ deleted = False maybe_shortcut = False if hasattr(self, 'columns') and isinstance(self.columns, MultiIndex): try: maybe_shortcut = key not in self.columns._engine except TypeError: pass if maybe_shortcut: # Allow shorthand to delete all columns whose first len(key) # elements match key: if not isinstance(key, tuple): key = (key, ) for col in self.columns: if isinstance(col, tuple) and col[:len(key)] == key: del self[col] deleted = True if not deleted: # If the above loop ran and didn't delete anything because # there was no match, this call should raise the appropriate # exception: self._data.delete(key) # delete from the caches try: del self._item_cache[key] except KeyError: pass def _take(self, indices, axis=0, is_copy=True): """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. This is the internal version of ``.take()`` and will contain a wider selection of parameters useful for internal use but not as suitable for public usage. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : int, default 0 The axis on which to select elements. "0" means that we are selecting rows, "1" means that we are selecting columns, etc. is_copy : bool, default True Whether to return a copy of the original object or not. Returns ------- taken : same type as caller An array-like containing the elements taken from the object. See Also -------- numpy.ndarray.take numpy.take """ self._consolidate_inplace() new_data = self._data.take(indices, axis=self._get_block_manager_axis(axis), verify=True) result = self._constructor(new_data).__finalize__(self) # Maybe set copy if we didn't actually change the index. if is_copy: if not result._get_axis(axis).equals(self._get_axis(axis)): result._set_is_copy(self) return result def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs): """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. 
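# Hedged sketch of the pitfall ``_check_setitem_copy`` above guards
# against: assigning through a chained indexing expression may write to a
# temporary copy, which is why pandas recommends a single ``.loc`` call.
import pandas as pd

_df = pd.DataFrame({'count': range(9)})
_df['group'] = 'b'
# _df[_df['count'] > 4]['group'] = 'a'    # chained; may only mutate a copy
_df.loc[_df['count'] > 4, 'group'] = 'a'  # explicit row/column indexer
assert (_df.loc[_df['count'] > 4, 'group'] == 'a').all()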
Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. convert : bool, default True Whether to convert negative indices into positive ones. For example, ``-1`` would map to the ``len(axis) - 1``. The conversions are similar to the behavior of indexing a regular Python list. .. deprecated:: 0.21.0 In the future, negative indices will always be converted. is_copy : bool, default True Whether to return a copy of the original object or not. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- taken : same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default). Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]) name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. >>> df.take([-1, -2]) name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ if convert is not None: msg = ("The 'convert' parameter is deprecated " "and will be removed in a future version.") warnings.warn(msg, FutureWarning, stacklevel=2) nv.validate_take(tuple(), kwargs) return self._take(indices, axis=axis, is_copy=is_copy) def xs(self, key, axis=0, level=None, drop_level=True): """ Return cross-section from the Series/DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis to retrieve cross-section on. level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. drop_level : bool, default True If False, returns object with same levels as self. Returns ------- Series or DataFrame Cross-section from the original Series or DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Notes ----- `xs` can not be used to set values. MultiIndex Slicers is a generic way to get/set values on any level or levels. It is a superset of `xs` functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`. 
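# Quick illustrative check of the point made for ``take`` above: it is
# purely positional and ignores index labels, unlike label-based ``.loc``.
import pandas as pd

_s = pd.Series([10, 20, 30], index=[2, 0, 1])
assert list(_s.take([0, 2])) == [10, 30]  # positions 0 and 2
assert _s.loc[0] == 20                    # label 0 lives at position 1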
Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = pd.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog')) num_legs num_wings locomotion walks 4 0 Get values at specified index and level >>> df.xs('cat', level=1) num_legs num_wings class locomotion mammal walks 4 0 Get values at several indexes and levels >>> df.xs(('bird', 'walks'), ... level=[0, 'locomotion']) num_legs num_wings animal penguin 2 2 Get values at specified column and axis >>> df.xs('num_wings', axis=1) class animal locomotion mammal cat walks 0 dog walks 0 bat flies 2 bird penguin walks 2 Name: num_wings, dtype: int64 """ axis = self._get_axis_number(axis) labels = self._get_axis(axis) if level is not None: loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level) # create the tuple of the indexer indexer = [slice(None)] * self.ndim indexer[axis] = loc indexer = tuple(indexer) result = self.iloc[indexer] setattr(result, result._get_axis_name(axis), new_ax) return result if axis == 1: return self[key] self._consolidate_inplace() index = self.index if isinstance(index, MultiIndex): loc, new_index = self.index.get_loc_level(key, drop_level=drop_level) else: loc = self.index.get_loc(key) if isinstance(loc, np.ndarray): if loc.dtype == np.bool_: inds, = loc.nonzero() return self._take(inds, axis=axis) else: return self._take(loc, axis=axis) if not is_scalar(loc): new_index = self.index[loc] if is_scalar(loc): new_values = self._data.fast_xs(loc) # may need to box a datelike-scalar # # if we encounter an array-like and we only have 1 dim # that means that their are list/ndarrays inside the Series! # so just return them (GH 6394) if not is_list_like(new_values) or self.ndim == 1: return com.maybe_box_datetimelike(new_values) result = self._constructor_sliced( new_values, index=self.columns, name=self.index[loc], dtype=new_values.dtype) else: result = self.iloc[loc] result.index = new_index # this could be a view # but only in a single-dtyped view slicable case result._set_is_copy(self, copy=not result._is_view) return result _xs = xs def select(self, crit, axis=0): """ Return data corresponding to axis labels matching criteria. .. deprecated:: 0.21.0 Use df.loc[df.index.map(crit)] to select via labels Parameters ---------- crit : function To be called on each index (label). Should return True or False axis : int Returns ------- selection : same type as caller """ warnings.warn("'select' is deprecated and will be removed in a " "future release. You can use " ".loc[labels.map(crit)] as a replacement", FutureWarning, stacklevel=2) axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) axis_values = self._get_axis(axis) if len(axis_values) > 0: new_axis = axis_values[ np.asarray([bool(crit(label)) for label in axis_values])] else: new_axis = axis_values return self.reindex(**{axis_name: new_axis}) def reindex_like(self, other, method=None, copy=True, limit=None, tolerance=None): """ Return an object with matching indices as other object. 
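# Illustrative migration away from the deprecated ``select`` above, using
# the replacement its own FutureWarning suggests (``.loc`` plus
# ``Index.map``). The frame and criterion here are made up for the demo.
import pandas as pd

_df = pd.DataFrame({'x': [1, 2, 3]}, index=['apple', 'banana', 'cherry'])
_crit = lambda label: label.startswith('b')
# _df.select(_crit)                    # deprecated spelling
_sel = _df.loc[_df.index.map(_crit)]   # recommended replacement
assert list(_sel.index) == ['banana']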
Conform the object to the same index on all axes. Optional filling logic, placing NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- other : Object of the same data type Its row and column indices are used to define the new indices of this object. method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: propagate last valid observation forward to next valid * backfill / bfill: use next valid observation to fill gap * nearest: use nearest valid observations to fill gap copy : bool, default True Return a new object, even if the passed indexes are the same. limit : int, default None Maximum number of consecutive labels to fill for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations most satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. .. versionadded:: 0.21.0 (list-like tolerance) Returns ------- Series or DataFrame Same type as caller, but with changed indices on each axis. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. Notes ----- Same as calling ``.reindex(index=other.index, columns=other.columns,...)``. Examples -------- >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'], ... [31, 87.8, 'high'], ... [22, 71.6, 'medium'], ... [35, 95, 'medium']], ... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'], ... index=pd.date_range(start='2014-02-12', ... end='2014-02-15', freq='D')) >>> df1 temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df2 = pd.DataFrame([[28, 'low'], ... [30, 'low'], ... [35.1, 'medium']], ... columns=['temp_celsius', 'windspeed'], ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', ... 
'2014-02-15'])) >>> df2 temp_celsius windspeed 2014-02-12 28.0 low 2014-02-13 30.0 low 2014-02-15 35.1 medium >>> df2.reindex_like(df1) temp_celsius temp_fahrenheit windspeed 2014-02-12 28.0 NaN low 2014-02-13 30.0 NaN low 2014-02-14 NaN NaN NaN 2014-02-15 35.1 NaN medium """ d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method, copy=copy, limit=limit, tolerance=tolerance) return self.reindex(**d) def drop(self, labels=None, axis=0, index=None, columns=None, level=None, inplace=False, errors='raise'): inplace = validate_bool_kwarg(inplace, 'inplace') if labels is not None: if index is not None or columns is not None: raise ValueError("Cannot specify both 'labels' and " "'index'/'columns'") axis_name = self._get_axis_name(axis) axes = {axis_name: labels} elif index is not None or columns is not None: axes, _ = self._construct_axes_from_arguments((index, columns), {}) else: raise ValueError("Need to specify at least one of 'labels', " "'index' or 'columns'") obj = self for axis, labels in axes.items(): if labels is not None: obj = obj._drop_axis(labels, axis, level=level, errors=errors) if inplace: self._update_inplace(obj) else: return obj def _drop_axis(self, labels, axis, level=None, errors='raise'): """ Drop labels from specified axis. Used in the ``drop`` method internally. Parameters ---------- labels : single label or list-like axis : int or axis name level : int or level name, default None For MultiIndex errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. """ axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) axis = self._get_axis(axis) if axis.is_unique: if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError('axis must be a MultiIndex') new_axis = axis.drop(labels, level=level, errors=errors) else: new_axis = axis.drop(labels, errors=errors) result = self.reindex(**{axis_name: new_axis}) # Case for non-unique axis else: labels = ensure_object(com.index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError('axis must be a MultiIndex') indexer = ~axis.get_level_values(level).isin(labels) # GH 18561 MultiIndex.drop should raise if label is absent if errors == 'raise' and indexer.all(): raise KeyError('{} not found in axis'.format(labels)) else: indexer = ~axis.isin(labels) # Check if label doesn't exist along axis labels_missing = (axis.get_indexer_for(labels) == -1).any() if errors == 'raise' and labels_missing: raise KeyError('{} not found in axis'.format(labels)) slicer = [slice(None)] * self.ndim slicer[self._get_axis_number(axis_name)] = indexer result = self.loc[tuple(slicer)] return result def _update_inplace(self, result, verify_is_copy=True): """ Replace self internals with result. Parameters ---------- verify_is_copy : boolean, default True provide is_copy checks """ # NOTE: This does *not* call __finalize__ and that's an explicit # decision that we may revisit in the future. self._reset_cache() self._clear_item_cache() self._data = getattr(result, '_data', result) self._maybe_update_cacher(verify_is_copy=verify_is_copy) def add_prefix(self, prefix): """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_suffix: Suffix row labels with string `suffix`. 
DataFrame.add_suffix: Suffix column labels with string `suffix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_prefix('item_') item_0 1 item_1 2 item_2 3 item_3 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ f = functools.partial('{prefix}{}'.format, prefix=prefix) mapper = {self._info_axis_name: f} return self.rename(**mapper) def add_suffix(self, suffix): """ Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_suffix('_item') 0_item 1 1_item 2 2_item 3 3_item 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ f = functools.partial('{}{suffix}'.format, suffix=suffix) mapper = {self._info_axis_name: f} return self.rename(**mapper) def sort_values(self, by=None, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): """ Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also ndarray.np.sort for more information. `mergesort` is the only stable algorithm. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. Returns ------- sorted_obj : DataFrame or None DataFrame with sorted values if inplace=False, None otherwise. Examples -------- >>> df = pd.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }) >>> df col1 col2 col3 0 A 2 0 1 A 1 1 2 B 9 9 3 NaN 8 4 4 D 7 2 5 C 4 3 Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 0 A 2 0 1 A 1 1 2 B 9 9 5 C 4 3 4 D 7 2 3 NaN 8 4 Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 1 A 1 1 0 A 2 0 2 B 9 9 5 C 4 3 4 D 7 2 3 NaN 8 4 Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 4 D 7 2 5 C 4 3 2 B 9 9 0 A 2 0 1 A 1 1 3 NaN 8 4 Putting NAs first >>> df.sort_values(by='col1', ascending=False, na_position='first') col1 col2 col3 3 NaN 8 4 4 D 7 2 5 C 4 3 2 B 9 9 0 A 2 0 1 A 1 1 """ raise NotImplementedError("sort_values has not been implemented " "on Panel or Panel4D objects.") def sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True): """ Sort object by labels (along an axis). Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis along which to sort. 
The value 0 identifies the rows, and 1 identifies the columns. level : int or level name or list of ints or list of level names If not None, sort on values in specified index level(s). ascending : bool, default True Sort ascending vs. descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also ndarray.np.sort for more information. `mergesort` is the only stable algorithm. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. Returns ------- sorted_obj : DataFrame or None DataFrame with sorted index if inplace=False, None otherwise. """ inplace = validate_bool_kwarg(inplace, 'inplace') axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) labels = self._get_axis(axis) if level is not None: raise NotImplementedError("level is not implemented") if inplace: raise NotImplementedError("inplace is not implemented") sort_index = labels.argsort() if not ascending: sort_index = sort_index[::-1] new_axis = labels.take(sort_index) return self.reindex(**{axis_name: new_axis}) def reindex(self, *args, **kwargs): """ Conform %(klass)s to new index with optional filling logic, placing NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and ``copy=False``. Parameters ---------- %(optional_labels)s %(axes)s : array-like, optional New labels / index to conform to, should be specified using keywords. Preferably an Index object to avoid duplicating data %(optional_axis)s method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: propagate last valid observation forward to next valid * backfill / bfill: use next valid observation to fill gap * nearest: use nearest valid observations to fill gap copy : bool, default True Return a new object, even if the passed indexes are the same. level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. limit : int, default None Maximum number of consecutive elements to forward or backward fill. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations most satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. .. versionadded:: 0.21.0 (list-like tolerance) Returns ------- %(klass)s with changed index. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. 
DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = pd.DataFrame({ ... 'http_status': [200,200,404,404,301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}, ... index=index) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index) http_status response_time Safari 404.0 0.07 Iceweasel NaN NaN Comodo Dragon NaN NaN IE10 404.0 0.08 Chrome 200.0 0.02 We can fill in the missing values by passing a value to the keyword ``fill_value``. Because the index is not monotonically increasing or decreasing, we cannot use arguments to the keyword ``method`` to fill the ``NaN`` values. >>> df.reindex(new_index, fill_value=0) http_status response_time Safari 404 0.07 Iceweasel 0 0.00 Comodo Dragon 0 0.00 IE10 404 0.08 Chrome 200 0.02 >>> df.reindex(new_index, fill_value='missing') http_status response_time Safari 404 0.07 Iceweasel missing missing Comodo Dragon missing missing IE10 404 0.08 Chrome 200 0.02 We can also reindex the columns. >>> df.reindex(columns=['http_status', 'user_agent']) http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns") http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]}, ... index=date_index) >>> df2 prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2) prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN The index entries that did not have a value in the original data frame (for example, '2009-12-29') are by default filled with ``NaN``. If desired, we can fill in the missing values using one of several options. For example, to back-propagate the last valid value to fill the ``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword. >>> df2.reindex(date_index2, method='bfill') prices 2009-12-29 100.0 2009-12-30 100.0 2009-12-31 100.0 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN Please note that the ``NaN`` value present in the original dataframe (at index value 2010-01-03) will not be filled by any of the value propagation schemes. 
This is because filling while reindexing does not look at dataframe values, but only compares the original and desired indexes. If you do want to fill in the ``NaN`` values present in the original dataframe, use the ``fillna()`` method. See the :ref:`user guide <basics.reindexing>` for more. """ # TODO: Decide if we care about having different examples for different # kinds # construct the args axes, kwargs = self._construct_axes_from_arguments(args, kwargs) method = missing.clean_reindex_fill_method(kwargs.pop('method', None)) level = kwargs.pop('level', None) copy = kwargs.pop('copy', True) limit = kwargs.pop('limit', None) tolerance = kwargs.pop('tolerance', None) fill_value = kwargs.pop('fill_value', None) # Series.reindex doesn't use / need the axis kwarg # We pop and ignore it here, to make writing Series/Frame generic code # easier kwargs.pop("axis", None) if kwargs: raise TypeError('reindex() got an unexpected keyword ' 'argument "{0}"'.format(list(kwargs.keys())[0])) self._consolidate_inplace() # if all axes that are requested to reindex are equal, then only copy # if indicated must have index names equal here as well as values if all(self._get_axis(axis).identical(ax) for axis, ax in axes.items() if ax is not None): if copy: return self.copy() return self # check if we are a multi reindex if self._needs_reindex_multi(axes, method, level): try: return self._reindex_multi(axes, copy, fill_value) except Exception: pass # perform the reindex on the axes return self._reindex_axes(axes, level, limit, tolerance, method, fill_value, copy).__finalize__(self) def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): """Perform the reindex for all the axes.""" obj = self for a in self._AXIS_ORDERS: labels = axes[a] if labels is None: continue ax = self._get_axis(a) new_index, indexer = ax.reindex(labels, level=level, limit=limit, tolerance=tolerance, method=method) axis = self._get_axis_number(a) obj = obj._reindex_with_indexers({axis: [new_index, indexer]}, fill_value=fill_value, copy=copy, allow_dups=False) return obj def _needs_reindex_multi(self, axes, method, level): """Check if we do need a multi reindex.""" return ((com.count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type) def _reindex_multi(self, axes, copy, fill_value): return NotImplemented _shared_docs['reindex_axis'] = (""" Conform input object to new index. .. deprecated:: 0.21.0 Use `reindex` instead. By default, places NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- labels : array-like New labels / index to conform to. Preferably an Index object to avoid duplicating data. axis : %(axes_single_arg)s Indicate whether to use rows or columns. method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional Method to use for filling holes in reindexed DataFrame: * default: don't fill gaps. * pad / ffill: propagate last valid observation forward to next valid. * backfill / bfill: use next valid observation to fill gap. * nearest: use nearest valid observations to fill gap. level : int or str Broadcast across a level, matching Index values on the passed MultiIndex level. copy : bool, default True Return a new object, even if the passed indexes are the same. limit : int, optional Maximum number of consecutive elements to forward or backward fill. 
fill_value : float, default NaN Value used to fill in locations having no value in the previous index. .. versionadded:: 0.21.0 (list-like tolerance) Returns ------- %(klass)s Returns a new DataFrame object with new indices, unless the new index is equivalent to the current one and copy=False. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> df.reindex(['num_wings', 'num_legs', 'num_heads'], ... axis='columns') num_wings num_legs num_heads dog 0 4 NaN hawk 2 2 NaN """) @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs) def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, limit=None, fill_value=None): msg = ("'.reindex_axis' is deprecated and will be removed in a future " "version. Use '.reindex' instead.") self._consolidate_inplace() axis_name = self._get_axis_name(axis) axis_values = self._get_axis(axis_name) method = missing.clean_reindex_fill_method(method) warnings.warn(msg, FutureWarning, stacklevel=3) new_index, indexer = axis_values.reindex(labels, method, level, limit=limit) return self._reindex_with_indexers({axis: [new_index, indexer]}, fill_value=fill_value, copy=copy) def _reindex_with_indexers(self, reindexers, fill_value=None, copy=False, allow_dups=False): """allow_dups indicates an internal call here """ # reindex doing multiple operations on different axes if indicated new_data = self._data for axis in sorted(reindexers.keys()): index, indexer = reindexers[axis] baxis = self._get_block_manager_axis(axis) if index is None: continue index = ensure_index(index) if indexer is not None: indexer = ensure_int64(indexer) # TODO: speed up on homogeneous DataFrame objects new_data = new_data.reindex_indexer(index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups, copy=copy) if copy and new_data is self._data: new_data = new_data.copy() return self._constructor(new_data).__finalize__(self) def filter(self, items=None, like=None, regex=None, axis=None): """ Subset rows or columns of dataframe according to labels in the specified index. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like List of axis to restrict to (must not all be present). like : string Keep axis where "arg in col == True". regex : string (regular expression) Keep axis with re.search(regex, col) == True. axis : int or string axis name The axis to filter on. By default this is the info axis, 'index' for Series, 'columns' for DataFrame. Returns ------- same type as input object See Also -------- DataFrame.loc Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = pd.DataFrame(np.array(([1,2,3], [4,5,6])), ... index=['mouse', 'rabbit'], ... 
columns=['one', 'two', 'three']) >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 """ import re nkw = com.count_not_none(items, like, regex) if nkw > 1: raise TypeError('Keyword arguments `items`, `like`, or `regex` ' 'are mutually exclusive') if axis is None: axis = self._info_axis_name labels = self._get_axis(axis) if items is not None: name = self._get_axis_name(axis) return self.reindex( **{name: [r for r in items if r in labels]}) elif like: def f(x): return like in to_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: def f(x): return matcher.search(to_str(x)) is not None matcher = re.compile(regex) values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError('Must pass either `items`, `like`, or `regex`') def head(self, n=5): """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- obj_head : same type as caller The first `n` rows of the caller object. See Also -------- DataFrame.tail: Returns the last `n` rows. Examples -------- >>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon """ return self.iloc[:n] def tail(self, n=5): """ Return the last `n` rows. This function returns last `n` rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) animal 6 shark 7 whale 8 zebra """ if n == 0: return self.iloc[0:0] return self.iloc[-n:] def sample(self, n=None, frac=None, replace=False, weights=None, random_state=None, axis=None): """ Return a random sample of items from an axis of object. You can use `random_state` for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with `frac`. Default = 1 if `frac` = None. frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : bool, default False Sample with or without replacement. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. 
Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. Infinite values not allowed. random_state : int or numpy.random.RandomState, optional Seed for the random number generator (if int), or numpy RandomState object. axis : int or string, optional Axis to sample. Accepts axis number or name. Default is stat axis for given data type (0 for Series and DataFrames, 1 for Panels). Returns ------- Series or DataFrame A new object of same type as caller containing `n` items randomly sampled from the caller object. See Also -------- numpy.random.choice: Generates a random sample from a given 1-D numpy array. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish']) >>> df num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 Extract 3 random elements from the ``Series`` ``df['num_legs']``: Note that we use `random_state` to ensure the reproducibility of the examples. >>> df['num_legs'].sample(n=3, random_state=1) fish 0 spider 8 falcon 2 Name: num_legs, dtype: int64 A random 50% sample of the ``DataFrame`` with replacement: >>> df.sample(frac=0.5, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 Using a DataFrame column as weights. Rows with larger value in the `num_specimen_seen` column are more likely to be sampled. >>> df.sample(n=2, weights='num_specimen_seen', random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) axis_length = self.shape[axis] # Process random_state argument rs = com.random_state(random_state) # Check weights for compliance if weights is not None: # If a series, align with frame if isinstance(weights, pd.Series): weights = weights.reindex(self.axes[axis]) # Strings acceptable if a dataframe and axis = 0 if isinstance(weights, string_types): if isinstance(self, pd.DataFrame): if axis == 0: try: weights = self[weights] except KeyError: raise KeyError("String passed to weights not a " "valid column") else: raise ValueError("Strings can only be passed to " "weights when sampling from rows on " "a DataFrame") else: raise ValueError("Strings cannot be passed as weights " "when sampling from a Series or Panel.") weights = pd.Series(weights, dtype='float64') if len(weights) != axis_length: raise ValueError("Weights and axis to be sampled must be of " "same length") if (weights == np.inf).any() or (weights == -np.inf).any(): raise ValueError("weight vector may not include `inf` values") if (weights < 0).any(): raise ValueError("weight vector many not include negative " "values") # If has nan, set to zero. weights = weights.fillna(0) # Renormalize if don't sum to 1 if weights.sum() != 1: if weights.sum() != 0: weights = weights / weights.sum() else: raise ValueError("Invalid weights: weights sum to zero") weights = weights.values # If no frac or n, default to n=1. 
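# (Illustrative gloss on the block below, which resolves the mutually
# exclusive ``n``/``frac`` sizing arguments: a bare call samples one row,
# ``frac`` is converted to a count via ``int(round(frac * axis_length))``,
# passing both raises, and so do fractional or negative values of ``n``.)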
if n is None and frac is None: n = 1 elif n is not None and frac is None and n % 1 != 0: raise ValueError("Only integers accepted as `n` values") elif n is None and frac is not None: n = int(round(frac * axis_length)) elif n is not None and frac is not None: raise ValueError('Please enter a value for `frac` OR `n`, not ' 'both') # Check for negative sizes if n < 0: raise ValueError("A negative number of rows requested. Please " "provide positive value.") locs = rs.choice(axis_length, size=n, replace=replace, p=weights) return self.take(locs, axis=axis, is_copy=False) _shared_docs['pipe'] = (r""" Apply func(self, \*args, \*\*kwargs). Parameters ---------- func : function function to apply to the %(klass)s. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the %(klass)s. args : iterable, optional positional arguments passed into ``func``. kwargs : mapping, optional a dictionary of keyword arguments passed into ``func``. Returns ------- object : the return type of ``func``. See Also -------- DataFrame.apply DataFrame.applymap Series.map Notes ----- Use ``.pipe`` when chaining together functions that expect Series, DataFrames or GroupBy objects. Instead of writing >>> f(g(h(df), arg1=a), arg2=b, arg3=c) You can write >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe(f, arg2=b, arg3=c) ... ) If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``f`` takes its data as ``arg2``: >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe((f, 'arg2'), arg1=a, arg3=c) ... ) """) @Appender(_shared_docs['pipe'] % _shared_doc_kwargs) def pipe(self, func, *args, **kwargs): return com._pipe(self, func, *args, **kwargs) _shared_docs['aggregate'] = dedent(""" Aggregate using one or more operations over the specified axis. %(versionadded)s Parameters ---------- func : function, str, list or dict Function to use for aggregating the data. If a function, must either work when passed a %(klass)s or when passed to %(klass)s.apply. Accepted combinations are: - function - string function name - list of functions and/or function names, e.g. ``[np.sum, 'mean']`` - dict of axis labels -> functions, function names or list of such. %(axis)s *args Positional arguments to pass to `func`. **kwargs Keyword arguments to pass to `func`. Returns ------- DataFrame, Series or scalar if DataFrame.agg is called with a single function, returns a Series if DataFrame.agg is called with several functions, returns a DataFrame if Series.agg is called with single function, returns a scalar if Series.agg is called with several functions, returns a Series %(see_also)s Notes ----- `agg` is an alias for `aggregate`. Use the alias. A passed user-defined-function will be passed a Series for evaluation. %(examples)s """) _shared_docs['transform'] = (""" Call ``func`` on self producing a %(klass)s with transformed values and that has the same axis length as self. .. versionadded:: 0.20.0 Parameters ---------- func : function, str, list or dict Function to use for transforming the data. If a function, must either work when passed a %(klass)s or when passed to %(klass)s.apply. Accepted combinations are: - function - string function name - list of functions and/or function names, e.g. ``[np.exp. 'sqrt']`` - dict of axis labels -> functions, function names or list of such. %(axis)s *args Positional arguments to pass to `func`. 
**kwargs Keyword arguments to pass to `func`. Returns ------- %(klass)s A %(klass)s that must have the same length as self. Raises ------ ValueError : If the returned %(klass)s has a different length than self. See Also -------- %(klass)s.agg : Only perform aggregating type operations. %(klass)s.apply : Invoke function on a %(klass)s. Examples -------- >>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)}) >>> df A B 0 0 1 1 1 2 2 2 3 >>> df.transform(lambda x: x + 1) A B 0 1 2 1 2 3 2 3 4 Even though the resulting %(klass)s must have the same length as the input %(klass)s, it is possible to provide several input functions: >>> s = pd.Series(range(3)) >>> s 0 0 1 1 2 2 dtype: int64 >>> s.transform([np.sqrt, np.exp]) sqrt exp 0 0.000000 1.000000 1 1.000000 2.718282 2 1.414214 7.389056 """) # ---------------------------------------------------------------------- # Attribute access def __finalize__(self, other, method=None, **kwargs): """ Propagate metadata from other to self. Parameters ---------- other : the object from which to get the attributes that we are going to propagate method : optional, a passed method name ; possibly to take different types of propagation actions based on this """ if isinstance(other, NDFrame): for name in self._metadata: object.__setattr__(self, name, getattr(other, name, None)) return self def __getattr__(self, name): """After regular attribute access, try looking up the name This allows simpler access to columns for interactive use. """ # Note: obj.x will always call obj.__getattribute__('x') prior to # calling obj.__getattr__('x'). if (name in self._internal_names_set or name in self._metadata or name in self._accessors): return object.__getattribute__(self, name) else: if self._info_axis._can_hold_identifiers_and_holds_name(name): return self[name] return object.__getattribute__(self, name) def __setattr__(self, name, value): """After regular attribute access, try setting the name This allows simpler access to columns for interactive use. """ # first try regular attribute access via __getattribute__, so that # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify # the same attribute. try: object.__getattribute__(self, name) return object.__setattr__(self, name, value) except AttributeError: pass # if this fails, go on to more involved attribute setting # (note that this matches __getattr__, above). if name in self._internal_names_set: object.__setattr__(self, name, value) elif name in self._metadata: object.__setattr__(self, name, value) else: try: existing = getattr(self, name) if isinstance(existing, Index): object.__setattr__(self, name, value) elif name in self._info_axis: self[name] = value else: object.__setattr__(self, name, value) except (AttributeError, TypeError): if isinstance(self, ABCDataFrame) and (is_list_like(value)): warnings.warn("Pandas doesn't allow columns to be " "created via a new attribute name - see " "https://pandas.pydata.org/pandas-docs/" "stable/indexing.html#attribute-access", stacklevel=2) object.__setattr__(self, name, value) def _dir_additions(self): """ add the string-like attributes from the info_axis. If info_axis is a MultiIndex, it's first level values are used. 
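# Illustrative consequence of the ``__setattr__`` logic above (behavior
# assumed from its warning branch): attribute assignment under a new name
# does not create a column, while item assignment does.
import pandas as pd

_df = pd.DataFrame({'a': [1, 2]})
_df['b'] = [3, 4]   # item assignment creates a real column
_df.c = [5, 6]      # plain attribute; pandas warns, no 'c' column appears
assert 'b' in _df.columns and 'c' not in _df.columns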
""" additions = {c for c in self._info_axis.unique(level=0)[:100] if isinstance(c, string_types) and isidentifier(c)} return super(NDFrame, self)._dir_additions().union(additions) # ---------------------------------------------------------------------- # Getting and setting elements # ---------------------------------------------------------------------- # Consolidation of internals def _protect_consolidate(self, f): """Consolidate _data -- if the blocks have changed, then clear the cache """ blocks_before = len(self._data.blocks) result = f() if len(self._data.blocks) != blocks_before: self._clear_item_cache() return result def _consolidate_inplace(self): """Consolidate data in place and return None""" def f(): self._data = self._data.consolidate() self._protect_consolidate(f) def _consolidate(self, inplace=False): """ Compute NDFrame with "consolidated" internals (data of each dtype grouped together in a single ndarray). Parameters ---------- inplace : boolean, default False If False return new object, otherwise modify existing object Returns ------- consolidated : same type as caller """ inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: self._consolidate_inplace() else: f = lambda: self._data.consolidate() cons_data = self._protect_consolidate(f) return self._constructor(cons_data).__finalize__(self) @property def _is_mixed_type(self): f = lambda: self._data.is_mixed_type return self._protect_consolidate(f) @property def _is_numeric_mixed_type(self): f = lambda: self._data.is_numeric_mixed_type return self._protect_consolidate(f) @property def _is_datelike_mixed_type(self): f = lambda: self._data.is_datelike_mixed_type return self._protect_consolidate(f) def _check_inplace_setting(self, value): """ check whether we allow in-place setting with this type of value """ if self._is_mixed_type: if not self._is_numeric_mixed_type: # allow an actual np.nan thru try: if np.isnan(value): return True except Exception: pass raise TypeError('Cannot do inplace boolean setting on ' 'mixed-types with a non np.nan value') return True def _get_numeric_data(self): return self._constructor( self._data.get_numeric_data()).__finalize__(self) def _get_bool_data(self): return self._constructor(self._data.get_bool_data()).__finalize__(self) # ---------------------------------------------------------------------- # Internal Interface Methods def as_matrix(self, columns=None): """ Convert the frame to its Numpy-array representation. .. deprecated:: 0.23.0 Use :meth:`DataFrame.values` instead. Parameters ---------- columns : list, optional, default:None If None, return all columns, otherwise, returns specified columns. Returns ------- values : ndarray If the caller is heterogeneous and contains booleans or objects, the result will be of dtype=object. See Notes. See Also -------- DataFrame.values Notes ----- Return is NOT a Numpy-matrix, rather, a Numpy-array. The dtype will be a lower-common-denominator dtype (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. Use this with care if you are not dealing with the blocks. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcase to int32. By numpy.find_common_type convention, mixing int64 and uint64 will result in a float64 dtype. This method is provided for backwards compatibility. Generally, it is recommended to use '.values'. """ warnings.warn("Method .as_matrix will be removed in a future version. 
" "Use .values instead.", FutureWarning, stacklevel=2) self._consolidate_inplace() return self._data.as_array(transpose=self._AXIS_REVERSED, items=columns) @property def values(self): """ Return a Numpy representation of the DataFrame. .. warning:: We recommend using :meth:`DataFrame.to_numpy` instead. Only the values in the DataFrame will be returned, the axes labels will be removed. Returns ------- numpy.ndarray The values of the DataFrame. See Also -------- DataFrame.to_numpy : Recommended alternative to this method. pandas.DataFrame.index : Retrieve the index labels. pandas.DataFrame.columns : Retrieving the column names. Notes ----- The dtype will be a lower-common-denominator dtype (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. Use this with care if you are not dealing with the blocks. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcast to int32. By :func:`numpy.find_common_type` convention, mixing int64 and uint64 will result in a float64 dtype. Examples -------- A DataFrame where all columns are the same type (e.g., int64) results in an array of the same type. >>> df = pd.DataFrame({'age': [ 3, 29], ... 'height': [94, 170], ... 'weight': [31, 115]}) >>> df age height weight 0 3 94 31 1 29 170 115 >>> df.dtypes age int64 height int64 weight int64 dtype: object >>> df.values array([[ 3, 94, 31], [ 29, 170, 115]], dtype=int64) A DataFrame with mixed type columns(e.g., str/object, int64, float32) results in an ndarray of the broadest type that accommodates these mixed types (e.g., object). >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'), ... ('lion', 80.5, 1), ... ('monkey', np.nan, None)], ... columns=('name', 'max_speed', 'rank')) >>> df2.dtypes name object max_speed float64 rank object dtype: object >>> df2.values array([['parrot', 24.0, 'second'], ['lion', 80.5, 1], ['monkey', nan, None]], dtype=object) """ self._consolidate_inplace() return self._data.as_array(transpose=self._AXIS_REVERSED) @property def _values(self): """internal implementation""" return self.values @property def _get_values(self): # compat return self.values def get_values(self): """ Return an ndarray after converting sparse values to dense. This is the same as ``.values`` for non-sparse data. For sparse data contained in a `pandas.SparseArray`, the data are first converted to a dense representation. Returns ------- numpy.ndarray Numpy representation of DataFrame See Also -------- values : Numpy representation of DataFrame. pandas.SparseArray : Container for sparse data. Examples -------- >>> df = pd.DataFrame({'a': [1, 2], 'b': [True, False], ... 'c': [1.0, 2.0]}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 >>> df.get_values() array([[1, True, 1.0], [2, False, 2.0]], dtype=object) >>> df = pd.DataFrame({"a": pd.SparseArray([1, None, None]), ... "c": [1.0, 2.0, 3.0]}) >>> df a c 0 1.0 1.0 1 NaN 2.0 2 NaN 3.0 >>> df.get_values() array([[ 1., 1.], [nan, 2.], [nan, 3.]]) """ return self.values def get_dtype_counts(self): """ Return counts of unique dtypes in this object. Returns ------- dtype : Series Series with the count of columns with each dtype. See Also -------- dtypes : Return the dtypes in this object. 
Examples -------- >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]] >>> df = pd.DataFrame(a, columns=['str', 'int', 'float']) >>> df str int float 0 a 1 1.0 1 b 2 2.0 2 c 3 3.0 >>> df.get_dtype_counts() float64 1 int64 1 object 1 dtype: int64 """ from pandas import Series return Series(self._data.get_dtype_counts()) def get_ftype_counts(self): """ Return counts of unique ftypes in this object. .. deprecated:: 0.23.0 This is useful for SparseDataFrame or for DataFrames containing sparse arrays. Returns ------- dtype : Series Series with the count of columns with each type and sparsity (dense/sparse) See Also -------- ftypes : Return ftypes (indication of sparse/dense and dtype) in this object. Examples -------- >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]] >>> df = pd.DataFrame(a, columns=['str', 'int', 'float']) >>> df str int float 0 a 1 1.0 1 b 2 2.0 2 c 3 3.0 >>> df.get_ftype_counts() # doctest: +SKIP float64:dense 1 int64:dense 1 object:dense 1 dtype: int64 """ warnings.warn("get_ftype_counts is deprecated and will " "be removed in a future version", FutureWarning, stacklevel=2) from pandas import Series return Series(self._data.get_ftype_counts()) @property def dtypes(self): """ Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. See Also -------- pandas.DataFrame.ftypes : Dtype and sparsity information. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ... 'datetime': [pd.Timestamp('20180310')], ... 'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object """ from pandas import Series return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_) @property def ftypes(self): """ Return the ftypes (indication of sparse/dense and dtype) in DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type and indication of sparse/dense of each column. See Also -------- pandas.DataFrame.dtypes: Series with just dtype information. pandas.SparseDataFrame : Container for sparse tabular data. Notes ----- Sparse data should have the same dtypes as its dense representation. Examples -------- >>> arr = np.random.RandomState(0).randn(100, 4) >>> arr[arr < .8] = np.nan >>> pd.DataFrame(arr).ftypes 0 float64:dense 1 float64:dense 2 float64:dense 3 float64:dense dtype: object >>> pd.SparseDataFrame(arr).ftypes 0 float64:sparse 1 float64:sparse 2 float64:sparse 3 float64:sparse dtype: object """ from pandas import Series return Series(self._data.get_ftypes(), index=self._info_axis, dtype=np.object_) def as_blocks(self, copy=True): """ Convert the frame to a dict of dtype -> Constructor Types that each has a homogeneous dtype. .. 
deprecated:: 0.21.0 NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in as_matrix) Parameters ---------- copy : boolean, default True Returns ------- values : a dict of dtype -> Constructor Types """ warnings.warn("as_blocks is deprecated and will " "be removed in a future version", FutureWarning, stacklevel=2) return self._to_dict_of_blocks(copy=copy) @property def blocks(self): """ Internal property, property synonym for as_blocks(). .. deprecated:: 0.21.0 """ return self.as_blocks() def _to_dict_of_blocks(self, copy=True): """ Return a dict of dtype -> Constructor Types that each is a homogeneous dtype. Internal ONLY """ return {k: self._constructor(v).__finalize__(self) for k, v, in self._data.to_dict(copy=copy).items()} def astype(self, dtype, copy=True, errors='raise', **kwargs): """ Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : data type, or dict of column name -> data type Use a numpy.dtype or Python type to cast entire pandas object to the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. copy : bool, default True Return a copy when ``copy=True`` (be very careful setting ``copy=False`` as changes to values then may propagate to other pandas objects). errors : {'raise', 'ignore'}, default 'raise' Control raising of exceptions on invalid data for provided dtype. - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object .. versionadded:: 0.20.0 kwargs : keyword arguments to pass on to the constructor Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. Examples -------- >>> ser = pd.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 dtype: int32 >>> ser.astype('int64') 0 1 1 2 dtype: int64 Convert to categorical type: >>> ser.astype('category') 0 1 1 2 dtype: category Categories (2, int64): [1, 2] Convert to ordered categorical type with custom ordering: >>> cat_dtype = pd.api.types.CategoricalDtype( ... categories=[2, 1], ordered=True) >>> ser.astype(cat_dtype) 0 1 1 2 dtype: category Categories (2, int64): [2 < 1] Note that using ``copy=False`` and changing data on a new pandas object may propagate changes: >>> s1 = pd.Series([1,2]) >>> s2 = s1.astype('int64', copy=False) >>> s2[0] = 10 >>> s1 # note that s1[0] has changed too 0 10 1 2 dtype: int64 """ if is_dict_like(dtype): if self.ndim == 1: # i.e. Series if len(dtype) > 1 or self.name not in dtype: raise KeyError('Only the Series name can be used for ' 'the key in Series dtype mappings.') new_type = dtype[self.name] return self.astype(new_type, copy, errors, **kwargs) elif self.ndim > 2: raise NotImplementedError( 'astype() only accepts a dtype arg of type dict when ' 'invoked on Series and DataFrames. A single dtype must be ' 'specified when invoked on a Panel.' 
) for col_name in dtype.keys(): if col_name not in self: raise KeyError('Only a column name can be used for the ' 'key in a dtype mappings argument.') results = [] for col_name, col in self.iteritems(): if col_name in dtype: results.append(col.astype(dtype[col_name], copy=copy)) else: results.append(col.copy() if copy else col) elif is_extension_array_dtype(dtype) and self.ndim > 1: # GH 18099/22869: columnwise conversion to extension dtype # GH 24704: use iloc to handle duplicate column names results = (self.iloc[:, i].astype(dtype, copy=copy) for i in range(len(self.columns))) else: # else, only a single dtype is given new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors, **kwargs) return self._constructor(new_data).__finalize__(self) # GH 19920: retain column metadata after concat result = pd.concat(results, axis=1, copy=False) result.columns = self.columns return result def copy(self, deep=True): """ Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- copy : Series, DataFrame or Panel Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original are reflected in both; deep copy remains unchanged. >>> s[0] = 3 >>> shallow[1] = 4 >>> s a 3 b 4 dtype: int64 >>> shallow a 3 b 4 dtype: int64 >>> deep a 1 b 2 dtype: int64 Note that when copying an object containing Python objects, a deep copy will copy the data, but will not do so recursively. Updating a nested data object will be reflected in the deep copy. 
>>> s = pd.Series([[1, 2], [3, 4]]) >>> deep = s.copy() >>> s[0][0] = 10 >>> s 0 [10, 2] 1 [3, 4] dtype: object >>> deep 0 [10, 2] 1 [3, 4] dtype: object """ data = self._data.copy(deep=deep) return self._constructor(data).__finalize__(self) def __copy__(self, deep=True): return self.copy(deep=deep) def __deepcopy__(self, memo=None): """ Parameters ---------- memo, default None Standard signature. Unused """ if memo is None: memo = {} return self.copy(deep=True) def _convert(self, datetime=False, numeric=False, timedelta=False, coerce=False, copy=True): """ Attempt to infer better dtype for object columns Parameters ---------- datetime : boolean, default False If True, convert to date where possible. numeric : boolean, default False If True, attempt to convert to numbers (including strings), with unconvertible values becoming NaN. timedelta : boolean, default False If True, convert to timedelta where possible. coerce : boolean, default False If True, force conversion with unconvertible values converted to nulls (NaN or NaT) copy : boolean, default True If True, return a copy even if no copy is necessary (e.g. no conversion was done). Note: This is meant for internal use, and should not be confused with inplace. Returns ------- converted : same as input object """ return self._constructor( self._data.convert(datetime=datetime, numeric=numeric, timedelta=timedelta, coerce=coerce, copy=copy)).__finalize__(self) def convert_objects(self, convert_dates=True, convert_numeric=False, convert_timedeltas=True, copy=True): """ Attempt to infer better dtype for object columns. .. deprecated:: 0.21.0 Parameters ---------- convert_dates : boolean, default True If True, convert to date where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. convert_numeric : boolean, default False If True, attempt to coerce to numbers (including strings), with unconvertible values becoming NaN. convert_timedeltas : boolean, default True If True, convert to timedelta where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. copy : boolean, default True If True, return a copy even if no copy is necessary (e.g. no conversion was done). Note: This is meant for internal use, and should not be confused with inplace. Returns ------- converted : same as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. """ msg = ("convert_objects is deprecated. To re-infer data dtypes for " "object columns, use {klass}.infer_objects()\nFor all " "other conversions use the data-type specific converters " "pd.to_datetime, pd.to_timedelta and pd.to_numeric." ).format(klass=self.__class__.__name__) warnings.warn(msg, FutureWarning, stacklevel=2) return self._constructor( self._data.convert(convert_dates=convert_dates, convert_numeric=convert_numeric, convert_timedeltas=convert_timedeltas, copy=copy)).__finalize__(self) def infer_objects(self): """ Attempt to infer better dtypes for object columns. Attempts soft conversion of object-dtyped columns, leaving non-object and unconvertible columns unchanged. The inference rules are the same as during normal Series/DataFrame construction. .. versionadded:: 0.21.0 Returns ------- converted : same type as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. 
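astype : Cast a pandas object to a specified dtype.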
Examples -------- >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]}) >>> df = df.iloc[1:] >>> df A 1 1 2 2 3 3 >>> df.dtypes A object dtype: object >>> df.infer_objects().dtypes A int64 dtype: object """ # numeric=False necessary to only soft convert; # python objects will still be converted to # native numpy numeric types return self._constructor( self._data.convert(datetime=True, numeric=False, timedelta=True, coerce=False, copy=True)).__finalize__(self) # ---------------------------------------------------------------------- # Filling NA's def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None): """ Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, dict, Series, or DataFrame Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values specifying which value to use for each index (for a Series) or column (for a DataFrame). (values not in the dict/Series/DataFrame will not be filled). This value cannot be a list. method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid backfill / bfill: use NEXT valid observation to fill gap axis : %(axes_single_arg)s inplace : boolean, default False If True, fill in place. Note: this will modify any other views on this object, (e.g. a no-copy slice for a column in a DataFrame). limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. downcast : dict, default is None a dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible) Returns ------- filled : %(klass)s See Also -------- interpolate : Fill NaN values using interpolation. reindex, asfreq Examples -------- >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], ... [3, 4, np.nan, 1], ... [np.nan, np.nan, np.nan, 5], ... [np.nan, 3, np.nan, 4]], ... columns=list('ABCD')) >>> df A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 3 NaN 3.0 NaN 4 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0 1 3.0 4.0 0.0 1 2 0.0 0.0 0.0 5 3 0.0 3.0 0.0 4 We can also propagate non-null values forward or backward. >>> df.fillna(method='ffill') A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 3.0 4.0 NaN 5 3 3.0 3.0 NaN 4 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0 1 3.0 4.0 2.0 1 2 0.0 1.0 2.0 5 3 0.0 3.0 2.0 4 Only replace the first NaN element. >>> df.fillna(value=values, limit=1) A B C D 0 0.0 2.0 2.0 0 1 3.0 4.0 NaN 1 2 NaN 1.0 NaN 5 3 NaN 3.0 NaN 4 """ inplace = validate_bool_kwarg(inplace, 'inplace') value, method = validate_fillna_kwargs(value, method) self._consolidate_inplace() # set the default here, so functions examining the signaure # can detect if something was set (e.g. 
in groupby) (GH9221) if axis is None: axis = 0 axis = self._get_axis_number(axis) from pandas import DataFrame if value is None: if self._is_mixed_type and axis == 1: if inplace: raise NotImplementedError() result = self.T.fillna(method=method, limit=limit).T # need to downcast here because of all of the transposes result._data = result._data.downcast() return result # > 3d if self.ndim > 3: raise NotImplementedError('Cannot fillna with a method for > ' '3dims') # 3d elif self.ndim == 3: # fill in 2d chunks result = {col: s.fillna(method=method, value=value) for col, s in self.iteritems()} prelim_obj = self._constructor.from_dict(result) new_obj = prelim_obj.__finalize__(self) new_data = new_obj._data else: # 2d or less new_data = self._data.interpolate(method=method, axis=axis, limit=limit, inplace=inplace, coerce=True, downcast=downcast) else: if len(self._get_axis(axis)) == 0: return self if self.ndim == 1: if isinstance(value, (dict, ABCSeries)): from pandas import Series value = Series(value) elif not is_list_like(value): pass else: raise TypeError('"value" parameter must be a scalar, dict ' 'or Series, but you passed a ' '"{0}"'.format(type(value).__name__)) new_data = self._data.fillna(value=value, limit=limit, inplace=inplace, downcast=downcast) elif isinstance(value, (dict, ABCSeries)): if axis == 1: raise NotImplementedError('Currently only can fill ' 'with dict/Series column ' 'by column') result = self if inplace else self.copy() for k, v in compat.iteritems(value): if k not in result: continue obj = result[k] obj.fillna(v, limit=limit, inplace=True, downcast=downcast) return result if not inplace else None elif not is_list_like(value): new_data = self._data.fillna(value=value, limit=limit, inplace=inplace, downcast=downcast) elif isinstance(value, DataFrame) and self.ndim == 2: new_data = self.where(self.notna(), value) else: raise ValueError("invalid fill value with a %s" % type(value)) if inplace: self._update_inplace(new_data) else: return self._constructor(new_data).__finalize__(self) def ffill(self, axis=None, inplace=False, limit=None, downcast=None): """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. """ return self.fillna(method='ffill', axis=axis, inplace=inplace, limit=limit, downcast=downcast) def bfill(self, axis=None, inplace=False, limit=None, downcast=None): """ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``. """ return self.fillna(method='bfill', axis=axis, inplace=inplace, limit=limit, downcast=downcast) _shared_docs['replace'] = (""" Replace values given in `to_replace` with `value`. Values of the %(klass)s are replaced with other values dynamically. This differs from updating with ``.loc`` or ``.iloc``, which require you to specify a location to update with some value. Parameters ---------- to_replace : str, regex, list, dict, Series, int, float, or None How to find the values that will be replaced. * numeric, str or regex: - numeric: numeric values equal to `to_replace` will be replaced with `value` - str: string exactly matching `to_replace` will be replaced with `value` - regex: regexs matching `to_replace` will be replaced with `value` * list of str, regex, or numeric: - First, if `to_replace` and `value` are both lists, they **must** be the same length. - Second, if ``regex=True`` then all of the strings in **both** lists will be interpreted as regexs otherwise they will match directly. This doesn't matter much for `value` since there are only a few possible substitution regexes you can use. 
- str, regex and numeric rules apply as above. * dict: - Dicts can be used to specify different replacement values for different existing values. For example, ``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and 'y' with 'z'. To use a dict in this way the `value` parameter should be `None`. - For a DataFrame a dict can specify that different values should be replaced in different columns. For example, ``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a' and the value 'z' in column 'b' and replaces these values with whatever is specified in `value`. The `value` parameter should not be ``None`` in this case. You can treat this as a special case of passing two lists except that you are specifying the column to search in. - For a DataFrame nested dictionaries, e.g., ``{'a': {'b': np.nan}}``, are read as follows: look in column 'a' for the value 'b' and replace it with NaN. The `value` parameter should be ``None`` to use a nested dict in this way. You can nest regular expressions as well. Note that column names (the top-level dictionary keys in a nested dictionary) **cannot** be regular expressions. * None: - This means that the `regex` argument must be a string, compiled regular expression, or list, dict, ndarray or Series of such elements. If `value` is also ``None`` then this **must** be a nested dictionary or Series. See the examples section for examples of each of these. value : scalar, dict, list, str, regex, default None Value to replace any values matching `to_replace` with. For a DataFrame a dict of values can be used to specify which value to use for each column (columns not in the dict will not be filled). Regular expressions, strings and lists or dicts of such objects are also allowed. inplace : bool, default False If True, replace in place. Note: this will modify any other views on this object (e.g. a column from a DataFrame). Returns the caller if this is True. limit : int, default None Maximum size gap to forward or backward fill. regex : bool or same types as `to_replace`, default False Whether to interpret `to_replace` and/or `value` as regular expressions. If this is ``True`` then `to_replace` *must* be a string. Alternatively, this could be a regular expression or a list, dict, or array of regular expressions in which case `to_replace` must be ``None``. method : {'pad', 'ffill', 'bfill', `None`} The method to use for replacement, when `to_replace` is a scalar, list or tuple and `value` is ``None``. .. versionchanged:: 0.23.0 Added to DataFrame. Returns ------- %(klass)s Object after replacement. Raises ------ AssertionError * If `regex` is not a ``bool`` and `to_replace` is not ``None``. TypeError * If `to_replace` is a ``dict`` and `value` is not a ``list``, ``dict``, ``ndarray``, or ``Series`` * If `to_replace` is ``None`` and `regex` is not compilable into a regular expression or is a list, dict, ndarray, or Series. * When replacing multiple ``bool`` or ``datetime64`` objects and the arguments to `to_replace` do not match the type of the value being replaced ValueError * If a ``list`` or an ``ndarray`` is passed to `to_replace` and `value` but they are not the same length. See Also -------- %(klass)s.fillna : Fill NA values. %(klass)s.where : Replace values based on boolean condition. Series.str.replace : Simple string replacement. Notes ----- * Regex substitution is performed under the hood with ``re.sub``. The rules for substitution for ``re.sub`` are the same. 
* Regular expressions will only substitute on strings, meaning you cannot provide, for example, a regular expression matching floating point numbers and expect the columns in your frame that have a numeric dtype to be matched. However, if those floating point numbers *are* strings, then you can do this. * This method has *a lot* of options. You are encouraged to experiment and play with this method to gain intuition about how it works. * When dict is used as the `to_replace` value, it is like key(s) in the dict are the to_replace part and value(s) in the dict are the value parameter. Examples -------- **Scalar `to_replace` and `value`** >>> s = pd.Series([0, 1, 2, 3, 4]) >>> s.replace(0, 5) 0 5 1 1 2 2 3 3 4 4 dtype: int64 >>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4], ... 'B': [5, 6, 7, 8, 9], ... 'C': ['a', 'b', 'c', 'd', 'e']}) >>> df.replace(0, 5) A B C 0 5 5 a 1 1 6 b 2 2 7 c 3 3 8 d 4 4 9 e **List-like `to_replace`** >>> df.replace([0, 1, 2, 3], 4) A B C 0 4 5 a 1 4 6 b 2 4 7 c 3 4 8 d 4 4 9 e >>> df.replace([0, 1, 2, 3], [4, 3, 2, 1]) A B C 0 4 5 a 1 3 6 b 2 2 7 c 3 1 8 d 4 4 9 e >>> s.replace([1, 2], method='bfill') 0 0 1 3 2 3 3 3 4 4 dtype: int64 **dict-like `to_replace`** >>> df.replace({0: 10, 1: 100}) A B C 0 10 5 a 1 100 6 b 2 2 7 c 3 3 8 d 4 4 9 e >>> df.replace({'A': 0, 'B': 5}, 100) A B C 0 100 100 a 1 1 6 b 2 2 7 c 3 3 8 d 4 4 9 e >>> df.replace({'A': {0: 100, 4: 400}}) A B C 0 100 5 a 1 1 6 b 2 2 7 c 3 3 8 d 4 400 9 e **Regular expression `to_replace`** >>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'], ... 'B': ['abc', 'bar', 'xyz']}) >>> df.replace(to_replace=r'^ba.$', value='new', regex=True) A B 0 new abc 1 foo new 2 bait xyz >>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True) A B 0 new abc 1 foo bar 2 bait xyz >>> df.replace(regex=r'^ba.$', value='new') A B 0 new abc 1 foo new 2 bait xyz >>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'}) A B 0 new abc 1 xyz new 2 bait xyz >>> df.replace(regex=[r'^ba.$', 'foo'], value='new') A B 0 new abc 1 new new 2 bait xyz Note that when replacing multiple ``bool`` or ``datetime64`` objects, the data types in the `to_replace` parameter must match the data type of the value being replaced: >>> df = pd.DataFrame({'A': [True, False, True], ... 'B': [False, True, False]}) >>> df.replace({'a string': 'new value', True: False}) # raises Traceback (most recent call last): ... TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str' This raises a ``TypeError`` because one of the ``dict`` keys is not of the correct type for replacement. Compare the behavior of ``s.replace({'a': None})`` and ``s.replace('a', None)`` to understand the peculiarities of the `to_replace` parameter: >>> s = pd.Series([10, 'a', 'a', 'b', 'a']) When one uses a dict as the `to_replace` value, it is like the value(s) in the dict are equal to the `value` parameter. ``s.replace({'a': None})`` is equivalent to ``s.replace(to_replace={'a': None}, value=None, method=None)``: >>> s.replace({'a': None}) 0 10 1 None 2 None 3 b 4 None dtype: object When ``value=None`` and `to_replace` is a scalar, list or tuple, `replace` uses the method parameter (default 'pad') to do the replacement. So this is why the 'a' values are being replaced by 10 in rows 1 and 2 and 'b' in row 4 in this case. 
The command ``s.replace('a', None)`` is actually equivalent to ``s.replace(to_replace='a', value=None, method='pad')``: >>> s.replace('a', None) 0 10 1 10 2 10 3 b 4 b dtype: object """) @Appender(_shared_docs['replace'] % _shared_doc_kwargs) def replace(self, to_replace=None, value=None, inplace=False, limit=None, regex=False, method='pad'): inplace = validate_bool_kwarg(inplace, 'inplace') if not is_bool(regex) and to_replace is not None: raise AssertionError("'to_replace' must be 'None' if 'regex' is " "not a bool") self._consolidate_inplace() if value is None: # passing a single value that is scalar like # when value is None (GH5319), for compat if not is_dict_like(to_replace) and not is_dict_like(regex): to_replace = [to_replace] if isinstance(to_replace, (tuple, list)): if isinstance(self, pd.DataFrame): return self.apply(_single_replace, args=(to_replace, method, inplace, limit)) return _single_replace(self, to_replace, method, inplace, limit) if not is_dict_like(to_replace): if not is_dict_like(regex): raise TypeError('If "to_replace" and "value" are both None' ' and "to_replace" is not a list, then ' 'regex must be a mapping') to_replace = regex regex = True items = list(compat.iteritems(to_replace)) keys, values = lzip(*items) or ([], []) are_mappings = [is_dict_like(v) for v in values] if any(are_mappings): if not all(are_mappings): raise TypeError("If a nested mapping is passed, all values" " of the top level mapping must be " "mappings") # passed a nested dict/Series to_rep_dict = {} value_dict = {} for k, v in items: keys, values = lzip(*v.items()) or ([], []) if set(keys) & set(values): raise ValueError("Replacement not allowed with " "overlapping keys and values") to_rep_dict[k] = list(keys) value_dict[k] = list(values) to_replace, value = to_rep_dict, value_dict else: to_replace, value = keys, values return self.replace(to_replace, value, inplace=inplace, limit=limit, regex=regex) else: # need a non-zero len on all axes for a in self._AXIS_ORDERS: if not len(self._get_axis(a)): return self new_data = self._data if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} res = self if inplace else self.copy() for c, src in compat.iteritems(to_replace): if c in value and c in self: # object conversion is handled in # series.replace which is called recursively res[c] = res[c].replace(to_replace=src, value=value[c], inplace=False, regex=regex) return None if inplace else res # {'A': NA} -> 0 elif not is_list_like(value): keys = [(k, src) for k, src in compat.iteritems(to_replace) if k in self] keys_len = len(keys) - 1 for i, (k, src) in enumerate(keys): convert = i == keys_len new_data = new_data.replace(to_replace=src, value=value, filter=[k], inplace=inplace, regex=regex, convert=convert) else: raise TypeError('value argument must be scalar, dict, or ' 'Series') elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing'] if is_list_like(value): if len(to_replace) != len(value): raise ValueError('Replacement lists must match ' 'in length. 
Expecting %d got %d ' % (len(to_replace), len(value))) new_data = self._data.replace_list(src_list=to_replace, dest_list=value, inplace=inplace, regex=regex) else: # [NA, ''] -> 0 new_data = self._data.replace(to_replace=to_replace, value=value, inplace=inplace, regex=regex) elif to_replace is None: if not (is_re_compilable(regex) or is_list_like(regex) or is_dict_like(regex)): raise TypeError("'regex' must be a string or a compiled " "regular expression or a list or dict of " "strings or regular expressions, you " "passed a" " {0!r}".format(type(regex).__name__)) return self.replace(regex, value, inplace=inplace, limit=limit, regex=True) else: # dest iterable dict-like if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} new_data = self._data for k, v in compat.iteritems(value): if k in self: new_data = new_data.replace(to_replace=to_replace, value=v, filter=[k], inplace=inplace, regex=regex) elif not is_list_like(value): # NA -> 0 new_data = self._data.replace(to_replace=to_replace, value=value, inplace=inplace, regex=regex) else: msg = ('Invalid "to_replace" type: ' '{0!r}').format(type(to_replace).__name__) raise TypeError(msg) # pragma: no cover if inplace: self._update_inplace(new_data) else: return self._constructor(new_data).__finalize__(self) _shared_docs['interpolate'] = """ Please note that only ``method='linear'`` is supported for DataFrame/Series with a MultiIndex. Parameters ---------- method : str, default 'linear' Interpolation technique to use. One of: * 'linear': Ignore the index and treat the values as equally spaced. This is the only method supported on MultiIndexes. * 'time': Works on daily and higher resolution data to interpolate given length of interval. * 'index', 'values': use the actual numerical values of the index. * 'pad': Fill in NaNs using existing values. * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline', 'barycentric', 'polynomial': Passed to `scipy.interpolate.interp1d`. Both 'polynomial' and 'spline' require that you also specify an `order` (int), e.g. ``df.interpolate(method='polynomial', order=5)``. These use the numerical values of the index. * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima': Wrappers around the SciPy interpolation methods of similar names. See `Notes`. * 'from_derivatives': Refers to `scipy.interpolate.BPoly.from_derivatives` which replaces 'piecewise_polynomial' interpolation method in scipy 0.18. .. versionadded:: 0.18.1 Added support for the 'akima' method. Added interpolate method 'from_derivatives' which replaces 'piecewise_polynomial' in SciPy 0.18; backwards-compatible with SciPy < 0.18 axis : {0 or 'index', 1 or 'columns', None}, default None Axis to interpolate along. limit : int, optional Maximum number of consecutive NaNs to fill. Must be greater than 0. inplace : bool, default False Update the data in place if possible. limit_direction : {'forward', 'backward', 'both'}, default 'forward' If limit is specified, consecutive NaNs will be filled in this direction. limit_area : {`None`, 'inside', 'outside'}, default None If limit is specified, consecutive NaNs will be filled with this restriction. * ``None``: No fill restriction. * 'inside': Only fill NaNs surrounded by valid values (interpolate). * 'outside': Only fill NaNs outside valid values (extrapolate). .. versionadded:: 0.21.0 downcast : optional, 'infer' or None, defaults to None Downcast dtypes if possible. **kwargs Keyword arguments to pass on to the interpolating function. 
Returns ------- Series or DataFrame Returns the same object type as the caller, interpolated at some or all ``NaN`` values. See Also -------- fillna : Fill missing values using different methods. scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials (Akima interpolator). scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the Bernstein basis. scipy.interpolate.interp1d : Interpolate a 1-D function. scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh interpolator). scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic interpolation. scipy.interpolate.CubicSpline : Cubic spline data interpolator. Notes ----- The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima' methods are wrappers around the respective SciPy implementations of similar names. These use the actual numerical values of the index. For more information on their behavior, see the `SciPy documentation <http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__ and `SciPy tutorial <http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__. Examples -------- Filling in ``NaN`` in a :class:`~pandas.Series` via linear interpolation. >>> s = pd.Series([0, 1, np.nan, 3]) >>> s 0 0.0 1 1.0 2 NaN 3 3.0 dtype: float64 >>> s.interpolate() 0 0.0 1 1.0 2 2.0 3 3.0 dtype: float64 Filling in ``NaN`` in a Series by padding, but filling at most two consecutive ``NaN`` at a time. >>> s = pd.Series([np.nan, "single_one", np.nan, ... "fill_two_more", np.nan, np.nan, np.nan, ... 4.71, np.nan]) >>> s 0 NaN 1 single_one 2 NaN 3 fill_two_more 4 NaN 5 NaN 6 NaN 7 4.71 8 NaN dtype: object >>> s.interpolate(method='pad', limit=2) 0 NaN 1 single_one 2 single_one 3 fill_two_more 4 fill_two_more 5 fill_two_more 6 NaN 7 4.71 8 4.71 dtype: object Filling in ``NaN`` in a Series via polynomial interpolation or splines: Both 'polynomial' and 'spline' methods require that you also specify an ``order`` (int). >>> s = pd.Series([0, 2, np.nan, 8]) >>> s.interpolate(method='polynomial', order=2) 0 0.000000 1 2.000000 2 4.666667 3 8.000000 dtype: float64 Fill the DataFrame forward (that is, going down) along each column using linear interpolation. Note how the last entry in column 'a' is interpolated differently, because there is no entry after it to use for interpolation. Note how the first entry in column 'b' remains ``NaN``, because there is no entry before it to use for interpolation. >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0), ... (np.nan, 2.0, np.nan, np.nan), ... (2.0, 3.0, np.nan, 9.0), ... (np.nan, 4.0, -4.0, 16.0)], ... columns=list('abcd')) >>> df a b c d 0 0.0 NaN -1.0 1.0 1 NaN 2.0 NaN NaN 2 2.0 3.0 NaN 9.0 3 NaN 4.0 -4.0 16.0 >>> df.interpolate(method='linear', limit_direction='forward', axis=0) a b c d 0 0.0 NaN -1.0 1.0 1 1.0 2.0 -2.0 5.0 2 2.0 3.0 -3.0 9.0 3 2.0 4.0 -4.0 16.0 Using polynomial interpolation. >>> df['d'].interpolate(method='polynomial', order=2) 0 1.0 1 4.0 2 9.0 3 16.0 Name: d, dtype: float64 """ @Appender(_shared_docs['interpolate'] % _shared_doc_kwargs) def interpolate(self, method='linear', axis=0, limit=None, inplace=False, limit_direction='forward', limit_area=None, downcast=None, **kwargs): """ Interpolate values according to different methods. 
""" inplace = validate_bool_kwarg(inplace, 'inplace') if self.ndim > 2: raise NotImplementedError("Interpolate has not been implemented " "on Panel and Panel 4D objects.") if axis == 0: ax = self._info_axis_name _maybe_transposed_self = self elif axis == 1: _maybe_transposed_self = self.T ax = 1 else: _maybe_transposed_self = self ax = _maybe_transposed_self._get_axis_number(ax) if _maybe_transposed_self.ndim == 2: alt_ax = 1 - ax else: alt_ax = ax if (isinstance(_maybe_transposed_self.index, MultiIndex) and method != 'linear'): raise ValueError("Only `method=linear` interpolation is supported " "on MultiIndexes.") if _maybe_transposed_self._data.get_dtype_counts().get( 'object') == len(_maybe_transposed_self.T): raise TypeError("Cannot interpolate with all object-dtype columns " "in the DataFrame. Try setting at least one " "column to a numeric dtype.") # create/use the index if method == 'linear': # prior default index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax))) else: index = _maybe_transposed_self._get_axis(alt_ax) if isna(index).any(): raise NotImplementedError("Interpolation with NaNs in the index " "has not been implemented. Try filling " "those NaNs before interpolating.") data = _maybe_transposed_self._data new_data = data.interpolate(method=method, axis=ax, index=index, values=_maybe_transposed_self, limit=limit, limit_direction=limit_direction, limit_area=limit_area, inplace=inplace, downcast=downcast, **kwargs) if inplace: if axis == 1: new_data = self._constructor(new_data).T._data self._update_inplace(new_data) else: res = self._constructor(new_data).__finalize__(self) if axis == 1: res = res.T return res # ---------------------------------------------------------------------- # Timeseries methods Methods def asof(self, where, subset=None): """ Return the last row(s) without any NaNs before `where`. The last row (for each element in `where`, if list) without any NaN is taken. In case of a :class:`~pandas.DataFrame`, the last row without NaN considering only the subset of columns (if not `None`) .. versionadded:: 0.19.0 For DataFrame If there is no good value, NaN is returned for a Series or a Series of NaN values for a DataFrame Parameters ---------- where : date or array-like of dates Date(s) before which the last row(s) are returned. subset : str or array-like of str, default `None` For DataFrame, if not `None`, only use these columns to check for NaNs. Returns ------- scalar, Series, or DataFrame * scalar : when `self` is a Series and `where` is a scalar * Series: when `self` is a Series and `where` is an array-like, or when `self` is a DataFrame and `where` is a scalar * DataFrame : when `self` is a DataFrame and `where` is an array-like See Also -------- merge_asof : Perform an asof merge. Similar to left join. Notes ----- Dates are assumed to be sorted. Raises if this is not the case. Examples -------- A Series and a scalar `where`. >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40]) >>> s 10 1.0 20 2.0 30 NaN 40 4.0 dtype: float64 >>> s.asof(20) 2.0 For a sequence `where`, a Series is returned. The first value is NaN, because the first element of `where` is before the first index value. >>> s.asof([5, 20]) 5 NaN 20 2.0 dtype: float64 Missing values are not considered. The following is ``2.0``, not NaN, even though NaN is at the index location for ``30``. >>> s.asof(30) 2.0 Take all columns into consideration >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50], ... 'b': [None, None, None, None, 500]}, ... 
index=pd.DatetimeIndex(['2018-02-27 09:01:00', ... '2018-02-27 09:02:00', ... '2018-02-27 09:03:00', ... '2018-02-27 09:04:00', ... '2018-02-27 09:05:00'])) >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30'])) a b 2018-02-27 09:03:30 NaN NaN 2018-02-27 09:04:30 NaN NaN Take a single column into consideration >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30']), ... subset=['a']) a b 2018-02-27 09:03:30 30.0 NaN 2018-02-27 09:04:30 40.0 NaN """ if isinstance(where, compat.string_types): from pandas import to_datetime where = to_datetime(where) if not self.index.is_monotonic: raise ValueError("asof requires a sorted index") is_series = isinstance(self, ABCSeries) if is_series: if subset is not None: raise ValueError("subset is not valid for Series") elif self.ndim > 2: raise NotImplementedError("asof is not implemented " "for {type}".format(type=type(self))) else: if subset is None: subset = self.columns if not is_list_like(subset): subset = [subset] is_list = is_list_like(where) if not is_list: start = self.index[0] if isinstance(self.index, PeriodIndex): where = Period(where, freq=self.index.freq).ordinal start = start.ordinal if where < start: if not is_series: from pandas import Series return Series(index=self.columns, name=where) return np.nan # It's always much faster to use a *while* loop here for # Series than pre-computing all the NAs. However a # *while* loop is extremely expensive for DataFrame # so we later pre-compute all the NAs and use the same # code path whether *where* is a scalar or list. # See PR: https://github.com/pandas-dev/pandas/pull/14476 if is_series: loc = self.index.searchsorted(where, side='right') if loc > 0: loc -= 1 values = self._values while loc > 0 and isna(values[loc]): loc -= 1 return values[loc] if not isinstance(where, Index): where = Index(where) if is_list else Index([where]) nulls = self.isna() if is_series else self[subset].isna().any(1) if nulls.all(): if is_series: return self._constructor(np.nan, index=where, name=self.name) elif is_list: from pandas import DataFrame return DataFrame(np.nan, index=where, columns=self.columns) else: from pandas import Series return Series(np.nan, index=self.columns, name=where[0]) locs = self.index.asof_locs(where, ~(nulls.values)) # mask the missing missing = locs == -1 data = self.take(locs, is_copy=False) data.index = where data.loc[missing] = np.nan return data if is_list else data.iloc[-1] # ---------------------------------------------------------------------- # Action Methods _shared_docs['isna'] = """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as None or :attr:`numpy.NaN`, gets mapped to True values. Everything else gets mapped to False values. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- %(klass)s Mask of bool values for each element in %(klass)s that indicates whether an element is not an NA value. See Also -------- %(klass)s.isnull : Alias of isna. %(klass)s.notna : Boolean inverse of isna. %(klass)s.dropna : Omit axes labels with missing values. isna : Top-level isna. Examples -------- Show which entries in a DataFrame are NA. >>> df = pd.DataFrame({'age': [5, 6, np.NaN], ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... 'name': ['Alfred', 'Batman', ''], ... 
'toy': [None, 'Batmobile', 'Joker']}) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.isna() age born name toy 0 False True False True 1 False False False False 2 True False False False Show which entries in a Series are NA. >>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.isna() 0 False 1 False 2 True dtype: bool """ @Appender(_shared_docs['isna'] % _shared_doc_kwargs) def isna(self): return isna(self).__finalize__(self) @Appender(_shared_docs['isna'] % _shared_doc_kwargs) def isnull(self): return isna(self).__finalize__(self) _shared_docs['notna'] = """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to True. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to False values. Returns ------- %(klass)s Mask of bool values for each element in %(klass)s that indicates whether an element is not an NA value. See Also -------- %(klass)s.notnull : Alias of notna. %(klass)s.isna : Boolean inverse of notna. %(klass)s.dropna : Omit axes labels with missing values. notna : Top-level notna. Examples -------- Show which entries in a DataFrame are not NA. >>> df = pd.DataFrame({'age': [5, 6, np.NaN], ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... 'name': ['Alfred', 'Batman', ''], ... 'toy': [None, 'Batmobile', 'Joker']}) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.notna() age born name toy 0 True False True False 1 True True True True 2 False True True True Show which entries in a Series are not NA. >>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.notna() 0 True 1 True 2 False dtype: bool """ @Appender(_shared_docs['notna'] % _shared_doc_kwargs) def notna(self): return notna(self).__finalize__(self) @Appender(_shared_docs['notna'] % _shared_doc_kwargs) def notnull(self): return notna(self).__finalize__(self) def _clip_with_scalar(self, lower, upper, inplace=False): if ((lower is not None and np.any(isna(lower))) or (upper is not None and np.any(isna(upper)))): raise ValueError("Cannot use an NA value as a clip threshold") result = self mask = isna(self.values) with np.errstate(all='ignore'): if upper is not None: subset = self.to_numpy() <= upper result = result.where(subset, upper, axis=None, inplace=False) if lower is not None: subset = self.to_numpy() >= lower result = result.where(subset, lower, axis=None, inplace=False) if np.any(mask): result[mask] = np.nan if inplace: self._update_inplace(result) else: return result def _clip_with_one_bound(self, threshold, method, axis, inplace): if axis is not None: axis = self._get_axis_number(axis) # method is self.le for upper bound and self.ge for lower bound if is_scalar(threshold) and is_number(threshold): if method.__name__ == 'le': return self._clip_with_scalar(None, threshold, inplace=inplace) return self._clip_with_scalar(threshold, None, inplace=inplace) subset = method(threshold, axis=axis) | isna(self) # GH #15390 # In order for where method to work, the threshold must # be transformed to NDFrame from other array like structure. 
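# For example, a bare list passed as `threshold` to Series.clip is wrapped
# in a Series aligned on self's index (or run through _align_method_FRAME
# for a DataFrame) so that the `where` call below can broadcast against it.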
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold): if isinstance(self, ABCSeries): threshold = pd.Series(threshold, index=self.index) else: threshold = _align_method_FRAME(self, threshold, axis) return self.where(subset, threshold, axis=axis, inplace=inplace) def clip(self, lower=None, upper=None, axis=None, inplace=False, *args, **kwargs): """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Thresholds can be singular values or array like, and in the latter case the clipping is performed element-wise in the specified axis. Parameters ---------- lower : float or array_like, default None Minimum threshold value. All values below this threshold will be set to it. upper : float or array_like, default None Maximum threshold value. All values above this threshold will be set to it. axis : int or string axis name, optional Align object with lower and upper along the given axis. inplace : boolean, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- Series or DataFrame Same type as calling object with the values outside the clip boundaries replaced Examples -------- >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]} >>> df = pd.DataFrame(data) >>> df col_0 col_1 0 9 -2 1 -3 -7 2 0 6 3 -1 8 4 5 -5 Clips per column using lower and upper thresholds: >>> df.clip(-4, 6) col_0 col_1 0 6 -2 1 -3 -4 2 0 6 3 -1 6 4 5 -4 Clips using specific lower and upper thresholds per column element: >>> t = pd.Series([2, -4, -1, 6, 3]) >>> t 0 2 1 -4 2 -1 3 6 4 3 dtype: int64 >>> df.clip(t, t + 4, axis=0) col_0 col_1 0 6 2 1 -3 -4 2 0 3 3 6 8 4 5 3 """ if isinstance(self, ABCPanel): raise NotImplementedError("clip is not supported yet for panels") inplace = validate_bool_kwarg(inplace, 'inplace') axis = nv.validate_clip_with_axis(axis, args, kwargs) if axis is not None: axis = self._get_axis_number(axis) # GH 17276 # numpy doesn't like NaN as a clip value # so ignore # GH 19992 # numpy doesn't drop a list-like bound containing NaN if not is_list_like(lower) and np.any(pd.isnull(lower)): lower = None if not is_list_like(upper) and np.any(pd.isnull(upper)): upper = None # GH 2747 (arguments were reversed) if lower is not None and upper is not None: if is_scalar(lower) and is_scalar(upper): lower, upper = min(lower, upper), max(lower, upper) # fast-path for scalars if ((lower is None or (is_scalar(lower) and is_number(lower))) and (upper is None or (is_scalar(upper) and is_number(upper)))): return self._clip_with_scalar(lower, upper, inplace=inplace) result = self if lower is not None: result = result._clip_with_one_bound(lower, method=self.ge, axis=axis, inplace=inplace) if upper is not None: if inplace: result = self result = result._clip_with_one_bound(upper, method=self.le, axis=axis, inplace=inplace) return result def clip_upper(self, threshold, axis=None, inplace=False): """ Trim values above a given threshold. .. deprecated:: 0.24.0 Use clip(upper=threshold) instead. Elements above the `threshold` will be changed to match the `threshold` value(s). Threshold can be a single value or an array, in the latter case it performs the truncation element-wise. Parameters ---------- threshold : numeric or array-like Maximum value allowed. All values above threshold will be set to this value. * float : every value is compared to `threshold`. 
* array-like : The shape of `threshold` should match the object it's compared to. When `self` is a Series, `threshold` should be the same length. When `self` is a DataFrame, `threshold` should be 2-D and the same shape as `self` for ``axis=None``, or 1-D and the same length as the axis being compared. axis : {0 or 'index', 1 or 'columns'}, default 0 Align object with `threshold` along the given axis. inplace : boolean, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 Returns ------- Series or DataFrame Original data with values trimmed. See Also -------- Series.clip : General purpose method to trim Series values to given threshold(s). DataFrame.clip : General purpose method to trim DataFrame values to given threshold(s). Examples -------- >>> s = pd.Series([1, 2, 3, 4, 5]) >>> s 0 1 1 2 2 3 3 4 4 5 dtype: int64 >>> s.clip(upper=3) 0 1 1 2 2 3 3 3 4 3 dtype: int64 >>> elemwise_thresholds = [5, 4, 3, 2, 1] >>> elemwise_thresholds [5, 4, 3, 2, 1] >>> s.clip(upper=elemwise_thresholds) 0 1 1 2 2 3 3 2 4 1 dtype: int64 """ warnings.warn('clip_upper(threshold) is deprecated, ' 'use clip(upper=threshold) instead', FutureWarning, stacklevel=2) return self._clip_with_one_bound(threshold, method=self.le, axis=axis, inplace=inplace) def clip_lower(self, threshold, axis=None, inplace=False): """ Trim values below a given threshold. .. deprecated:: 0.24.0 Use clip(lower=threshold) instead. Elements below the `threshold` will be changed to match the `threshold` value(s). Threshold can be a single value or an array, in the latter case it performs the truncation element-wise. Parameters ---------- threshold : numeric or array-like Minimum value allowed. All values below threshold will be set to this value. * float : every value is compared to `threshold`. * array-like : The shape of `threshold` should match the object it's compared to. When `self` is a Series, `threshold` should be the same length. When `self` is a DataFrame, `threshold` should be 2-D and the same shape as `self` for ``axis=None``, or 1-D and the same length as the axis being compared. axis : {0 or 'index', 1 or 'columns'}, default 0 Align `self` with `threshold` along the given axis. inplace : boolean, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 Returns ------- Series or DataFrame Original data with values trimmed. See Also -------- Series.clip : General purpose method to trim Series values to given threshold(s). DataFrame.clip : General purpose method to trim DataFrame values to given threshold(s). Examples -------- Series single threshold clipping: >>> s = pd.Series([5, 6, 7, 8, 9]) >>> s.clip(lower=8) 0 8 1 8 2 8 3 8 4 9 dtype: int64 Series clipping element-wise using an array of thresholds. `threshold` should be the same length as the Series. >>> elemwise_thresholds = [4, 8, 7, 2, 5] >>> s.clip(lower=elemwise_thresholds) 0 5 1 8 2 7 3 8 4 9 dtype: int64 DataFrames can be compared to a scalar. >>> df = pd.DataFrame({"A": [1, 3, 5], "B": [2, 4, 6]}) >>> df A B 0 1 2 1 3 4 2 5 6 >>> df.clip(lower=3) A B 0 3 3 1 3 4 2 5 6 Or to an array of values. By default, `threshold` should be the same shape as the DataFrame. >>> df.clip(lower=np.array([[3, 4], [2, 2], [6, 2]])) A B 0 3 4 1 3 4 2 6 6 Control how `threshold` is broadcast with `axis`. In this case `threshold` should be the same length as the axis specified by `axis`. 
>>> df.clip(lower=[3, 3, 5], axis='index') A B 0 3 3 1 3 4 2 5 6 >>> df.clip(lower=[4, 5], axis='columns') A B 0 4 5 1 4 5 2 5 6 """ warnings.warn('clip_lower(threshold) is deprecated, ' 'use clip(lower=threshold) instead', FutureWarning, stacklevel=2) return self._clip_with_one_bound(threshold, method=self.ge, axis=axis, inplace=inplace) def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, group_keys=True, squeeze=False, observed=False, **kwargs): """ Group DataFrame or Series using a mapper or by a Series of columns. A groupby operation involves some combination of splitting the object, applying a function, and combining the results. This can be used to group large amounts of data and compute operations on these groups. Parameters ---------- by : mapping, function, label, or list of labels Used to determine the groups for the groupby. If ``by`` is a function, it's called on each value of the object's index. If a dict or Series is passed, the Series or dict VALUES will be used to determine the groups (the Series' values are first aligned; see ``.align()`` method). If an ndarray is passed, the values are used as-is to determine the groups. A label or list of labels may be passed to group by the columns in ``self``. Notice that a tuple is interpreted as a (single) key. axis : {0 or 'index', 1 or 'columns'}, default 0 Split along rows (0) or columns (1). level : int, level name, or sequence of such, default None If the axis is a MultiIndex (hierarchical), group by a particular level or levels. as_index : bool, default True For aggregated output, return object with group labels as the index. Only relevant for DataFrame input. as_index=False is effectively "SQL-style" grouped output. sort : bool, default True Sort group keys. Get better performance by turning this off. Note this does not influence the order of observations within each group. Groupby preserves the order of rows within each group. group_keys : bool, default True When calling apply, add group keys to index to identify pieces. squeeze : bool, default False Reduce the dimensionality of the return type if possible, otherwise return a consistent type. observed : bool, default False This only applies if any of the groupers are Categoricals. If True: only show observed values for categorical groupers. If False: show all values for categorical groupers. .. versionadded:: 0.23.0 **kwargs Optional, only accepts keyword argument 'mutated' and is passed to groupby. Returns ------- DataFrameGroupBy or SeriesGroupBy Depends on the calling object and returns groupby object that contains information about the groups. See Also -------- resample : Convenience method for frequency conversion and resampling of time series. Notes ----- See the `user guide <http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more. Examples -------- >>> df = pd.DataFrame({'Animal' : ['Falcon', 'Falcon', ... 'Parrot', 'Parrot'], ... 'Max Speed' : [380., 370., 24., 26.]}) >>> df Animal Max Speed 0 Falcon 380.0 1 Falcon 370.0 2 Parrot 24.0 3 Parrot 26.0 >>> df.groupby(['Animal']).mean() Max Speed Animal Falcon 375.0 Parrot 25.0 **Hierarchical Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Capitve', 'Wild', 'Capitve', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> df = pd.DataFrame({'Max Speed' : [390., 350., 30., 20.]}, ... 
index=index) >>> df Max Speed Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 >>> df.groupby(level=0).mean() Max Speed Animal Falcon 370.0 Parrot 25.0 >>> df.groupby(level=1).mean() Max Speed Type Captive 210.0 Wild 185.0 """ from pandas.core.groupby.groupby import groupby if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") axis = self._get_axis_number(axis) return groupby(self, by=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, squeeze=squeeze, observed=observed, **kwargs) def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None): """ Convert TimeSeries to specified frequency. Optionally provide filling method to pad/backfill missing values. Returns the original data conformed to a new index with the specified frequency. ``resample`` is more appropriate if an operation, such as summarization, is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset object, or string method : {'backfill'/'bfill', 'pad'/'ffill'}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill how : {'start', 'end'}, default end For PeriodIndex only, see PeriodIndex.asfreq normalize : bool, default False Whether to reset output index to midnight fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). .. versionadded:: 0.20.0 Returns ------- converted : same type as caller See Also -------- reindex Notes ----- To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({'s':series}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill_value``. >>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0 """ from pandas.core.resample import asfreq return asfreq(self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value) def at_time(self, time, asof=False, axis=None): """ Select values at particular time of day (e.g. 9:30AM). Parameters ---------- time : datetime.time or string axis : {0 or 'index', 1 or 'columns'}, default 0 .. 
versionadded:: 0.24.0 Returns ------- values_at_time : same type as caller Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> ts.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) try: indexer = index.indexer_at_time(time, asof=asof) except AttributeError: raise TypeError('Index must be DatetimeIndex') return self._take(indexer, axis=axis) def between_time(self, start_time, end_time, include_start=True, include_end=True, axis=None): """ Select values between particular times of the day (e.g., 9:00-9:30 AM). By setting ``start_time`` to be later than ``end_time``, you can get the times that are *not* between the two times. Parameters ---------- start_time : datetime.time or string end_time : datetime.time or string include_start : boolean, default True include_end : boolean, default True axis : {0 or 'index', 1 or 'columns'}, default 0 .. versionadded:: 0.24.0 Returns ------- values_between_time : same type as caller Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- at_time : Select values at a particular time of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_between_time : Get just the index locations for values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min') >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 2018-04-12 01:00:00 4 >>> ts.between_time('0:15', '0:45') A 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 You get the times that are *not* between two times by setting ``start_time`` later than ``end_time``: >>> ts.between_time('0:45', '0:15') A 2018-04-09 00:00:00 1 2018-04-12 01:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) try: indexer = index.indexer_between_time( start_time, end_time, include_start=include_start, include_end=include_end) except AttributeError: raise TypeError('Index must be DatetimeIndex') return self._take(indexer, axis=axis) def resample(self, rule, how=None, axis=0, fill_method=None, closed=None, label=None, convention='start', kind=None, loffset=None, limit=None, base=0, on=None, level=None): """ Resample time-series data. Convenience method for frequency conversion and resampling of time series. Object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values to the `on` or `level` keyword. Parameters ---------- rule : str The offset string or object representing target conversion. how : str Method for down/re-sampling, default to 'mean' for downsampling. .. 
deprecated:: 0.18.0 The new syntax is ``.resample(...).mean()``, or ``.resample(...).apply(<func>)`` axis : {0 or 'index', 1 or 'columns'}, default 0 Which axis to use for up- or down-sampling. For `Series` this will default to 0, i.e. along the rows. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. fill_method : str, default None Filling method for upsampling. .. deprecated:: 0.18.0 The new syntax is ``.resample(...).<func>()``, e.g. ``.resample(...).pad()`` closed : {'right', 'left'}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {'right', 'left'}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {'start', 'end', 's', 'e'}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. kind : {'timestamp', 'period'}, optional, default None Pass 'timestamp' to convert the resulting index to a `DatetimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. loffset : timedelta, default None Adjust the resampled time labels. limit : int, default None Maximum size gap when reindexing with `fill_method`. .. deprecated:: 0.18.0 base : int, default 0 For frequencies that evenly subdivide 1 day, the "origin" of the aggregated intervals. For example, for '5min' frequency, base could range from 0 through 4. Defaults to 0. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. .. versionadded:: 0.19.0 level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. .. versionadded:: 0.19.0 Returns ------- Resampler object See Also -------- groupby : Group by mapping, function, label, or list of labels. Series.resample : Resample a Series. DataFrame.resample: Resample a DataFrame. Notes ----- See the `user guide <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling>`_ for more. To learn more about the offset strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. >>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. Please note that the value in the bucket used as the label is not included in the bucket it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). 
To include this value close the right side of the bin interval as illustrated in the example below this one. >>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``pad`` method. >>> series.resample('30S').pad()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. >>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(array_like): ... return np.sum(array_like) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}) >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}) >>> df2 = pd.DataFrame(d2, ... 
index=pd.MultiIndex.from_product([days, ... ['morning', ... 'afternoon']] ... )) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90 """ from pandas.core.resample import (resample, _maybe_process_deprecations) axis = self._get_axis_number(axis) r = resample(self, freq=rule, label=label, closed=closed, axis=axis, kind=kind, loffset=loffset, convention=convention, base=base, key=on, level=level) return _maybe_process_deprecations(r, how=how, fill_method=fill_method, limit=limit) def first(self, offset): """ Convenience method for subsetting initial periods of time series data based on a date offset. Parameters ---------- offset : string, DateOffset, dateutil.relativedelta Returns ------- subset : same type as caller Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- last : Select final periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the first 3 days: >>> ts.first('3D') A 2018-04-09 1 2018-04-11 2 Notice the data for the first 3 calendar days were returned, not the first 3 days observed in the dataset, and therefore data for 2018-04-13 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'first' only supports a DatetimeIndex index") if len(self.index) == 0: return self offset = to_offset(offset) end_date = end = self.index[0] + offset # Tick-like, e.g. 3 weeks if not offset.isAnchored() and hasattr(offset, '_inc'): if end_date in self.index: end = self.index.searchsorted(end_date, side='left') return self.iloc[:end] return self.loc[:end] def last(self, offset): """ Convenience method for subsetting final periods of time series data based on a date offset. Parameters ---------- offset : string, DateOffset, dateutil.relativedelta Returns ------- subset : same type as caller Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- first : Select initial periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> ts.last('3D') A 2018-04-13 3 2018-04-15 4 Notice the data for the last 3 calendar days were returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'last' only supports a DatetimeIndex index") if len(self.index) == 0: return self offset = to_offset(offset) start_date = self.index[-1] - offset start = self.index.searchsorted(start_date, side='right') return self.iloc[start:] def rank(self, axis=0, method='average', numeric_only=None, na_option='keep', ascending=True, pct=False): """ Compute numerical data ranks (1 through n) along axis. 
Equal values are assigned a rank that is the average of the ranks of those values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 index to direct ranking method : {'average', 'min', 'max', 'first', 'dense'} * average: average rank of group * min: lowest rank in group * max: highest rank in group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups numeric_only : boolean, default None Include only float, int, boolean data. Valid only for DataFrame or Panel objects na_option : {'keep', 'top', 'bottom'} * keep: leave NA values where they are * top: smallest rank if ascending * bottom: smallest rank if descending ascending : boolean, default True False for ranks by high (1) to low (N) pct : boolean, default False Computes percentage rank of data Returns ------- ranks : same type as caller """ axis = self._get_axis_number(axis) if self.ndim > 2: msg = "rank does not make sense when ndim > 2" raise NotImplementedError(msg) if na_option not in {'keep', 'top', 'bottom'}: msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) def ranker(data): ranks = algos.rank(data.values, axis=axis, method=method, ascending=ascending, na_option=na_option, pct=pct) ranks = self._constructor(ranks, **data._construct_axes_dict()) return ranks.__finalize__(self) # if numeric_only is None, and we can't get anything, we try with # numeric_only=True if numeric_only is None: try: return ranker(self) except TypeError: numeric_only = True if numeric_only: data = self._get_numeric_data() else: data = self return ranker(data) _shared_docs['align'] = (""" Align two objects on their axes with the specified join method for each axis Index. Parameters ---------- other : DataFrame or Series join : {'outer', 'inner', 'left', 'right'}, default 'outer' axis : allowed axis of the other object, default None Align on index (0), columns (1), or both (None) level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level copy : boolean, default True Always returns new objects. If copy=False and no reindexing is required then original objects are returned. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid backfill / bfill: use NEXT valid observation to fill gap limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. 
fill_axis : %(axes_single_arg)s, default 0 Filling axis, method and limit broadcast_axis : %(axes_single_arg)s, default None Broadcast values along this axis, if aligning two objects of different dimensions Returns ------- (left, right) : (%(klass)s, type of other) Aligned objects """) @Appender(_shared_docs['align'] % _shared_doc_kwargs) def align(self, other, join='outer', axis=None, level=None, copy=True, fill_value=None, method=None, limit=None, fill_axis=0, broadcast_axis=None): from pandas import DataFrame, Series method = missing.clean_fill_method(method) if broadcast_axis == 1 and self.ndim != other.ndim: if isinstance(self, Series): # this means other is a DataFrame, and we need to broadcast # self cons = self._constructor_expanddim df = cons({c: self for c in other.columns}, **other._construct_axes_dict()) return df._align_frame(other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis) elif isinstance(other, Series): # this means self is a DataFrame, and we need to broadcast # other cons = other._constructor_expanddim df = cons({c: other for c in self.columns}, **self._construct_axes_dict()) return self._align_frame(df, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis) if axis is not None: axis = self._get_axis_number(axis) if isinstance(other, DataFrame): return self._align_frame(other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis) elif isinstance(other, Series): return self._align_series(other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis) else: # pragma: no cover raise TypeError('unsupported type: %s' % type(other)) def _align_frame(self, other, join='outer', axis=None, level=None, copy=True, fill_value=None, method=None, limit=None, fill_axis=0): # defaults join_index, join_columns = None, None ilidx, iridx = None, None clidx, cridx = None, None is_series = isinstance(self, ABCSeries) if axis is None or axis == 0: if not self.index.equals(other.index): join_index, ilidx, iridx = self.index.join( other.index, how=join, level=level, return_indexers=True) if axis is None or axis == 1: if not is_series and not self.columns.equals(other.columns): join_columns, clidx, cridx = self.columns.join( other.columns, how=join, level=level, return_indexers=True) if is_series: reindexers = {0: [join_index, ilidx]} else: reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]} left = self._reindex_with_indexers(reindexers, copy=copy, fill_value=fill_value, allow_dups=True) # other must be always DataFrame right = other._reindex_with_indexers({0: [join_index, iridx], 1: [join_columns, cridx]}, copy=copy, fill_value=fill_value, allow_dups=True) if method is not None: left = left.fillna(axis=fill_axis, method=method, limit=limit) right = right.fillna(axis=fill_axis, method=method, limit=limit) # if DatetimeIndex have different tz, convert to UTC if is_datetime64tz_dtype(left.index): if left.index.tz != right.index.tz: if join_index is not None: left.index = join_index right.index = join_index return left.__finalize__(self), right.__finalize__(other) def _align_series(self, other, join='outer', axis=None, level=None, copy=True, fill_value=None, method=None, limit=None, fill_axis=0): is_series = isinstance(self, ABCSeries) # series/series compat, other must always be a Series if is_series: if axis: raise 
ValueError('cannot align series to a series other than ' 'axis 0') # equal if self.index.equals(other.index): join_index, lidx, ridx = None, None, None else: join_index, lidx, ridx = self.index.join(other.index, how=join, level=level, return_indexers=True) left = self._reindex_indexer(join_index, lidx, copy) right = other._reindex_indexer(join_index, ridx, copy) else: # one has > 1 ndim fdata = self._data if axis == 0: join_index = self.index lidx, ridx = None, None if not self.index.equals(other.index): join_index, lidx, ridx = self.index.join( other.index, how=join, level=level, return_indexers=True) if lidx is not None: fdata = fdata.reindex_indexer(join_index, lidx, axis=1) elif axis == 1: join_index = self.columns lidx, ridx = None, None if not self.columns.equals(other.index): join_index, lidx, ridx = self.columns.join( other.index, how=join, level=level, return_indexers=True) if lidx is not None: fdata = fdata.reindex_indexer(join_index, lidx, axis=0) else: raise ValueError('Must specify axis=0 or 1') if copy and fdata is self._data: fdata = fdata.copy() left = self._constructor(fdata) if ridx is None: right = other else: right = other.reindex(join_index, level=level) # fill fill_na = notna(fill_value) or (method is not None) if fill_na: left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis) right = right.fillna(fill_value, method=method, limit=limit) # if DatetimeIndex have different tz, convert to UTC if is_series or (not is_series and axis == 0): if is_datetime64tz_dtype(left.index): if left.index.tz != right.index.tz: if join_index is not None: left.index = join_index right.index = join_index return left.__finalize__(self), right.__finalize__(other) def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, errors='raise', try_cast=False): """ Equivalent to public method `where`, except that `other` is not applied as a function even if callable. Used in __setitem__. """ inplace = validate_bool_kwarg(inplace, 'inplace') # align the cond to same shape as myself cond = com.apply_if_callable(cond, self) if isinstance(cond, NDFrame): cond, _ = cond.align(self, join='right', broadcast_axis=1) else: if not hasattr(cond, 'shape'): cond = np.asanyarray(cond) if cond.shape != self.shape: raise ValueError('Array conditional must be same shape as ' 'self') cond = self._constructor(cond, **self._construct_axes_dict()) # make sure we are boolean fill_value = True if inplace else False cond = cond.fillna(fill_value) msg = "Boolean array expected for the condition, not {dtype}" if not isinstance(cond, pd.DataFrame): # This is a single-dimensional object. 
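            # (A Series or 1-D ndarray carries a single dtype, so one
            # is_bool_dtype check covers the whole condition; a DataFrame is
            # validated column-by-column in the branch below. For example, a
            # condition like pd.Series([1, 0]) has dtype int64, not bool, and
            # would raise here.)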
if not is_bool_dtype(cond): raise ValueError(msg.format(dtype=cond.dtype)) elif not cond.empty: for dt in cond.dtypes: if not is_bool_dtype(dt): raise ValueError(msg.format(dtype=dt)) cond = -cond if inplace else cond # try to align with other try_quick = True if hasattr(other, 'align'): # align with me if other.ndim <= self.ndim: _, other = self.align(other, join='left', axis=axis, level=level, fill_value=np.nan) # if we are NOT aligned, raise as we cannot where index if (axis is None and not all(other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes))): raise InvalidIndexError # slice me out of the other else: raise NotImplementedError("cannot align with a higher " "dimensional NDFrame") if isinstance(other, np.ndarray): if other.shape != self.shape: if self.ndim == 1: icond = cond.values # GH 2745 / GH 4192 # treat like a scalar if len(other) == 1: other = np.array(other[0]) # GH 3235 # match True cond to other elif len(cond[icond]) == len(other): # try to not change dtype at first (if try_quick) if try_quick: try: new_other = com.values_from_object(self) new_other = new_other.copy() new_other[icond] = other other = new_other except Exception: try_quick = False # let's create a new (if we failed at the above # or not try_quick if not try_quick: dtype, fill_value = maybe_promote(other.dtype) new_other = np.empty(len(icond), dtype=dtype) new_other.fill(fill_value) maybe_upcast_putmask(new_other, icond, other) other = new_other else: raise ValueError('Length of replacements must equal ' 'series length') else: raise ValueError('other must be the same shape as self ' 'when an ndarray') # we are the same shape, so create an actual object for alignment else: other = self._constructor(other, **self._construct_axes_dict()) if axis is None: axis = 0 if self.ndim == getattr(other, 'ndim', 0): align = True else: align = (self._get_axis_number(axis) == 1) block_axis = self._get_block_manager_axis(axis) if inplace: # we may have different type blocks come out of putmask, so # reconstruct the block manager self._check_inplace_setting(other) new_data = self._data.putmask(mask=cond, new=other, align=align, inplace=True, axis=block_axis, transpose=self._AXIS_REVERSED) self._update_inplace(new_data) else: new_data = self._data.where(other=other, cond=cond, align=align, errors=errors, try_cast=try_cast, axis=block_axis, transpose=self._AXIS_REVERSED) return self._constructor(new_data).__finalize__(self) _shared_docs['where'] = (""" Replace values where the condition is %(cond_rev)s. Parameters ---------- cond : boolean %(klass)s, array-like, or callable Where `cond` is %(cond)s, keep the original value. Where %(cond_rev)s, replace with corresponding value from `other`. If `cond` is callable, it is computed on the %(klass)s and should return boolean %(klass)s or array. The callable must not change input %(klass)s (though pandas doesn't check it). .. versionadded:: 0.18.1 A callable can be used as cond. other : scalar, %(klass)s, or callable Entries where `cond` is %(cond_rev)s are replaced with corresponding value from `other`. If other is callable, it is computed on the %(klass)s and should return scalar or %(klass)s. The callable must not change input %(klass)s (though pandas doesn't check it). .. versionadded:: 0.18.1 A callable can be used as other. inplace : boolean, default False Whether to perform the operation in place on the data. axis : int, default None Alignment axis if needed. level : int, default None Alignment level if needed. 
errors : str, {'raise', 'ignore'}, default `raise` Note that currently this parameter won't affect the results and will always coerce to a suitable dtype. - `raise` : allow exceptions to be raised. - `ignore` : suppress exceptions. On error return original object. try_cast : boolean, default False Try to cast the result back to the input type (if possible). raise_on_error : boolean, default True Whether to raise on invalid data types (e.g. trying to where on strings). .. deprecated:: 0.21.0 Use `errors`. Returns ------- wh : same type as caller See Also -------- :func:`DataFrame.%(name_other)s` : Return an object of same shape as self. Notes ----- The %(name)s method is an application of the if-then idiom. For each element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the element is used; otherwise the corresponding element from the DataFrame ``other`` is used. The signature for :func:`DataFrame.where` differs from :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to ``np.where(m, df1, df2)``. For further details and examples see the ``%(name)s`` documentation in :ref:`indexing <indexing.where_mask>`. Examples -------- >>> s = pd.Series(range(5)) >>> s.where(s > 0) 0 NaN 1 1.0 2 2.0 3 3.0 4 4.0 dtype: float64 >>> s.mask(s > 0) 0 0.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 >>> s.where(s > 1, 10) 0 10 1 10 2 2 3 3 4 4 dtype: int64 >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B']) >>> m = df %% 3 == 0 >>> df.where(m, -df) A B 0 0 -1 1 -2 3 2 -4 -5 3 6 -7 4 -8 9 >>> df.where(m, -df) == np.where(m, df, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True >>> df.where(m, -df) == df.mask(~m, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True """) @Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond="True", cond_rev="False", name='where', name_other='mask')) def where(self, cond, other=np.nan, inplace=False, axis=None, level=None, errors='raise', try_cast=False, raise_on_error=None): if raise_on_error is not None: warnings.warn( "raise_on_error is deprecated in " "favor of errors='raise|ignore'", FutureWarning, stacklevel=2) if raise_on_error: errors = 'raise' else: errors = 'ignore' other = com.apply_if_callable(other, self) return self._where(cond, other, inplace, axis, level, errors=errors, try_cast=try_cast) @Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond="False", cond_rev="True", name='mask', name_other='where')) def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None, errors='raise', try_cast=False, raise_on_error=None): if raise_on_error is not None: warnings.warn( "raise_on_error is deprecated in " "favor of errors='raise|ignore'", FutureWarning, stacklevel=2) if raise_on_error: errors = 'raise' else: errors = 'ignore' inplace = validate_bool_kwarg(inplace, 'inplace') cond = com.apply_if_callable(cond, self) # see gh-21891 if not hasattr(cond, "__invert__"): cond = np.array(cond) return self.where(~cond, other=other, inplace=inplace, axis=axis, level=level, try_cast=try_cast, errors=errors) _shared_docs['shift'] = (""" Shift index by desired number of periods with an optional time `freq`. When `freq` is not passed, shift the index without realigning the data. If `freq` is passed (in this case, the index must be date or datetime, or it will raise a `NotImplementedError`), the index will be increased using the periods and the `freq`. Parameters ---------- periods : int Number of periods to shift. Can be positive or negative. 
freq : DateOffset, tseries.offsets, timedelta, or str, optional Offset to use from the tseries module or time rule (e.g. 'EOM'). If `freq` is specified then the index values are shifted but the data is not realigned. That is, use `freq` if you would like to extend the index when shifting and preserve the original data. axis : {0 or 'index', 1 or 'columns', None}, default 0 Shift direction. fill_value : object, optional The scalar value to use for newly introduced missing values. The default depends on the dtype of `self`. For numeric data, ``np.nan`` is used. For datetime, timedelta, or period data, :attr:`NaT` is used. For extension dtypes, ``self.dtype.na_value`` is used. .. versionchanged:: 0.24.0 Returns ------- %(klass)s Copy of input object, shifted. See Also -------- Index.shift : Shift values of Index. DatetimeIndex.shift : Shift values of DatetimeIndex. PeriodIndex.shift : Shift values of PeriodIndex. tshift : Shift the time index, using the index's frequency if available. Examples -------- >>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45], ... 'Col2': [13, 23, 18, 33, 48], ... 'Col3': [17, 27, 22, 37, 52]}) >>> df.shift(periods=3) Col1 Col2 Col3 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 10.0 13.0 17.0 4 20.0 23.0 27.0 >>> df.shift(periods=1, axis='columns') Col1 Col2 Col3 0 NaN 10.0 13.0 1 NaN 20.0 23.0 2 NaN 15.0 18.0 3 NaN 30.0 33.0 4 NaN 45.0 48.0 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 0 0 0 0 1 0 0 0 2 0 0 0 3 10 13 17 4 20 23 27 """) @Appender(_shared_docs['shift'] % _shared_doc_kwargs) def shift(self, periods=1, freq=None, axis=0, fill_value=None): if periods == 0: return self.copy() block_axis = self._get_block_manager_axis(axis) if freq is None: new_data = self._data.shift(periods=periods, axis=block_axis, fill_value=fill_value) else: return self.tshift(periods, freq) return self._constructor(new_data).__finalize__(self) def slice_shift(self, periods=1, axis=0): """ Equivalent to `shift` without copying data. The shifted data will not include the dropped periods and the shifted axis will be smaller than the original. Parameters ---------- periods : int Number of periods to move, can be positive or negative Returns ------- shifted : same type as caller Notes ----- While the `slice_shift` is faster than `shift`, you may pay for it later during alignment. """ if periods == 0: return self if periods > 0: vslicer = slice(None, -periods) islicer = slice(periods, None) else: vslicer = slice(-periods, None) islicer = slice(None, periods) new_obj = self._slice(vslicer, axis=axis) shifted_axis = self._get_axis(axis)[islicer] new_obj.set_axis(shifted_axis, axis=axis, inplace=True) return new_obj.__finalize__(self) def tshift(self, periods=1, freq=None, axis=0): """ Shift the time index, using the index's frequency if available. Parameters ---------- periods : int Number of periods to move, can be positive or negative freq : DateOffset, timedelta, or time rule string, default None Increment to use from the tseries module or time rule (e.g. 'EOM') axis : int or basestring Corresponds to the axis that contains the Index Returns ------- shifted : NDFrame Notes ----- If freq is not specified then tries to use the freq or inferred_freq attributes of the index. 
If neither of those attributes exist, a ValueError is thrown """ index = self._get_axis(axis) if freq is None: freq = getattr(index, 'freq', None) if freq is None: freq = getattr(index, 'inferred_freq', None) if freq is None: msg = 'Freq was not given and was not set in the index' raise ValueError(msg) if periods == 0: return self if isinstance(freq, string_types): freq = to_offset(freq) block_axis = self._get_block_manager_axis(axis) if isinstance(index, PeriodIndex): orig_freq = to_offset(index.freq) if freq == orig_freq: new_data = self._data.copy() new_data.axes[block_axis] = index.shift(periods) else: msg = ('Given freq %s does not match PeriodIndex freq %s' % (freq.rule_code, orig_freq.rule_code)) raise ValueError(msg) else: new_data = self._data.copy() new_data.axes[block_axis] = index.shift(periods, freq) return self._constructor(new_data).__finalize__(self) def truncate(self, before=None, after=None, axis=None, copy=True): """ Truncate a Series or DataFrame before and after some index value. This is a useful shorthand for boolean indexing based on index values above or below certain thresholds. Parameters ---------- before : date, string, int Truncate all rows before this index value. after : date, string, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. copy : boolean, default is True, Return a copy of the truncated section. Returns ------- type of caller The truncated Series or DataFrame. See Also -------- DataFrame.loc : Select a subset of a DataFrame by label. DataFrame.iloc : Select a subset of a DataFrame by position. Notes ----- If the index being truncated contains only datetime values, `before` and `after` may be specified as strings instead of Timestamps. Examples -------- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df A B C 1 a f k 2 b g l 3 c h m 4 d i n 5 e j o >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n The columns of a DataFrame can be truncated. >>> df.truncate(before="A", after="B", axis="columns") A B 1 a f 2 b g 3 c h 4 d i 5 e j For Series, only rows can be truncated. >>> df['A'].truncate(before=2, after=4) 2 b 3 c 4 d Name: A, dtype: object The index values in ``truncate`` can be datetimes or string dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.tail() A 2016-01-31 23:59:56 1 2016-01-31 23:59:57 1 2016-01-31 23:59:58 1 2016-01-31 23:59:59 1 2016-02-01 00:00:00 1 >>> df.truncate(before=pd.Timestamp('2016-01-05'), ... after=pd.Timestamp('2016-01-10')).tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Because the index is a DatetimeIndex containing only dates, we can specify `before` and `after` as strings. They will be coerced to Timestamps before truncation. >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Note that ``truncate`` assumes a 0 value for any unspecified time component (midnight). This differs from partial string slicing, which returns any partially matching dates. 
>>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1 2016-01-10 23:59:56 1 2016-01-10 23:59:57 1 2016-01-10 23:59:58 1 2016-01-10 23:59:59 1 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) ax = self._get_axis(axis) # GH 17935 # Check that index is sorted if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing: raise ValueError("truncate requires a sorted index") # if we have a date index, convert to dates, otherwise # treat like a slice if ax.is_all_dates: from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) if before is not None and after is not None: if before > after: raise ValueError('Truncate: %s must be after %s' % (after, before)) slicer = [slice(None, None)] * self._AXIS_LEN slicer[axis] = slice(before, after) result = self.loc[tuple(slicer)] if isinstance(ax, MultiIndex): setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) if copy: result = result.copy() return result def tz_convert(self, tz, axis=0, level=None, copy=True): """ Convert tz-aware axis to target time zone. Parameters ---------- tz : string or pytz.timezone object axis : the axis to convert level : int, str, default None If axis is a MultiIndex, convert a specific level. Otherwise must be None copy : boolean, default True Also make a copy of the underlying data Returns ------- Series or DataFrame Same type as the input, with the axis converted to the target time zone. Raises ------ TypeError If the axis is tz-naive. """ axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_convert(ax, tz): if not hasattr(ax, 'tz_convert'): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError('%s is not a valid DatetimeIndex or ' 'PeriodIndex' % ax_name) else: ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_convert(tz) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_convert(ax.levels[level], tz) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError("The level {0} is not valid".format(level)) ax = _tz_convert(ax, tz) result = self._constructor(self._data, copy=copy) result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self) def tz_localize(self, tz, axis=0, level=None, copy=True, ambiguous='raise', nonexistent='raise'): """ Localize tz-naive index of a Series or DataFrame to target time zone. This operation localizes the Index. To localize the values in a timezone-naive Series, use :meth:`Series.dt.tz_localize`. Parameters ---------- tz : string or pytz.timezone object axis : the axis to localize level : int, str, default None If axis is a MultiIndex, localize a specific level. Otherwise must be None copy : boolean, default True Also make a copy of the underlying data ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. 
- 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False designates a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. Valid values are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise a NonExistentTimeError if there are nonexistent times .. versionadded:: 0.24.0 Returns ------- Series or DataFrame Same type as the input. Raises ------ TypeError If the TimeSeries is tz-aware and tz is not None. Examples -------- Localize local times: >>> s = pd.Series([1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00'])) >>> s.tz_localize('CET') 2018-09-15 01:30:00+02:00 1 dtype: int64 Be careful with DST changes. When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), index=pd.DatetimeIndex([ ... '2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly. >>> s = pd.Series(range(3), index=pd.DatetimeIndex([ ... '2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backwards with a timedelta object or `'shift_forward'` or `'shift_backward'`. >>> s = pd.Series(range(2), index=pd.DatetimeIndex([ ... '2015-03-29 02:30:00', ... 
'2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 """ nonexistent_options = ('raise', 'NaT', 'shift_forward', 'shift_backward') if nonexistent not in nonexistent_options and not isinstance( nonexistent, timedelta): raise ValueError("The nonexistent argument must be one of 'raise'," " 'NaT', 'shift_forward', 'shift_backward' or" " a timedelta object") axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if not hasattr(ax, 'tz_localize'): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError('%s is not a valid DatetimeIndex or ' 'PeriodIndex' % ax_name) else: ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize( tz, ambiguous=ambiguous, nonexistent=nonexistent ) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize( ax.levels[level], tz, ambiguous, nonexistent ) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError("The level {0} is not valid".format(level)) ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self._constructor(self._data, copy=copy) result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self) # ---------------------------------------------------------------------- # Numeric Methods def abs(self): """ Return a Series/DataFrame with absolute numeric value of each element. This function only applies to elements that are all numeric. Returns ------- abs Series/DataFrame containing the absolute value of each element. See Also -------- numpy.absolute : Calculate the absolute value element-wise. Notes ----- For ``complex`` inputs, ``1.2 + 1j``, the absolute value is :math:`\\sqrt{ a^2 + b^2 }`. Examples -------- Absolute numeric values in a Series. >>> s = pd.Series([-1.10, 2, -3.33, 4]) >>> s.abs() 0 1.10 1 2.00 2 3.33 3 4.00 dtype: float64 Absolute numeric values in a Series with complex numbers. >>> s = pd.Series([1.2 + 1j]) >>> s.abs() 0 1.56205 dtype: float64 Absolute numeric values in a Series with a Timedelta element. >>> s = pd.Series([pd.Timedelta('1 days')]) >>> s.abs() 0 1 days dtype: timedelta64[ns] Select rows with data closest to certain value using argsort (from `StackOverflow <https://stackoverflow.com/a/17758115>`__). >>> df = pd.DataFrame({ ... 'a': [4, 5, 6, 7], ... 'b': [10, 20, 30, 40], ... 'c': [100, 50, -30, -50] ... }) >>> df a b c 0 4 10 100 1 5 20 50 2 6 30 -30 3 7 40 -50 >>> df.loc[(df.c - 43).abs().argsort()] a b c 1 5 20 50 0 4 10 100 2 6 30 -30 3 7 40 -50 """ return np.abs(self) def describe(self, percentiles=None, include=None, exclude=None): """ Generate descriptive statistics that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. Parameters ---------- percentiles : list-like of numbers, optional The percentiles to include in the output. 
All should fall between 0 and 1. The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and 75th percentiles. include : 'all', list-like of dtypes or None (default), optional A white list of data types to include in the result. Ignored for ``Series``. Here are the options: - 'all' : All columns of the input will be included in the output. - A list-like of dtypes : Limits the results to the provided data types. To limit the result to numeric types submit ``numpy.number``. To limit it instead to object columns submit the ``numpy.object`` data type. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To select pandas categorical columns, use ``'category'`` - None (default) : The result will include all numeric columns. exclude : list-like of dtypes or None (default), optional A black list of data types to omit from the result. Ignored for ``Series``. Here are the options: - A list-like of dtypes : Excludes the provided data types from the result. To exclude numeric types submit ``numpy.number``. To exclude object columns submit the data type ``numpy.object``. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To exclude pandas categorical columns, use ``'category'`` - None (default) : The result will exclude nothing. Returns ------- Series or DataFrame Summary statistics of the Series or DataFrame provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. DataFrame.std: Standard deviation of the observations. DataFrame.select_dtypes: Subset of a DataFrame including/excluding columns based on their dtype. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and upper percentiles. By default the lower percentile is ``25`` and the upper percentile is ``75``. The ``50`` percentile is the same as the median. For object data (e.g. strings or timestamps), the result's index will include ``count``, ``unique``, ``top``, and ``freq``. The ``top`` is the most common value. The ``freq`` is the most common value's frequency. Timestamps also include the ``first`` and ``last`` items. If multiple object values have the highest count, then the ``count`` and ``top`` results will be arbitrarily chosen from among those with the highest count. For mixed data types provided via a ``DataFrame``, the default is to return only an analysis of numeric columns. If the dataframe consists only of object and categorical data without any numeric columns, the default is to return an analysis of both the object and categorical columns. If ``include='all'`` is provided as an option, the result will include a union of attributes of each type. The `include` and `exclude` parameters can be used to limit which columns in a ``DataFrame`` are analyzed for the output. The parameters are ignored when analyzing a ``Series``. Examples -------- Describing a numeric ``Series``. >>> s = pd.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 dtype: float64 Describing a categorical ``Series``. >>> s = pd.Series(['a', 'a', 'b', 'c']) >>> s.describe() count 4 unique 3 top a freq 2 dtype: object Describing a timestamp ``Series``. >>> s = pd.Series([ ... np.datetime64("2000-01-01"), ... np.datetime64("2010-01-01"), ... np.datetime64("2010-01-01") ... 
]) >>> s.describe() count 3 unique 2 top 2010-01-01 00:00:00 freq 2 first 2000-01-01 00:00:00 last 2010-01-01 00:00:00 dtype: object Describing a ``DataFrame``. By default only numeric fields are returned. >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']), ... 'numeric': [1, 2, 3], ... 'object': ['a', 'b', 'c'] ... }) >>> df.describe() numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Describing all columns of a ``DataFrame`` regardless of data type. >>> df.describe(include='all') categorical numeric object count 3 3.0 3 unique 3 NaN 3 top f NaN c freq 1 NaN 1 mean NaN 2.0 NaN std NaN 1.0 NaN min NaN 1.0 NaN 25% NaN 1.5 NaN 50% NaN 2.0 NaN 75% NaN 2.5 NaN max NaN 3.0 NaN Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Name: numeric, dtype: float64 Including only numeric columns in a ``DataFrame`` description. >>> df.describe(include=[np.number]) numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Including only string columns in a ``DataFrame`` description. >>> df.describe(include=[np.object]) object count 3 unique 3 top c freq 1 Including only categorical columns from a ``DataFrame`` description. >>> df.describe(include=['category']) categorical count 3 unique 3 top f freq 1 Excluding numeric columns from a ``DataFrame`` description. >>> df.describe(exclude=[np.number]) categorical object count 3 3 unique 3 3 top f c freq 1 1 Excluding object columns from a ``DataFrame`` description. >>> df.describe(exclude=[np.object]) categorical numeric count 3 3.0 unique 3 NaN top f NaN freq 1 NaN mean NaN 2.0 std NaN 1.0 min NaN 1.0 25% NaN 1.5 50% NaN 2.0 75% NaN 2.5 max NaN 3.0 """ if self.ndim >= 3: msg = "describe is not implemented on Panel objects." 
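            # (ndim >= 3 means a Panel or higher-dimensional object; Panel is
            # deprecated, so describe is only implemented for Series and
            # DataFrame.)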
raise NotImplementedError(msg) elif self.ndim == 2 and self.columns.size == 0: raise ValueError("Cannot describe a DataFrame without columns") if percentiles is not None: # explicit conversion of `percentiles` to list percentiles = list(percentiles) # get them all to be in [0, 1] self._check_percentile(percentiles) # median should always be included if 0.5 not in percentiles: percentiles.append(0.5) percentiles = np.asarray(percentiles) else: percentiles = np.array([0.25, 0.5, 0.75]) # sort and check for duplicates unique_pcts = np.unique(percentiles) if len(unique_pcts) < len(percentiles): raise ValueError("percentiles cannot contain duplicates") percentiles = unique_pcts formatted_percentiles = format_percentiles(percentiles) def describe_numeric_1d(series): stat_index = (['count', 'mean', 'std', 'min'] + formatted_percentiles + ['max']) d = ([series.count(), series.mean(), series.std(), series.min()] + series.quantile(percentiles).tolist() + [series.max()]) return pd.Series(d, index=stat_index, name=series.name) def describe_categorical_1d(data): names = ['count', 'unique'] objcounts = data.value_counts() count_unique = len(objcounts[objcounts != 0]) result = [data.count(), count_unique] if result[1] > 0: top, freq = objcounts.index[0], objcounts.iloc[0] if is_datetime64_any_dtype(data): tz = data.dt.tz asint = data.dropna().values.view('i8') top = Timestamp(top) if top.tzinfo is not None and tz is not None: # Don't tz_localize(None) if key is already tz-aware top = top.tz_convert(tz) else: top = top.tz_localize(tz) names += ['top', 'freq', 'first', 'last'] result += [top, freq, Timestamp(asint.min(), tz=tz), Timestamp(asint.max(), tz=tz)] else: names += ['top', 'freq'] result += [top, freq] return pd.Series(result, index=names, name=data.name) def describe_1d(data): if is_bool_dtype(data): return describe_categorical_1d(data) elif is_numeric_dtype(data): return describe_numeric_1d(data) elif is_timedelta64_dtype(data): return describe_numeric_1d(data) else: return describe_categorical_1d(data) if self.ndim == 1: return describe_1d(self) elif (include is None) and (exclude is None): # when some numerics are found, keep only numerics data = self.select_dtypes(include=[np.number]) if len(data.columns) == 0: data = self elif include == 'all': if exclude is not None: msg = "exclude must be None when include is 'all'" raise ValueError(msg) data = self else: data = self.select_dtypes(include=include, exclude=exclude) ldesc = [describe_1d(s) for _, s in data.iteritems()] # set a convenient order for rows names = [] ldesc_indexes = sorted((x.index for x in ldesc), key=len) for idxnames in ldesc_indexes: for name in idxnames: if name not in names: names.append(name) d = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1) d.columns = data.columns.copy() return d def _check_percentile(self, q): """ Validate percentiles (used by describe and quantile). """ msg = ("percentiles should all be in the interval [0, 1]. " "Try {0} instead.") q = np.asarray(q) if q.ndim == 0: if not 0 <= q <= 1: raise ValueError(msg.format(q / 100.0)) else: if not all(0 <= qs <= 1 for qs in q): raise ValueError(msg.format(q / 100.0)) return q _shared_docs['pct_change'] = """ Percentage change between the current and a prior element. Computes the percentage change from the immediately previous row by default. This is useful in comparing the percentage of change in a time series of elements. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. 
fill_method : str, default 'pad' How to handle NAs before computing percent changes. limit : int, default None The number of consecutive NAs to fill before stopping. freq : DateOffset, timedelta, or offset alias string, optional Increment to use from time series API (e.g. 'M' or BDay()). **kwargs Additional keyword arguments are passed into `DataFrame.shift` or `Series.shift`. Returns ------- chg : Series or DataFrame The same type as the calling object. See Also -------- Series.diff : Compute the difference of two elements in a Series. DataFrame.diff : Compute the difference of two elements in a DataFrame. Series.shift : Shift the index by some number of periods. DataFrame.shift : Shift the index by some number of periods. Examples -------- **Series** >>> s = pd.Series([90, 91, 85]) >>> s 0 90 1 91 2 85 dtype: int64 >>> s.pct_change() 0 NaN 1 0.011111 2 -0.065934 dtype: float64 >>> s.pct_change(periods=2) 0 NaN 1 NaN 2 -0.055556 dtype: float64 See the percentage change in a Series where filling NAs with last valid observation forward to next valid. >>> s = pd.Series([90, 91, None, 85]) >>> s 0 90.0 1 91.0 2 NaN 3 85.0 dtype: float64 >>> s.pct_change(fill_method='ffill') 0 NaN 1 0.011111 2 0.000000 3 -0.065934 dtype: float64 **DataFrame** Percentage change in French franc, Deutsche Mark, and Italian lira from 1980-01-01 to 1980-03-01. >>> df = pd.DataFrame({ ... 'FR': [4.0405, 4.0963, 4.3149], ... 'GR': [1.7246, 1.7482, 1.8519], ... 'IT': [804.74, 810.01, 860.13]}, ... index=['1980-01-01', '1980-02-01', '1980-03-01']) >>> df FR GR IT 1980-01-01 4.0405 1.7246 804.74 1980-02-01 4.0963 1.7482 810.01 1980-03-01 4.3149 1.8519 860.13 >>> df.pct_change() FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 0.013810 0.013684 0.006549 1980-03-01 0.053365 0.059318 0.061876 Percentage of change in GOOG and APPL stock volume. Shows computing the percentage change between columns. >>> df = pd.DataFrame({ ... '2016': [1769950, 30586265], ... '2015': [1500923, 40912316], ... '2014': [1371819, 41403351]}, ... index=['GOOG', 'APPL']) >>> df 2016 2015 2014 GOOG 1769950 1500923 1371819 APPL 30586265 40912316 41403351 >>> df.pct_change(axis='columns') 2016 2015 2014 GOOG NaN -0.151997 -0.086016 APPL NaN 0.337604 0.012002 """ @Appender(_shared_docs['pct_change'] % _shared_doc_kwargs) def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, **kwargs): # TODO: Not sure if above is correct - need someone to confirm. 
axis = self._get_axis_number(kwargs.pop('axis', self._stat_axis_name)) if fill_method is None: data = self else: data = self.fillna(method=fill_method, limit=limit, axis=axis) rs = (data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1) rs = rs.reindex_like(data) if freq is None: mask = isna(com.values_from_object(data)) np.putmask(rs.values, mask, np.nan) return rs def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs): if axis is None: raise ValueError("Must specify 'axis' when aggregating by level.") grouped = self.groupby(level=level, axis=axis, sort=False) if hasattr(grouped, name) and skipna: return getattr(grouped, name)(**kwargs) axis = self._get_axis_number(axis) method = getattr(type(self), name) applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs) return grouped.aggregate(applyf) @classmethod def _add_numeric_operations(cls): """ Add the operations to the cls; evaluate the doc strings again """ axis_descr, name, name2 = _doc_parms(cls) cls.any = _make_logical_function( cls, 'any', name, name2, axis_descr, _any_desc, nanops.nanany, _any_see_also, _any_examples, empty_value=False) cls.all = _make_logical_function( cls, 'all', name, name2, axis_descr, _all_desc, nanops.nanall, _all_see_also, _all_examples, empty_value=True) @Substitution(outname='mad', desc="Return the mean absolute deviation of the values " "for the requested axis.", name1=name, name2=name2, axis_descr=axis_descr, min_count='', see_also='', examples='') @Appender(_num_doc) def mad(self, axis=None, skipna=None, level=None): if skipna is None: skipna = True if axis is None: axis = self._stat_axis_number if level is not None: return self._agg_by_level('mad', axis=axis, level=level, skipna=skipna) data = self._get_numeric_data() if axis == 0: demeaned = data - data.mean(axis=0) else: demeaned = data.sub(data.mean(axis=1), axis=0) return np.abs(demeaned).mean(axis=axis, skipna=skipna) cls.mad = mad cls.sem = _make_stat_function_ddof( cls, 'sem', name, name2, axis_descr, "Return unbiased standard error of the mean over requested " "axis.\n\nNormalized by N-1 by default. This can be changed " "using the ddof argument", nanops.nansem) cls.var = _make_stat_function_ddof( cls, 'var', name, name2, axis_descr, "Return unbiased variance over requested axis.\n\nNormalized by " "N-1 by default. This can be changed using the ddof argument", nanops.nanvar) cls.std = _make_stat_function_ddof( cls, 'std', name, name2, axis_descr, "Return sample standard deviation over requested axis." "\n\nNormalized by N-1 by default. 
This can be changed using the " "ddof argument", nanops.nanstd) @Substitution(outname='compounded', desc="Return the compound percentage of the values for " "the requested axis.", name1=name, name2=name2, axis_descr=axis_descr, min_count='', see_also='', examples='') @Appender(_num_doc) def compound(self, axis=None, skipna=None, level=None): if skipna is None: skipna = True return (1 + self).prod(axis=axis, skipna=skipna, level=level) - 1 cls.compound = compound cls.cummin = _make_cum_function( cls, 'cummin', name, name2, axis_descr, "minimum", lambda y, axis: np.minimum.accumulate(y, axis), "min", np.inf, np.nan, _cummin_examples) cls.cumsum = _make_cum_function( cls, 'cumsum', name, name2, axis_descr, "sum", lambda y, axis: y.cumsum(axis), "sum", 0., np.nan, _cumsum_examples) cls.cumprod = _make_cum_function( cls, 'cumprod', name, name2, axis_descr, "product", lambda y, axis: y.cumprod(axis), "prod", 1., np.nan, _cumprod_examples) cls.cummax = _make_cum_function( cls, 'cummax', name, name2, axis_descr, "maximum", lambda y, axis: np.maximum.accumulate(y, axis), "max", -np.inf, np.nan, _cummax_examples) cls.sum = _make_min_count_stat_function( cls, 'sum', name, name2, axis_descr, """Return the sum of the values for the requested axis.\n This is equivalent to the method ``numpy.sum``.""", nanops.nansum, _stat_func_see_also, _sum_examples) cls.mean = _make_stat_function( cls, 'mean', name, name2, axis_descr, 'Return the mean of the values for the requested axis.', nanops.nanmean) cls.skew = _make_stat_function( cls, 'skew', name, name2, axis_descr, 'Return unbiased skew over requested axis\nNormalized by N-1.', nanops.nanskew) cls.kurt = _make_stat_function( cls, 'kurt', name, name2, axis_descr, "Return unbiased kurtosis over requested axis using Fisher's " "definition of\nkurtosis (kurtosis of normal == 0.0). Normalized " "by N-1.", nanops.nankurt) cls.kurtosis = cls.kurt cls.prod = _make_min_count_stat_function( cls, 'prod', name, name2, axis_descr, 'Return the product of the values for the requested axis.', nanops.nanprod, examples=_prod_examples) cls.product = cls.prod cls.median = _make_stat_function( cls, 'median', name, name2, axis_descr, 'Return the median of the values for the requested axis.', nanops.nanmedian) cls.max = _make_stat_function( cls, 'max', name, name2, axis_descr, """Return the maximum of the values for the requested axis.\n If you want the *index* of the maximum, use ``idxmax``. This is the equivalent of the ``numpy.ndarray`` method ``argmax``.""", nanops.nanmax, _stat_func_see_also, _max_examples) cls.min = _make_stat_function( cls, 'min', name, name2, axis_descr, """Return the minimum of the values for the requested axis.\n If you want the *index* of the minimum, use ``idxmin``. This is the equivalent of the ``numpy.ndarray`` method ``argmin``.""", nanops.nanmin, _stat_func_see_also, _min_examples) @classmethod def _add_series_only_operations(cls): """ Add the series only operations to the cls; evaluate the doc strings again. """ axis_descr, name, name2 = _doc_parms(cls) def nanptp(values, axis=0, skipna=True): nmax = nanops.nanmax(values, axis, skipna) nmin = nanops.nanmin(values, axis, skipna) warnings.warn("Method .ptp is deprecated and will be removed " "in a future version. Use numpy.ptp instead.", FutureWarning, stacklevel=4) return nmax - nmin cls.ptp = _make_stat_function( cls, 'ptp', name, name2, axis_descr, """Returns the difference between the maximum value and the minimum value in the object. 
This is the equivalent of the ``numpy.ndarray`` method ``ptp``.\n\n.. deprecated:: 0.24.0 Use numpy.ptp instead""", nanptp) @classmethod def _add_series_or_dataframe_operations(cls): """ Add the series or dataframe only operations to the cls; evaluate the doc strings again. """ from pandas.core import window as rwindow @Appender(rwindow.rolling.__doc__) def rolling(self, window, min_periods=None, center=False, win_type=None, on=None, axis=0, closed=None): axis = self._get_axis_number(axis) return rwindow.rolling(self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed) cls.rolling = rolling @Appender(rwindow.expanding.__doc__) def expanding(self, min_periods=1, center=False, axis=0): axis = self._get_axis_number(axis) return rwindow.expanding(self, min_periods=min_periods, center=center, axis=axis) cls.expanding = expanding @Appender(rwindow.ewm.__doc__) def ewm(self, com=None, span=None, halflife=None, alpha=None, min_periods=0, adjust=True, ignore_na=False, axis=0): axis = self._get_axis_number(axis) return rwindow.ewm(self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis) cls.ewm = ewm @Appender(_shared_docs['transform'] % dict(axis="", **_shared_doc_kwargs)) def transform(self, func, *args, **kwargs): result = self.agg(func, *args, **kwargs) if is_scalar(result) or len(result) != len(self): raise ValueError("transforms cannot produce " "aggregated results") return result # ---------------------------------------------------------------------- # Misc methods _shared_docs['valid_index'] = """ Return index for %(position)s non-NA/null value. Returns -------- scalar : type of index Notes -------- If all elements are non-NA/null, returns None. Also returns None for empty %(klass)s. """ def _find_valid_index(self, how): """ Retrieves the index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index """ assert how in ['first', 'last'] if len(self) == 0: # early stop return None is_valid = ~self.isna() if self.ndim == 2: is_valid = is_valid.any(1) # reduce axis 1 if how == 'first': idxpos = is_valid.values[::].argmax() if how == 'last': idxpos = len(self) - 1 - is_valid.values[::-1].argmax() chk_notna = is_valid.iat[idxpos] idx = self.index[idxpos] if not chk_notna: return None return idx @Appender(_shared_docs['valid_index'] % {'position': 'first', 'klass': 'NDFrame'}) def first_valid_index(self): return self._find_valid_index('first') @Appender(_shared_docs['valid_index'] % {'position': 'last', 'klass': 'NDFrame'}) def last_valid_index(self): return self._find_valid_index('last') def _doc_parms(cls): """Return a tuple of the doc parms.""" axis_descr = "{%s}" % ', '.join(["{0} ({1})".format(a, i) for i, a in enumerate(cls._AXIS_ORDERS)]) name = (cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else 'scalar') name2 = cls.__name__ return axis_descr, name, name2 _num_doc = """ %(desc)s Parameters ---------- axis : %(axis_descr)s Axis for the function to be applied on. skipna : bool, default True Exclude NA/null values when computing the result. level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a %(name1)s. numeric_only : bool, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. 
Not implemented for Series. %(min_count)s\ **kwargs Additional keyword arguments to be passed to the function. Returns ------- %(outname)s : %(name1)s or %(name2)s (if level specified) %(see_also)s %(examples)s\ """ _num_ddof_doc = """ %(desc)s Parameters ---------- axis : %(axis_descr)s skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a %(name1)s ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. numeric_only : boolean, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. Returns ------- %(outname)s : %(name1)s or %(name2)s (if level specified)\n""" _bool_doc = """ %(desc)s Parameters ---------- axis : {0 or 'index', 1 or 'columns', None}, default 0 Indicate which axis or axes should be reduced. * 0 / 'index' : reduce the index, return a Series whose index is the original column labels. * 1 / 'columns' : reduce the columns, return a Series whose index is the original index. * None : reduce all axes, return a scalar. bool_only : bool, default None Include only boolean columns. If None, will attempt to use everything, then use only boolean data. Not implemented for Series. skipna : bool, default True Exclude NA/null values. If the entire row/column is NA and skipna is True, then the result will be %(empty_value)s, as for an empty row/column. If skipna is False, then NA are treated as True, because these are not equal to zero. level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a %(name1)s. **kwargs : any, default None Additional keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- %(name1)s or %(name2)s If level is specified, then, %(name2)s is returned; otherwise, %(name1)s is returned. %(see_also)s %(examples)s""" _all_desc = """\ Return whether all elements are True, potentially over an axis. Returns True unless there at least one element within a series or along a Dataframe axis that is False or equivalent (e.g. zero or empty).""" _all_examples = """\ Examples -------- **Series** >>> pd.Series([True, True]).all() True >>> pd.Series([True, False]).all() False >>> pd.Series([]).all() True >>> pd.Series([np.nan]).all() True >>> pd.Series([np.nan]).all(skipna=False) True **DataFrames** Create a dataframe from a dictionary. >>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]}) >>> df col1 col2 0 True True 1 True False Default behaviour checks if column-wise values all return True. >>> df.all() col1 True col2 False dtype: bool Specify ``axis='columns'`` to check if row-wise values all return True. >>> df.all(axis='columns') 0 True 1 False dtype: bool Or ``axis=None`` for whether every value is True. >>> df.all(axis=None) False """ _all_see_also = """\ See Also -------- Series.all : Return True if all elements are True. DataFrame.any : Return True if one (or more) elements are True. """ _cnum_doc = """ Return cumulative %(desc)s over a DataFrame or Series axis. Returns a DataFrame or Series of the same size containing the cumulative %(desc)s. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The index or the name of the axis. 0 is equivalent to None or 'index'. 
skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. *args, **kwargs : Additional keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- %(outname)s : %(name1)s or %(name2)s\n See Also -------- core.window.Expanding.%(accum_func_name)s : Similar functionality but ignores ``NaN`` values. %(name2)s.%(accum_func_name)s : Return the %(desc)s over %(name2)s axis. %(name2)s.cummax : Return cumulative maximum over %(name2)s axis. %(name2)s.cummin : Return cumulative minimum over %(name2)s axis. %(name2)s.cumsum : Return cumulative sum over %(name2)s axis. %(name2)s.cumprod : Return cumulative product over %(name2)s axis. %(examples)s """ _cummin_examples = """\ Examples -------- **Series** >>> s = pd.Series([2, np.nan, 5, -1, 0]) >>> s 0 2.0 1 NaN 2 5.0 3 -1.0 4 0.0 dtype: float64 By default, NA values are ignored. >>> s.cummin() 0 2.0 1 NaN 2 2.0 3 -1.0 4 -1.0 dtype: float64 To include NA values in the operation, use ``skipna=False`` >>> s.cummin(skipna=False) 0 2.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 **DataFrame** >>> df = pd.DataFrame([[2.0, 1.0], ... [3.0, np.nan], ... [1.0, 0.0]], ... columns=list('AB')) >>> df A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 By default, iterates over rows and finds the minimum in each column. This is equivalent to ``axis=None`` or ``axis='index'``. >>> df.cummin() A B 0 2.0 1.0 1 2.0 NaN 2 1.0 0.0 To iterate over columns and find the minimum in each row, use ``axis=1`` >>> df.cummin(axis=1) A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 """ _cumsum_examples = """\ Examples -------- **Series** >>> s = pd.Series([2, np.nan, 5, -1, 0]) >>> s 0 2.0 1 NaN 2 5.0 3 -1.0 4 0.0 dtype: float64 By default, NA values are ignored. >>> s.cumsum() 0 2.0 1 NaN 2 7.0 3 6.0 4 6.0 dtype: float64 To include NA values in the operation, use ``skipna=False`` >>> s.cumsum(skipna=False) 0 2.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 **DataFrame** >>> df = pd.DataFrame([[2.0, 1.0], ... [3.0, np.nan], ... [1.0, 0.0]], ... columns=list('AB')) >>> df A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 By default, iterates over rows and finds the sum in each column. This is equivalent to ``axis=None`` or ``axis='index'``. >>> df.cumsum() A B 0 2.0 1.0 1 5.0 NaN 2 6.0 1.0 To iterate over columns and find the sum in each row, use ``axis=1`` >>> df.cumsum(axis=1) A B 0 2.0 3.0 1 3.0 NaN 2 1.0 1.0 """ _cumprod_examples = """\ Examples -------- **Series** >>> s = pd.Series([2, np.nan, 5, -1, 0]) >>> s 0 2.0 1 NaN 2 5.0 3 -1.0 4 0.0 dtype: float64 By default, NA values are ignored. >>> s.cumprod() 0 2.0 1 NaN 2 10.0 3 -10.0 4 -0.0 dtype: float64 To include NA values in the operation, use ``skipna=False`` >>> s.cumprod(skipna=False) 0 2.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 **DataFrame** >>> df = pd.DataFrame([[2.0, 1.0], ... [3.0, np.nan], ... [1.0, 0.0]], ... columns=list('AB')) >>> df A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 By default, iterates over rows and finds the product in each column. This is equivalent to ``axis=None`` or ``axis='index'``. >>> df.cumprod() A B 0 2.0 1.0 1 6.0 NaN 2 6.0 0.0 To iterate over columns and find the product in each row, use ``axis=1`` >>> df.cumprod(axis=1) A B 0 2.0 2.0 1 3.0 NaN 2 1.0 0.0 """ _cummax_examples = """\ Examples -------- **Series** >>> s = pd.Series([2, np.nan, 5, -1, 0]) >>> s 0 2.0 1 NaN 2 5.0 3 -1.0 4 0.0 dtype: float64 By default, NA values are ignored. 
>>> s.cummax() 0 2.0 1 NaN 2 5.0 3 5.0 4 5.0 dtype: float64 To include NA values in the operation, use ``skipna=False`` >>> s.cummax(skipna=False) 0 2.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 **DataFrame** >>> df = pd.DataFrame([[2.0, 1.0], ... [3.0, np.nan], ... [1.0, 0.0]], ... columns=list('AB')) >>> df A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 By default, iterates over rows and finds the maximum in each column. This is equivalent to ``axis=None`` or ``axis='index'``. >>> df.cummax() A B 0 2.0 1.0 1 3.0 NaN 2 3.0 1.0 To iterate over columns and find the maximum in each row, use ``axis=1`` >>> df.cummax(axis=1) A B 0 2.0 2.0 1 3.0 NaN 2 1.0 1.0 """ _any_see_also = """\ See Also -------- numpy.any : Numpy version of this method. Series.any : Return whether any element is True. Series.all : Return whether all elements are True. DataFrame.any : Return whether any element is True over requested axis. DataFrame.all : Return whether all elements are True over requested axis. """ _any_desc = """\ Return whether any element is True, potentially over an axis. Returns False unless there at least one element within a series or along a Dataframe axis that is True or equivalent (e.g. non-zero or non-empty).""" _any_examples = """\ Examples -------- **Series** For Series input, the output is a scalar indicating whether any element is True. >>> pd.Series([False, False]).any() False >>> pd.Series([True, False]).any() True >>> pd.Series([]).any() False >>> pd.Series([np.nan]).any() False >>> pd.Series([np.nan]).any(skipna=False) True **DataFrame** Whether each column contains at least one True element (the default). >>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]}) >>> df A B C 0 1 0 0 1 2 2 0 >>> df.any() A True B True C False dtype: bool Aggregating over the columns. >>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]}) >>> df A B 0 True 1 1 False 2 >>> df.any(axis='columns') 0 True 1 True dtype: bool >>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]}) >>> df A B 0 True 1 1 False 0 >>> df.any(axis='columns') 0 True 1 False dtype: bool Aggregating over the entire DataFrame with ``axis=None``. >>> df.any(axis=None) True `any` for an empty DataFrame is an empty Series. >>> pd.DataFrame([]).any() Series([], dtype: bool) """ _shared_docs['stat_func_example'] = """\ Examples -------- >>> idx = pd.MultiIndex.from_arrays([ ... ['warm', 'warm', 'cold', 'cold'], ... ['dog', 'falcon', 'fish', 'spider']], ... names=['blooded', 'animal']) >>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx) >>> s blooded animal warm dog 4 falcon 2 cold fish 0 spider 8 Name: legs, dtype: int64 >>> s.{stat_func}() {default_output} {verb} using level names, as well as indices. >>> s.{stat_func}(level='blooded') blooded warm {level_output_0} cold {level_output_1} Name: legs, dtype: int64 >>> s.{stat_func}(level=0) blooded warm {level_output_0} cold {level_output_1} Name: legs, dtype: int64 """ _sum_examples = _shared_docs['stat_func_example'].format( stat_func='sum', verb='Sum', default_output=14, level_output_0=6, level_output_1=8) _sum_examples += """ By default, the sum of an empty or all-NA Series is ``0``. >>> pd.Series([]).sum() # min_count=0 is the default 0.0 This can be controlled with the ``min_count`` parameter. For example, if you'd like the sum of an empty series to be NaN, pass ``min_count=1``. >>> pd.Series([]).sum(min_count=1) nan Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and empty series identically. 
>>> pd.Series([np.nan]).sum() 0.0 >>> pd.Series([np.nan]).sum(min_count=1) nan """ _max_examples = _shared_docs['stat_func_example'].format( stat_func='max', verb='Max', default_output=8, level_output_0=4, level_output_1=8) _min_examples = _shared_docs['stat_func_example'].format( stat_func='min', verb='Min', default_output=0, level_output_0=2, level_output_1=0) _stat_func_see_also = """ See Also -------- Series.sum : Return the sum. Series.min : Return the minimum. Series.max : Return the maximum. Series.idxmin : Return the index of the minimum. Series.idxmax : Return the index of the maximum. DataFrame.min : Return the sum over the requested axis. DataFrame.min : Return the minimum over the requested axis. DataFrame.max : Return the maximum over the requested axis. DataFrame.idxmin : Return the index of the minimum over the requested axis. DataFrame.idxmax : Return the index of the maximum over the requested axis. """ _prod_examples = """\ Examples -------- By default, the product of an empty or all-NA Series is ``1`` >>> pd.Series([]).prod() 1.0 This can be controlled with the ``min_count`` parameter >>> pd.Series([]).prod(min_count=1) nan Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and empty series identically. >>> pd.Series([np.nan]).prod() 1.0 >>> pd.Series([np.nan]).prod(min_count=1) nan """ _min_count_stub = """\ min_count : int, default 0 The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA. .. versionadded :: 0.22.0 Added with the default being 0. This means the sum of an all-NA or empty Series is 0, and the product of an all-NA or empty Series is 1. """ def _make_min_count_stat_function(cls, name, name1, name2, axis_descr, desc, f, see_also='', examples=''): @Substitution(outname=name, desc=desc, name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=see_also, examples=examples) @Appender(_num_doc) def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, min_count=0, **kwargs): if name == 'sum': nv.validate_sum(tuple(), kwargs) elif name == 'prod': nv.validate_prod(tuple(), kwargs) else: nv.validate_stat_func(tuple(), kwargs, fname=name) if skipna is None: skipna = True if axis is None: axis = self._stat_axis_number if level is not None: return self._agg_by_level(name, axis=axis, level=level, skipna=skipna, min_count=min_count) return self._reduce(f, name, axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count) return set_function_name(stat_func, name, cls) def _make_stat_function(cls, name, name1, name2, axis_descr, desc, f, see_also='', examples=''): @Substitution(outname=name, desc=desc, name1=name1, name2=name2, axis_descr=axis_descr, min_count='', see_also=see_also, examples=examples) @Appender(_num_doc) def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): if name == 'median': nv.validate_median(tuple(), kwargs) else: nv.validate_stat_func(tuple(), kwargs, fname=name) if skipna is None: skipna = True if axis is None: axis = self._stat_axis_number if level is not None: return self._agg_by_level(name, axis=axis, level=level, skipna=skipna) return self._reduce(f, name, axis=axis, skipna=skipna, numeric_only=numeric_only) return set_function_name(stat_func, name, cls) def _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f): @Substitution(outname=name, desc=desc, name1=name1, name2=name2, axis_descr=axis_descr) @Appender(_num_ddof_doc) def stat_func(self, 
axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs): nv.validate_stat_ddof_func(tuple(), kwargs, fname=name) if skipna is None: skipna = True if axis is None: axis = self._stat_axis_number if level is not None: return self._agg_by_level(name, axis=axis, level=level, skipna=skipna, ddof=ddof) return self._reduce(f, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof) return set_function_name(stat_func, name, cls) def _make_cum_function(cls, name, name1, name2, axis_descr, desc, accum_func, accum_func_name, mask_a, mask_b, examples): @Substitution(outname=name, desc=desc, name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name=accum_func_name, examples=examples) @Appender(_cnum_doc) def cum_func(self, axis=None, skipna=True, *args, **kwargs): skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name) if axis is None: axis = self._stat_axis_number else: axis = self._get_axis_number(axis) y = com.values_from_object(self).copy() if (skipna and issubclass(y.dtype.type, (np.datetime64, np.timedelta64))): result = accum_func(y, axis) mask = isna(self) np.putmask(result, mask, iNaT) elif skipna and not issubclass(y.dtype.type, (np.integer, np.bool_)): mask = isna(self) np.putmask(y, mask, mask_a) result = accum_func(y, axis) np.putmask(result, mask, mask_b) else: result = accum_func(y, axis) d = self._construct_axes_dict() d['copy'] = False return self._constructor(result, **d).__finalize__(self) return set_function_name(cum_func, name, cls) def _make_logical_function(cls, name, name1, name2, axis_descr, desc, f, see_also, examples, empty_value): @Substitution(outname=name, desc=desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=see_also, examples=examples, empty_value=empty_value) @Appender(_bool_doc) def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs): nv.validate_logical_func(tuple(), kwargs, fname=name) if level is not None: if bool_only is not None: raise NotImplementedError("Option bool_only is not " "implemented with option level.") return self._agg_by_level(name, axis=axis, level=level, skipna=skipna) return self._reduce(f, name, axis=axis, skipna=skipna, numeric_only=bool_only, filter_type='bool') return set_function_name(logical_func, name, cls) # install the indexes for _name, _indexer in indexing.get_indexers_list(): NDFrame._create_indexer(_name, _indexer)
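# A minimal usage sketch (separate from the pandas source above) of the
# percentile handling that `describe` implements: 0.5 is appended when it is
# missing, duplicates raise, and out-of-range values are rejected by
# `_check_percentile`. Assumes pandas and numpy are importable; the printed
# values are illustrative.
import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3, 4])

# The median (50%) appears even though it was not requested.
print(s.describe(percentiles=[0.1, 0.9]))

try:
    s.describe(percentiles=[0.25, 0.25])
except ValueError as err:
    print(err)  # "percentiles cannot contain duplicates"

try:
    s.describe(percentiles=[25])
except ValueError as err:
    print(err)  # "percentiles should all be in the interval [0, 1] ..."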
# -*- coding: utf-8 -*-
import random
import torch
from torch.utils import data
from torch import nn  # nn is short for "neural networks"
import numpy as np
from d2l import torch as d2l


# Construct a synthetic dataset
def synthetic_data(w, b, num_examples):
    """Generate y = Xw + b + noise.

    Uses the linear-model parameters w = [2, -3.4]^T, b = 4.2 and a noise
    term epsilon to generate the dataset and its labels.
    """
    X = torch.normal(0, 1, (num_examples, len(w)))  # num_examples x 2 matrix
    y = torch.matmul(X, w) + b                      # num_examples x 1 vector
    y += torch.normal(0, 0.01, y.shape)
    return X, y.reshape((-1, 1))


# Dataset reader.
# Input: batch size batch_size, feature matrix features, label vector labels
# Output: minibatches of size batch_size (features + labels)
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))  # range gives 0..n-1; convert to a list
    random.shuffle(indices)              # shuffle the indices -- neat!
    # With batch_size = 16, for example, every 16 indices form one group;
    # i advances by batch_size per iteration, i.e. i is the first index of
    # each new batch.
    for i in range(0, num_examples, batch_size):
        # Slice [i : i + batch_size] out of the list to get the current batch
        batch_indices = torch.tensor(
            indices[i:min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]
        # yield returns a value like `return`, but remembers where it stopped;
        # the next iteration resumes right after this point


# Define the linear regression model
def linreg(X, w, b):
    return torch.matmul(X, w) + b


# Define the loss function (squared error)
def squared_loss(y_hat, y):
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2


# Define the optimizer: minibatch stochastic gradient descent.
# params holds the model parameters w and b; lr is the learning rate.
def sgd(params, lr, batch_size):
    # No gradients need to be computed during the update
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size
            param.grad.zero_()


def my_train():
    # Initialize model parameters
    w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)
    b = torch.zeros(1, requires_grad=True)

    # Training hyperparameters
    lr = 0.03        # default: 0.03
    num_epochs = 3   # sweep the whole dataset three times
    net = linreg     # this indirection makes it easy to swap in other models later
    loss = squared_loss
    batch_size = 10  # default: 10

    # Training loop
    for epoch in range(num_epochs):
        for X, y in data_iter(batch_size, features, labels):
            l = loss(net(X, w, b), y)  # minibatch loss
            # Because `l` has shape (batch_size, 1) rather than being a scalar,
            # all elements of `l` are summed before computing the gradients
            # w.r.t. [w, b]; the optimizer therefore divides by batch_size.
            l.sum().backward()
            sgd([w, b], lr, batch_size)
        with torch.no_grad():
            train_l = loss(net(features, w, b), labels)
            print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')

    print(f'estimation error of w: {true_w - w.reshape(true_w.shape)}')
    print(f'estimation error of b: {true_b - b}')


# The following is the high-level PyTorch implementation:
def load_array(data_arrays, batch_size, is_train=True):
    """Construct a PyTorch data iterator."""
    dataset = data.TensorDataset(*data_arrays)
    return data.DataLoader(dataset, batch_size, shuffle=is_train)


def PyTorch_train():
    batch_size = 10

    # Data loading
    PyTorch_data_iter = load_array((features, labels), batch_size)

    # Network definition: a single-layer network with one fully connected
    # layer. The first argument of nn.Linear is the input feature size (2);
    # the second is the output feature size, a single scalar, hence 1.
    net = nn.Sequential(nn.Linear(2, 1))

    # Parameter initialization: methods ending in _ replace the tensor's
    # values in place.
    net[0].weight.data.normal_(0, 0.01)
    net[0].bias.data.fill_(0)

    # Loss function: squared L2 norm. By default it returns the mean loss
    # over all samples. The `reduction` parameter takes:
    #   none: no reduction is applied.
    #   mean: the sum of the output is divided by the number of elements.
    #   sum:  the output is summed.
    # With reduction='sum', the learning rate should be divided by the sample
    # count during optimization; here each step sees batch_size = 10 samples,
    # which amounts to lr = lr / 10.
    loss = nn.MSELoss()

    # Instantiate SGD
    trainer = torch.optim.SGD(net.parameters(), lr=0.03)

    # Training
    num_epochs = 3
    for epoch in range(num_epochs):
        for X, y in PyTorch_data_iter:
            l = loss(net(X), y)
            trainer.zero_grad()
            l.backward()  # unlike the from-scratch version, the averaging is already done inside the loss
            trainer.step()
        l = loss(net(features), labels)
        print(f'epoch {epoch + 1}, loss {l:f}')

    w = net[0].weight.data
    print('estimation error of w:', true_w - w.reshape(true_w.shape))
    b = net[0].bias.data
    print('estimation error of b:', true_b - b)


if __name__ == "__main__":
    # Parameters for generating the dataset
    true_w = torch.tensor([2, -3.4])
    true_b = 4.2
    features, labels = synthetic_data(true_w, true_b, 1000)
    # Each row of `features` holds one 2-dimensional data sample; each row of
    # `labels` holds a 1-dimensional label value (a scalar).
    # print('features:', features[0], '\nlabel:', labels[0])

    # A scatter plot of the second feature, features[:, 1], against `labels`
    # shows the linear relationship between the two directly.
    # d2l.set_figsize()
    # d2l.plt.scatter(features[:, (1)].detach().numpy(),
    #                 labels.detach().numpy(), 1);

    # From-scratch implementation
    my_train()
    # PyTorch implementation
    PyTorch_train()

# Q:
# 1. With the same number of epochs, the from-scratch implementation seems to
#    do slightly better?
# 2. loss function reduction='mean', lr=0.03:
#    epoch 1, loss 0.000361
#    epoch 2, loss 0.000095
#    epoch 3, loss 0.000095
#    estimation error of w: tensor([-0.0002, -0.0002])
#    estimation error of b: tensor([8.6784e-05])
#
#    loss function reduction='sum', lr = 0.03 / batch_size = 0.003:
#    epoch 1, loss 0.176310
#    epoch 2, loss 0.091707
#    epoch 3, loss 0.092358
#    estimation error of w: tensor([0.0007, 0.0012])
#    estimation error of b: tensor([0.0005])
# The two settings are equivalent in theory -- why does the loss function
# with reduction='mean' appear to do noticeably better than 'sum'?
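# A small sketch addressing the mean-vs-sum question raised above: with
# reduction='sum' and lr / batch_size, one SGD step is identical to
# reduction='mean' with lr, so the two setups should match step for step.
# (Assumes torch is importable; `one_step` is a local helper, not d2l API.)
import torch


def one_step(reduction, lr):
    torch.manual_seed(0)  # identical init and data on every call
    net = torch.nn.Linear(2, 1)
    opt = torch.optim.SGD(net.parameters(), lr=lr)
    X, y = torch.randn(10, 2), torch.randn(10, 1)
    loss = torch.nn.MSELoss(reduction=reduction)(net(X), y)
    opt.zero_grad()
    loss.backward()
    opt.step()
    return net.weight.detach().clone()


# Prints True: the parameterizations coincide. Note also that the printed
# losses above are not directly comparable -- 'sum' reports a total over all
# evaluated samples while 'mean' reports an average.
print(torch.allclose(one_step('mean', 0.03), one_step('sum', 0.03 / 10)))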
import asyncio
import aioredis


async def main():
    # Redis client bound to single connection (no auto reconnection).
    redis = await aioredis.create_redis(
        'redis://localhost')
    await redis.set('my-key', 'value')
    val = await redis.get('my-key')
    print(val)

    # gracefully closing underlying connection
    redis.close()
    await redis.wait_closed()


async def redis_pool():
    # Redis client bound to pool of connections (auto-reconnecting).
    redis = await aioredis.create_redis_pool(
        'redis://localhost')
    await redis.set('my-key', 'value')
    val = await redis.get('my-key')
    print(val)

    # gracefully closing underlying connection
    redis.close()
    await redis.wait_closed()


if __name__ == '__main__':
    asyncio.run(main())
    asyncio.run(redis_pool())
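# A hedged extension of the pool example above: create_redis_pool multiplexes
# connections, so concurrent commands can safely share one client. Assumes
# the same aioredis 1.x API used above; run with asyncio.run(concurrent_reads()).
import asyncio
import aioredis


async def concurrent_reads():
    redis = await aioredis.create_redis_pool('redis://localhost')
    await asyncio.gather(*(redis.set('key:%d' % i, i) for i in range(3)))
    values = await asyncio.gather(*(redis.get('key:%d' % i) for i in range(3)))
    print(values)  # [b'0', b'1', b'2']
    redis.close()
    await redis.wait_closed()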
# AUTOGENERATED FILE - DO NOT MODIFY!
# This file was generated by Djinni from my_enum.djinni

from djinni.support import MultiSet  # default imported in all files
from djinni.exception import CPyException  # default imported in all files
from djinni import exception  # this forces run of __init__.py which gives cpp option to call back into py to create exception

from enum import IntEnum


class MyEnum(IntEnum):
    Option1 = 0
    Option2 = 1
    Option3 = 2
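# A minimal usage sketch for the generated enum above (the assertions are
# illustrative and not part of the Djinni output). IntEnum members compare
# equal to their integer codes, which is what lets raw ints cross the
# C++/Python bridge unchanged.
assert MyEnum.Option2 == 1          # value comparison works like a plain int
assert MyEnum(2) is MyEnum.Option3  # ints coming from C++ map back to members
print(MyEnum.Option1.name, MyEnum.Option1.value)  # Option1 0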
from typing import List

from PyQt5.QtCore import QRectF

from actor.text_actor import TextActor
from config.hot_key import KeyCombo, HotKey
from observer.base_observer import BaseObserver
from observer.event.events import CustomEvent
from observer.vector_map_reprojector import VectorReprojector
from vector.vector import Vector


class CorrespondenceManager(BaseObserver):
    # TODO: Propose a better class name, and try to clean the logic of
    # this part.
    """
    Reprojects correspondences loaded from old JSON files in which the
    correspondences lack reprojected coordinates, and refreshes the displayed
    correspondences when the active trajectory node changes.
    """

    def __init__(self, editor: 'MapBasedCalibrator'):
        super().__init__(editor)
        self.QT_EVENT_CALLBACK_PRIORITY_TUPLES = [
            (CustomEvent.CorrespondencesLoadedEvent,
             self.on_correspondences_loaded, 0),
            (CustomEvent.TrajectoryNodeChangedEvent,
             self.on_trajectory_node_changed, 0),
            (CustomEvent.CalibrationOptimizedEvent,
             self.on_calibration_optimized, 0),
            (CustomEvent.KeyComboPressedEvent, self.on_key_press, 0),
        ]
        self._correspondence_id_actors = []  # type: List[TextActor]

    def on_key_press(self, key_combo: KeyCombo):
        if key_combo.is_same(HotKey.TOGGLE_CORRESPONDENCE_ID_DISPLAYING.value):
            self._toggle_correspondence_displaying()

    def on_correspondences_loaded(self):
        self._update_correspondence_reprojected_coords()
        self._prepare_correspondence_actors()
        self._enable_current_frame_correspondences()
        self.editor.side_bar_widget \
            .show_correspondences_checkbox.setChecked(True)
        self.update()

    def on_trajectory_node_changed(self):
        self._enable_current_frame_correspondences()
        self.update()

    def on_calibration_optimized(self):
        self._update_correspondence_reprojected_coords()
        self._prepare_correspondence_actors()
        self.update()

    def _update_correspondence_reprojected_coords(self):
        if self.editor.layer_manager.trajectory_layer() is None:
            return
        if self.editor.layer_manager.correspondence_layer(
                create_new_layer=False) is None:
            return
        unreprojected_correspondences = \
            self.editor.layer_manager.correspondence_layer().correspondences()
        if len(unreprojected_correspondences) == 0:
            return
        camera_intrinsics = \
            self.editor.layer_manager.trajectory_layer().camera_config()
        vector_reprojector = VectorReprojector()
        vector_reprojector.set_intrinsics(camera_intrinsics)
        for correspondence in unreprojected_correspondences:
            camera_extrinsic = self.editor.layer_manager.trajectory_layer() \
                .get_node_by_timestamp(
                    correspondence.timestamp()).T_camera_to_world()
            vector_reprojector.set_extrinsic(camera_extrinsic)
            reprojected_shape = vector_reprojector.reproject(
                Vector(correspondence.reprojected_shape().origin_vertices()))
            if reprojected_shape is not None:
                correspondence.set_reprojected_shape(reprojected_shape)

    def _prepare_correspondence_actors(self):
        if self.editor.layer_manager.correspondence_layer(
                create_new_layer=False) is None:
            return
        for correspondence in self.editor.layer_manager \
                .correspondence_layer().correspondences():
            correspondence.build_actor()

    def _enable_current_frame_correspondences(self):
        if self.editor.layer_manager.trajectory_layer() is None:
            return
        if self.editor.trajectory_navigator.current_trajectory_node() is None:
            return
        if self.editor.layer_manager.correspondence_layer(
                create_new_layer=False) is None:
            return
        current_timestamp = self.editor.trajectory_navigator \
            .current_trajectory_node().timestamp()
        correspondences = \
            self.editor.layer_manager.correspondence_layer().correspondences()
        for correspondence in correspondences:
            if correspondence.timestamp() == current_timestamp:
                self.renderer.add_actor(correspondence.actor())
            else:
                self.renderer.remove_actor(correspondence.actor())

    def _toggle_correspondence_displaying(self):
        # FIXME: Support dynamic id displaying in the future.
        if self.editor.layer_manager.correspondence_layer(
                create_new_layer=False) is None:
            return
        if len(self._correspondence_id_actors) > 0:
            for correspondence_id_actor in self._correspondence_id_actors:
                self.renderer.remove_actor(correspondence_id_actor)
            self._correspondence_id_actors.clear()
        else:
            for correspondence in self.editor.layer_manager \
                    .correspondence_layer().correspondences():
                id_actor = TextActor()
                id_actor.set_text(str(correspondence.id()))
                id_actor.property().set_color(54, 191, 153)
                screen_position = \
                    self.renderer.camera().transform_geometry(
                        correspondence.actor().geometry()).data()[0, :]
                id_actor.geometry().set_qt_geometry(
                    QRectF(screen_position[0], screen_position[1], 30, 30)
                )
                self.renderer.add_actor(id_actor)
                self._correspondence_id_actors.append(id_actor)
        self.update()
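# A self-contained sketch of the (event, callback, priority) dispatch pattern
# implied by QT_EVENT_CALLBACK_PRIORITY_TUPLES above; Event and Dispatcher
# here are illustrative stand-ins, not this project's actual BaseObserver
# machinery.
from enum import Enum, auto


class Event(Enum):
    CORRESPONDENCES_LOADED = auto()
    NODE_CHANGED = auto()


class Dispatcher:
    def __init__(self):
        self._subscriptions = []  # (event, callback, priority) tuples

    def subscribe(self, event, callback, priority=0):
        self._subscriptions.append((event, callback, priority))

    def emit(self, event, *args):
        # Lower priority values run first, mirroring the tuples above.
        for ev, cb, _ in sorted(self._subscriptions, key=lambda t: t[2]):
            if ev is event:
                cb(*args)


dispatcher = Dispatcher()
dispatcher.subscribe(Event.CORRESPONDENCES_LOADED,
                     lambda: print('reproject, build actors, refresh'))
dispatcher.emit(Event.CORRESPONDENCES_LOADED)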
# Standard Library import ast from collections import defaultdict from copy import copy # Local Modules import saplings.utilities as utils import saplings.tokenization as tkn from saplings.entities import ObjectNode, Function, Class, ClassInstance # import utilities as utils # import tokenization as tkn # from entities import ObjectNode, Function, Class, ClassInstance ########## # SAPLINGS ########## class Saplings(ast.NodeVisitor): def __init__(self, tree, object_hierarchies=[], namespace={}, track_modules=True): """ Extracts object hierarchies for imported modules in a program, given its AST. Parameters ---------- tree : ast.AST the AST representation of the program to be analyzed object_hierarchies : {list, optional} root nodes of existing object hierarchies namespace : {dict, optional} mapping of identifiers to ObjectNodes/Functions/Classes/ClassInstances """ self._object_hierarchies = [obj_node for obj_node in object_hierarchies] self._track_modules = track_modules # Maps active identifiers to namespace entities (e.g. ObjectNodes, # Functions, Classes, and ClassInstances) self._namespace = {name: entity for name, entity in namespace.items()} # Keeps track of functions defined in the current scope self._functions = set() # Namespace entity produced by the first evaluated return statement in # the AST self._return_value = None # True when a Return, Continue, or Break node is hit –– stops traversal # of tree self._is_traversal_halted = False self.visit(tree) self._process_uncalled_functions() ## Overloaded Methods ## def visit(self, node): """ Overloaded AST visitor. Behaves the same as ast.NodeVisitor.visit except it only traverses a subtree when self._is_traversal_halted == False. Parameters ---------- node : ast.AST root node of tree/subtree to be traversed Returns ------- output (if any) of overloaded node visitor functions """ method = "visit_" + node.__class__.__name__ visitor = getattr(self, method, self.generic_visit) if not self._is_traversal_halted: return visitor(node) ## Helpers ## def _process_uncalled_functions(self): """ Processes uncalled functions. If a function is defined but never called, we process the function body after the traversal is over and in the state of the namespace in which it was defined. """ # Don't process closures, as they may be called in a different scope returns_closure = isinstance(self._return_value, Function) if returns_closure and self._return_value in self._functions: self._functions.remove(self._return_value) while any(not f.called for f in self._functions): for function in self._functions.copy(): if function.called: self._functions.remove(function) continue self._process_function(function, function.init_namespace) def _process_subtree_in_new_scope(self, tree, namespace): """ Used to process a subtree in a different scope/namespace from the current scope. Can act as a "sandbox," where changes to the namespace don't affect the containing scope. Parameters ---------- tree : ast.AST root node of the subtree to process namespace : dict namespace within which the subtree should be processed Returns ------- Saplings instance of a Saplings object """ return Saplings(tree, self._object_hierarchies, namespace, self._track_modules) def _process_node(self, node): """ Processes an AST node. Processing involves tokenizing the node and then feeding it into _process_attribute_chain. 
Parameters ---------- node : ast.AST node to be processed Returns ------- list tokenized node {ObjectNode, Function, Class, ClassInstance, None} namespace entity produced by the node / attribute chain dict dictionary containing the class instance context (if any) the node was processed in """ tokenized_node = tkn.recursively_tokenize_node(node, []) entity, instance = self._process_attribute_chain(tokenized_node) return tokenized_node, entity, instance def _break_and_process_nested_chains(self, tokens, current_entity, current_instance): """ TODO """ for token in tokens: if not isinstance(token, tkn.CallToken): continue for arg_token in token: self._process_attribute_chain(arg_token.arg_val) current_entity = None current_instance["entity"] = None current_instance["init_index"] = 0 def _process_function_call(self, function, arguments): """ TODO """ if function.is_closure: func_namespace = { **self._namespace.copy(), **function.init_namespace } else: func_namespace = self._namespace.copy() return_value, _ = self._process_function( function, func_namespace, arguments ) # TODO (V2): Handle tuple returns (blocked by data structure handling) if isinstance(return_value, ObjectNode): return_value.increment_count() return return_value def _bind_entity_to_arguments(self, entity, arguments): """ TODO """ hidden_arg = tkn.ArgToken([tkn.NameToken("")]) self._namespace[""] = entity return [hidden_arg] + arguments def _process_method_call(self, function, arguments, class_instance=None): """ TODO """ if class_instance: # Method is called by instance if function.method_type == "class": arguments = self._bind_entity_to_arguments( class_instance.class_entity, arguments ) elif function.method_type == "instance": arguments = self._bind_entity_to_arguments( class_instance, arguments ) else: # Method is called by class arguments = self._bind_entity_to_arguments( function.containing_class, arguments ) return_value = self._process_function_call(function, arguments) if "" in self._namespace: del self._namespace[""] return return_value ## Processors ## def _process_module(self, module, standard_import=False): """ Takes a module and searches the set of object hierarchies for a matching root node. If no match is found, new root nodes are generated and appended to the set. Parameters ---------- module : string identifier for a module, sometimes a period-separated string of sub-modules standard_import : bool indicates whether the module was imported normally or the result of `from X import Y` Returns ------- ObjectNode terminal object hierarchy node for the module """ sub_modules = module.split('.') # For module.submodule1.submodule2... 
root_module = sub_modules[0] term_node = None for root in self._object_hierarchies: matching_module = utils.find_matching_node(root, root_module) if matching_module: term_node = matching_module break if not term_node: root_node = ObjectNode(root_module, order=-1) if standard_import: self._namespace[root_module] = root_node term_node = root_node self._object_hierarchies.append(term_node) for index in range(len(sub_modules[1:])): sub_module = sub_modules[index + 1] sub_module_alias = '.'.join([root_module] + sub_modules[1:index + 2]) matching_sub_module = utils.find_matching_node(term_node, sub_module) if matching_sub_module: term_node = matching_sub_module else: new_sub_module = ObjectNode(sub_module) if standard_import: self._namespace[sub_module_alias] = new_sub_module term_node.add_child(new_sub_module) term_node = new_sub_module return term_node def _process_default_args(self, arg_names, defaults, namespace): """ Takes arguments from a user-defined function's signature and processes their default values. Parameters ---------- arg_names : list list of argument names (strings) defaults : list list of default values for the args (ast.AST nodes) namespace : dict namespace in which the function with the defaults was defined Returns ------- dict map of argument names to their default value's namespace entity list list of argument names whose defaults had no corresponding namespace entity """ num_args, num_defaults = len(arg_names), len(defaults) arg_to_default_node, null_defaults = {}, [] for index, default in enumerate(defaults): if not default: # Only kw_defaults can be None continue arg_name_index = index + (num_args - num_defaults) arg_name = arg_names[arg_name_index] tokenized_default = tkn.recursively_tokenize_node(default, []) default_node, _ = self._process_subtree_in_new_scope( ast.Module(body=[]), namespace )._process_attribute_chain(tokenized_default) if not default_node: null_defaults.append(arg_name) continue arg_to_default_node[arg_name] = default_node return arg_to_default_node, null_defaults def _process_function(self, function, namespace, arguments=[]): """ Processes the arguments and body of a user-defined function. If the function is recursive, the recursive calls are not processed (otherwise this would throw `Saplings` into an infinite loop). If the function returns a closure, that function is added to the list of functions in the current scope. Parameters ---------- function : Function function that's being called namespace : dict namespace within which the function should be processed arguments : list, optional arguments passed into the function when called (as a list of ArgTokens) Returns ------- {ObjectNode, Function, Class, ClassInstance, None} namespace entity corresponding to the return value of the function; None if the function has no return value or returns something we don't care about (i.e. 
something that isn't tracked in the namespace) """ parameters = function.def_node.args pos_params = [a.arg for a in parameters.args] kw_params = [a.arg for a in parameters.kwonlyargs] # Namespace entities corresponding to default values default_entities, null_defaults = self._process_default_args( pos_params, parameters.defaults, namespace ) kw_default_entities, null_kw_defaults = self._process_default_args( kw_params, parameters.kw_defaults, namespace ) # Update namespace with default values namespace = {**namespace, **default_entities, **kw_default_entities} for null_arg_name in null_defaults + null_kw_defaults: if null_arg_name in namespace: del namespace[null_arg_name] utils.delete_sub_aliases(null_arg_name, namespace) for index, argument in enumerate(arguments): if argument.arg_name == '': # Positional argument if index < len(pos_params): arg_name = pos_params[index] else: # *arg self._process_attribute_chain(argument.arg_val) continue elif argument.arg_name is not None: # Keyword argument arg_name = argument.arg_name if arg_name not in pos_params + kw_params: # **kwargs self._process_attribute_chain(argument.arg_val) continue else: # **kwargs self._process_attribute_chain(argument.arg_val) continue arg_entity, _ = self._process_attribute_chain(argument.arg_val) if not arg_entity: if arg_name in namespace: del namespace[arg_name] utils.delete_sub_aliases(arg_name, namespace) continue utils.delete_sub_aliases(arg_name, namespace) namespace[arg_name] = arg_entity # TODO (V2): Handle star args and **kwargs (blocked by data structure # handling) if parameters.vararg and parameters.vararg.arg in namespace: del namespace[parameters.vararg.arg] utils.delete_sub_aliases(parameters.vararg.arg, namespace) if parameters.kwarg and parameters.kwarg.arg in namespace: del namespace[parameters.kwarg.arg] utils.delete_sub_aliases(parameters.kwarg.arg, namespace) # Handles recursive functions by deleting all names of the function node for name, node in list(namespace.items()): if node == function: del namespace[name] # Processes function body func_saplings = self._process_subtree_in_new_scope( ast.Module(body=function.def_node.body), namespace ) function.called = True return_value = func_saplings._return_value # If the function returns a closure then treat it like a function # defined in the current scope by adding it to self._functions if isinstance(return_value, Function): return_value.is_closure = True # TODO (V1): What if the function is defined in an outer scope but # returned in this scope? Then it's not a closure. Handle these. if not return_value.called: self._functions.add(return_value) elif isinstance(return_value, Class): for name, entity in return_value.init_instance_namespace.items(): if isinstance(entity, Function): entity.is_closure = True if not entity.called: self._functions.add(entity) return return_value, func_saplings def _process_assignment(self, target, val_entity): """ Handles variable assignments and aliasing. There are three types of assignments that we consider: 1. An identifier for an active namespace entity being reassigned to another active entity 2. An identifier for an active namespace entity being reassigned to a non-entity (i.e. AST node) 3. An identifier for a non-entity being assigned to an active namespace entity For any one of these, the current namespace is modified. 
Parameters ---------- target : ast.AST node representing the left-hand-side of the assignment val_entity : {ObjectNode, Function, Class, ClassInstance, None} namespace entity corresponding to the right-hand-side of the assignment """ tokenized_target, targ_entity, instance = self._process_node(target) # TODO (V2): Handle assignments to data structures. For an assignment # like foo = [bar(i) for i in range(10)], foo.__index__() should be an # alias for bar(). # TODO (V2): Handle assignments to class variables that propagate to # class instances (e.g. MyClass.variable = ...; my_instance.variable.foo()) if instance["entity"]: namespace = instance["entity"].namespace targ_str = tkn.stringify_tokenized_nodes(tokenized_target[instance["init_index"] + 1:]) else: namespace = self._namespace targ_str = tkn.stringify_tokenized_nodes(tokenized_target) # Type I: Known entity reassigned to other known entity (E2 = E1) if targ_entity and val_entity: namespace[targ_str] = val_entity utils.delete_sub_aliases(targ_str, namespace) # Type II: Known entity reassigned to non-entity (E1 = NE1) elif targ_entity and not val_entity: del namespace[targ_str] utils.delete_sub_aliases(targ_str, namespace) # Type III: Non-entity assigned to known entity (NE1 = E1) elif not targ_entity and val_entity: namespace[targ_str] = val_entity def _process_attribute_chain(self, attribute_chain): """ Master function for processing attribute chains. An attribute chain is a list of `Name` and `Call` tokens such that each `Name` token is an (n + 1)th-order attribute of the nearest prior `Name` token, where n is the number of `Call` tokens separating the two. For example: ```python module.bar(my.var, my_func).lorem[0] + ipsum ``` This expression is an `ast.BinOp` node which, when passed into `recursively_tokenize_node`, produces the following attribute chain: ```python [ NameToken("module"), NameToken("bar"), CallToken([ ArgToken([NameToken("my"), NameToken("var")]), ArgToken([NameToken("my_func")]) ]), NameToken("lorem") NameToken("__index__"), CallToken([ ArgToken([ast.Num(0)]) ]), NameToken("__add__"), CallToken([ ArgToken([NameToken("ipsum")]) ]) ] ``` The attribute chain is given to this function as `attribute_chain`, which checks the namespace for entities referenced in the chain ... 
TODO Parameters ---------- tokens : list list of `NameToken`s and `CallToken`s Returns ------- {ObjectNode, Function, Class, ClassInstance}, context_dict """ current_entity = None current_instance = {"entity": None, "init_index": 0} for index, token in enumerate(attribute_chain): if index and not current_entity: self._break_and_process_nested_chains( attribute_chain[index:], current_entity, current_instance ) break if isinstance(token, tkn.CallToken): if isinstance(current_entity, Function): if current_instance["entity"]: # Process call of function from instance of a # user-defined class current_entity = self._process_method_call( current_entity, token.args, current_instance["entity"] ) else: if current_entity.method_type == "class": # Process call of function from user-defined class current_entity = self._process_method_call( current_entity, token.args ) else: # Process call of user-defined function that is # either unbound to a class or a static method current_entity = self._process_function_call( current_entity, token.args ) if isinstance(current_entity, ClassInstance): current_instance["entity"] = current_entity current_instance["init_index"] = index continue elif isinstance(current_entity, Class): # Process instantiation of user-defined class init_namespace = current_entity.init_instance_namespace class_instance = ClassInstance( current_entity, init_namespace.copy() ) if "__init__" in init_namespace: constructor = init_namespace["__init__"] if isinstance(constructor, Function): self._process_method_call( constructor, token.args, class_instance ) else: # If __init__ is not callable, class cannot be # instantiated self._break_and_process_nested_chains( attribute_chain[index + 1:], current_entity, current_instance ) break # BUG: __init__ may be a lambda function or an # ObjectNode (e.g. __init__ = module.imported_init) else: # BUG: If __init__ is defined in the base class then # it's a black box and may make unknown changes to the # instance namespace pass current_entity = class_instance current_instance["entity"] = class_instance current_instance["init_index"] = index continue elif isinstance(current_entity, ClassInstance): # Process call of instance of user-defined class if "__call__" in current_entity.namespace: call_entity = current_entity.namespace["__call__"] # BUG: __call__ may be a lambda function or an # ObjectNode (e.g. __call__ = module.imported_call) if isinstance(call_entity, Function): current_entity = self._process_method_call( call_entity, token.args, current_entity ) if isinstance(current_entity, ClassInstance): current_instance["entity"] = current_entity current_instance["init_index"] = index continue self._break_and_process_nested_chains( attribute_chain[index + 1:], current_entity, current_instance ) break # BUG: If __call__ is defined in the base class then # breaking could produce false negatives else: for arg_token in token: self._process_attribute_chain(arg_token.arg_val) elif not isinstance(token, tkn.NameToken): # token is ast.AST node self.visit(token) # TODO (V1): Handle IfExps and Lambdas (e.g. 
(lambda x: x.attr)(module.foo)) self._break_and_process_nested_chains( attribute_chain[index + 1:], current_entity, current_instance ) break if current_instance["entity"]: namespace = current_instance["entity"].namespace token_seq = attribute_chain[current_instance["init_index"] + 1:index + 1] else: namespace = self._namespace token_seq = attribute_chain[:index + 1] token_str = tkn.stringify_tokenized_nodes(token_seq) if token_str in namespace: current_entity = namespace[token_str] if isinstance(current_entity, ClassInstance): current_instance["entity"] = current_entity current_instance["init_index"] = index elif isinstance(current_entity, ObjectNode): current_entity.increment_count() elif isinstance(current_entity, ObjectNode): # Base node exists –– create and append its child current_entity = current_entity.add_child(ObjectNode(str(token))) namespace[token_str] = current_entity else: current_entity = None last_token_is_instance = current_instance["init_index"] == len(attribute_chain) - 1 if isinstance(current_entity, ClassInstance) and last_token_is_instance: current_instance = {"entity": None, "init_index": 0} return current_entity, current_instance ## Aliasing Handlers ## def visit_Import(self, node): """ TODO """ if not self._track_modules: return for module in node.names: if module.name.startswith('.'): # Ignores relative imports continue alias = module.asname if module.asname else module.name module_leaf_node = self._process_module( module=module.name, standard_import=not bool(module.asname) ) self._namespace[alias] = module_leaf_node def visit_ImportFrom(self, node): """ TODO """ if not self._track_modules: return if node.level: # Ignores relative imports return module_node = self._process_module( module=node.module, standard_import=False ) for alias in node.names: if alias.name == '*': # Ignore star imports continue child_exists = False alias_id = alias.asname if alias.asname else alias.name for child in module_node.children: if alias.name == child.name: child_exists = True self._namespace[alias_id] = child break if not child_exists: new_child = ObjectNode(alias.name) self._namespace[alias_id] = new_child module_node.add_child(new_child) def visit_Assign(self, node): """ TODO """ if isinstance(node.value, ast.Tuple): values = [] for value in node.value.elts: _, val_entity, _ = self._process_node(value) values.append(val_entity) else: _, values, _ = self._process_node(node.value) targets = node.targets if hasattr(node, "targets") else (node.target,) for target in targets: # Multiple assignment (e.g. a = b = ...) if isinstance(target, ast.Tuple): # Unpacking (e.g. a, b = ...) for index, elt in enumerate(target.elts): if isinstance(values, list): self._process_assignment(elt, values[index]) else: self._process_assignment(elt, values) elif isinstance(values, list): for value in values: self._process_assignment(target, value) else: self._process_assignment(target, values) def visit_AnnAssign(self, node): self.visit_Assign(node) def visit_AugAssign(self, node): """ TODO """ target = node.target value = ast.BinOp(left=copy(target), op=node.op, right=node.value) _, val_entity, _ = self._process_node(value) self._process_assignment(target, val_entity) def visit_Delete(self, node): """ TODO """ for target in node.targets: target_str = utils.stringify_node(target) utils.delete_sub_aliases(target_str, self._namespace) ## Function and Class Handlers ## def visit_FunctionDef(self, node): """ Handles user-defined functions. 
When a user-defined function is called, it can return a module construct (i.e. a reference to a d-tree node). But as Python is dynamically typed, we don't know the return type until the function is called. Thus, we only traverse the function body and process those nodes when it's called and we know the types of the inputs. And if a user-defined function is never called, we process it at the end of the AST traversal and in the namespace it was defined in. This processing is done by self._process_function. All this visitor does is alias the node (i.e. saves it to self._namespace) and adds it to the self._func_state_lookup_table, along with a copy of the namespace in the state the function is defined in. Parameters ---------- node : ast.FunctionDef name : raw string of the function name args : ast.arguments node body : list of nodes inside the function decorator_list : list of decorators to be applied returns : return annotation (Python 3 only) type_comment : string containing the PEP 484 type comment """ # NOTE: namespace is only used if the function is never called or if its # a closure function = Function( node, self._namespace.copy(), is_closure=False, called=False ) self._namespace[node.name] = function self._functions.add(function) if node.decorator_list: decorator_call_node = utils.create_decorator_call_node( node.decorator_list, ast.Name(node.name) ) _, entity, _ = self._process_node(decorator_call_node) if not entity: return function self._namespace[node.name] = entity if isinstance(entity, Function): self._functions.add(entity) return entity return function def visit_AsyncFunctionDef(self, node): return self.visit_FunctionDef(node) def visit_Lambda(self, node): """ TODO """ namespace = self._namespace.copy() args = node.args.args + node.args.kwonlyargs if node.args.vararg: args += [node.args.vararg] if node.args.kwarg: args += [node.args.kwarg] # node.args.default for arg in args: arg_name = arg.arg if arg_name not in namespace: continue del namespace[arg_name] self._process_subtree_in_new_scope(node.body, namespace) # TODO (V2): Handle assignments to lambdas and lambda function calls def visit_Return(self, node): """ TODO """ if node.value: _, self._return_value, _ = self._process_node(node.value) # BUG: What about instance, returned by _process_node? self._is_traversal_halted = True def visit_ClassDef(self, node): """ TODO """ for base_node in node.bases: # TODO (V2): Handle inheritance self.visit(ast.Call(func=base_node, args=[], keywords=[])) # TODO (V2): Handle metaclasses methods, nested_classes, static_variables = [], [], [] stripped_body = [] # ;) for n in node.body: if isinstance(n, (ast.FunctionDef, ast.AsyncFunctionDef)): for decorator in n.decorator_list: if not hasattr(decorator, "id"): continue if decorator.id == "staticmethod": n.method_type = "static" break elif decorator.id == "classmethod": n.method_type = "class" break else: n.method_type = "instance" methods.append(n) continue elif isinstance(n, ast.ClassDef): nested_classes.append(n) continue elif isinstance(n, (ast.Assign, ast.AnnAssign, ast.AugAssign)): # BUG: Static variables can be defined without an assignment. # For example: # class foo(object): # for x in range(10): # continue # foo.x is valid. 
targets = [n.target] if not isinstance(n, ast.Assign) else n.targets for target in targets: if isinstance(target, ast.Tuple): for element in target.elts: static_variables.append(utils.stringify_node(element)) else: static_variables.append(utils.stringify_node(target)) stripped_body.append(n) class_level_namespace = self._process_subtree_in_new_scope( ast.Module(body=stripped_body), self._namespace.copy() )._namespace class_entity = Class(node, self._namespace.copy()) self._namespace[node.name] = class_entity static_variable_map = {} for name, n in class_level_namespace.items(): if name in static_variables: self._namespace['.'.join((node.name, name))] = n static_variable_map[name] = n def create_callable_attribute_map(callables): callable_map = {} for callable in callables: callable_name = callable.name # Handles callables that are accessed by the enclosing class adjusted_name = '.'.join((node.name, callable_name)) callable.name = adjusted_name if isinstance(callable, (ast.FunctionDef, ast.AsyncFunctionDef)): entity = self.visit_FunctionDef(callable) elif isinstance(callable, ast.ClassDef): entity = self.visit_ClassDef(callable) callable.name = callable_name if isinstance(entity, Function): entity.method_type = callable.method_type entity.containing_class = class_entity callable_map[callable_name] = entity return callable_map method_map = create_callable_attribute_map(methods) nested_class_map = create_callable_attribute_map(nested_classes) # Everything here is an attribute of `self` class_entity.init_instance_namespace = { **static_variable_map, **method_map, **nested_class_map } return class_entity ## Control Flow Handlers ## def visit_If(self, node): """ Namespace changes in the first `If` block persist into the parent context, but changes made in `Elif` or `Else` blocks do not. TODO Parameters ---------- node : ast.If test ... body ... orelse ... 
""" self.visit(node.test) sub_namespaces = [] for if_body in [ast.Module(body=node.body)] + node.orelse: sub_namespace = self._process_subtree_in_new_scope( tree=if_body, namespace=self._namespace.copy() )._namespace sub_namespaces.append(sub_namespace) for sub_namespace in sub_namespaces: utils.diff_and_clean_namespaces(self._namespace, sub_namespace) def visit_For(self, node): """ TODO Parameters ---------- node : ast.For target : node holding variable(s) the loop assigns to iter : node holding item to be looped over body : list of nodes to execute orelse : list of nodes to execute (only executed if the loop finishes normally, rather than via a break statement) type_comment : string containing the PEP 484 comment """ # We treat the target as a subscript of iter target_assignment = ast.Assign( target=node.target, value=ast.Call( func=ast.Attribute( value=node.iter, attr="__iter__", ctx=ast.Load() ), args=[], keywords=[] ) ) self.visit(ast.Module(body=[target_assignment] + node.body)) if not self._is_traversal_halted: self.visit(ast.Module(body=node.orelse)) # If loop is broken by anything other than a return statement, then we # don't want to halt the traversal outside of the loop if not self._return_value: self._is_traversal_halted = False def visit_AsyncFor(self, node): self.visit_For(node) def visit_While(self, node): """ TODO """ self.visit(ast.Module(body=node.body)) if not self._is_traversal_halted: self.visit(ast.Module(body=node.orelse)) # If loop is broken by anything other than a return statement, then we # don't want to halt the traversal outside of the loop if not self._return_value: self._is_traversal_halted = False def visit_Try(self, node): """ TODO """ try_namespace = self._process_subtree_in_new_scope( tree=ast.Module(body=node.body + node.orelse), namespace=self._namespace.copy() )._namespace sub_namespaces = [try_namespace] for except_handler_node in node.handlers: except_namespace = self.visit_ExceptHandler(except_handler_node) sub_namespaces.append(except_namespace) for sub_namespace in sub_namespaces: utils.diff_and_clean_namespaces(self._namespace, sub_namespace) # node.finalbody is executed no matter what self.visit(ast.Module(body=node.finalbody)) def visit_ExceptHandler(self, node): """ TODO """ body_to_process = node.body if node.type and node.name: exception_alias_assign_node = ast.Assign( targets=[ast.Name(id=node.name, ctx=ast.Store())], value=node.type ) body_to_process.insert(0, exception_alias_assign_node) elif node.type: self.visit(node.type) except_namespace = self._process_subtree_in_new_scope( ast.Module(body=body_to_process), self._namespace.copy() )._namespace return except_namespace def visit_withitem(self, node): """ TODO """ # TODO (V1): Add call nodes for .__enter__() and .__exit__() if node.optional_vars: assign_node = ast.Assign( targets=[node.optional_vars], value=node.context_expr ) self.visit(assign_node) else: self.visit(node.context_expr) def visit_Continue(self, node): self._is_traversal_halted = True def visit_Break(self, node): self._is_traversal_halted = True ## Core Handlers ## @utils.attribute_chain_handler def visit_Name(self, node): pass @utils.attribute_chain_handler def visit_Attribute(self, node): pass @utils.attribute_chain_handler def visit_Call(self, node): pass @utils.attribute_chain_handler def visit_Subscript(self, node): pass @utils.attribute_chain_handler def visit_BinOp(self, node): pass @utils.attribute_chain_handler def visit_Compare(self, node): pass ## Data Structure Handlers ## # TODO (V2): Allow Type I assignments 
to dictionaries, lists, sets, and # tuples. Right now, assignments to data structures are treated as Type II. # For example, "attr" would not be captured in the following script: # import module # my_var = [module.func0(), module.func1(), module.func2()] # my_var[0].attr def _comprehension_helper(self, elts, generators): """ TODO """ comprehension_body = [] for generator in generators: iter_node = ast.Assign( target=generator.target, value=ast.Subscript( value=generator.iter, slice=ast.Index(value=ast.NameConstant(None)), ctx=ast.Load() ) ) comprehension_body.append(iter_node) comprehension_body.extend(generator.ifs) self._process_subtree_in_new_scope( ast.Module(body=comprehension_body + elts), self._namespace.copy() ) def visit_ListComp(self, node): self._comprehension_helper([node.elt], node.generators) def visit_SetComp(self, node): return self.visit_ListComp(node) def visit_GeneratorExp(self, node): return self.visit_ListComp(node) def visit_DictComp(self, node): self._comprehension_helper([node.key, node.value], node.generators) ## Public Methods ## def get_trees(self): """ TODO """ trees = [] for root_node in self._object_hierarchies: utils.consolidate_call_nodes(root_node) trees.append(root_node) return trees
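# A minimal, self-contained sketch of the ObjectNode d-tree idea used by the
# analyzer above. This `ObjectNode` is a toy re-implementation for
# illustration only, not the library's class: it shows how a dotted attribute
# chain such as `module.attr.method` maps onto a path of parent/child nodes
# whose counts record how often each sub-chain was referenced.


class ObjectNode:
    def __init__(self, name):
        self.name = name
        self.children = []
        self.count = 1

    def add_child(self, child):
        self.children.append(child)
        return child  # returning the child lets callers walk down the tree

    def increment_count(self):
        self.count += 1


def record_chain(root, chain):
    """Walk (and extend) the tree for one dotted attribute chain."""
    node = root
    for name in chain:
        for child in node.children:
            if child.name == name:
                child.increment_count()
                node = child
                break
        else:
            node = node.add_child(ObjectNode(name))
    return node


root = ObjectNode("module")
record_chain(root, ["attr", "method"])
record_chain(root, ["attr"])
assert root.children[0].name == "attr" and root.children[0].count == 2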
from json import loads, dump
import os


class MStore:
    _data = {}
    _is_changed = False
    filename = ""
    indent = None
    sort_keys = False

    def __init__(self, *args, **kwargs):
        """Manipulate a file as a python dict:
        - use load and save to manipulate the file
        - make dict operations such as:
            * a['b'] = 'c'
            * del a['b']
        """
        # give every instance its own dict; otherwise all instances would
        # share the mutable class attribute
        self._data = {}
        self.filename = kwargs.get("filename", "")
        self.indent = kwargs.get("indent", None)
        self.sort_keys = kwargs.get("sort_keys", False)
        if self.filename:
            self.load(self.filename)

    def load(self, filename=""):
        """Get the file and open it as a dict; if it does not exist, create it."""
        try:
            if not self.filename:
                self.filename = filename
            if not os.path.isfile(self.filename):
                self.save()
            with open(self.filename, 'r', encoding="utf-8") as fd:
                self._data = fd.read()
            if not self._data:
                self._data = "{}"
            self._data = loads(self._data)
        except Exception as e:
            raise Exception(f"Error {str(e)}")

    def save(self):
        """Save the dict to the file."""
        try:
            with open(self.filename, 'w', encoding="utf-8") as fd:
                dump(
                    self._data,
                    fd,
                    ensure_ascii=False,
                    indent=self.indent,
                    sort_keys=self.sort_keys)
            return {"success": True}
        except Exception as e:
            return {"error": str(e)}

    def __repr__(self):
        return str(self._data)

    def __setitem__(self, key, val):
        self._data[key] = val
        self.save()

    def __getitem__(self, key):
        return self.get(key)

    def __delitem__(self, key):
        # delete the key and persist the change
        return self.store_delete(key)

    def __contains__(self, key):
        return self.exists(key)

    def __len__(self):
        return self.count()

    def __iter__(self):
        for key in self.keys():
            yield key

    def get(self, key):
        '''Get the value stored at `key`.

        If the key is not found, a `KeyError` exception will be thrown.
        '''
        return self.store_get(key)

    def keys(self):
        '''Return a list of all the keys in the storage.
        '''
        return self.store_keys()

    def exists(self, key):
        '''Check if a key exists in the store.
        '''
        return self.store_exists(key)

    def count(self):
        '''Return the number of entries in the storage.
        '''
        return self.store_count()

    def store_get(self, key):
        return self._data[key]

    def store_exists(self, key):
        return key in self._data

    def store_put(self, key, value):
        self._data[key] = value
        self._is_changed = True
        return True

    def store_delete(self, key):
        del self._data[key]
        self._is_changed = True
        self.save()
        return True

    def store_find(self, filters):
        for key, values in iter(self._data.items()):
            found = True
            for fkey, fvalue in iter(filters.items()):
                if fkey not in values:
                    found = False
                    break
                if values[fkey] != fvalue:
                    found = False
                    break
            if found:
                yield key, values

    def store_count(self):
        return len(self._data)

    def store_keys(self):
        return list(self._data.keys())
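# A minimal usage sketch for MStore, assuming the class above is in scope.
# Every __setitem__ and __delitem__ persists immediately, so the JSON file
# always mirrors the in-memory dict. The temp path is just for the demo.

import tempfile

path = os.path.join(tempfile.mkdtemp(), "settings.json")
store = MStore(filename=path, indent=2, sort_keys=True)
store["theme"] = "dark"          # written straight to settings.json
assert "theme" in store and store["theme"] == "dark"
del store["theme"]               # removed from the dict and re-saved
assert len(store) == 0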
import random from PIL import Image, ImageDraw image = Image.open("../roof.jpg") draw = ImageDraw.Draw(image) width = image.size[0] height = image.size[1] pix = image.load() for x in range(width): for y in range(height): r = round(pix[x, y][0]/2) g = round(pix[x, y][1]/2) b = round(pix[x, y][2]/2) draw.point((x, y), (r, g, b)) image.save("roofDark.jpg") del draw
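# The per-pixel loop above is correct but slow in pure Python. A sketch of
# the same halving using Pillow's built-in point operation, which builds a
# lookup table once and applies it in C to every band. Note that // 2 floors
# the value rather than rounding it, so results can differ by 1.

from PIL import Image

img = Image.open("../roof.jpg")
darker = img.point(lambda v: v // 2)  # roughly value/2 for each R, G, B sample
darker.save("roofDark.jpg")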
from rest_framework import permissions # based on https://www.django-rest-framework.org/api-guide/permissions/#examples class IsOwner(permissions.BasePermission): """Object-level permission to only allow owners of an object to interact with it.""" # pylint: disable=no-self-use,unused-argument def has_object_permission(self, request, view, obj): # Instance must have an attribute named `owner`. return obj.owner == request.user
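# A sketch of how a permission like IsOwner is typically wired into a view,
# per the DRF docs linked above. `Snippet` and `SnippetSerializer` are
# hypothetical placeholders for a model with an `owner` field.

from rest_framework import viewsets


class SnippetViewSet(viewsets.ModelViewSet):
    queryset = Snippet.objects.all()
    serializer_class = SnippetSerializer
    # has_object_permission is only consulted for detail routes, so pair it
    # with a request-level permission to also guard list/create.
    permission_classes = [permissions.IsAuthenticated, IsOwner]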
from django.contrib import admin from tinymce.widgets import TinyMCE from .models import Video from django.db import models ##admin.site.register(Item) admin.site.register(Video) # Register your models here.
from ale.base.base import Driver
import lab as B from gpcm.sample import ESS from stheno import Normal import numpy as np from .util import approx def test_ess(): # Construct a prior and a likelihood. prior = Normal(np.array([[0.6, 0.3], [0.3, 0.6]])) lik = Normal( np.array([[0.2], [0.3]]), np.array([[1, 0.2], [0.2, 1]]), ) # Perform sampling. sampler = ESS(lik.logpdf, prior.sample) num_samples = 30_000 samples = B.concat(*sampler.sample(num=num_samples), axis=1) samples_mean = B.mean(samples, axis=1)[:, None] samples_cov = ( B.matmul(samples - samples_mean, samples - samples_mean, tr_b=True) / num_samples ) # Compute posterior statistics. prec_prior = B.inv(prior.var) prec_lik = B.inv(lik.var) cov = B.inv(prec_prior + prec_lik) mean = cov @ (prec_prior @ prior.mean + prec_lik @ lik.mean) approx(samples_cov, cov, atol=5e-2) approx(samples_mean, mean, atol=5e-2)
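# For reference, a minimal NumPy sketch of one elliptical slice sampling
# update (Murray, Adams & MacKay, 2010), the algorithm the ESS class above is
# expected to implement. It assumes a zero-mean Gaussian prior, matching the
# test; `log_lik` plays the role of lik.logpdf and `prior_sample` of
# prior.sample, both local stand-ins rather than the library's API.

import numpy as np


def ess_step(f, log_lik, prior_sample, rng=None):
    """One elliptical slice sampling update for a zero-mean Gaussian prior."""
    rng = rng or np.random.default_rng()
    nu = prior_sample()                          # auxiliary prior draw
    log_u = log_lik(f) + np.log(rng.uniform())   # log of the slice height
    theta = rng.uniform(0.0, 2.0 * np.pi)        # initial angle on the ellipse
    lo, hi = theta - 2.0 * np.pi, theta          # shrinking bracket
    while True:
        f_new = f * np.cos(theta) + nu * np.sin(theta)
        if log_lik(f_new) > log_u:
            return f_new                         # accepted: point on the slice
        # shrink the bracket towards theta = 0 (the current state) and retry
        if theta < 0.0:
            lo = theta
        else:
            hi = theta
        theta = rng.uniform(lo, hi)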
from bs4 import BeautifulSoup
import requests
import csv


def writetoCsv(filename, url, dynamicurl):
    out = open(filename, 'w+', encoding='gb18030', newline='')
    csv_write = csv.writer(out)
    # header columns: rating, review title, username, time, review content
    csv_write.writerow(['评分', '评论题目', '用户名', '时间', '评论内容'])
    dynamiCrawl(url, dynamicurl, csv_write)
    out.close()


# NOTE: dynamiCrawl(), url1/dynamicurl1 and url2/dynamicurl2 are defined elsewhere
writetoCsv('imdbcommuter.csv', url1, dynamicurl1)
writetoCsv('imdbparis.csv', url2, dynamicurl2)

import csv


# Reading a csv file
def csvRead(filename):
    csv_reader = csv.reader(open(filename, encoding='gb18030'))
    for row in csv_reader:
        username = row[0]
        url = row[1]
        print(row[1])
        # crawl(username, url)

import urllib.request
import json
import time
from urllib.error import HTTPError
import requests

cook = '_zap=103062c2-b140-4bb3-9b09-c6cf339bb388; d_c0="AECCyMfSwQyPTpKO0aCmkPUGPKluXFgq_Bw=|1511945695"; __DAYU_PP=EbUEJqyeeFNufYfVivYR2ba2111a3034; _xsrf=342e908a-93bd-4e26-bb31-ee4cb0e42acb; q_c1=deb5f34c9c784ae5b70a9cae0b24ee5d|1521542185000|1501208857000; capsion_ticket="2|1:0|10:1521558824|14:capsion_ticket|44:MjEwZjI4NTI4Mjk2NGRkZWFiMWZjY2JjY2I3Mjk0NjA=|629b05c7a8cb266f450ffe58788e2c07e56051bdcc00b7957f702bd49017e4c5"; z_c0="2|1:0|10:1521558888|4:z_c0|92:Mi4xdDlrX0FnQUFBQUFBUUlMSXg5TEJEQ1lBQUFCZ0FsVk5hSE9lV3dCaHRfdnRXbnYzc3FiRGdFZUpBMW1CcmstUFZ3|f3cebdebc483f14d2bab9d9f4048d79bd5f77237fe51453a66c2a90d8bf8d54d"; unlock_ticket="ABCK5wre8ggmAAAAYAJVTXAssVpzFDh0uhrcnxHvYPpchsYSmNRR_g=="'
headers = {
    "Cookie": cook,
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit 537.36 (KHTML, like Gecko) Chrome",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"}


def getObj(url):
    html = None
    for i in range(5):
        try:
            html = requests.get(url, headers=headers)
            html.encoding = "gbk"
            break
        except requests.RequestException:
            # requests raises RequestException rather than urllib's HTTPError
            time.sleep(3)
            print('Retrying request')
    if html is None:
        print('HTTPError')
        return None
    try:
        bsObj = BeautifulSoup(html.text, 'html.parser')
    except AttributeError:
        print('AttributeError')
        return None
    return bsObj


# Helper for fetching a json document
def getJson(url):
    # fetch the json with a GET request
    f = urllib.request.Request(url, headers=headers)
    html = urllib.request.urlopen(f).read().decode('utf-8', "ignore")
    # request the json data with a POST
    # NOTE: citycode and page are defined elsewhere
    r = requests.post(url, data={'cityCode': citycode, 'page': page})
    try:
        data = json.loads(r.text)["BusinessHallList"]
        return data
    except (ValueError, KeyError):
        # malformed responses fall through to None
        return None

import urllib.request
from selenium import webdriver


# can be used when js-rendered or redirected data cannot be scraped directly
def phangetObj(url):
    driver = webdriver.PhantomJS(executable_path="phantomjs.exe")
    # request the page with the headless browser
    driver.get(url)
    # wait 3 seconds so that all data finishes loading
    time.sleep(3)
    # locate elements by id; .text returns an element's text content
    # dl = driver.find_elements_by_css_selector("#searchLeftOptions")
    pageSource = driver.page_source
    bsObj = BeautifulSoup(pageSource, "html.parser")
    # NOTE: crawl, f, city, indu and r are defined elsewhere
    crawl(bsObj, f, city, indu, r)
    driver.close()

import xlwt

# Writing to excel
data = xlwt.Workbook()
table = data.add_sheet(indu.replace("/", "_"), cell_overwrite_ok=True)
r = 0
row = []
for i in range(0, len(row)):
    table.write(r, i, row[i])
data.save("test.xls")

# Parse a local html file
from bs4 import BeautifulSoup

bsObj = BeautifulSoup(open('2016.html', 'r', encoding='utf-8').read(), 'html.parser')
# Copyright 2022 AI Singapore # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Abstract Node class for all nodes. """ import collections import logging from abc import ABCMeta, abstractmethod from pathlib import Path from typing import Any, Dict, List, Optional, Union from peekingduck.config_loader import ConfigLoader from peekingduck.utils.create_node_helper import obj_det_change_class_name_to_id class AbstractNode(metaclass=ABCMeta): """Abstract Node class for inheritance by nodes. Defines default attributes and methods of a node. Args: config (:obj:`Dict[str, Any]` | :obj:`None`): Node configuration. node_path (:obj:`str`): Period-separated (``.``) relative path to the node from the ``peekingduck`` directory. **Default: ""**. pkd_base_dir (:obj:`pathlib.Path` | :obj:`None`): Path to ``peekingduck`` directory. """ def __init__( self, config: Dict[str, Any] = None, node_path: str = "", pkd_base_dir: Optional[Path] = None, **kwargs: Any, ) -> None: self._name = node_path self.logger = logging.getLogger(self._name) if not pkd_base_dir: pkd_base_dir = Path(__file__).resolve().parents[2] self.node_name = ".".join(node_path.split(".")[-2:]) # This is only initialized when the `optional_inputs` key is found in # the nodes' config file self.optional_inputs: List[str] # NOTE: config and kwargs_config are similar but are from different # inputs config is when users input a dictionary to update the node # kwargs_config is when users input parameters to update the node self.config_loader = ConfigLoader(pkd_base_dir) self.load_node_config(config, kwargs) # type: ignore # For object detection nodes, convert class names to class ids, if any if self.node_name in ["model.yolo", "model.efficientdet", "model.yolox"]: key = "detect" if hasattr(self, "detect") else "detect_ids" current_ids = self.config[key] _, updated_ids = obj_det_change_class_name_to_id( self.node_name, key, current_ids ) # replace "detect_ids" with new "detect" self.config["detect"] = updated_ids @classmethod def __subclasshook__(cls: Any, subclass: Any) -> bool: return hasattr(subclass, "run") and callable(subclass.run) @abstractmethod def run(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """abstract method needed for running node""" raise NotImplementedError("This method needs to be implemented") # pylint: disable=R0201, W0107 def release_resources(self) -> None: """To gracefully release any acquired system resources, e.g. webcam NOTE: To be overridden by subclass if required""" pass @property def inputs(self) -> List[str]: """Input requirements.""" return self.input @property def outputs(self) -> List[str]: """Node outputs.""" return self.output @property def name(self) -> str: """Node name.""" return self._name def load_node_config( self, config: Dict[str, Any], kwargs_config: Dict[str, Any] ) -> None: """Loads node configuration. NOTE: ``config`` and ``kwargs_config`` are similar but come from different inputs. 
``config`` is when users input a dictionary to update the node and ``kwargs_config`` is when users input parameters to update the node. Args: config (:obj:`Dict[str, Any]`): Loads configuration from a dictionary input. kwargs_config (:obj:`Dict[str, Any]`): Loads configuration from ``kwargs``. """ # if full set of configuration is not included in config # load configuration and update node with **kwargs where possible # else load from kwargs only self.config = config if not self.config: loaded_config = self.config_loader.get(self.node_name) updated_config = self._edit_config(loaded_config, kwargs_config) self.config = updated_config # sets class attributes for key in self.config: setattr(self, key, self.config[key]) def _edit_config( self, dict_orig: Dict[str, Any], dict_update: Union[Dict[str, Any], collections.abc.Mapping], ) -> Dict[str, Any]: """Update value of a nested dictionary of varying depth using recursion """ if dict_update: for key, value in dict_update.items(): if isinstance(value, collections.abc.Mapping): dict_orig[key] = self._edit_config(dict_orig.get(key, {}), value) elif key not in dict_orig: self.logger.warning( f"Config for node {self.node_name} does not have the key: {key}" ) else: dict_orig[key] = value self.logger.info( f"Config for node {self.node_name} is updated to: '{key}': {value}" ) return dict_orig
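# A minimal sketch of a concrete node built on AbstractNode. The node name
# and the pass-through behaviour are made up for illustration; a real
# PeekingDuck node would normally also ship a YAML config defining its
# input/output keys, which passing an explicit config dict sidesteps here.

class Node(AbstractNode):
    """Toy node that forwards the input image unchanged."""

    def __init__(self, config=None, **kwargs):
        super().__init__(config, node_path=__name__, **kwargs)

    def run(self, inputs):
        # inputs/outputs are plain dicts keyed by data-type names
        return {"img": inputs["img"]}


# supplying config directly skips the ConfigLoader YAML lookup
node = Node(config={"input": ["img"], "output": ["img"]})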
# # K2HDKC DBaaS based on Trove # # Copyright 2020 Yahoo Japan Corporation # # K2HDKC DBaaS is a Database as a Service compatible with Trove which # is DBaaS for OpenStack. # Using K2HR3 as backend and incorporating it into Trove to provide # DBaaS functionality. K2HDKC, K2HR3, CHMPX and K2HASH are components # provided as AntPickax. # # For the full copyright and license information, please view # the license file that was distributed with this source code. # # AUTHOR: Takeshi Nakatani # CREATE: Mon Sep 14 2020 # REVISION: # *** /opt/stack/trove/trove/common/configurations.py 2020-06-09 16:18:00.762190008 +0900 --- configurations.py 2020-06-11 13:28:21.189458201 +0900 *************** *** 95,97 **** --- 95,107 ---- def parse(self): return self.CODEC.deserialize(self.config).items() + + class K2hdkcConfParser(object): + + CODEC = stream_codecs.KeyValueCodec(delimiter='=', comment_marker='#', line_terminator='\n') + + def __init__(self, config): + self.config = config + + def parse(self): + return self.CODEC.deserialize(self.config).items()
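# A standalone sketch of the key=value parsing the patch above adds through
# Trove's KeyValueCodec (delimiter '=', comment marker '#', newline
# terminator). This toy function only approximates the codec's behaviour for
# illustration and is not Trove code.

def parse_k2hdkc_conf(config: str):
    items = {}
    for line in config.split('\n'):
        line = line.strip()
        if not line or line.startswith('#'):
            continue  # skip blank lines and comments
        key, _, value = line.partition('=')
        items[key.strip()] = value.strip()
    return items.items()


assert dict(parse_k2hdkc_conf("# comment\nport=8031\nthreads=4")) == {
    "port": "8031", "threads": "4"}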
import json import tqdm import jsonlines from kss import split_sentences with open('train_summary.json', 'r') as data: data = json.load(data) article_original = [] abstractive = [] category = [] extractive = [] for example in tqdm.tqdm(data): try: original = split_sentences(example['original'], num_workers=5) summary = split_sentences(example['summary'], num_workers=5) temp = 1 for sent in summary: if not (sent in original): temp = 0 break if temp==0 or len(summary) != 3 or len(original)==0 or len(summary) == 0: continue except: continue article_original.append(original) abstractive.append(example['summary']) category.append(example['Meta']['category']) # novel, cul_ass, news_r, briefing li = [] for i in range(3): li.append(original.index(summary[i])) extractive.append(li) with open('train.jsonl', 'w') as file: for i in range(len(article_original)): dic = {} dic['category'] = category[i] dic['id'] = i dic['article_original'] = article_original[i] dic['extractive'] = extractive[i] dic['abstractive'] = abstractive[i] dic['extractive_sents'] = [] for num in extractive[i]: if num==None: continue dic['extractive_sents'].append(dic['article_original'][int(num)]) file.write(json.dumps(dic) + '\n') # if i==500: # break
# File name: controlbar.py import kivy kivy.require('1.9.0') from kivy.uix.behaviors import ButtonBehavior, ToggleButtonBehavior from kivy.uix.image import Image from kivy.lang import Builder Builder.load_file('controlbar.kv') class VideoPlayPause(ToggleButtonBehavior, Image): pass class VideoStop(ButtonBehavior, Image): def stop(self, video, play_pause): play_pause.state = 'normal' video.state = 'stop'
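# For context, a hedged sketch of what `controlbar.kv` might contain; the
# actual rule file is not shown above, so the icon paths and bindings below
# are purely illustrative, not the project's real kv rules.

from kivy.lang import Builder

Builder.load_string("""
<VideoPlayPause>:
    source: 'icons/pause.png' if self.state == 'down' else 'icons/play.png'
<VideoStop>:
    source: 'icons/stop.png'
""")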
import logging from PyQt5.QtWidgets import QPlainTextEdit, QGroupBox, QVBoxLayout from ..python_core.appdirs import get_app_log_dir import pathlib as pl import time import sys # solution copied from https://stackoverflow.com/questions/28655198/best-way-to-display-logs-in-pyqt class QPlainTextEditLogger(QPlainTextEdit, logging.Handler): def __init__(self, parent): super(QPlainTextEdit, self).__init__(parent) super(logging.Handler, self).__init__() self.setReadOnly(True) def emit(self, record): msg = self.format(record) text_cursor = self.textCursor() text_cursor.insertText(f"{msg}\n") self.setTextCursor(text_cursor) class LoggerGroupBox(QGroupBox): def __init__(self, parent, location_dir=None): super().__init__("Event Log", parent) if location_dir is None: location_dir = get_app_log_dir() log_dir = pl.Path(location_dir) log_dir.mkdir(exist_ok=True, parents=True) log_file = str(log_dir / f"started_at_{time.strftime('%Y-%m-%d-%H-%M-%S')}.log") vbox = QVBoxLayout(self) self.log_pte = QPlainTextEditLogger(parent) self.log_pte.setLevel(level=logging.INFO) vbox.addWidget(self.log_pte) view_logger = logging.getLogger("VIEW") view_logger.setLevel(level=logging.INFO) formatter = logging.Formatter("%(asctime)s [VIEW] [%(levelname)-5.5s] %(message)s") self.log_file_handler = logging.FileHandler(log_file) self.log_file_handler.setFormatter(formatter) self.log_file_handler.setLevel(level=logging.DEBUG) view_logger.addHandler(self.log_file_handler) self.log_pte.setFormatter(formatter) view_logger.addHandler(self.log_pte) stream_handler = logging.StreamHandler(sys.stdout) stream_handler.setLevel(level=logging.INFO) stream_handler.setFormatter(formatter) view_logger.addHandler(stream_handler) def __del__(self): root_logger = logging.getLogger("VIEW") root_logger.removeHandler(self.log_pte) root_logger.removeHandler(self.log_file_handler)
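# A usage sketch: once a LoggerGroupBox exists, anything logged to the "VIEW"
# logger shows up in the widget, the log file and stdout. The QApplication
# scaffolding is the usual PyQt5 boilerplate, shown here only for context.

import sys
import logging
from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)
box = LoggerGroupBox(None)  # default log dir comes from get_app_log_dir()
box.show()
logging.getLogger("VIEW").info("acquisition started")
# app.exec_()  # uncomment to actually run the event loop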
from rest_framework import serializers
from core.models import Booking, Restaurant
from django.db.models import Sum
from restaurant.serializers import RestaurantSerializer


# this is an example for modifying the serializer data
class BookingSerializer(serializers.ModelSerializer):
    restaurant = serializers.PrimaryKeyRelatedField(
        many=False,
        queryset=Restaurant.objects.all()
    )

    class Meta:
        model = Booking
        fields = ('id', 'user', 'restaurant', 'seats_number',
                  'time_start', 'time_end', 'comments', 'is_active')
        read_only_fields = ('id', 'user',)

    # store the restaurant id from the data field so it is accessible
    # elsewhere in the serializer
    def validate_restaurant(self, restaurant):
        global restaurant_id
        restaurant_id = restaurant.id
        return restaurant

    # check the available seating data from the restaurant and booking objects
    def validate(self, data):
        # change in the custom value of the comments
        # data['comments'] = 'the comments added from the serializers, {}'.format(data['seats_number'])

        # validate the time constraint
        time_diff = (data['time_end'] - data['time_start']).seconds / 60
        if time_diff > 120:
            raise serializers.ValidationError("The restaurant cannot be booked for more than 2 hours")

        # validate the seat numbers against the seating available in the restaurant
        booked_seats = Booking.objects.filter(
            restaurant_id=restaurant_id, is_active=True
        ).aggregate(Sum('seats_number'))
        available_seats = Restaurant.objects.filter(id=restaurant_id).values('total_seating')
        available_seats_total = available_seats[0]['total_seating']
        already_booked = booked_seats['seats_number__sum']

        # handle the case where there are no bookings yet for this restaurant
        if already_booked is None:
            already_booked = 0
        seats_available_now = int(available_seats_total) - int(already_booked)

        if data['seats_number'] >= 10:
            raise serializers.ValidationError(
                "The seats can't be more than 10, seats available are: {}".format(seats_available_now))
        elif seats_available_now <= 0:
            raise serializers.ValidationError("The restaurant is completely full")
        elif (seats_available_now - data['seats_number']) < 0:
            raise serializers.ValidationError(
                "Only {} seats are available for booking".format(seats_available_now))
        return data
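# The module-level global used by validate_restaurant() above is fragile when
# several requests are in flight at once. A hedged alternative sketch: read
# the restaurant straight from `data` inside validate(), since
# PrimaryKeyRelatedField has already resolved it to a Restaurant instance by
# then. Only the seat lookup is shown; the remaining checks stay as above,
# and `total_seating` is the same field queried via .values() earlier.

class BookingSerializerAlt(BookingSerializer):
    """Sketch: the same availability check without the module-level global."""

    def validate(self, data):
        restaurant = data['restaurant']  # already a Restaurant instance here
        booked = Booking.objects.filter(
            restaurant_id=restaurant.id, is_active=True
        ).aggregate(Sum('seats_number'))['seats_number__sum'] or 0
        seats_available_now = restaurant.total_seating - booked
        if (seats_available_now - data['seats_number']) < 0:
            raise serializers.ValidationError(
                "Only {} seats are available for booking".format(seats_available_now))
        return data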
# -*- coding: utf-8 -*- """ datetime package. """ from pyrin.packaging.base import Package class DateTimePackage(Package): """ datetime package class. """ NAME = __name__ COMPONENT_NAME = 'globalization.datetime.component' DEPENDS = ['pyrin.configuration', 'pyrin.security.session']
""" This module contains all expressions and classes needed for lazy computation/ query execution. """ import os import shutil import subprocess import tempfile import typing as tp from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Type, Union try: from polars.polars import PyExpr, PyLazyFrame, PyLazyGroupBy _DOCUMENTING = False except ImportError: # pragma: no cover _DOCUMENTING = True from polars import internals as pli from polars.datatypes import DataType, py_type_to_dtype from polars.utils import _process_null_values def wrap_ldf(ldf: "PyLazyFrame") -> "LazyFrame": return LazyFrame._from_pyldf(ldf) class LazyFrame: """ Representation of a Lazy computation graph/ query. """ def __init__(self) -> None: self._ldf: PyLazyFrame @staticmethod def _from_pyldf(ldf: "PyLazyFrame") -> "LazyFrame": self = LazyFrame.__new__(LazyFrame) self._ldf = ldf return self @staticmethod def scan_csv( file: str, infer_schema_length: Optional[int] = 100, has_headers: bool = True, ignore_errors: bool = False, sep: str = ",", skip_rows: int = 0, stop_after_n_rows: Optional[int] = None, cache: bool = True, dtype: Optional[Dict[str, Type[DataType]]] = None, low_memory: bool = False, comment_char: Optional[str] = None, quote_char: Optional[str] = r'"', null_values: Optional[Union[str, tp.List[str], Dict[str, str]]] = None, ) -> "LazyFrame": """ See Also: `pl.scan_csv` """ dtype_list: Optional[tp.List[Tuple[str, Type[DataType]]]] = None if dtype is not None: dtype_list = [] for k, v in dtype.items(): dtype_list.append((k, py_type_to_dtype(v))) processed_null_values = _process_null_values(null_values) self = LazyFrame.__new__(LazyFrame) self._ldf = PyLazyFrame.new_from_csv( file, sep, has_headers, ignore_errors, skip_rows, stop_after_n_rows, cache, dtype_list, low_memory, comment_char, quote_char, processed_null_values, infer_schema_length, ) return self @staticmethod def scan_parquet( file: str, stop_after_n_rows: Optional[int] = None, cache: bool = True ) -> "LazyFrame": """ See Also: `pl.scan_parquet` """ self = LazyFrame.__new__(LazyFrame) self._ldf = PyLazyFrame.new_from_parquet(file, stop_after_n_rows, cache) return self @staticmethod def scan_ipc( file: str, stop_after_n_rows: Optional[int] = None, cache: bool = True ) -> "LazyFrame": """ See Also: `pl.scan_ipc` """ self = LazyFrame.__new__(LazyFrame) self._ldf = PyLazyFrame.new_from_ipc(file, stop_after_n_rows, cache) return self def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any: """ Apply a function on Self. Parameters ---------- func Callable. args Arguments. kwargs Keyword arguments. """ return func(self, *args, **kwargs) def describe_plan(self) -> str: """ A string representation of the unoptimized query plan. """ return self._ldf.describe_plan() def describe_optimized_plan( self, type_coercion: bool = True, predicate_pushdown: bool = True, projection_pushdown: bool = True, simplify_expression: bool = True, ) -> str: """ A string representation of the optimized query plan. """ ldf = self._ldf.optimization_toggle( type_coercion, predicate_pushdown, projection_pushdown, simplify_expression, string_cache=False, ) return ldf.describe_optimized_plan() def show_graph( self, optimized: bool = True, show: bool = True, output_path: Optional[str] = None, raw_output: bool = False, figsize: Tuple[float, float] = (16.0, 12.0), ) -> Optional[str]: """ Show a plot of the query plan. Note that you should have graphviz installed. Parameters ---------- optimized Optimize the query plan. show Show the figure. 
output_path Write the figure to disk. raw_output Return dot syntax. figsize Passed to matlotlib if `show` == True. """ try: import matplotlib.image as mpimg import matplotlib.pyplot as plt except ImportError: raise ImportError( "Graphviz dot binary should be on your PATH and matplotlib should be installed to show graph." ) dot = self._ldf.to_dot(optimized) if raw_output: return dot with tempfile.TemporaryDirectory() as tmpdir_name: dot_path = os.path.join(tmpdir_name, "dot") with open(dot_path, "w") as f: f.write(dot) subprocess.run(["dot", "-Nshape=box", "-Tpng", "-O", dot_path]) out_path = os.path.join(tmpdir_name, "dot.png") if output_path is not None: shutil.copy(out_path, output_path) if show: plt.figure(figsize=figsize) img = mpimg.imread(out_path) plt.imshow(img) plt.show() return None def inspect(self, fmt: str = "{}") -> "LazyFrame": # type: ignore """ Prints the value that this node in the computation graph evaluates to and passes on the value. >>> (df.select(pl.col("foo").cumsum().alias("bar")) >>> .inspect() # print the node before the filter >>> .filter(pl.col("bar") == pl.col("foo"))) """ def inspect(s: pli.DataFrame) -> pli.DataFrame: print(fmt.format(s)) # type: ignore return s return self.map(inspect, predicate_pushdown=True, projection_pushdown=True) def sort( self, by: Union[str, "pli.Expr", tp.List[str], tp.List["pli.Expr"]], reverse: Union[bool, tp.List[bool]] = False, ) -> "LazyFrame": """ Sort the DataFrame by: - A single column name - An expression - Multiple expressions Parameters ---------- by Column (expressions) to sort by. reverse Whether or not to sort in reverse order. """ if type(by) is str: return wrap_ldf(self._ldf.sort(by, reverse)) if type(reverse) is bool: reverse = [reverse] by = pli.expr_to_lit_or_expr(by, str_to_lit=False) by = pli._selection_to_pyexpr_list(by) return wrap_ldf(self._ldf.sort_by_exprs(by, reverse)) def collect( self, type_coercion: bool = True, predicate_pushdown: bool = True, projection_pushdown: bool = True, simplify_expression: bool = True, string_cache: bool = False, no_optimization: bool = False, ) -> pli.DataFrame: """ Collect into a DataFrame. Parameters ---------- type_coercion Do type coercion optimization. predicate_pushdown Do predicate pushdown optimization. projection_pushdown Do projection pushdown optimization. simplify_expression Run simplify expressions optimization. string_cache Use a global string cache in this query. This is needed if you want to join on categorical columns. Caution! If you already have set a global string cache, set this to `False` as this will reset the global cache when the query is finished. no_optimization Turn off optimizations. Returns ------- DataFrame """ if no_optimization: predicate_pushdown = False projection_pushdown = False ldf = self._ldf.optimization_toggle( type_coercion, predicate_pushdown, projection_pushdown, simplify_expression, string_cache, ) return pli.wrap_df(ldf.collect()) def fetch( self, n_rows: int = 500, type_coercion: bool = True, predicate_pushdown: bool = True, projection_pushdown: bool = True, simplify_expression: bool = True, string_cache: bool = True, no_optimization: bool = False, ) -> pli.DataFrame: """ Fetch is like a collect operation, but it overwrites the number of rows read by every scan operation. This is a utility that helps debug a query on a smaller number of rows. Note that the fetch does not guarantee the final number of rows in the DataFrame. 
Filter, join operations and a lower number of rows available in the scanned file influence the final number of rows. Parameters ---------- n_rows Collect n_rows from the data sources. type_coercion Run type coercion optimization. predicate_pushdown Run predicate pushdown optimization. projection_pushdown Run projection pushdown optimization. simplify_expression Run simplify expressions optimization. string_cache Use a global string cache in this query. This is needed if you want to join on categorical columns. no_optimization Turn off optimizations. Returns ------- DataFrame """ if no_optimization: predicate_pushdown = False projection_pushdown = False ldf = self._ldf.optimization_toggle( type_coercion, predicate_pushdown, projection_pushdown, simplify_expression, string_cache, ) return pli.wrap_df(ldf.fetch(n_rows)) @property def columns(self) -> tp.List[str]: """ Get or set column names. Examples -------- >>> df = (pl.DataFrame({ >>> "foo": [1, 2, 3], >>> "bar": [6, 7, 8], >>> "ham": ['a', 'b', 'c'] >>> }).lazy() >>> .select(["foo", "bar"])) >>> df.columns ["foo", "bar"] """ return self._ldf.columns() def cache( self, ) -> "LazyFrame": """ Cache the result once the execution of the physical plan hits this node. """ return wrap_ldf(self._ldf.cache()) def filter(self, predicate: Union["pli.Expr", str]) -> "LazyFrame": """ Filter the rows in the DataFrame based on a predicate expression. Parameters ---------- predicate Expression that evaluates to a boolean Series. Examples -------- >>> lf = pl.DataFrame({ >>> "foo": [1, 2, 3], >>> "bar": [6, 7, 8], >>> "ham": ['a', 'b', 'c'] >>> }).lazy() >>> # Filter on one condition >>> lf.filter(pl.col("foo") < 3).collect() shape: (2, 3) ┌─────┬─────┬─────┐ │ foo ┆ bar ┆ ham │ │ --- ┆ --- ┆ --- │ │ i64 ┆ i64 ┆ str │ ╞═════╪═════╪═════╡ │ 1 ┆ 6 ┆ a │ ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤ │ 2 ┆ 7 ┆ b │ └─────┴─────┴─────┘ >>> # Filter on multiple conditions >>> lf.filter((pl.col("foo") < 3) & (pl.col("ham") == "a")).collect() shape: (1, 3) ┌─────┬─────┬─────┐ │ foo ┆ bar ┆ ham │ │ --- ┆ --- ┆ --- │ │ i64 ┆ i64 ┆ str │ ╞═════╪═════╪═════╡ │ 1 ┆ 6 ┆ a │ └─────┴─────┴─────┘ """ if isinstance(predicate, str): predicate = pli.col(predicate) return wrap_ldf(self._ldf.filter(predicate._pyexpr)) def select( self, exprs: Union[str, "pli.Expr", Sequence[str], Sequence["pli.Expr"]] ) -> "LazyFrame": """ Select columns from this DataFrame. Parameters ---------- exprs Column or columns to select. """ exprs = pli._selection_to_pyexpr_list(exprs) return wrap_ldf(self._ldf.select(exprs)) def groupby( self, by: Union[str, tp.List[str], "pli.Expr", tp.List["pli.Expr"]], maintain_order: bool = False, ) -> "LazyGroupBy": """ Start a groupby operation. Parameters ---------- by Column(s) to group by. maintain_order Make sure that the order of the groups remain consistent. This is more expensive than a default groupby. 
""" new_by: tp.List[PyExpr] if isinstance(by, list): new_by = [] for e in by: if isinstance(e, str): e = pli.col(e) new_by.append(e._pyexpr) elif isinstance(by, str): new_by = [pli.col(by)._pyexpr] elif isinstance(by, pli.Expr): new_by = [by._pyexpr] lgb = self._ldf.groupby(new_by, maintain_order) return LazyGroupBy(lgb) def join( self, ldf: "LazyFrame", left_on: Optional[ Union[str, "pli.Expr", tp.List[str], tp.List["pli.Expr"]] ] = None, right_on: Optional[ Union[str, "pli.Expr", tp.List[str], tp.List["pli.Expr"]] ] = None, on: Optional[Union[str, "pli.Expr", tp.List[str], tp.List["pli.Expr"]]] = None, how: str = "inner", suffix: str = "_right", allow_parallel: bool = True, force_parallel: bool = False, asof_by: Optional[Union[str, tp.List[str]]] = None, asof_by_left: Optional[Union[str, tp.List[str]]] = None, asof_by_right: Optional[Union[str, tp.List[str]]] = None, ) -> "LazyFrame": """ Add a join operation to the Logical Plan. Parameters ---------- ldf Lazy DataFrame to join with. left_on Join column of the left DataFrame. right_on Join column of the right DataFrame. on Join column of both DataFrames. If set, `left_on` and `right_on` should be None. how one of: "inner" "left" "outer" "asof", "cross" suffix Suffix to append to columns with a duplicate name. allow_parallel Allow the physical plan to optionally evaluate the computation of both DataFrames up to the join in parallel. force_parallel Force the physical plan to evaluate the computation of both DataFrames up to the join in parallel. asof_by join on these columns before doing asof join asof_by_left join on these columns before doing asof join asof_by_right join on these columns before doing asof join # Asof joins This is similar to a left-join except that we match on nearest key rather than equal keys. 
The keys must be sorted to perform an asof join """ if how == "cross": return wrap_ldf( self._ldf.join( ldf._ldf, [], [], allow_parallel, force_parallel, how, suffix, [], [], ) ) left_on_: Union[tp.List[str], tp.List[pli.Expr], None] if isinstance(left_on, (str, pli.Expr)): left_on_ = [left_on] # type: ignore[assignment] else: left_on_ = left_on right_on_: Union[tp.List[str], tp.List[pli.Expr], None] if isinstance(right_on, (str, pli.Expr)): right_on_ = [right_on] # type: ignore[assignment] else: right_on_ = right_on if isinstance(on, str): left_on_ = [on] right_on_ = [on] elif isinstance(on, list): left_on_ = on right_on_ = on if left_on_ is None or right_on_ is None: raise ValueError("You should pass the column to join on as an argument.") new_left_on = [] for column in left_on_: if isinstance(column, str): column = pli.col(column) new_left_on.append(column._pyexpr) new_right_on = [] for column in right_on_: if isinstance(column, str): column = pli.col(column) new_right_on.append(column._pyexpr) # set asof_by left_asof_by_: Union[tp.List[str], None] if isinstance(asof_by_left, str): left_asof_by_ = [asof_by_left] # type: ignore[assignment] else: left_asof_by_ = asof_by_left right_asof_by_: Union[tp.List[str], None] if isinstance(asof_by_right, (str, pli.Expr)): right_asof_by_ = [asof_by_right] # type: ignore[assignment] else: right_asof_by_ = asof_by_right if isinstance(asof_by, str): left_asof_by_ = [asof_by] right_asof_by_ = [asof_by] elif isinstance(asof_by, list): left_asof_by_ = asof_by right_asof_by_ = asof_by if left_asof_by_ is None: left_asof_by_ = [] if right_asof_by_ is None: right_asof_by_ = [] return wrap_ldf( self._ldf.join( ldf._ldf, new_left_on, new_right_on, allow_parallel, force_parallel, how, suffix, left_asof_by_, right_asof_by_, ) ) def with_columns( self, exprs: Union[tp.List["pli.Expr"], "pli.Expr"] ) -> "LazyFrame": """ Add or overwrite multiple columns in a DataFrame. Parameters ---------- exprs List of Expressions that evaluate to columns. """ if isinstance(exprs, pli.Expr): return self.with_column(exprs) pyexprs = [] for e in exprs: if isinstance(e, pli.Expr): pyexprs.append(e._pyexpr) elif isinstance(e, pli.Series): pyexprs.append(pli.lit(e)._pyexpr) return wrap_ldf(self._ldf.with_columns(pyexprs)) def with_column(self, expr: "pli.Expr") -> "LazyFrame": """ Add or overwrite column in a DataFrame. Parameters ---------- expr Expression that evaluates to column. """ return self.with_columns([expr]) def drop(self, columns: Union[str, tp.List[str]]) -> "LazyFrame": """ Remove one or multiple columns from a DataFrame. Parameters ---------- columns - Name of the column that should be removed. - List of column names. """ if isinstance(columns, str): columns = [columns] return wrap_ldf(self._ldf.drop_columns(columns)) def with_column_renamed(self, existing_name: str, new_name: str) -> "LazyFrame": """ Rename a column in the DataFrame """ return wrap_ldf(self._ldf.with_column_renamed(existing_name, new_name)) def rename(self, mapping: Dict[str, str]) -> "LazyFrame": """ Rename column names. This does not preserve column order. Parameters ---------- mapping Key value pairs that map from old name to new name. """ existing = list(mapping.keys()) new = list(mapping.values()) return wrap_ldf(self._ldf.rename(existing, new)) def reverse(self) -> "LazyFrame": """ Reverse the DataFrame. 
""" return wrap_ldf(self._ldf.reverse()) def shift(self, periods: int) -> "LazyFrame": """ Shift the values by a given period and fill the parts that will be empty due to this operation with `Nones`. Parameters ---------- periods Number of places to shift (may be negative). """ return wrap_ldf(self._ldf.shift(periods)) def shift_and_fill( self, periods: int, fill_value: Union["pli.Expr", int, str, float] ) -> "LazyFrame": """ Shift the values by a given period and fill the parts that will be empty due to this operation with the result of the `fill_value` expression. Parameters ---------- periods Number of places to shift (may be negative). fill_value fill None values with the result of this expression. """ if not isinstance(fill_value, pli.Expr): fill_value = pli.lit(fill_value) return wrap_ldf(self._ldf.shift_and_fill(periods, fill_value._pyexpr)) def slice(self, offset: int, length: int) -> "LazyFrame": """ Slice the DataFrame. Parameters ---------- offset Start index. length Length of the slice. """ return wrap_ldf(self._ldf.slice(offset, length)) def limit(self, n: int) -> "LazyFrame": """ Limit the DataFrame to the first `n` rows. Note if you don't want the rows to be scanned, use the `fetch` operation. Parameters ---------- n Number of rows. """ return self.slice(0, n) def head(self, n: int) -> "LazyFrame": """ Get the first `n` rows of the DataFrame Note if you don't want the rows to be scanned, use the `fetch` operation. Parameters ---------- n Number of rows. """ return self.limit(n) def tail(self, n: int) -> "LazyFrame": """ Get the last `n` rows of the DataFrame. Parameters ---------- n Number of rows. """ return wrap_ldf(self._ldf.tail(n)) def last(self) -> "LazyFrame": """ Get the last row of the DataFrame. """ return self.tail(1) def first(self) -> "LazyFrame": """ Get the first row of the DataFrame. """ return self.slice(0, 1) def with_row_count(self, name: str = "row_nr") -> "LazyFrame": """ Add a column at index 0 that counts the rows. Parameters ---------- name Name of the column to add. """ return wrap_ldf(self._ldf.with_row_count(name)) def fill_null(self, fill_value: Union[int, str, "pli.Expr"]) -> "LazyFrame": """ Fill missing values Parameters ---------- fill_value Value to fill the missing values with """ if not isinstance(fill_value, pli.Expr): fill_value = pli.lit(fill_value) return wrap_ldf(self._ldf.fill_null(fill_value._pyexpr)) def fill_nan(self, fill_value: Union[int, str, float, "pli.Expr"]) -> "LazyFrame": """ Fill floating point NaN values. ..warning:: NOTE that floating point NaN (No a Number) are not missing values! to replace missing values, use `fill_null`. Parameters ---------- fill_value Value to fill the NaN values with """ if not isinstance(fill_value, pli.Expr): fill_value = pli.lit(fill_value) return wrap_ldf(self._ldf.fill_nan(fill_value._pyexpr)) def std(self) -> "LazyFrame": """ Aggregate the columns in the DataFrame to their standard deviation value. """ return wrap_ldf(self._ldf.std()) def var(self) -> "LazyFrame": """ Aggregate the columns in the DataFrame to their variance value. """ return wrap_ldf(self._ldf.var()) def max(self) -> "LazyFrame": """ Aggregate the columns in the DataFrame to their maximum value. """ return wrap_ldf(self._ldf.max()) def min(self) -> "LazyFrame": """ Aggregate the columns in the DataFrame to their minimum value. """ return wrap_ldf(self._ldf.min()) def sum(self) -> "LazyFrame": """ Aggregate the columns in the DataFrame to their sum value. 
""" return wrap_ldf(self._ldf.sum()) def mean(self) -> "LazyFrame": """ Aggregate the columns in the DataFrame to their mean value. """ return wrap_ldf(self._ldf.mean()) def median(self) -> "LazyFrame": """ Aggregate the columns in the DataFrame to their median value. """ return wrap_ldf(self._ldf.median()) def quantile(self, quantile: float) -> "LazyFrame": """ Aggregate the columns in the DataFrame to their quantile value. """ return wrap_ldf(self._ldf.quantile(quantile)) def explode( self, columns: Union[str, tp.List[str], "pli.Expr", tp.List["pli.Expr"]] ) -> "LazyFrame": """ Explode lists to long format. Examples -------- >>> df = pl.DataFrame({ >>> "letters": ["c", "c", "a", "c", "a", "b"], >>> "nrs": [[1, 2], [1, 3], [4, 3], [5, 5, 5], [6], [2, 1, 2]] >>> }) >>> df shape: (6, 2) ╭─────────┬────────────╮ │ letters ┆ nrs │ │ --- ┆ --- │ │ str ┆ list [i64] │ ╞═════════╪════════════╡ │ "c" ┆ [1, 2] │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤ │ "c" ┆ [1, 3] │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤ │ "a" ┆ [4, 3] │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤ │ "c" ┆ [5, 5, 5] │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤ │ "a" ┆ [6] │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤ │ "b" ┆ [2, 1, 2] │ ╰─────────┴────────────╯ >>> df.explode("nrs") shape: (13, 2) ╭─────────┬─────╮ │ letters ┆ nrs │ │ --- ┆ --- │ │ str ┆ i64 │ ╞═════════╪═════╡ │ "c" ┆ 1 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 2 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 1 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 3 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ ... ┆ ... │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 5 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "a" ┆ 6 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "b" ┆ 2 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "b" ┆ 1 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "b" ┆ 2 │ ╰─────────┴─────╯ """ columns = pli._selection_to_pyexpr_list(columns) return wrap_ldf(self._ldf.explode(columns)) def drop_duplicates( self, maintain_order: bool = False, subset: Optional[Union[tp.List[str], str]] = None, ) -> "LazyFrame": """ Drop duplicate rows from this DataFrame. Note that this fails if there is a column of type `List` in the DataFrame. """ if subset is not None and not isinstance(subset, list): subset = [subset] return wrap_ldf(self._ldf.drop_duplicates(maintain_order, subset)) def drop_nulls( self, subset: Optional[Union[tp.List[str], str]] = None ) -> "LazyFrame": """ Drop rows with null values from this DataFrame. Examples -------- >>> df = pl.DataFrame({ >>> "foo": [1, 2, 3], >>> "bar": [6, None, 8], >>> "ham": ['a', 'b', 'c'] >>> }) >>> df.lazy().drop_nulls().collect() shape: (2, 3) ┌─────┬─────┬─────┐ │ foo ┆ bar ┆ ham │ │ --- ┆ --- ┆ --- │ │ i64 ┆ i64 ┆ str │ ╞═════╪═════╪═════╡ │ 1 ┆ 6 ┆ "a" │ ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤ │ 3 ┆ 8 ┆ "c" │ └─────┴─────┴─────┘ This method only drops nulls row-wise if any single value of the row is null. 
        Below are some example snippets that show how you could drop null values based on other conditions

        >>> df = pl.DataFrame(
        >>> {
        >>>     "a": [None, None, None, None],
        >>>     "b": [1, 2, None, 1],
        >>>     "c": [1, None, None, 1],
        >>> }
        >>> )
        >>> df
        shape: (4, 3)
        ┌──────┬──────┬──────┐
        │ a    ┆ b    ┆ c    │
        │ ---  ┆ ---  ┆ ---  │
        │ f64  ┆ i64  ┆ i64  │
        ╞══════╪══════╪══════╡
        │ null ┆ 1    ┆ 1    │
        ├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
        │ null ┆ 2    ┆ null │
        ├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
        │ null ┆ null ┆ null │
        ├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
        │ null ┆ 1    ┆ 1    │
        └──────┴──────┴──────┘

        >>> # drop a row only if all values are null
        >>> df.filter(~pl.fold(acc=True, f=lambda acc, s: acc & s.is_null(), exprs=pl.all()))
        shape: (3, 3)
        ┌──────┬─────┬──────┐
        │ a    ┆ b   ┆ c    │
        │ ---  ┆ --- ┆ ---  │
        │ f64  ┆ i64 ┆ i64  │
        ╞══════╪═════╪══════╡
        │ null ┆ 1   ┆ 1    │
        ├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
        │ null ┆ 2   ┆ null │
        ├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
        │ null ┆ 1   ┆ 1    │
        └──────┴─────┴──────┘
        """
        if subset is not None and not isinstance(subset, list):
            subset = [subset]
        return wrap_ldf(self._ldf.drop_nulls(subset))

    def melt(
        self, id_vars: Union[str, tp.List[str]], value_vars: Union[str, tp.List[str]]
    ) -> "LazyFrame":
        """
        Unpivot DataFrame to long format.

        Parameters
        ----------
        id_vars
            Columns to use as identifier variables.
        value_vars
            Values to use as identifier variables.
        """
        if isinstance(value_vars, str):
            value_vars = [value_vars]
        if isinstance(id_vars, str):
            id_vars = [id_vars]
        return wrap_ldf(self._ldf.melt(id_vars, value_vars))

    def map(
        self,
        f: Callable[[pli.DataFrame], pli.DataFrame],
        predicate_pushdown: bool = True,
        projection_pushdown: bool = True,
        no_optimizations: bool = False,
    ) -> "LazyFrame":
        """
        Apply a custom function. It is important that the function returns a
        Polars DataFrame.

        Parameters
        ----------
        f
            Lambda/function to apply.
        predicate_pushdown
            Allow predicate pushdown optimization to pass this node.
        projection_pushdown
            Allow projection pushdown optimization to pass this node.
        no_optimizations
            Turn off all optimizations past this point.
        """
        # only disable the pushdown optimizations when the caller asks for it
        if no_optimizations:
            predicate_pushdown = False
            projection_pushdown = False
        return wrap_ldf(self._ldf.map(f, predicate_pushdown, projection_pushdown))

    def interpolate(self) -> "LazyFrame":
        """
        Interpolate intermediate values. The interpolation method is linear.
        """
        return self.select(pli.col("*").interpolate())  # type: ignore


class LazyGroupBy:
    """
    Created by `df.lazy().groupby("foo")`
    """

    def __init__(self, lgb: "PyLazyGroupBy"):
        self.lgb = lgb

    def agg(self, aggs: Union[tp.List["pli.Expr"], "pli.Expr"]) -> "LazyFrame":
        """
        Describe the aggregation that needs to be done on a group.

        Parameters
        ----------
        aggs
            Single/ Multiple aggregation expression(s).

        Examples
        --------

        >>> (pl.scan_csv("data.csv")
            .groupby("groups")
            .agg([
                pl.col("name").n_unique().alias("unique_names"),
                pl.max("values")
            ])
        )
        """
        aggs = pli._selection_to_pyexpr_list(aggs)
        return wrap_ldf(self.lgb.agg(aggs))

    def head(self, n: int = 5) -> "LazyFrame":
        """
        Return first n rows of each group.
Parameters ---------- n Number of values of the group to select Examples -------- >>> df = pl.DataFrame({ >>> "letters": ["c", "c", "a", "c", "a", "b"], >>> "nrs": [1, 2, 3, 4, 5, 6] >>> }) >>> df shape: (6, 2) ╭─────────┬─────╮ │ letters ┆ nrs │ │ --- ┆ --- │ │ str ┆ i64 │ ╞═════════╪═════╡ │ "c" ┆ 1 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 2 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "a" ┆ 3 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 4 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "a" ┆ 5 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "b" ┆ 6 │ ╰─────────┴─────╯ >>> (df.groupby("letters") >>> .head(2) >>> .sort("letters") >>> ) shape: (5, 2) ╭─────────┬─────╮ │ letters ┆ nrs │ │ --- ┆ --- │ │ str ┆ i64 │ ╞═════════╪═════╡ │ "a" ┆ 3 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "a" ┆ 5 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "b" ┆ 6 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 1 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 2 │ ╰─────────┴─────╯ """ return wrap_ldf(self.lgb.head(n)) def tail(self, n: int = 5) -> "LazyFrame": """ Return last n rows of each group. Parameters ---------- n Number of values of the group to select Examples -------- >>> df = pl.DataFrame({ >>> "letters": ["c", "c", "a", "c", "a", "b"], >>> "nrs": [1, 2, 3, 4, 5, 6] >>> }) >>> df shape: (6, 2) ╭─────────┬─────╮ │ letters ┆ nrs │ │ --- ┆ --- │ │ str ┆ i64 │ ╞═════════╪═════╡ │ "c" ┆ 1 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 2 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "a" ┆ 3 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 4 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "a" ┆ 5 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "b" ┆ 6 │ ╰─────────┴─────╯ >>> (df.groupby("letters") >>> .tail(2) >>> .sort("letters") >>> ) shape: (5, 2) ╭─────────┬─────╮ │ letters ┆ nrs │ │ --- ┆ --- │ │ str ┆ i64 │ ╞═════════╪═════╡ │ "a" ┆ 3 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "a" ┆ 5 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "b" ┆ 6 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 2 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 4 │ ╰─────────┴─────╯ """ return wrap_ldf(self.lgb.tail(n)) def apply(self, f: Callable[[pli.DataFrame], pli.DataFrame]) -> "LazyFrame": """ Apply a function over the groups as a new `DataFrame`. It is not recommended that you use this as materializing the `DataFrame` is quite expensive. Parameters ---------- f Function to apply over the `DataFrame`. """ return wrap_ldf(self.lgb.apply(f))
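# An end-to-end sketch of the lazy API defined above: nothing is executed
# until .collect(), which first optimizes the plan and then runs it. The
# column names and CSV file here are hypothetical.

import polars as pl

out = (
    pl.scan_csv("sales.csv")                  # builds a LazyFrame, reads nothing yet
    .filter(pl.col("amount") > 0)
    .groupby("region")
    .agg([pl.col("amount").sum().alias("total")])
    .sort("total", reverse=True)
    .collect()                                # plan is optimized, then executed
)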
""" SparseArray data structure """ from __future__ import division # pylint: disable=E1101,E1103,W0231 from numpy import nan, ndarray import numpy as np import pandas as pd from pandas.core.base import PandasObject import pandas.core.common as com from pandas import compat, lib from pandas.compat import range from pandas.compat.numpy import function as nv from pandas._sparse import SparseIndex, BlockIndex, IntIndex import pandas._sparse as splib import pandas.index as _index import pandas.core.algorithms as algos import pandas.core.ops as ops import pandas.formats.printing as printing from pandas.util.decorators import Appender from pandas.indexes.base import _index_shared_docs _sparray_doc_kwargs = dict(klass='SparseArray') def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None, **eval_kwargs): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ def wrapper(self, other): if isinstance(other, np.ndarray): if len(self) != len(other): raise AssertionError("length mismatch: %d vs. %d" % (len(self), len(other))) if not isinstance(other, com.ABCSparseArray): other = SparseArray(other, fill_value=self.fill_value) if name[0] == 'r': return _sparse_array_op(other, self, op, name[1:]) else: return _sparse_array_op(self, other, op, name) elif lib.isscalar(other): new_fill_value = op(np.float64(self.fill_value), np.float64(other)) return _wrap_result(name, op(self.sp_values, other), self.sp_index, new_fill_value) else: # pragma: no cover raise TypeError('operation with %s not supported' % type(other)) if name.startswith("__"): name = name[2:-2] wrapper.__name__ = name return wrapper def _sparse_array_op(left, right, op, name): if left.sp_index.equals(right.sp_index): result = op(left.sp_values, right.sp_values) result_index = left.sp_index else: sparse_op = getattr(splib, 'sparse_%s' % name) result, result_index = sparse_op(left.sp_values, left.sp_index, left.fill_value, right.sp_values, right.sp_index, right.fill_value) try: fill_value = op(left.fill_value, right.fill_value) except: fill_value = nan return _wrap_result(name, result, result_index, fill_value) def _wrap_result(name, data, sparse_index, fill_value): """ wrap op result to have correct dtype """ if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'): # ToDo: We can remove this condition when removing # SparseArray's dtype default when closing GH 667 return SparseArray(data, sparse_index=sparse_index, fill_value=fill_value, dtype=np.bool) else: return SparseArray(data, sparse_index=sparse_index, fill_value=fill_value) class SparseArray(PandasObject, np.ndarray): """Data structure for labeled, sparse floating point 1-D data Parameters ---------- data : {array-like (1-D), Series, SparseSeries, dict} kind : {'block', 'integer'} fill_value : float Defaults to NaN (code for missing) sparse_index : {BlockIndex, IntIndex}, optional Only if you have one. Mainly used internally Notes ----- SparseArray objects are immutable via the typical Python means. 
If you must change values, convert to dense, make your changes, then convert back to sparse """ __array_priority__ = 15 _typ = 'array' _subtyp = 'sparse_array' sp_index = None fill_value = None def __new__(cls, data, sparse_index=None, index=None, kind='integer', fill_value=None, dtype=np.float64, copy=False): if index is not None: if data is None: data = np.nan if not lib.isscalar(data): raise Exception("must only pass scalars with an index ") values = np.empty(len(index), dtype='float64') values.fill(data) data = values if dtype is not None: dtype = np.dtype(dtype) is_sparse_array = isinstance(data, SparseArray) if fill_value is None: if is_sparse_array: fill_value = data.fill_value else: fill_value = nan if is_sparse_array: sparse_index = data.sp_index values = np.asarray(data) else: # array-like if sparse_index is None: values, sparse_index = make_sparse(data, kind=kind, fill_value=fill_value) else: values = _sanitize_values(data) if len(values) != sparse_index.npoints: raise AssertionError("Non array-like type {0} must have" " the same length as the" " index".format(type(values))) # Create array, do *not* copy data by default if copy: try: # ToDo: Can remove this error handling when we actually # support other dtypes subarr = np.array(values, dtype=dtype, copy=True) except ValueError: subarr = np.array(values, copy=True) else: try: subarr = np.asarray(values, dtype=dtype) except ValueError: subarr = np.asarray(values) # if we have a bool type, make sure that we have a bool fill_value if ((dtype is not None and issubclass(dtype.type, np.bool_)) or (data is not None and lib.is_bool_array(subarr))): if np.isnan(fill_value) or not fill_value: fill_value = False else: fill_value = bool(fill_value) # Change the class of the array to be the subclass type. return cls._simple_new(subarr, sparse_index, fill_value) @classmethod def _simple_new(cls, data, sp_index, fill_value): if (com.is_integer_dtype(data) and com.is_float(fill_value) and sp_index.ngaps > 0): # if float fill_value is being included in dense repr, # convert values to float data = data.astype(float) result = data.view(cls) if not isinstance(sp_index, SparseIndex): # caller must pass SparseIndex raise ValueError('sp_index must be a SparseIndex') result.sp_index = sp_index result.fill_value = fill_value return result @property def _constructor(self): return lambda x: SparseArray(x, fill_value=self.fill_value, kind=self.kind) @property def kind(self): if isinstance(self.sp_index, BlockIndex): return 'block' elif isinstance(self.sp_index, IntIndex): return 'integer' def __array_finalize__(self, obj): """ Gets called after any ufunc or other array operations, necessary to pass on the index. 
""" self.sp_index = getattr(obj, 'sp_index', None) self.fill_value = getattr(obj, 'fill_value', None) def __reduce__(self): """Necessary for making this object picklable""" object_state = list(ndarray.__reduce__(self)) subclass_state = self.fill_value, self.sp_index object_state[2] = (object_state[2], subclass_state) return tuple(object_state) def __setstate__(self, state): """Necessary for making this object picklable""" nd_state, own_state = state ndarray.__setstate__(self, nd_state) fill_value, sp_index = own_state[:2] self.sp_index = sp_index self.fill_value = fill_value def __len__(self): try: return self.sp_index.length except: return 0 def __unicode__(self): return '%s\nFill: %s\n%s' % (printing.pprint_thing(self), printing.pprint_thing(self.fill_value), printing.pprint_thing(self.sp_index)) def disable(self, other): raise NotImplementedError('inplace binary ops not supported') # Inplace operators __iadd__ = disable __isub__ = disable __imul__ = disable __itruediv__ = disable __ifloordiv__ = disable __ipow__ = disable # Python 2 division operators if not compat.PY3: __idiv__ = disable @property def values(self): """ Dense values """ output = np.empty(len(self), dtype=self.dtype) int_index = self.sp_index.to_int_index() output.fill(self.fill_value) output.put(int_index.indices, self) return output @property def sp_values(self): # caching not an option, leaks memory return self.view(np.ndarray) def get_values(self, fill=None): """ return a dense representation """ return self.to_dense(fill=fill) def to_dense(self, fill=None): """ Convert SparseSeries to (dense) Series """ return self.values def __iter__(self): for i in range(len(self)): yield self._get_val_at(i) def __getitem__(self, key): """ """ if com.is_integer(key): return self._get_val_at(key) elif isinstance(key, tuple): data_slice = self.values[key] else: if isinstance(key, SparseArray): key = np.asarray(key) if hasattr(key, '__len__') and len(self) != len(key): return self.take(key) else: data_slice = self.values[key] return self._constructor(data_slice) def __getslice__(self, i, j): if i < 0: i = 0 if j < 0: j = 0 slobj = slice(i, j) return self.__getitem__(slobj) def _get_val_at(self, loc): n = len(self) if loc < 0: loc += n if loc >= n or loc < 0: raise IndexError('Out of bounds access') sp_loc = self.sp_index.lookup(loc) if sp_loc == -1: return self.fill_value else: return _index.get_value_at(self, sp_loc) @Appender(_index_shared_docs['take'] % _sparray_doc_kwargs) def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): """ Sparse-compatible version of ndarray.take Returns ------- taken : ndarray """ nv.validate_take(tuple(), kwargs) if axis: raise ValueError("axis must be 0, input was {0}".format(axis)) if com.is_integer(indices): # return scalar return self[indices] indices = com._ensure_platform_int(indices) n = len(self) if allow_fill and fill_value is not None: # allow -1 to indicate self.fill_value, # self.fill_value may not be NaN if (indices < -1).any(): msg = ('When allow_fill=True and fill_value is not None, ' 'all indices must be >= -1') raise ValueError(msg) elif (n <= indices).any(): msg = 'index is out of bounds for size {0}' raise IndexError(msg.format(n)) else: if ((indices < -n) | (n <= indices)).any(): msg = 'index is out of bounds for size {0}' raise IndexError(msg.format(n)) indices = indices.astype(np.int32) if not (allow_fill and fill_value is not None): indices = indices.copy() indices[indices < 0] += n locs = self.sp_index.lookup_array(indices) indexer = np.arange(len(locs), 
dtype=np.int32) mask = locs != -1 if mask.any(): indexer = indexer[mask] new_values = self.sp_values.take(locs[mask]) else: indexer = np.empty(shape=(0, ), dtype=np.int32) new_values = np.empty(shape=(0, ), dtype=self.sp_values.dtype) sp_index = _make_index(len(indices), indexer, kind=self.sp_index) return self._simple_new(new_values, sp_index, self.fill_value) def __setitem__(self, key, value): # if com.is_integer(key): # self.values[key] = value # else: # raise Exception("SparseArray does not support seting non-scalars # via setitem") raise TypeError( "SparseArray does not support item assignment via setitem") def __setslice__(self, i, j, value): if i < 0: i = 0 if j < 0: j = 0 slobj = slice(i, j) # noqa # if not lib.isscalar(value): # raise Exception("SparseArray does not support seting non-scalars # via slices") # x = self.values # x[slobj] = value # self.values = x raise TypeError("SparseArray does not support item assignment via " "slices") def astype(self, dtype=None): """ """ dtype = np.dtype(dtype) if dtype is not None and dtype not in (np.float_, float): raise TypeError('Can only support floating point data for now') return self.copy() def copy(self, deep=True): """ Make a copy of the SparseSeries. Only the actual sparse values need to be copied """ if deep: values = self.sp_values.copy() else: values = self.sp_values return SparseArray(values, sparse_index=self.sp_index, dtype=self.dtype, fill_value=self.fill_value) def count(self): """ Compute sum of non-NA/null observations in SparseSeries. If the fill_value is not NaN, the "sparse" locations will be included in the observation count Returns ------- nobs : int """ sp_values = self.sp_values valid_spvals = np.isfinite(sp_values).sum() if self._null_fill_value: return valid_spvals else: return valid_spvals + self.sp_index.ngaps @property def _null_fill_value(self): return com.isnull(self.fill_value) @property def _valid_sp_values(self): sp_vals = self.sp_values mask = com.notnull(sp_vals) return sp_vals[mask] @Appender(_index_shared_docs['fillna'] % _sparray_doc_kwargs) def fillna(self, value, downcast=None): if downcast is not None: raise NotImplementedError if issubclass(self.dtype.type, np.floating): value = float(value) if self._null_fill_value: return self._simple_new(self.sp_values, self.sp_index, fill_value=value) else: new_values = self.sp_values.copy() new_values[com.isnull(new_values)] = value return self._simple_new(new_values, self.sp_index, fill_value=self.fill_value) def sum(self, axis=0, *args, **kwargs): """ Sum of non-NA/null values Returns ------- sum : float """ nv.validate_sum(args, kwargs) valid_vals = self._valid_sp_values sp_sum = valid_vals.sum() if self._null_fill_value: return sp_sum else: nsparse = self.sp_index.ngaps return sp_sum + self.fill_value * nsparse def cumsum(self, axis=0, *args, **kwargs): """ Cumulative sum of values. Preserves locations of NaN values Returns ------- cumsum : Series """ nv.validate_cumsum(args, kwargs) # TODO: gh-12855 - return a SparseArray here if com.notnull(self.fill_value): return self.to_dense().cumsum() # TODO: what if sp_values contains NaN?? 
return SparseArray(self.sp_values.cumsum(), sparse_index=self.sp_index, fill_value=self.fill_value) def mean(self, axis=0, *args, **kwargs): """ Mean of non-NA/null values Returns ------- mean : float """ nv.validate_mean(args, kwargs) valid_vals = self._valid_sp_values sp_sum = valid_vals.sum() ct = len(valid_vals) if self._null_fill_value: return sp_sum / ct else: nsparse = self.sp_index.ngaps return (sp_sum + self.fill_value * nsparse) / (ct + nsparse) def value_counts(self, dropna=True): """ Returns a Series containing counts of unique values. Parameters ---------- dropna : boolean, default True Don't include counts of NaN, even if NaN is in sp_values. Returns ------- counts : Series """ keys, counts = algos._value_counts_arraylike(self.sp_values, dropna=dropna) fcounts = self.sp_index.ngaps if fcounts > 0: if self._null_fill_value and dropna: pass else: if self._null_fill_value: mask = pd.isnull(keys) else: mask = keys == self.fill_value if mask.any(): counts[mask] += fcounts else: keys = np.insert(keys, 0, self.fill_value) counts = np.insert(counts, 0, fcounts) if not isinstance(keys, pd.Index): keys = pd.Index(keys) result = pd.Series(counts, index=keys) return result def _maybe_to_dense(obj): """ try to convert to dense """ if hasattr(obj, 'to_dense'): return obj.to_dense() return obj def _maybe_to_sparse(array): if isinstance(array, com.ABCSparseSeries): array = SparseArray(array.values, sparse_index=array.sp_index, fill_value=array.fill_value, copy=True) if not isinstance(array, SparseArray): array = com._values_from_object(array) return array def _sanitize_values(arr): """ return an ndarray for our input, in a platform independent manner """ if hasattr(arr, 'values'): arr = arr.values else: # scalar if lib.isscalar(arr): arr = [arr] # ndarray if isinstance(arr, np.ndarray): pass elif com.is_list_like(arr) and len(arr) > 0: arr = com._possibly_convert_platform(arr) else: arr = np.asarray(arr) return arr def make_sparse(arr, kind='block', fill_value=nan): """ Convert ndarray to sparse format Parameters ---------- arr : ndarray kind : {'block', 'integer'} fill_value : NaN or another value Returns ------- (sparse_values, index) : (ndarray, SparseIndex) """ arr = _sanitize_values(arr) if arr.ndim > 1: raise TypeError("expected dimension <= 1 data") if com.isnull(fill_value): mask = com.notnull(arr) else: mask = arr != fill_value length = len(arr) if length != mask.size: # the arr is a SparseArray indices = mask.sp_index.indices else: indices = np.arange(length, dtype=np.int32)[mask] index = _make_index(length, indices, kind) sparsified_values = arr[mask] return sparsified_values, index def _make_index(length, indices, kind): if kind == 'block' or isinstance(kind, BlockIndex): locs, lens = splib.get_blocks(indices) index = BlockIndex(length, locs, lens) elif kind == 'integer' or isinstance(kind, IntIndex): index = IntIndex(length, indices) else: # pragma: no cover raise ValueError('must be block or integer type') return index ops.add_special_arithmetic_methods(SparseArray, arith_method=_arith_method, comp_method=_arith_method, use_numexpr=False)
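# Self-contained sketch of the dense -> sparse split that `make_sparse` above
# performs: keep only the values that differ from `fill_value`, together with
# the int32 positions where they live. `make_sparse_sketch` is a hypothetical
# name used for illustration; it is not part of pandas.
import numpy as np

def make_sparse_sketch(arr, fill_value=np.nan):
    arr = np.asarray(arr, dtype=np.float64)
    if np.isnan(fill_value):
        # sparse points are the non-NaN entries
        mask = ~np.isnan(arr)
    else:
        # sparse points are the entries that differ from fill_value
        mask = arr != fill_value
    indices = np.arange(len(arr), dtype=np.int32)[mask]
    return arr[mask], indices

sp_values, sp_indices = make_sparse_sketch(np.array([np.nan, 1.0, np.nan, 2.0]))
# sp_values -> array([1., 2.]); sp_indices -> array([1, 3], dtype=int32)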
""" Custom loaders for different datasets """
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Placement API handlers for resource providers.""" from oslo_db import exception as db_exc from oslo_serialization import jsonutils from oslo_utils import encodeutils from oslo_utils import timeutils from oslo_utils import uuidutils import webob from nova.api.openstack.placement import microversion from nova.api.openstack.placement.schemas import resource_provider as rp_schema from nova.api.openstack.placement import util from nova.api.openstack.placement import wsgi_wrapper from nova import exception from nova.i18n import _ from nova.objects import resource_provider as rp_obj def _serialize_links(environ, resource_provider): url = util.resource_provider_url(environ, resource_provider) links = [{'rel': 'self', 'href': url}] rel_types = ['inventories', 'usages'] want_version = environ[microversion.MICROVERSION_ENVIRON] if want_version >= (1, 1): rel_types.append('aggregates') if want_version >= (1, 6): rel_types.append('traits') if want_version >= (1, 11): rel_types.append('allocations') for rel in rel_types: links.append({'rel': rel, 'href': '%s/%s' % (url, rel)}) return links def _serialize_provider(environ, resource_provider, want_version): data = { 'uuid': resource_provider.uuid, 'name': resource_provider.name, 'generation': resource_provider.generation, 'links': _serialize_links(environ, resource_provider) } if want_version.matches((1, 14)): data['parent_provider_uuid'] = resource_provider.parent_provider_uuid data['root_provider_uuid'] = resource_provider.root_provider_uuid return data def _serialize_providers(environ, resource_providers, want_version): output = [] last_modified = None get_last_modified = want_version.matches((1, 15)) for provider in resource_providers: if get_last_modified: last_modified = util.pick_last_modified(last_modified, provider) provider_data = _serialize_provider(environ, provider, want_version) output.append(provider_data) last_modified = last_modified or timeutils.utcnow(with_timezone=True) return ({"resource_providers": output}, last_modified) @wsgi_wrapper.PlacementWsgify @util.require_content('application/json') def create_resource_provider(req): """POST to create a resource provider. On success return a 201 response with an empty body and a location header pointing to the newly created resource provider. """ context = req.environ['placement.context'] schema = rp_schema.POST_RESOURCE_PROVIDER_SCHEMA want_version = req.environ[microversion.MICROVERSION_ENVIRON] if want_version.matches((1, 14)): schema = rp_schema.POST_RP_SCHEMA_V1_14 data = util.extract_json(req.body, schema) try: uuid = data.setdefault('uuid', uuidutils.generate_uuid()) resource_provider = rp_obj.ResourceProvider(context, **data) resource_provider.create() except db_exc.DBDuplicateEntry as exc: # Whether exc.columns has one or two entries (in the event # of both fields being duplicates) appears to be database # dependent, so going with the complete solution here. 
duplicate = ', '.join(['%s: %s' % (column, data[column]) for column in exc.columns]) raise webob.exc.HTTPConflict( _('Conflicting resource provider %(duplicate)s already exists.') % {'duplicate': duplicate}) except exception.ObjectActionError as exc: raise webob.exc.HTTPBadRequest( _('Unable to create resource provider "%(name)s", %(rp_uuid)s: ' '%(error)s') % {'name': data['name'], 'rp_uuid': uuid, 'error': exc}) req.response.location = util.resource_provider_url( req.environ, resource_provider) req.response.status = 201 req.response.content_type = None return req.response @wsgi_wrapper.PlacementWsgify def delete_resource_provider(req): """DELETE to destroy a single resource provider. On success return a 204 and an empty body. """ uuid = util.wsgi_path_item(req.environ, 'uuid') context = req.environ['placement.context'] # The containing application will catch a not found here. try: resource_provider = rp_obj.ResourceProvider.get_by_uuid( context, uuid) resource_provider.destroy() except exception.ResourceProviderInUse as exc: raise webob.exc.HTTPConflict( _('Unable to delete resource provider %(rp_uuid)s: %(error)s') % {'rp_uuid': uuid, 'error': exc}) except exception.NotFound as exc: raise webob.exc.HTTPNotFound( _("No resource provider with uuid %s found for delete") % uuid) req.response.status = 204 req.response.content_type = None return req.response @wsgi_wrapper.PlacementWsgify @util.check_accept('application/json') def get_resource_provider(req): """Get a single resource provider. On success return a 200 with an application/json body representing the resource provider. """ want_version = req.environ[microversion.MICROVERSION_ENVIRON] uuid = util.wsgi_path_item(req.environ, 'uuid') # The containing application will catch a not found here. context = req.environ['placement.context'] resource_provider = rp_obj.ResourceProvider.get_by_uuid( context, uuid) response = req.response response.body = encodeutils.to_utf8(jsonutils.dumps( _serialize_provider(req.environ, resource_provider, want_version))) response.content_type = 'application/json' if want_version.matches((1, 15)): modified = util.pick_last_modified(None, resource_provider) response.last_modified = modified response.cache_control = 'no-cache' return response @wsgi_wrapper.PlacementWsgify @util.check_accept('application/json') def list_resource_providers(req): """GET a list of resource providers. On success return a 200 and an application/json body representing a collection of resource providers. """ context = req.environ['placement.context'] want_version = req.environ[microversion.MICROVERSION_ENVIRON] schema = rp_schema.GET_RPS_SCHEMA_1_0 if want_version.matches((1, 18)): schema = rp_schema.GET_RPS_SCHEMA_1_18 elif want_version.matches((1, 14)): schema = rp_schema.GET_RPS_SCHEMA_1_14 elif want_version.matches((1, 4)): schema = rp_schema.GET_RPS_SCHEMA_1_4 elif want_version.matches((1, 3)): schema = rp_schema.GET_RPS_SCHEMA_1_3 util.validate_query_params(req, schema) filters = {} qpkeys = ('uuid', 'name', 'member_of', 'in_tree', 'resources', 'required') for attr in qpkeys: if attr in req.GET: value = req.GET[attr] # special case member_of to always make its value a # list, either by accepting the single value, or if it # starts with 'in:' splitting on ','. # NOTE(cdent): This will all change when we start using # JSONSchema validation of query params. if attr == 'member_of': if value.startswith('in:'): value = value[3:].split(',') else: value = [value] # Make sure the values are actually UUIDs. 
for aggr_uuid in value: if not uuidutils.is_uuid_like(aggr_uuid): raise webob.exc.HTTPBadRequest( _('Invalid uuid value: %(uuid)s') % {'uuid': aggr_uuid}) elif attr == 'resources': value = util.normalize_resources_qs_param(value) elif attr == 'required': value = util.normalize_traits_qs_param(value) filters[attr] = value try: resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( context, filters) except exception.ResourceClassNotFound as exc: raise webob.exc.HTTPBadRequest( _('Invalid resource class in resources parameter: %(error)s') % {'error': exc}) except exception.TraitNotFound as exc: raise webob.exc.HTTPBadRequest( _('Invalid trait(s) in "required" parameter: %(error)s') % {'error': exc}) response = req.response output, last_modified = _serialize_providers( req.environ, resource_providers, want_version) response.body = encodeutils.to_utf8(jsonutils.dumps(output)) response.content_type = 'application/json' if want_version.matches((1, 15)): response.last_modified = last_modified response.cache_control = 'no-cache' return response @wsgi_wrapper.PlacementWsgify @util.require_content('application/json') def update_resource_provider(req): """PUT to update a single resource provider. On success return a 200 response with a representation of the updated resource provider. """ uuid = util.wsgi_path_item(req.environ, 'uuid') context = req.environ['placement.context'] want_version = req.environ[microversion.MICROVERSION_ENVIRON] # The containing application will catch a not found here. resource_provider = rp_obj.ResourceProvider.get_by_uuid( context, uuid) schema = rp_schema.PUT_RESOURCE_PROVIDER_SCHEMA if want_version.matches((1, 14)): schema = rp_schema.PUT_RP_SCHEMA_V1_14 data = util.extract_json(req.body, schema) for field in rp_obj.ResourceProvider.SETTABLE_FIELDS: if field in data: setattr(resource_provider, field, data[field]) try: resource_provider.save() except db_exc.DBDuplicateEntry as exc: raise webob.exc.HTTPConflict( _('Conflicting resource provider %(name)s already exists.') % {'name': data['name']}) except exception.ObjectActionError as exc: raise webob.exc.HTTPBadRequest( _('Unable to save resource provider %(rp_uuid)s: %(error)s') % {'rp_uuid': uuid, 'error': exc}) response = req.response response.status = 200 response.body = encodeutils.to_utf8(jsonutils.dumps( _serialize_provider(req.environ, resource_provider, want_version))) response.content_type = 'application/json' if want_version.matches((1, 15)): response.last_modified = resource_provider.updated_at response.cache_control = 'no-cache' return response
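# Minimal sketch of the microversion gating pattern the handlers above rely
# on: the requested (major, minor) pair decides which links and fields appear
# in a response. Plain tuple comparison stands in for the real
# `want_version.matches(...)` helper, and `links_for_version` is a
# hypothetical name, not part of the placement API.
def links_for_version(want_version):
    rel_types = ['inventories', 'usages']
    if want_version >= (1, 1):
        rel_types.append('aggregates')
    if want_version >= (1, 6):
        rel_types.append('traits')
    if want_version >= (1, 11):
        rel_types.append('allocations')
    return rel_types

assert links_for_version((1, 0)) == ['inventories', 'usages']
assert 'allocations' in links_for_version((1, 14))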
# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
This package contains functions for reading and writing HDF5 tables that are
not meant to be used directly, but instead are available as readers/writers in
`astropy.table`. See :ref:`table_io` for more details.
"""

import os
import warnings

import numpy as np

# NOTE: Do not import anything from astropy.table here.
# https://github.com/astropy/astropy/issues/6604
from astropy.utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning

HDF5_SIGNATURE = b'\x89HDF\r\n\x1a\n'
META_KEY = '__table_column_meta__'

__all__ = ['read_table_hdf5', 'write_table_hdf5']


def meta_path(path):
    return path + '.' + META_KEY


def _find_all_structured_arrays(handle):
    """
    Find all structured arrays in an HDF5 file.
    """
    import h5py

    structured_arrays = []

    def append_structured_arrays(name, obj):
        if isinstance(obj, h5py.Dataset) and obj.dtype.kind == 'V':
            structured_arrays.append(name)

    handle.visititems(append_structured_arrays)
    return structured_arrays


def is_hdf5(origin, filepath, fileobj, *args, **kwargs):

    if fileobj is not None:
        loc = fileobj.tell()
        try:
            signature = fileobj.read(8)
        finally:
            fileobj.seek(loc)
        return signature == HDF5_SIGNATURE
    elif filepath is not None:
        return filepath.endswith(('.hdf5', '.h5'))

    try:
        import h5py
    except ImportError:
        return False
    else:
        return isinstance(args[0], (h5py.File, h5py.Group, h5py.Dataset))


def read_table_hdf5(input, path=None, character_as_bytes=True):
    """
    Read a Table object from an HDF5 file.

    This requires `h5py <http://www.h5py.org/>`_ to be installed. If more
    than one table is present in the HDF5 file or group, the first table is
    read in and a warning is displayed.

    Parameters
    ----------
    input : str or :class:`h5py:File` or :class:`h5py:Group` or :class:`h5py:Dataset`
        If a string, the filename to read the table from. If an h5py object,
        either the file or the group object to read the table from.
    path : str
        The path from which to read the table inside the HDF5 file.
        This should be relative to the input file or group.
    character_as_bytes : bool
        If `True` then Table columns are left as bytes.
        If `False` then Table columns are converted to unicode.
    """

    try:
        import h5py
    except ImportError:
        raise Exception("h5py is required to read and write HDF5 files")

    # This function is recursive: it keeps calling itself with a more
    # specific `input` until `input` is an h5py Dataset, at which point the
    # table is actually read. Moreover, the `input` variable is changed in
    # place. Here, we save its value to be used at the end when the
    # conditions are right.
    input_save = input

    if isinstance(input, (h5py.File, h5py.Group)):

        # If a path was specified, follow the path

        if path is not None:
            try:
                input = input[path]
            except (KeyError, ValueError):
                raise OSError(f"Path {path} does not exist")

        # `input` is now either a group or a dataset. If it is a group, we
        # will search for all structured arrays inside the group, and if
        # there is one we can proceed, otherwise an error is raised. If it
        # is a dataset, we just proceed with the reading.

        if isinstance(input, h5py.Group):

            # Find all structured arrays in group
            arrays = _find_all_structured_arrays(input)

            if len(arrays) == 0:
                raise ValueError("no table found in HDF5 group {}".
                                 format(path))
            elif len(arrays) > 0:
                path = arrays[0] if path is None else path + '/' + arrays[0]
                if len(arrays) > 1:
                    warnings.warn("path= was not specified but multiple tables"
                                  " are present, reading in first available"
                                  " table (path={})".format(path),
                                  AstropyUserWarning)
                return read_table_hdf5(input, path=path)

    elif not isinstance(input, h5py.Dataset):

        # If a file object was passed, then we need to extract the filename
        # because h5py cannot properly read in file objects.

        if hasattr(input, 'read'):
            try:
                input = input.name
            except AttributeError:
                raise TypeError("h5py can only open regular files")

        # Open the file for reading, and recursively call read_table_hdf5 with
        # the file object and the path.

        f = h5py.File(input, 'r')

        try:
            return read_table_hdf5(f, path=path,
                                   character_as_bytes=character_as_bytes)
        finally:
            f.close()

    # If we are here, `input` should be a Dataset object, which we can now
    # convert to a Table.

    # Create a Table object
    from astropy.table import Table, meta, serialize

    table = Table(np.array(input))

    # Read the meta-data from the file. For back-compatibility, we can read
    # the old file format where the serialized metadata were saved in the
    # attributes of the HDF5 dataset.
    # In the new format, instead, metadata are stored in a new dataset in the
    # same file. This is introduced in Astropy 3.0
    old_version_meta = META_KEY in input.attrs
    new_version_meta = path is not None and meta_path(path) in input_save
    if old_version_meta or new_version_meta:
        if new_version_meta:
            header = meta.get_header_from_yaml(
                h.decode('utf-8') for h in input_save[meta_path(path)])
        elif old_version_meta:
            header = meta.get_header_from_yaml(
                h.decode('utf-8') for h in input.attrs[META_KEY])
        if 'meta' in list(header.keys()):
            table.meta = header['meta']
        header_cols = dict((x['name'], x) for x in header['datatype'])
        for col in table.columns.values():
            for attr in ('description', 'format', 'unit', 'meta'):
                if attr in header_cols[col.name]:
                    setattr(col, attr, header_cols[col.name][attr])

        # Construct new table with mixins, using
        # tbl.meta['__serialized_columns__'] as guidance.
        table = serialize._construct_mixins_from_columns(table)

    else:
        # Read the meta-data from the file
        table.meta.update(input.attrs)

    if not character_as_bytes:
        table.convert_bytestring_to_unicode()

    return table


def _encode_mixins(tbl):
    """Encode a Table ``tbl`` that may have mixin columns to a Table with only
    astropy Columns + appropriate meta-data to allow subsequent decoding.
    """
    from astropy.table import serialize
    from astropy.table.table import has_info_class
    from astropy import units as u
    from astropy.utils.data_info import MixinInfo, serialize_context_as

    # If PyYAML is not available then check to see if there are any mixin cols
    # that *require* YAML serialization. HDF5 already has support for
    # Quantity, so if those are the only mixins then proceed without doing the
    # YAML bit, for backward compatibility (i.e. not requiring YAML to write
    # Quantity).
    try:
        import yaml
    except ImportError:
        for col in tbl.itercols():
            if (has_info_class(col, MixinInfo) and
                    col.__class__ is not u.Quantity):
                raise TypeError("cannot write type {} column '{}' "
                                "to HDF5 without PyYAML installed."
                                .format(col.__class__.__name__,
                                        col.info.name))

    # Convert the table to one with no mixins, only Column objects. This adds
    # meta data which is extracted with meta.get_yaml_from_table.
with serialize_context_as('hdf5'): encode_tbl = serialize.represent_mixins_as_columns(tbl) return encode_tbl def write_table_hdf5(table, output, path=None, compression=False, append=False, overwrite=False, serialize_meta=False): """ Write a Table object to an HDF5 file This requires `h5py <http://www.h5py.org/>`_ to be installed. Parameters ---------- table : `~astropy.table.Table` Data table that is to be written to file. output : str or :class:`h5py:File` or :class:`h5py:Group` If a string, the filename to write the table to. If an h5py object, either the file or the group object to write the table to. path : str The path to which to write the table inside the HDF5 file. This should be relative to the input file or group. If not specified, defaults to ``__astropy_table__``. compression : bool or str or int Whether to compress the table inside the HDF5 file. If set to `True`, ``'gzip'`` compression is used. If a string is specified, it should be one of ``'gzip'``, ``'szip'``, or ``'lzf'``. If an integer is specified (in the range 0-9), ``'gzip'`` compression is used, and the integer denotes the compression level. append : bool Whether to append the table to an existing HDF5 file. overwrite : bool Whether to overwrite any existing file without warning. If ``append=True`` and ``overwrite=True`` then only the dataset will be replaced; the file/group will not be overwritten. """ from astropy.table import meta try: import h5py except ImportError: raise Exception("h5py is required to read and write HDF5 files") if path is None: # table is just an arbitrary, hardcoded string here. path = '__astropy_table__' elif path.endswith('/'): raise ValueError("table path should end with table name, not /") if '/' in path: group, name = path.rsplit('/', 1) else: group, name = None, path if isinstance(output, (h5py.File, h5py.Group)): if len(list(output.keys())) > 0 and name == '__astropy_table__': raise ValueError("table path should always be set via the " "path= argument when writing to existing " "files") elif name == '__astropy_table__': warnings.warn("table path was not set via the path= argument; " "using default path {}".format(path)) if group: try: output_group = output[group] except (KeyError, ValueError): output_group = output.create_group(group) else: output_group = output elif isinstance(output, str): if os.path.exists(output) and not append: if overwrite and not append: os.remove(output) else: raise OSError(f"File exists: {output}") # Open the file for appending or writing f = h5py.File(output, 'a' if append else 'w') # Recursively call the write function try: return write_table_hdf5(table, f, path=path, compression=compression, append=append, overwrite=overwrite, serialize_meta=serialize_meta) finally: f.close() else: raise TypeError('output should be a string or an h5py File or ' 'Group object') # Check whether table already exists if name in output_group: if append and overwrite: # Delete only the dataset itself del output_group[name] else: raise OSError(f"Table {path} already exists") # Encode any mixin columns as plain columns + appropriate metadata table = _encode_mixins(table) # Table with numpy unicode strings can't be written in HDF5 so # to write such a table a copy of table is made containing columns as # bytestrings. Now this copy of the table can be written in HDF5. if any(col.info.dtype.kind == 'U' for col in table.itercols()): table = table.copy(copy_data=False) table.convert_unicode_to_bytestring() # Warn if information will be lost when serialize_meta=False. 
This is # hardcoded to the set difference between column info attributes and what # HDF5 can store natively (name, dtype) with no meta. if serialize_meta is False: for col in table.itercols(): for attr in ('unit', 'format', 'description', 'meta'): if getattr(col.info, attr, None) not in (None, {}): warnings.warn("table contains column(s) with defined 'unit', 'format'," " 'description', or 'meta' info attributes. These will" " be dropped since serialize_meta=False.", AstropyUserWarning) # Write the table to the file if compression: if compression is True: compression = 'gzip' dset = output_group.create_dataset(name, data=table.as_array(), compression=compression) else: dset = output_group.create_dataset(name, data=table.as_array()) if serialize_meta: header_yaml = meta.get_yaml_from_table(table) header_encoded = [h.encode('utf-8') for h in header_yaml] output_group.create_dataset(meta_path(name), data=header_encoded) else: # Write the Table meta dict key:value pairs to the file as HDF5 # attributes. This works only for a limited set of scalar data types # like numbers, strings, etc., but not any complex types. This path # also ignores column meta like unit or format. for key in table.meta: val = table.meta[key] try: dset.attrs[key] = val except TypeError: warnings.warn("Attribute `{}` of type {} cannot be written to " "HDF5 files - skipping. (Consider specifying " "serialize_meta=True to write all meta data)".format(key, type(val)), AstropyUserWarning) def register_hdf5(): """ Register HDF5 with Unified I/O. """ from astropy.io import registry as io_registry from astropy.table import Table io_registry.register_reader('hdf5', Table, read_table_hdf5) io_registry.register_writer('hdf5', Table, write_table_hdf5) io_registry.register_identifier('hdf5', Table, is_hdf5)
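# Hedged round-trip sketch for the readers/writers registered above, using
# the unified Table I/O interface. It assumes h5py is installed; the file
# name 'example.h5' and the path 'data' are illustrative names only.
import numpy as np
from astropy.table import Table

t = Table({'flux': np.array([1.0, 2.0, 3.0])})
t['flux'].unit = 'Jy'

# serialize_meta=True stores column info (here the unit) in the sidecar
# metadata dataset, so it survives the round trip; compare the
# serialize_meta=False warning branch above, which drops it.
t.write('example.h5', path='data', serialize_meta=True, overwrite=True)
t2 = Table.read('example.h5', path='data')
assert str(t2['flux'].unit) == 'Jy'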
""" Tests the data transformation module. """ # Author: Alex Hepburn <ah13558@bristol.ac.uk> # Kacper Sokol <k.sokol@bristol.ac.uk> # License: new BSD import pytest import numpy as np import fatf.utils.data.transformation as fudt from fatf.exceptions import IncorrectShapeError # yapf: disable NUMERICAL_NP_ARRAY = np.array([ [0, 0, 1., 0.], [1, 0, 2., 4.], [0, 1, 3., 0.], [2, 1, 2., 1.], [1, 0, 1., 2.], [0, 1, 1., 0.]]) NUMERICAL_STRUCT_ARRAY = np.array( [(0, 0, 1., 0.), (1, 0, 2., 4.), (0, 1, 3., 0.), (2, 1, 2., 1.), (1, 0, 1., 2.), (0, 1, 1., 0.)], dtype=[('a', 'i'), ('b', 'i'), ('c', 'f'), ('d', 'f')]) CATEGORICAL_NP_ARRAY = np.array([ ['a', 'b', 'c'], ['a', 'f', 'g'], ['b', 'c', 'c'], ['b', 'f', 'c'], ['a', 'f', 'c'], ['a', 'b', 'g']]) CATEGORICAL_STRUCT_ARRAY = np.array( [('a', 'b', 'c'), ('a', 'f', 'g'), ('b', 'c', 'c'), ('b', 'f', 'c'), ('a', 'f', 'c'), ('a', 'b', 'g')], dtype=[('a', 'U1'), ('b', 'U1'), ('c', 'U1')]) MIXED_ARRAY = np.array( [(0, 'a', 0.08, 'a'), (0, 'f', 0.03, 'bb'), (1, 'c', 0.08, 'aa'), (1, 'a', 0.73, 'a'), (0, 'c', 0.36, 'b'), (1, 'f', 0.08, 'bb')], dtype=[('a', 'i'), ('b', 'U1'), ('c', 'f'), ('d', 'U2')]) NUMERICAL_NP_BINARY = np.array([ [1, 1, 1, 1], [0, 1, 0, 0], [1, 0, 0, 1], [0, 0, 0, 0], [0, 1, 1, 0], [1, 0, 1, 1]]) NUMERICAL_STRUCT_BINARY = np.array( [(1, 1, 1, 1), (0, 1, 0, 0), (1, 0, 0, 1), (0, 0, 0, 0), (0, 1, 1, 0), (1, 0, 1, 1)], dtype=[('a', np.int8), ('b', np.int8), ('c', np.int8), ('d', np.int8)]) CATEGORICAL_NP_BINARY = np.array([ [1, 1, 1], [1, 0, 0], [0, 0, 1], [0, 0, 1], [1, 0, 1], [1, 1, 0]]) CATEGORICAL_STRUCT_BINARY = np.array([ (1, 1, 1), (1, 0, 0), (0, 0, 1), (0, 0, 1), (1, 0, 1), (1, 1, 0)], dtype=[('a', np.int8), ('b', np.int8), ('c', np.int8)]) MIXED_BINARY = np.array( [(1, 1, 1, 1), (1, 0, 0, 0), (0, 0, 1, 0), (0, 1, 0, 1), (1, 0, 0, 0), (0, 0, 1, 0)], dtype=[('a', np.int8), ('b', np.int8), ('c', np.int8), ('d', np.int8)]) # yapf: enable def test_validate_input_drm(): """ Tests :func:`fatf.utils.data.transformation._validate_input_drm` function. """ incorrect_shape_dataset = ('The input dataset must be a 2-dimensional ' 'numpy array.') type_error_data = ('The input dataset must be of a base type -- text, ' 'numbers or mixture of the two.') # incorrect_shape_data_row = ('The data row must either be a 1-dimensional ' 'numpy array or a numpy void object for ' 'structured rows.') # type_error_data_row = ('The dtype of the data row is too different from ' 'the dtype of the dataset provided.') incorrect_shape_features = ('The data row must contain the same number of ' 'elements as the number of columns in the ' 'provided dataset.') with pytest.raises(IncorrectShapeError) as exin: fudt._validate_input_drm(NUMERICAL_NP_ARRAY[0], None) assert str(exin.value) == incorrect_shape_dataset # with pytest.raises(TypeError) as exin: fudt._validate_input_drm(np.array([[None, 0], [0, 1]]), np.array([])) assert str(exin.value) == type_error_data with pytest.raises(IncorrectShapeError) as exin: fudt._validate_input_drm(NUMERICAL_NP_ARRAY, NUMERICAL_NP_ARRAY) assert str(exin.value) == incorrect_shape_data_row with pytest.raises(TypeError) as exin: fudt._validate_input_drm(NUMERICAL_NP_ARRAY, CATEGORICAL_NP_ARRAY[0]) assert str(exin.value) == type_error_data_row with pytest.raises(IncorrectShapeError) as exin: fudt._validate_input_drm(NUMERICAL_NP_ARRAY, NUMERICAL_NP_ARRAY[0][:1]) assert str(exin.value) == incorrect_shape_features def test_dataset_row_masking(): """ Tests :func:`fatf.utils.data.transformation.dataset_row_masking` function. 
""" binary = fudt.dataset_row_masking(NUMERICAL_NP_ARRAY, NUMERICAL_NP_ARRAY[0]) assert np.array_equal(binary, NUMERICAL_NP_BINARY) binary = fudt.dataset_row_masking(NUMERICAL_NP_ARRAY, np.array([10.] * 4)) assert np.array_equal(binary, np.zeros_like(NUMERICAL_NP_BINARY)) binary = fudt.dataset_row_masking(NUMERICAL_STRUCT_ARRAY, NUMERICAL_STRUCT_ARRAY[0]) assert np.array_equal(binary, NUMERICAL_STRUCT_BINARY) array = np.array([(5, 5, 5., 5.)], dtype=[('a', 'i'), ('b', 'i'), ('c', 'f'), ('d', 'f')])[0] binary = fudt.dataset_row_masking(NUMERICAL_STRUCT_ARRAY, array) assert np.array_equal(binary, np.zeros_like(NUMERICAL_STRUCT_BINARY)) binary = fudt.dataset_row_masking(CATEGORICAL_NP_ARRAY, CATEGORICAL_NP_ARRAY[0]) assert np.array_equal(binary, CATEGORICAL_NP_BINARY) array = np.array(['z'] * 3) binary = fudt.dataset_row_masking(CATEGORICAL_NP_ARRAY, array) assert np.array_equal(binary, np.zeros_like(CATEGORICAL_NP_BINARY)) binary = fudt.dataset_row_masking(CATEGORICAL_STRUCT_ARRAY, CATEGORICAL_STRUCT_ARRAY[0]) assert np.array_equal(binary, CATEGORICAL_STRUCT_BINARY) array = np.array([('z', 'z', 'z')], dtype=[('a', 'U1'), ('b', 'U1'), ('c', 'U1')])[0] binary = fudt.dataset_row_masking(CATEGORICAL_STRUCT_ARRAY, array) assert np.array_equal(binary, np.zeros_like(CATEGORICAL_STRUCT_BINARY)) binary = fudt.dataset_row_masking(MIXED_ARRAY, MIXED_ARRAY[0]) assert np.array_equal(binary, MIXED_BINARY) array = np.array( [(2, 'z', 2., 'z')], dtype=[('a', 'i'), ('b', 'U1'), ('c', 'f'), ('d', 'U2')] )[0] # yapf: disable binary = fudt.dataset_row_masking(MIXED_ARRAY, array) assert np.array_equal(binary, np.zeros_like(MIXED_BINARY))