# ---------------------------------------------------------------------------
# File: contrib/macdeploy/custom_dsstore.py
# Repo: DreamCoinOfficial/DreamCoin (MIT)
# ---------------------------------------------------------------------------
#!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division, print_function, unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
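# Retarget the hard-coded template alias above at the volume and background
# image of the .dmg being built (the paths below mirror the upstream
# Bitcoin Core template this script was derived from).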
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['DREM-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
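# Usage sketch (a guess at the build-time invocation; the two positional
# arguments follow sys.argv above, and the volume name is illustrative):
#
#   python custom_dsstore.py uncompressed.dmg.dsstore DreamCoin-Qt
#
# This writes a .DS_Store that fixes the Finder window layout, background
# image and icon positions for the distributable disk image.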
# ---------------------------------------------------------------------------
# File: machine_translation/fairseq/models/fairseq_encoder.py
# Repo: wangjksjtu/autoassist-exp (BSD-3-Clause)
# ---------------------------------------------------------------------------
import torch.nn as nn
class FairseqEncoder(nn.Module):
"""Base class for encoders."""
def __init__(self, dictionary):
super().__init__()
self.dictionary = dictionary
def forward(self, src_tokens, src_lengths):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): lengths of each source sentence of shape
`(batch)`
"""
raise NotImplementedError
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to `new_order`.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
`encoder_out` rearranged according to `new_order`
"""
raise NotImplementedError
def max_positions(self):
"""Maximum input length supported by the encoder."""
return 1e6 # an arbitrary large number
def upgrade_state_dict(self, state_dict):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
return state_dict
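# A minimal concrete subclass, shown only as a sketch of how the base class
# is meant to be extended (the dict-based output layout and the embedding
# size are illustrative choices, not fairseq's exact convention):
class ToyEncoder(FairseqEncoder):
    def __init__(self, dictionary, embed_dim=32):
        super().__init__(dictionary)
        # `dictionary` is assumed to support len(), as fairseq dictionaries do.
        self.embed = nn.Embedding(len(dictionary), embed_dim)

    def forward(self, src_tokens, src_lengths):
        # (batch, src_len) -> (batch, src_len, embed_dim)
        return {'encoder_out': self.embed(src_tokens)}

    def reorder_encoder_out(self, encoder_out, new_order):
        # Select batch entries in `new_order`, e.g. during beam search.
        encoder_out['encoder_out'] = encoder_out['encoder_out'].index_select(0, new_order)
        return encoder_out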
# ---------------------------------------------------------------------------
# File: sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_12_01/operations/_virtual_network_gateways_operations.py
# Repo: iscai-msft/azure-sdk-for-python (MIT)
# ---------------------------------------------------------------------------
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VirtualNetworkGatewaysOperations(object):
"""VirtualNetworkGatewaysOperations operations.
    You should not instantiate this class directly; instead, create a Client
    instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2018-12-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-12-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VirtualNetworkGateway')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a virtual network gateway in the specified resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to create or update virtual
network gateway operation.
:type parameters:
~azure.mgmt.network.v2018_12_01.models.VirtualNetworkGateway
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualNetworkGateway
or ClientRawResponse<VirtualNetworkGateway> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_12_01.models.VirtualNetworkGateway]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_12_01.models.VirtualNetworkGateway]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
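    # LRO usage sketch (the client variable is illustrative, assumed to be a
    # NetworkManagementClient pinned to api-version 2018-12-01):
    #
    #   poller = client.virtual_network_gateways.create_or_update(
    #       'my-rg', 'my-gateway', gateway_params)
    #   gateway = poller.result()  # blocks until provisioning completes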
def get(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified virtual network gateway by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualNetworkGateway or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2018_12_01.models.VirtualNetworkGateway
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
def _delete_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified virtual network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
def _update_tags_initial(
self, resource_group_name, virtual_network_gateway_name, tags=None, custom_headers=None, raw=False, **operation_config):
parameters = models.TagsObject(tags=tags)
# Construct URL
url = self.update_tags.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_tags(
self, resource_group_name, virtual_network_gateway_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates a virtual network gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualNetworkGateway
or ClientRawResponse<VirtualNetworkGateway> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_12_01.models.VirtualNetworkGateway]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_12_01.models.VirtualNetworkGateway]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all virtual network gateways by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualNetworkGateway
:rtype:
~azure.mgmt.network.v2018_12_01.models.VirtualNetworkGatewayPaged[~azure.mgmt.network.v2018_12_01.models.VirtualNetworkGateway]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.VirtualNetworkGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways'}
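    # Paging usage sketch: the returned paged object lazily follows next-page
    # links as it is iterated (client name illustrative):
    #
    #   for gw in client.virtual_network_gateways.list('my-rg'):
    #       print(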
def list_connections(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the connections in a virtual network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of
VirtualNetworkGatewayConnectionListEntity
:rtype:
~azure.mgmt.network.v2018_12_01.models.VirtualNetworkGatewayConnectionListEntityPaged[~azure.mgmt.network.v2018_12_01.models.VirtualNetworkGatewayConnectionListEntity]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_connections.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.VirtualNetworkGatewayConnectionListEntityPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/connections'}
def _reset_initial(
self, resource_group_name, virtual_network_gateway_name, gateway_vip=None, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.reset.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if gateway_vip is not None:
query_parameters['gatewayVip'] = self._serialize.query("gateway_vip", gateway_vip, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def reset(
self, resource_group_name, virtual_network_gateway_name, gateway_vip=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Resets the primary of the virtual network gateway in the specified
resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
        :param gateway_vip: Virtual network gateway VIP address supplied to
         begin the reset of an active-active feature enabled gateway.
:type gateway_vip: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualNetworkGateway
or ClientRawResponse<VirtualNetworkGateway> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_12_01.models.VirtualNetworkGateway]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_12_01.models.VirtualNetworkGateway]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._reset_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
gateway_vip=gateway_vip,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'}
def _reset_vpn_client_shared_key_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.reset_vpn_client_shared_key.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def reset_vpn_client_shared_key(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Resets the VPN client shared key of the virtual network gateway in the
specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._reset_vpn_client_shared_key_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
reset_vpn_client_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/resetvpnclientsharedkey'}
def _generatevpnclientpackage_initial(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.generatevpnclientpackage.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VpnClientParameters')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def generatevpnclientpackage(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Generates VPN client package for P2S client of the virtual network
gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to the generate virtual network
gateway VPN client package operation.
:type parameters:
~azure.mgmt.network.v2018_12_01.models.VpnClientParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns str or
ClientRawResponse<str> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[str] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[str]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._generatevpnclientpackage_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
generatevpnclientpackage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'}
def _generate_vpn_profile_initial(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.generate_vpn_profile.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VpnClientParameters')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def generate_vpn_profile(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Generates VPN profile for P2S client of the virtual network gateway in
the specified resource group. Used for IKEV2 and radius based
authentication.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to the generate virtual network
gateway VPN client package operation.
:type parameters:
~azure.mgmt.network.v2018_12_01.models.VpnClientParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns str or
ClientRawResponse<str> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[str] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[str]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._generate_vpn_profile_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
generate_vpn_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnprofile'}
def _get_vpn_profile_package_url_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_vpn_profile_package_url.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_vpn_profile_package_url(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets pre-generated VPN profile for P2S client of the virtual network
gateway in the specified resource group. The profile needs to be
generated first using generateVpnProfile.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns str or
ClientRawResponse<str> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[str] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[str]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_vpn_profile_package_url_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_vpn_profile_package_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnprofilepackageurl'}
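    # Two-step workflow sketch (per the docstrings above; client name
    # illustrative): generate the profile first, then fetch its package URL:
    #
    #   client.virtual_network_gateways.generate_vpn_profile(
    #       'my-rg', 'my-gateway', vpn_client_params).wait()
    #   url = client.virtual_network_gateways.get_vpn_profile_package_url(
    #       'my-rg', 'my-gateway').result()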
def _get_bgp_peer_status_initial(
self, resource_group_name, virtual_network_gateway_name, peer=None, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_bgp_peer_status.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if peer is not None:
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BgpPeerStatusListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_bgp_peer_status(
self, resource_group_name, virtual_network_gateway_name, peer=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""The GetBgpPeerStatus operation retrieves the status of all BGP peers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param peer: The IP address of the peer to retrieve the status of.
:type peer: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns BgpPeerStatusListResult
or ClientRawResponse<BgpPeerStatusListResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_12_01.models.BgpPeerStatusListResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_12_01.models.BgpPeerStatusListResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_bgp_peer_status_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
peer=peer,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('BgpPeerStatusListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_bgp_peer_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'}
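# Usage sketch (hedged, reusing the illustrative `client` from the sketch
# above): the poller's result() is a BgpPeerStatusListResult whose .value
# lists one BgpPeerStatus per peer (neighbor, asn, state, route counters).
#
#   poller = client.virtual_network_gateways.get_bgp_peer_status(
#       'my-resource-group', 'my-gateway', peer='10.0.0.4')
#   for peer_status in poller.result().value:
#       print(peer_status.neighbor, peer_status.asn, peer_status.state)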
def supported_vpn_devices(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets a xml format representation for supported vpn devices.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: str or ClientRawResponse if raw=true
:rtype: str or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.supported_vpn_devices.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
supported_vpn_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/supportedvpndevices'}
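# Usage sketch (hedged): unlike its LRO neighbours, supported_vpn_devices
# is a plain synchronous call and returns the XML catalogue as a str.
#
#   xml_catalogue = client.virtual_network_gateways.supported_vpn_devices(
#       'my-resource-group', 'my-gateway')
#   print(xml_catalogue)  # vendor / device-family / firmware entries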
def _get_learned_routes_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_learned_routes.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_learned_routes(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""This operation retrieves a list of routes the virtual network gateway
has learned, including routes learned from BGP peers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
 polling object for a personal polling strategy
:return: An instance of LROPoller that returns GatewayRouteListResult
or ClientRawResponse<GatewayRouteListResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_12_01.models.GatewayRouteListResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_12_01.models.GatewayRouteListResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_learned_routes_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_learned_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'}
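# Usage sketch (hedged): the poller resolves to a GatewayRouteListResult;
# each GatewayRoute exposes network, next_hop, source_peer, origin, as_path
# and weight.
#
#   learned = client.virtual_network_gateways.get_learned_routes(
#       'my-resource-group', 'my-gateway').result()
#   for route in learned.value:
#       print(route.network, route.next_hop, route.origin)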
def _get_advertised_routes_initial(
self, resource_group_name, virtual_network_gateway_name, peer, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_advertised_routes.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_advertised_routes(
self, resource_group_name, virtual_network_gateway_name, peer, custom_headers=None, raw=False, polling=True, **operation_config):
"""This operation retrieves a list of routes the virtual network gateway
is advertising to the specified peer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param peer: The IP address of the peer.
:type peer: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
 polling object for a personal polling strategy
:return: An instance of LROPoller that returns GatewayRouteListResult
or ClientRawResponse<GatewayRouteListResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_12_01.models.GatewayRouteListResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_12_01.models.GatewayRouteListResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_advertised_routes_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
peer=peer,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_advertised_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'}
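# Usage sketch (hedged): same result shape as get_learned_routes, but the
# `peer` argument is required here and the list is what the gateway
# advertises to that specific BGP peer.
#
#   advertised = client.virtual_network_gateways.get_advertised_routes(
#       'my-resource-group', 'my-gateway', peer='10.0.0.4').result()
#   for route in advertised.value:
#       print(route.network, route.as_path)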
def _set_vpnclient_ipsec_parameters_initial(
self, resource_group_name, virtual_network_gateway_name, vpnclient_ipsec_params, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.set_vpnclient_ipsec_parameters.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(vpnclient_ipsec_params, 'VpnClientIPsecParameters')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnClientIPsecParameters', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def set_vpnclient_ipsec_parameters(
self, resource_group_name, virtual_network_gateway_name, vpnclient_ipsec_params, custom_headers=None, raw=False, polling=True, **operation_config):
"""The Set VpnclientIpsecParameters operation sets the vpnclient ipsec
policy for P2S client of virtual network gateway in the specified
resource group through Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param vpnclient_ipsec_params: Parameters supplied to the Begin Set
VPN client IPsec parameters operation for the Virtual Network Gateway
P2S client through the Network resource provider.
:type vpnclient_ipsec_params:
~azure.mgmt.network.v2018_12_01.models.VpnClientIPsecParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
 polling object for a personal polling strategy
:return: An instance of LROPoller that returns
VpnClientIPsecParameters or
ClientRawResponse<VpnClientIPsecParameters> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_12_01.models.VpnClientIPsecParameters]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_12_01.models.VpnClientIPsecParameters]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._set_vpnclient_ipsec_parameters_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
vpnclient_ipsec_params=vpnclient_ipsec_params,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VpnClientIPsecParameters', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
set_vpnclient_ipsec_parameters.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/setvpnclientipsecparameters'}
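# Usage sketch (hedged): VpnClientIPsecParameters comes from this package's
# models; the policy values below are only an illustration of a custom P2S
# IPsec policy, not a recommendation.
#
#   from azure.mgmt.network.v2018_12_01.models import VpnClientIPsecParameters
#
#   params = VpnClientIPsecParameters(
#       sa_life_time_seconds=86400, sa_data_size_kilobytes=429497,
#       ipsec_encryption='AES256', ipsec_integrity='SHA256',
#       ike_encryption='AES256', ike_integrity='SHA384',
#       dh_group='DHGroup24', pfs_group='PFS24')
#   applied = client.virtual_network_gateways.set_vpnclient_ipsec_parameters(
#       'my-resource-group', 'my-gateway', params).result()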
def _get_vpnclient_ipsec_parameters_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_vpnclient_ipsec_parameters.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnClientIPsecParameters', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_vpnclient_ipsec_parameters(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""The Get VpnclientIpsecParameters operation retrieves information about
the vpnclient ipsec policy for P2S client of virtual network gateway in
the specified resource group through Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The virtual network gateway name.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
 polling object for a personal polling strategy
:return: An instance of LROPoller that returns
VpnClientIPsecParameters or
ClientRawResponse<VpnClientIPsecParameters> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_12_01.models.VpnClientIPsecParameters]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_12_01.models.VpnClientIPsecParameters]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_vpnclient_ipsec_parameters_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VpnClientIPsecParameters', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_vpnclient_ipsec_parameters.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnclientipsecparameters'}
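# Usage sketch (hedged): note that, although this is a getter, it is modeled
# as an LRO, so result() is still required to obtain the
# VpnClientIPsecParameters instance.
#
#   ipsec = client.virtual_network_gateways.get_vpnclient_ipsec_parameters(
#       'my-resource-group', 'my-gateway').result()
#   print(ipsec.ipsec_encryption, ipsec.ike_integrity)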
def vpn_device_configuration_script(
self, resource_group_name, virtual_network_gateway_connection_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Gets a xml format representation for vpn device configuration script.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The name of the
virtual network gateway connection for which the configuration script
is generated.
:type virtual_network_gateway_connection_name: str
:param parameters: Parameters supplied to the generate vpn device
script operation.
:type parameters:
~azure.mgmt.network.v2018_12_01.models.VpnDeviceScriptParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: str or ClientRawResponse if raw=true
:rtype: str or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.vpn_device_configuration_script.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VpnDeviceScriptParameters')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
vpn_device_configuration_script.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/vpndeviceconfigurationscript'}
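# Usage sketch (hedged): VpnDeviceScriptParameters comes from this package's
# models; vendor/device_family/firmware_version should match an entry from
# supported_vpn_devices, and the connection name below is an illustrative
# assumption.
#
#   from azure.mgmt.network.v2018_12_01.models import VpnDeviceScriptParameters
#
#   script_params = VpnDeviceScriptParameters(
#       vendor='Cisco', device_family='ISR', firmware_version='15.x')
#   script = client.virtual_network_gateways.vpn_device_configuration_script(
#       'my-resource-group', 'my-connection', script_params)
#   print(script)  # device CLI configuration returned as a str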
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VirtualNetworkGatewaysOperations(object):
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-12-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'VirtualNetworkGateway')
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
def get(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
def _delete_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
def _update_tags_initial(
self, resource_group_name, virtual_network_gateway_name, tags=None, custom_headers=None, raw=False, **operation_config):
parameters = models.TagsObject(tags=tags)
url = self.update_tags.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'TagsObject')
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_tags(
self, resource_group_name, virtual_network_gateway_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
def prepare_request(next_link=None):
if not next_link:
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
header_dict = None
if raw:
header_dict = {}
deserialized = models.VirtualNetworkGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways'}
def list_connections(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
def prepare_request(next_link=None):
if not next_link:
url = self.list_connections.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
header_dict = None
if raw:
header_dict = {}
deserialized = models.VirtualNetworkGatewayConnectionListEntityPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/connections'}
def _reset_initial(
self, resource_group_name, virtual_network_gateway_name, gateway_vip=None, custom_headers=None, raw=False, **operation_config):
url = self.reset.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
if gateway_vip is not None:
query_parameters['gatewayVip'] = self._serialize.query("gateway_vip", gateway_vip, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def reset(
self, resource_group_name, virtual_network_gateway_name, gateway_vip=None, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._reset_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
gateway_vip=gateway_vip,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'}
def _reset_vpn_client_shared_key_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
url = self.reset_vpn_client_shared_key.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def reset_vpn_client_shared_key(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._reset_vpn_client_shared_key_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
reset_vpn_client_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/resetvpnclientsharedkey'}
def _generatevpnclientpackage_initial(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
url = self.generatevpnclientpackage.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'VpnClientParameters')
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def generatevpnclientpackage(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._generatevpnclientpackage_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
generatevpnclientpackage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'}
def _generate_vpn_profile_initial(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
url = self.generate_vpn_profile.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'VpnClientParameters')
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def generate_vpn_profile(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._generate_vpn_profile_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
generate_vpn_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnprofile'}
def _get_vpn_profile_package_url_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
url = self.get_vpn_profile_package_url.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_vpn_profile_package_url(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._get_vpn_profile_package_url_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_vpn_profile_package_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnprofilepackageurl'}
def _get_bgp_peer_status_initial(
self, resource_group_name, virtual_network_gateway_name, peer=None, custom_headers=None, raw=False, **operation_config):
url = self.get_bgp_peer_status.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
if peer is not None:
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BgpPeerStatusListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_bgp_peer_status(
self, resource_group_name, virtual_network_gateway_name, peer=None, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._get_bgp_peer_status_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
peer=peer,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('BgpPeerStatusListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_bgp_peer_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'}
def supported_vpn_devices(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
url = self.supported_vpn_devices.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
supported_vpn_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/supportedvpndevices'}
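# Note: supported_vpn_devices completes in a single round trip (only status 200
# is accepted above), so it returns the deserialized string directly instead of
# an LROPoller like the surrounding long-running operations.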
def _get_learned_routes_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
url = self.get_learned_routes.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_learned_routes(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._get_learned_routes_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_learned_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'}
def _get_advertised_routes_initial(
self, resource_group_name, virtual_network_gateway_name, peer, custom_headers=None, raw=False, **operation_config):
url = self.get_advertised_routes.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_advertised_routes(
self, resource_group_name, virtual_network_gateway_name, peer, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._get_advertised_routes_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
peer=peer,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_advertised_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'}
def _set_vpnclient_ipsec_parameters_initial(
self, resource_group_name, virtual_network_gateway_name, vpnclient_ipsec_params, custom_headers=None, raw=False, **operation_config):
url = self.set_vpnclient_ipsec_parameters.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(vpnclient_ipsec_params, 'VpnClientIPsecParameters')
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnClientIPsecParameters', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def set_vpnclient_ipsec_parameters(
self, resource_group_name, virtual_network_gateway_name, vpnclient_ipsec_params, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._set_vpnclient_ipsec_parameters_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
vpnclient_ipsec_params=vpnclient_ipsec_params,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VpnClientIPsecParameters', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
set_vpnclient_ipsec_parameters.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/setvpnclientipsecparameters'}
def _get_vpnclient_ipsec_parameters_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
url = self.get_vpnclient_ipsec_parameters.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnClientIPsecParameters', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_vpnclient_ipsec_parameters(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._get_vpnclient_ipsec_parameters_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VpnClientIPsecParameters', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_vpnclient_ipsec_parameters.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnclientipsecparameters'}
def vpn_device_configuration_script(
self, resource_group_name, virtual_network_gateway_connection_name, parameters, custom_headers=None, raw=False, **operation_config):
url = self.vpn_device_configuration_script.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'VpnDeviceScriptParameters')
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
vpn_device_configuration_script.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/vpndeviceconfigurationscript'}
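# Illustrative usage (a sketch; the VpnDeviceScriptParameters field names are an
# assumption based on the 'VpnDeviceScriptParameters' serialization above):
# params = VpnDeviceScriptParameters(vendor='Cisco', device_family='ISR', firmware_version='15.1')
# script = operations.vpn_device_configuration_script('my-rg', 'my-connection', params)
# print(script)  # vendor-specific VPN device configuration text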
1c3595aa8db877c916c7a3a1c15d81b0908b0cae | 2,537 | py | Python | utils.py | henrykasim/CS6220_MGGCN | 0e5cf9eee0e85be2a4bc3ab39611a7378ce15999 | ["Apache-2.0"] | null | null | null | utils.py | henrykasim/CS6220_MGGCN | 0e5cf9eee0e85be2a4bc3ab39611a7378ce15999 | ["Apache-2.0"] | null | null | null | utils.py | henrykasim/CS6220_MGGCN | 0e5cf9eee0e85be2a4bc3ab39611a7378ce15999 | ["Apache-2.0"] | null | null | null |
'''Some helper functions for PyTorch, including:
    - progress_bar: progress bar mimic xlua.progress.
    - format_time: convert elapsed seconds into a compact human-readable string.
'''
import os
import sys
import time
import math
import torch.nn as nn
import torch.nn.init as init
#_, term_width = os.popen('stty size', 'r').read().split()
#term_width = int(term_width)
term_width = 80
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
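# Minimal usage sketch (not part of the original module): drive the bar through
# a fake 100-step loop, reporting a made-up loss value in the message slot.
if __name__ == '__main__':
    for step in range(100):
        time.sleep(0.01)  # stand-in for real work
        progress_bar(step, 100, msg='Loss: %.3f' % (1.0 / (step + 1)))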
1c359607fb2eed80bcacb219fec5540be158c144 | 2,235 | py | Python | homeassistant/components/lcn/scene.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | ["Apache-2.0"] | 4 | 2016-06-22T12:00:41.000Z | 2018-06-11T20:31:25.000Z | homeassistant/components/lcn/scene.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | ["Apache-2.0"] | 58 | 2020-08-03T07:33:02.000Z | 2022-03-31T06:02:05.000Z | homeassistant/components/lcn/scene.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | ["Apache-2.0"] | 6 | 2019-07-06T00:43:13.000Z | 2021-01-16T13:27:06.000Z |
"""Support for LCN scenes."""
from typing import Any
import pypck
from homeassistant.components.scene import Scene
from homeassistant.const import CONF_ADDRESS, CONF_SCENE
from . import LcnEntity
from .const import (
CONF_CONNECTIONS,
CONF_OUTPUTS,
CONF_REGISTER,
CONF_TRANSITION,
DATA_LCN,
OUTPUT_PORTS,
)
from .helpers import get_connection
PARALLEL_UPDATES = 0
async def async_setup_platform(
hass, hass_config, async_add_entities, discovery_info=None
):
"""Set up the LCN scene platform."""
if discovery_info is None:
return
devices = []
for config in discovery_info:
address, connection_id = config[CONF_ADDRESS]
addr = pypck.lcn_addr.LcnAddr(*address)
connections = hass.data[DATA_LCN][CONF_CONNECTIONS]
connection = get_connection(connections, connection_id)
address_connection = connection.get_address_conn(addr)
devices.append(LcnScene(config, address_connection))
async_add_entities(devices)
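# Illustrative shape of one discovery_info entry, inferred from the reads above
# (the literal values are made-up examples, not taken from Home Assistant docs):
# {
#     CONF_ADDRESS: ((0, 7, False), 'pchk'),  # (LcnAddr args, connection id)
#     CONF_REGISTER: 0,
#     CONF_SCENE: 3,
#     CONF_OUTPUTS: ['OUTPUT1', 'RELAY5'],
#     CONF_TRANSITION: 5.0,
# }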
class LcnScene(LcnEntity, Scene):
"""Representation of a LCN scene."""
def __init__(self, config, device_connection):
"""Initialize the LCN scene."""
super().__init__(config, device_connection)
self.register_id = config[CONF_REGISTER]
self.scene_id = config[CONF_SCENE]
self.output_ports = []
self.relay_ports = []
for port in config[CONF_OUTPUTS]:
if port in OUTPUT_PORTS:
self.output_ports.append(pypck.lcn_defs.OutputPort[port])
            else:  # in RELAY_PORTS
self.relay_ports.append(pypck.lcn_defs.RelayPort[port])
if config[CONF_TRANSITION] is None:
self.transition = None
else:
self.transition = pypck.lcn_defs.time_to_ramp_value(config[CONF_TRANSITION])
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
async def async_activate(self, **kwargs: Any) -> None:
"""Activate scene."""
await self.device_connection.activate_scene(
self.register_id,
self.scene_id,
self.output_ports,
self.relay_ports,
self.transition,
)
1c359721a50f00cc6c1003cf26fafa5d46e97b98 | 3,709 | py | Python | Scanner.py | hajin-kim/PLS_TinyAda_Compiler | 9c376eaeab87688fb5b6af4f925003c6559b7c1b | ["MIT"] | null | null | null | Scanner.py | hajin-kim/PLS_TinyAda_Compiler | 9c376eaeab87688fb5b6af4f925003c6559b7c1b | ["MIT"] | null | null | null | Scanner.py | hajin-kim/PLS_TinyAda_Compiler | 9c376eaeab87688fb5b6af4f925003c6559b7c1b | ["MIT"] | 1 | 2020-12-05T13:28:38.000Z | 2020-12-05T13:28:38.000Z |
from Const import Const
from Token import Token
from Chario import Chario
class Scanner:
"""
The Scanner class recognizes and generates tokens
in a stream of characters and returns these tokens to the parser.
The Scanner class also detects any lexical errors.
"""
def __init__(self, chario):
self.chario = chario
def StringToken(self):
"""
Scans a string literal surrounded by \", e.g. "hahahoho"
"""
# remove first \"
self.chario.GetNextChar()
result = ""
while self.chario.PeekNextChar() != "\"":
result += self.chario.GetNextChar()
# remove last \"
self.chario.GetNextChar()
return Token(Const.stringLiteral, result)
def IntegerToken(self):
"""
Scans an integer value, which is a series of digits
"""
result = ""
while self.chario.PeekNextChar().isdigit():
result += self.chario.GetNextChar()
return Token(Const.numericalLiteral, result)
def AlphabeticToken(self):
"""
Scans either an identifier(e.g. variable name) or a reserved word(e.g. is, null).
"""
# list of characters that cannot exist right after an identifier or a reserved word
delimiters = (" ", "\n", "\r", "\t", "\\", ",", ":", "<", ">", "=", ";", "+", "-", "*", "/", "(", ")", "EOF")
# scan the token
result = ""
while self.chario.PeekNextChar() not in delimiters:
result += self.chario.GetNextChar()
# return the result as either reserved word itself or an identifier
if result in Const.reservedWords:
return Token(result, None)
else:
return Token(Const.ID, result)
def OperatorToken(self):
"""
Scans an operator symbol from chario(e.g. +, :=).
		If an unexpected character is detected, an error is reported via chario and a Const.UET error token is returned.
"""
singleCharOperators = ("+", "-", ";", "(", ")", ",", "=")
possiblyDoubleCharOperators = ("/", ":", ">", "<", "*")
doubleCharOperators = ("/=", ":=", "<=", ">=", "**")
# look for ".." first
firstChar = self.chario.GetNextChar()
if firstChar == "." and self.chario.PeekNextChar() == ".":
self.chario.GetNextChar()
return Token(Const.DOT_DOT, None)
# then look for definitely single character operators(e.g. +)
if firstChar in singleCharOperators:
return Token(firstChar, None)
else:
# if not, check if the character is possibly a double character operator
# (which is also a valid one by itself, e.g. *)
if firstChar in possiblyDoubleCharOperators:
candidate = firstChar + self.chario.PeekNextChar()
# check if the next character also contributes on making a double character operator(e.g. **)
if candidate in doubleCharOperators:
return Token(firstChar + self.chario.GetNextChar(), None)
else:
return Token(firstChar, None)
			# if none of the above were the case, then it's an unexpected symbol
else:
self.chario.PrintErrorMessage("Unexpected symbol '" + firstChar + "' was scanned")
return Token(Const.UET, firstChar)
def GetNextToken(self):
"""
Read characters from chario and return the first token found
"""
# remove ignored characters
ignoredCharacters = (" ", "\r", "\t")
while True:
nextChar = self.chario.PeekNextChar()
if nextChar == "EOF":
return Token(Const.EOF, None)
if nextChar in ignoredCharacters:
self.chario.GetNextChar()
else:
break
# check the type of this token.
# this scanner assumes that all identifiers start with an alphabet.
nextChar = self.chario.PeekNextChar()
if nextChar == Const.NEWLINE:
self.chario.GetNextChar()
return Token(Const.NEWLINE, None)
elif nextChar == "\"":
return self.StringToken()
elif nextChar.isalpha():
return self.AlphabeticToken()
elif nextChar.isdigit():
return self.IntegerToken()
else:
return self.OperatorToken()
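# Minimal driver sketch (commented out; it assumes Chario exposes the interface
# used above and that Token stores its arguments as the token's code and value):
# chario = Chario("program.ada")
# scanner = Scanner(chario)
# token = scanner.GetNextToken()
# while token.code != Const.EOF:
# 	print(token.code, token.value)
# 	token = scanner.GetNextToken()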
1c35977c3665ef7141eccc522715cb0dd0bafe4d | 22,875 | py | Python | make-the-country/population_builder.py | awyrough/make-the-country | 4019f66e4041062fb8f76f25b57f664a7308cf0d | ["MIT"] | null | null | null | make-the-country/population_builder.py | awyrough/make-the-country | 4019f66e4041062fb8f76f25b57f664a7308cf0d | ["MIT"] | null | null | null | make-the-country/population_builder.py | awyrough/make-the-country | 4019f66e4041062fb8f76f25b57f664a7308cf0d | ["MIT"] | null | null | null |
from itertools import chain
import random as rd
import numpy as np
def treat_income(data):
"""
Convert to doubles, or zero if NaN
"""
try:
return float(data)
except:
return 0.0
def treat_demo(data):
	"""
	Pass the demographic value through unchanged (placeholder for future cleaning).
	"""
	return data
def treat_group(data):
	"""
	Pass the group-quarters value through unchanged (placeholder for future cleaning).
	"""
	return data
def treat_family(data):
	"""
	Pass the family value through unchanged (placeholder for future cleaning).
	"""
	return data
def extract_income(income):
"""
Return columns of family income and non family income from a given tract income row.
"""
# CONSTANTS
faminco_range = range(15, 88, 8)
nonfaminco_range = range(19, 19 + (88 - 15), 8)
faminco = [treat_income(income[x]) for x in faminco_range]
nonfaminco = [treat_income(income[x]) for x in nonfaminco_range]
return faminco, nonfaminco
def extract_demo(demo):
"""
Return columns of demo row.
"""
demo_range = chain(range(8), range(12, 69))
demo = [treat_demo(demo[x]) for x in demo_range]
return demo
def extract_group(group):
"""
Return columns of group row.
"""
group_range = chain(range(6),range(10,14),range(15,18),range(20,24),range(25,28),range(30,34),range(35,38),range(41,45),range(46,49),range(51,55),range(56,59),range(61,65),range(66,69))
group = [treat_group(group[x]) for x in group_range]
return group
def extract_family(family):
"""
Return columns of family row.
"""
# no range, take all columns
family = [treat_family(x) for x in family]
return family
def build_census_block(demo_row, group_row, family_row, family_income, non_family_income, house_count, person_count):
# Get appropriate ranges/columns and convert to strings/doubles where appropriate
demo = extract_demo(demo_row)
group = extract_group(group_row)
family = extract_family(family_row)
madults, mchildren, fadults, fchildren = people_builder(demo)
group_quarters, madults, mchildren, fadults, fchildren = get_group_quarters(group, madults, mchildren, fadults, fchildren)
households, madults, mchildren, fadults, fchildren = household_helper(demo, family, madults, mchildren, fadults, fchildren)
# grab summary information
latlon = [float(demo_row[x]) for x in [8, 9]]
county = demo_row[2]
state = demo_row[1]
tract = demo_row[3]
block = demo_row[5]
# write block to output
rows, house_count, person_count = block_builder(households, group_quarters, latlon, house_count, person_count,
family_income, non_family_income, state, county, tract, block)
return rows, house_count, person_count
def block_builder(houses, groups, latlon, house_count, person_count, fam_inco, non_fam_inco, state, county, tract, block):
"""
Build list of rows for census block, to be returned, and outputed to output source.
"""
rows = []
for i, h in enumerate(houses):
house = [h[2]]
if len(house) != 0:
house_count+=1
hh_income = get_hh_income(fam_inco, non_fam_inco, h[1])
ind_income = add_individual_income_tt(hh_income, house)
for j, p in enumerate(house[0]):
person_count += 1
idnum = str(1000000000 + person_count)
pid = str(state) + idnum[1:]
row = [state, county, tract, block, house_count, p[2], latlon[0], latlon[1], pid,
p[0], p[1], ind_income[j][0], ind_income[j][1], ind_income[j][2]]
if len(row) != 14:
print(row)
rows.append(row)
for k, quarter in enumerate(groups):
if len(quarter) != 0:
house_count+=1
for z, q in enumerate(quarter):
person_count+=1
idnum = str(1000000000 + person_count)
pid = str(state) + idnum[1:]
income = 0
row = [state, county, tract, block, house_count, q[2], latlon[0], latlon[1], pid,
q[0], q[1], traveler_type(q[0], q[2]), income, income]
rows.append(row)
return rows, house_count, person_count
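# Each emitted person row has 14 columns (see the len(row) check above):
# [state, county, tract, block, household id, household-type code (0 family,
#  1 non-family, 2-8 group quarters), lat, lon, person id, age,
#  gender (1 = male, 0 = female), traveler type, income bracket code, income amount]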
def people_builder(demo):
"""
Build the population by age group and by gender.
"""
# ALL MEN AT EACH AGE GROUP (DEMOGRAPHIC QUERY FILE
M_AGE_DIST = range(10, 33)
# ALL WOMEN AT EACH AGE GROUP (DEMOGRAPHIC QUERY FILE
F_AGE_DIST = range(34, 57)
return create_residents([int(demo[x]) for x in M_AGE_DIST], [int(demo[y]) for y in F_AGE_DIST])
def get_age(x):
"""
	Return a random age drawn uniformly from the bracket at index x.
"""
AGE_RANGES = [(0,4) , (5,9) , (10,14) , (15,17), (18,19), (20,20), (21,21), (22,24), (25,29), (30,34),\
(35,39), (40,44), (45,49), (50,54), (55,59), (60,61), (62,64), (65,66), (67,69) , (70, 74), (75,79), (80,84), (85,100) ]
return rd.randint(AGE_RANGES[x][0], AGE_RANGES[x][1])
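# e.g. get_age(4) draws uniformly from AGE_RANGES[4] = (18, 19), so it returns 18 or 19.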
def create_residents(male_age_groups, female_age_groups):
"""
Build people arrays.
"""
madults = []; mchildren = []; fadults = []; fchildren = []
for i, agepop in enumerate(male_age_groups):
for j in range(agepop):
x = get_age(i)
if x <= 17:
mchildren.append([x, 1, -1])
else:
madults.append([x, 1, -1])
for i, agepop in enumerate(female_age_groups):
for j in range(agepop):
x = get_age(i)
if x <= 17:
fchildren.append([x, 0, -1])
else:
fadults.append([x, 0, -1])
return madults, mchildren, fadults, fchildren
def get_group_quarters(r, madults, mchildren, fadults, fchildren):
"""
Adapt people lists to account for residents in group quarters.
"""
# CONSTANT
GROUP_QUARTERS = range(6, 48)
cfa = []; j = []; nh = []; oiq = []; sh = []; m = []; oniq = []
l = [cfa, j, nh, oiq, sh, m, oniq]
gqlist = [int(r[x]) for x in GROUP_QUARTERS]
for i, gqsize in enumerate(gqlist):
mod = i%7
if i in range(0,7):
popList = mchildren
popRange = (14, 17)
elif i in range(7,14):
popList = madults
popRange = (18, 64)
elif i in range(14,21):
popList = madults
popRange = (65,120)
elif i in range(21, 28):
popList = fchildren
popRange =(14, 17)
elif i in range(28,35):
popList = fadults
popRange = (18, 64)
		elif i in range(35,42):  # last 7-column block: females 65 and over
popList = fadults
popRange = (65,120)
# Add them to the right group housing list if they are in the right age
for j in range(gqsize):
pll = len(popList)
if pll>0:
for c in range(pll):
z = np.random.randint(0, len(popList))
popped = popList.pop(z)
if popped[0]>=popRange[0] and popped[0]<=popRange[1]:
break
else:
popList.insert(0, popped)
popped = -1
if popped == -1:
break
else:
popped[2] = mod+2
l[mod].append(popped)
return l, madults, mchildren, fadults, fchildren
def household_helper(demo, family, madults, mchildren, fadults, fchildren):
"""
Prepare data to build households in census block.
"""
# HOUSEHOLD SIZE DISTRIBUTION
HH_DIST = range(58,65)
# HOUSEHOLD RELATIONSHIP DISTRIBUTION
HH_REL_DIST = range(6,31)
# READ IN HOUSE SIZES
house_sizes = expand_household_size([int(demo[x]) for x in HH_DIST])
rel = [int(family[x]) for x in HH_REL_DIST]
# READ IN POPULATION IN HOUSEHOLDS BY TYPE
house_pop = [rel[2], rel[17]]
# READ IN DISTRIBUTION OF HOUSEHOLDERS BY TYPE BY SEX
fam_holder = expand_distribution([rel[x] for x in [4,5]])
non_fam_holder = expand_distribution([rel[x] for x in [18,21]])
# READ IN NUMBER OF NON FAMILY HOUSEHOLDERS LIVING ALONE OR TOGETHER
no_fam_alone = [rel[x] for x in [19, 20, 22, 23]]
# READ IN FAMILY RELATIONS FOR FAMILY HOUSEHOLDS
fam_rel = expand_distribution([rel[x] for x in range(6,17)])
htype = []
for i in range(len(fam_holder)):
htype.append(0)
for i in range(len(non_fam_holder)):
htype.append(1)
# ALL HOUSES FOR THAT BLOCK
hhh = build_houses(house_sizes, house_pop, fam_holder, non_fam_holder,
htype, no_fam_alone, fam_rel, madults, mchildren, fadults, fchildren)
return hhh
def expand_distribution(dist, add=0):
"""
Expand a list into a selectable distribution
"""
vec = [[i]*int(round(float(x))) for i, x in enumerate(dist)]
return [(num + add) for elem in vec for num in elem]
def expand_household_size(dist):
vec = [[i]*int(round(x)) for i, x in enumerate(dist)]
return [(num+1) for elem in vec for num in elem]
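# Worked example: expand_distribution([2, 0, 1]) -> [0, 0, 2], so drawing uniformly
# from the expanded list picks index i with probability proportional to dist[i];
# expand_household_size([1, 2]) -> [1, 2, 2] (counts become 1-based household sizes).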
def select_one(l):
"""
	Select and return a random element from the list, popping it (sampling without replacement).
"""
r = np.random.randint(0,len(l)) if len(l)>1 else 0
if not l:
return 0, l
else:
val = l.pop(r)
return val, l
def build_houses(house_sizes, house_pop, fam_holder, non_fam_holder, htype, no_fam_alone, fam_rel, madults, mchildren, fadults, fchildren):
"""
Build each individual household within a Census block.
"""
numhouses = len(house_sizes)
allHouses = []
allHouseHolders = []
'Population Counters'
inNonFamHousing = 0
inFamHousing = 0
'Householder Availability'
nonfamHouseHoldersAvailable = len(non_fam_holder)
famHouseHoldersAvailable = len(fam_holder)
'Initialize all HouseHolders within Census Block'
for i in range(numhouses):
'Select Household Type From Distribution (0: family, 1: nonfamily)'
hht, htype = select_one(htype)
if (hht == 0):
gender, fam_holder = select_one(fam_holder)
inFamHousing+=1
else:
gender, non_fam_holder = select_one(non_fam_holder)
inNonFamHousing+=1
'Create Householder with Dummy Age of 30, Gender, HHT, and -1 (flag indicating assignment to house)'
householder = [30, int(not gender), hht, -1]
if (int(not gender) == 1) and (len(madults) > 0):
if (len(madults) > 0):
temp = madults.pop()
elif (int(not gender) == 0) and (len(fadults) > 0):
if (len(fadults) > 0):
temp = fadults.pop()
elif (len(fadults) == 0 and len(madults) == 0):
'In the event of non-normally aged householders (what we have classified as children, draw from the oldest'
'Children of the correct gender'
if (int(not gender) == 1):
if (len(mchildren) > 0):
temp = mchildren.pop(mchildren.index(max(mchildren)))
else:
if (len(fchildren) > 0):
temp = fchildren.pop(fchildren.index(max(fchildren)))
elif (int(not gender) == 1) and (len(madults) == 0) and (len(mchildren) > 0):
if (len(mchildren) > 0):
temp = mchildren.pop(mchildren.index(max(mchildren)))
elif (int(not gender) == 0) and (len(fadults) == 0) and (len(fchildren) > 0):
if (len(fchildren) > 0):
temp = fchildren.pop(fchildren.index(max(fchildren)))
householder[0] = temp[0]
allHouseHolders.append(householder)
'Assign HouseHolder to House (by House size) for Non Family'
for hh in enumerate(allHouseHolders):
hh = hh[1]
'Male, Non Family HouseHold'
if (hh[2] == 1) and (hh[1] == 1):
if (no_fam_alone[0] != 0):
house_sizes.remove(1)
allHouses.append([0, hh[2], [hh]])
hh[3] = 0
no_fam_alone[0]-=1
nonfamHouseHoldersAvailable-=1
continue
'Female, Non Family Household'
if (hh[2] == 1) and (hh[1] == 0):
if (no_fam_alone[2] != 0):
house_sizes.remove(1)
allHouses.append([0, hh[2], [hh]])
hh[3] = 0
no_fam_alone[2]-=1
nonfamHouseHoldersAvailable-=1
continue
house_sizes.sort()
house_sizes = house_sizes[::-1]
'Populate Non Family Houses with Non Family Householders and Create Household Object'
while((inNonFamHousing < house_pop[1]) and (nonfamHouseHoldersAvailable > 0)):
for hh in enumerate(allHouseHolders):
hh = hh[1]
if (nonfamHouseHoldersAvailable > 0):
if ((hh[2] == 1) and (hh[3] == -1)):
if len(house_sizes) > 0 :
size = house_sizes.pop()
else:
break
hh[3] = 0
allHouses.append([size-1, hh[2], [hh]])
nonfamHouseHoldersAvailable-=1
inNonFamHousing+=(size-1)
continue
'Populate Family Households for Family Householders and Create Household Object'
while((inFamHousing < house_pop[0]) and (famHouseHoldersAvailable>0)):
for hh in enumerate(allHouseHolders):
hh = hh[1]
if (famHouseHoldersAvailable > 0):
if ((hh[2] == 0) and (hh[3] == -1)):
if len(house_sizes) > 0 :
size = house_sizes.pop()
else:
break
hh[3] = 0
allHouses.append([size-1, hh[2], [hh]])
famHouseHoldersAvailable-=1
inFamHousing+=(size-1)
continue
'Populate Households with All Family Relations, Exhausting Family Relation Distribution'
for j, i in enumerate(fam_rel):
for k, hh in enumerate(allHouses):
if (hh[0] == 0): continue
else:
if ((i == 0) and (hh[0] > 0) and (hh[1] == 0)):
if (hh[2][0][1] == 0) and (len(madults) > 0):
hh[0]-=1
person = madults.pop()
hh[2].append([person[0], 1, hh[1], 0])
break
elif (hh[2][0][1] == 1) and (len(fadults) > 0):
hh[0]-=1
person = fadults.pop()
hh[2].append([person[0], 0, hh[1], 0])
break
if ((i in [1,2,3,4]) and (hh[0] > 0) and (hh[1] == 0)):
if ((len(mchildren) + len(fchildren)) > 1):
r = np.random.randint(1, len(mchildren) + len(fchildren))
if (r < len(mchildren)):
person = mchildren.pop()
hh[2].append([person[0], 1, hh[1], 0])
hh[0]-=1
break
else:
person = fchildren.pop()
hh[2].append([person[0], 0, hh[1], 0])
hh[0]-=1
break
elif (len(mchildren) > 0):
person = mchildren.pop()
hh[2].append([person[0], 1, hh[1], 0])
hh[0]-=1
break
elif (len(fchildren) > 0):
person = fchildren.pop()
hh[2].append([person[0], 0, hh[1], 0])
hh[0]-=1
break
if ((i in [5,6,7,8,9,10]) and (hh[0] > 0) and (hh[1] == 0)):
if ((len(madults) + len(fadults)) > 1):
r = np.random.randint(1, len(madults) + len(fadults))
if (r < len(madults)):
person = madults.pop()
hh[2].append([person[0], 1, hh[1], 0])
hh[0]-=1
break
else:
person = fadults.pop()
hh[2].append([person[0], 0, hh[1], 0])
hh[0]-=1
break
elif (len(madults) > 0):
person = madults.pop()
hh[2].append([person[0], 1, hh[1], 0])
hh[0]-=1
break
elif (len(fadults) > 0):
person = fadults.pop()
hh[2].append([person[0], 0, hh[1], 0])
hh[0]-=1
break
for i, hh in enumerate(allHouses):
while (hh[0] > 0):
if ((len(madults)+len(fadults)) > 1):
r = np.random.randint(1, len(madults) + len(fadults))
if (r < len(madults)):
person = madults.pop()
hh[2].append([person[0], 1, hh[1], 0])
hh[0]-=1
break
else:
person = fadults.pop()
hh[2].append([person[0], 0, hh[1], 0])
hh[0]-=1
break
elif (len(madults) > 0):
person = madults.pop()
hh[2].append([person[0], 1, hh[1], 0])
hh[0]-=1
break
elif (len(fadults) > 0):
person = fadults.pop()
hh[2].append([person[0], 0, hh[1], 0])
hh[0]-=1
break
elif ((len(mchildren) + len(fchildren)) > 1):
r = np.random.randint(1, len(mchildren) + len(fchildren))
if (r < len(mchildren)):
person = mchildren.pop()
hh[2].append([person[0], 1, hh[1], 0])
hh[0]-=1
break
else:
person = fchildren.pop()
hh[2].append([person[0], 0, hh[1], 0])
hh[0]-=1
break
elif (len(mchildren) > 0):
person = mchildren.pop()
hh[2].append([person[0], 1, hh[1], 0])
hh[0]-=1
break
elif (len(fchildren) > 0):
person = fchildren.pop()
hh[2].append([person[0], 0, hh[1], 0])
hh[0]-=1
break
elif (len(fchildren) == 0) and (len(mchildren)==0) and (len(madults) ==0) and (len(fadults)==0):
break
'Fail Safe to Ensure All Population in Households are placed within house, relaxing house size constraint for'
'Largest House in Block'
if (len(allHouses) > 0):
while (len(madults) > 0):
person = madults.pop()
allHouses[len(allHouses)-1][2].append([person[0], 1, 0, 0])
allHouses[len(allHouses)-1][0]-=1
while (len(fadults) > 0):
person = fadults.pop()
allHouses[len(allHouses)-1][2].append([person[0], 0, 0, 0])
allHouses[len(allHouses)-1][0]-=1
while (len(fchildren) > 0):
person = fchildren.pop()
allHouses[len(allHouses)-1][2].append([person[0], 0, 0, 0])
allHouses[len(allHouses)-1][0]-=1
while (len(mchildren) > 0):
person = mchildren.pop()
allHouses[len(allHouses)-1][2].append([person[0], 1, 0, 0])
allHouses[len(allHouses)-1][0]-=1
return allHouses, madults, mchildren, fadults, fchildren
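# Income bracket codes used below: 1-10 map to the dollar ranges in
# INCOME_BRACKETS; income_amount_to_code additionally returns 0 for a zero income.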
INCOME_BRACKETS = { 1: (0, 9999),
2: (10000,14999),
3: (15000,24999),
4: (25000,34999),
5: (35000,49999),
6: (50000,74999),
7: (75000,99999),
8: (100000,149999),
9: (150000,199999),
10:(200000,1000000)}
def get_hh_income(fam_inco, non_fam_inco, hht):
"""
Draw on household income distributions and return a value within the bracket.
"""
if hht:
i = non_fam_inco
else:
i = fam_inco
ie = expand_distribution(i)
val, ie = select_one(ie)
bracket = val + 1
if bracket == 1:
amount = rd.triangular(2000, 10000,7500)
else:
amount = rd.uniform(INCOME_BRACKETS[bracket][0], INCOME_BRACKETS[bracket][1])
return amount
def income_amount_to_code(income):
"""
Translate the amount of income to the bracket.
"""
for k in INCOME_BRACKETS.keys():
if income<=INCOME_BRACKETS[k][1] and income>=INCOME_BRACKETS[k][0] and income != 0:
return k
elif income == 0:
return 0
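# e.g. income_amount_to_code(42000) -> 5, since INCOME_BRACKETS[5] = (35000, 49999).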
def add_individual_income_tt(hhi, h):
"""
Add individual income to the household members.
"""
hhinctt = []
l = 0
for i, p in enumerate(h[0]):
tt = traveler_type(p[0], 0)
if tt in[5,6]:
hhinctt.append([tt,-1,0])
l+=1
elif tt in [0,1,3]:
hhinctt.append([tt,0,0])
elif tt in[2,4]:
studentInc = rd.uniform(INCOME_BRACKETS[1][0],
min(INCOME_BRACKETS[1][1],hhi))
hhinctt.append([tt, 1, studentInc])
hhi-=studentInc
coeffs = []
for i in range(l):
coeffs.append(rd.random())
s = sum(coeffs)
indincomes = [hhi*c/s for c in coeffs]
for q in hhinctt:
if q[1] == -1:
inc = indincomes.pop()
q[1] = income_amount_to_code(inc)
q[2] = inc
return hhinctt
def traveler_type(age, hht):
	# Traveler type codes (age bands and draw probabilities, matching the branches below):
	#   0 DNT: ages 0-5 and 79+, plus residents of correctional facilities, juvenile
	#          facilities, nursing homes, other institutional quarters, and military quarters
	#   1 SCN: ages 5-18 (all of 6-15, and 16-18 with probability 99.948%)
	#   2 SCW: ages 16-18 with probability 0.052%
	#   3 CNT: ages 18-22 with probability 90.34%, plus dorm residents
	#   4 CCW: ages 18-22 with probability 9.66% (work in same county)
	#   5 TTT: ages 22-64 with probability 78%
	#   6 HWT: ages 22-64 with probability 22%, plus ages 65-79; covers unemployed (~10%),
	#          work-at-home (~8%), and sick days
temp = rd.uniform(0,1)
if (age >= 0 and age< 5) or (age>79) or (hht in [2,3,4,5,7]):
travelType=0
elif age>=5 and age<=15:
travelType=1
elif age>=16 and age<= 18:
if temp>=0.99948:
travelType=2
else:
travelType=1
elif age>=18 and age<=22 or hht == 6:
if temp<=.9034:
travelType=3
else:
travelType=4
elif age>=22 and age<=64:
if temp<=.78:
travelType=5
else:
travelType=6
else:
travelType=6
return travelType | 38.061564 | 189 | 0.50706 | from itertools import chain
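# e.g. traveler_type(10, 0) always returns 1 (school-age), while traveler_type(30, 0)
# returns 5 with probability 0.78 and 6 otherwise.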
import random as rd
import numpy as np
def treat_income(data):
try:
return float(data)
except:
return 0.0
def treat_demo(data):
return data
def treat_group(data):
return data
def treat_family(data):
return data
def extract_income(income):
faminco_range = range(15, 88, 8)
nonfaminco_range = range(19, 19 + (88 - 15), 8)
faminco = [treat_income(income[x]) for x in faminco_range]
nonfaminco = [treat_income(income[x]) for x in nonfaminco_range]
return faminco, nonfaminco
def extract_demo(demo):
demo_range = chain(range(8), range(12, 69))
demo = [treat_demo(demo[x]) for x in demo_range]
return demo
def extract_group(group):
group_range = chain(range(6),range(10,14),range(15,18),range(20,24),range(25,28),range(30,34),range(35,38),range(41,45),range(46,49),range(51,55),range(56,59),range(61,65),range(66,69))
group = [treat_group(group[x]) for x in group_range]
return group
def extract_family(family):
family = [treat_family(x) for x in family]
return family
def build_census_block(demo_row, group_row, family_row, family_income, non_family_income, house_count, person_count):
demo = extract_demo(demo_row)
group = extract_group(group_row)
family = extract_family(family_row)
madults, mchildren, fadults, fchildren = people_builder(demo)
group_quarters, madults, mchildren, fadults, fchildren = get_group_quarters(group, madults, mchildren, fadults, fchildren)
households, madults, mchildren, fadults, fchildren = household_helper(demo, family, madults, mchildren, fadults, fchildren)
latlon = [float(demo_row[x]) for x in [8, 9]]
county = demo_row[2]
state = demo_row[1]
tract = demo_row[3]
block = demo_row[5]
rows, house_count, person_count = block_builder(households, group_quarters, latlon, house_count, person_count,
family_income, non_family_income, state, county, tract, block)
return rows, house_count, person_count
def block_builder(houses, groups, latlon, house_count, person_count, fam_inco, non_fam_inco, state, county, tract, block):
rows = []
for i, h in enumerate(houses):
house = [h[2]]
if len(house) != 0:
house_count+=1
hh_income = get_hh_income(fam_inco, non_fam_inco, h[1])
ind_income = add_individual_income_tt(hh_income, house)
for j, p in enumerate(house[0]):
person_count += 1
idnum = str(1000000000 + person_count)
pid = str(state) + idnum[1:]
row = [state, county, tract, block, house_count, p[2], latlon[0], latlon[1], pid,
p[0], p[1], ind_income[j][0], ind_income[j][1], ind_income[j][2]]
if len(row) != 14:
print(row)
rows.append(row)
for k, quarter in enumerate(groups):
if len(quarter) != 0:
house_count+=1
for z, q in enumerate(quarter):
person_count+=1
idnum = str(1000000000 + person_count)
pid = str(state) + idnum[1:]
income = 0
row = [state, county, tract, block, house_count, q[2], latlon[0], latlon[1], pid,
q[0], q[1], traveler_type(q[0], q[2]), income, income]
rows.append(row)
return rows, house_count, person_count
def people_builder(demo):
M_AGE_DIST = range(10, 33)
F_AGE_DIST = range(34, 57)
return create_residents([int(demo[x]) for x in M_AGE_DIST], [int(demo[y]) for y in F_AGE_DIST])
def get_age(x):
AGE_RANGES = [(0,4) , (5,9) , (10,14) , (15,17), (18,19), (20,20), (21,21), (22,24), (25,29), (30,34),\
(35,39), (40,44), (45,49), (50,54), (55,59), (60,61), (62,64), (65,66), (67,69) , (70, 74), (75,79), (80,84), (85,100) ]
return rd.randint(AGE_RANGES[x][0], AGE_RANGES[x][1])
def create_residents(male_age_groups, female_age_groups):
madults = []; mchildren = []; fadults = []; fchildren = []
for i, agepop in enumerate(male_age_groups):
for j in range(agepop):
x = get_age(i)
if x <= 17:
mchildren.append([x, 1, -1])
else:
madults.append([x, 1, -1])
for i, agepop in enumerate(female_age_groups):
for j in range(agepop):
x = get_age(i)
if x <= 17:
fchildren.append([x, 0, -1])
else:
fadults.append([x, 0, -1])
return madults, mchildren, fadults, fchildren
def get_group_quarters(r, madults, mchildren, fadults, fchildren):
GROUP_QUARTERS = range(6, 48)
cfa = []; j = []; nh = []; oiq = []; sh = []; m = []; oniq = []
l = [cfa, j, nh, oiq, sh, m, oniq]
gqlist = [int(r[x]) for x in GROUP_QUARTERS]
for i, gqsize in enumerate(gqlist):
mod = i%7
if i in range(0,7):
popList = mchildren
popRange = (14, 17)
elif i in range(7,14):
popList = madults
popRange = (18, 64)
elif i in range(14,21):
popList = madults
popRange = (65,120)
elif i in range(21, 28):
popList = fchildren
popRange =(14, 17)
elif i in range(28,35):
popList = fadults
popRange = (18, 64)
elif i in range(34,42):
popList = fadults
popRange = (65,120)
for j in range(gqsize):
pll = len(popList)
if pll>0:
for c in range(pll):
z = np.random.randint(0, len(popList))
popped = popList.pop(z)
if popped[0]>=popRange[0] and popped[0]<=popRange[1]:
break
else:
popList.insert(0, popped)
popped = -1
if popped == -1:
break
else:
popped[2] = mod+2
l[mod].append(popped)
return l, madults, mchildren, fadults, fchildren
def household_helper(demo, family, madults, mchildren, fadults, fchildren):
HH_DIST = range(58,65)
HH_REL_DIST = range(6,31)
house_sizes = expand_household_size([int(demo[x]) for x in HH_DIST])
rel = [int(family[x]) for x in HH_REL_DIST]
house_pop = [rel[2], rel[17]]
fam_holder = expand_distribution([rel[x] for x in [4,5]])
non_fam_holder = expand_distribution([rel[x] for x in [18,21]])
no_fam_alone = [rel[x] for x in [19, 20, 22, 23]]
fam_rel = expand_distribution([rel[x] for x in range(6,17)])
htype = []
for i in range(len(fam_holder)):
htype.append(0)
for i in range(len(non_fam_holder)):
htype.append(1)
hhh = build_houses(house_sizes, house_pop, fam_holder, non_fam_holder,
htype, no_fam_alone, fam_rel, madults, mchildren, fadults, fchildren)
return hhh
def expand_distribution(dist, add=0):
vec = [[i]*int(round(float(x))) for i, x in enumerate(dist)]
return [(num + add) for elem in vec for num in elem]
def expand_household_size(dist):
vec = [[i]*int(round(x)) for i, x in enumerate(dist)]
return [(num+1) for elem in vec for num in elem]
def select_one(l):
r = np.random.randint(0,len(l)) if len(l)>1 else 0
if not l:
return 0, l
else:
val = l.pop(r)
return val, l
def build_houses(house_sizes, house_pop, fam_holder, non_fam_holder, htype, no_fam_alone, fam_rel, madults, mchildren, fadults, fchildren):
numhouses = len(house_sizes)
allHouses = []
allHouseHolders = []
inNonFamHousing = 0
inFamHousing = 0
nonfamHouseHoldersAvailable = len(non_fam_holder)
famHouseHoldersAvailable = len(fam_holder)
for i in range(numhouses):
hht, htype = select_one(htype)
if (hht == 0):
gender, fam_holder = select_one(fam_holder)
inFamHousing+=1
else:
gender, non_fam_holder = select_one(non_fam_holder)
inNonFamHousing+=1
householder = [30, int(not gender), hht, -1]
if (int(not gender) == 1) and (len(madults) > 0):
if (len(madults) > 0):
temp = madults.pop()
elif (int(not gender) == 0) and (len(fadults) > 0):
if (len(fadults) > 0):
temp = fadults.pop()
elif (len(fadults) == 0 and len(madults) == 0):
'In the event of non-normally aged householders (what we have classified as children, draw from the oldest'
'Children of the correct gender'
if (int(not gender) == 1):
if (len(mchildren) > 0):
temp = mchildren.pop(mchildren.index(max(mchildren)))
else:
if (len(fchildren) > 0):
temp = fchildren.pop(fchildren.index(max(fchildren)))
elif (int(not gender) == 1) and (len(madults) == 0) and (len(mchildren) > 0):
if (len(mchildren) > 0):
temp = mchildren.pop(mchildren.index(max(mchildren)))
elif (int(not gender) == 0) and (len(fadults) == 0) and (len(fchildren) > 0):
if (len(fchildren) > 0):
temp = fchildren.pop(fchildren.index(max(fchildren)))
householder[0] = temp[0]
allHouseHolders.append(householder)
for hh in enumerate(allHouseHolders):
hh = hh[1]
if (hh[2] == 1) and (hh[1] == 1):
if (no_fam_alone[0] != 0):
house_sizes.remove(1)
allHouses.append([0, hh[2], [hh]])
hh[3] = 0
no_fam_alone[0]-=1
nonfamHouseHoldersAvailable-=1
continue
if (hh[2] == 1) and (hh[1] == 0):
if (no_fam_alone[2] != 0):
house_sizes.remove(1)
allHouses.append([0, hh[2], [hh]])
hh[3] = 0
no_fam_alone[2]-=1
nonfamHouseHoldersAvailable-=1
continue
house_sizes.sort()
house_sizes = house_sizes[::-1]
while((inNonFamHousing < house_pop[1]) and (nonfamHouseHoldersAvailable > 0)):
for hh in enumerate(allHouseHolders):
hh = hh[1]
if (nonfamHouseHoldersAvailable > 0):
if ((hh[2] == 1) and (hh[3] == -1)):
if len(house_sizes) > 0 :
size = house_sizes.pop()
else:
break
hh[3] = 0
allHouses.append([size-1, hh[2], [hh]])
nonfamHouseHoldersAvailable-=1
inNonFamHousing+=(size-1)
continue
while((inFamHousing < house_pop[0]) and (famHouseHoldersAvailable>0)):
for hh in enumerate(allHouseHolders):
hh = hh[1]
if (famHouseHoldersAvailable > 0):
if ((hh[2] == 0) and (hh[3] == -1)):
if len(house_sizes) > 0 :
size = house_sizes.pop()
else:
break
hh[3] = 0
allHouses.append([size-1, hh[2], [hh]])
famHouseHoldersAvailable-=1
inFamHousing+=(size-1)
continue
for j, i in enumerate(fam_rel):
for k, hh in enumerate(allHouses):
if (hh[0] == 0): continue
else:
if ((i == 0) and (hh[0] > 0) and (hh[1] == 0)):
if (hh[2][0][1] == 0) and (len(madults) > 0):
hh[0]-=1
person = madults.pop()
hh[2].append([person[0], 1, hh[1], 0])
break
elif (hh[2][0][1] == 1) and (len(fadults) > 0):
hh[0]-=1
person = fadults.pop()
hh[2].append([person[0], 0, hh[1], 0])
break
if ((i in [1,2,3,4]) and (hh[0] > 0) and (hh[1] == 0)):
if ((len(mchildren) + len(fchildren)) > 1):
r = np.random.randint(1, len(mchildren) + len(fchildren))
if (r < len(mchildren)):
person = mchildren.pop()
hh[2].append([person[0], 1, hh[1], 0])
hh[0]-=1
break
else:
person = fchildren.pop()
hh[2].append([person[0], 0, hh[1], 0])
hh[0]-=1
break
elif (len(mchildren) > 0):
person = mchildren.pop()
hh[2].append([person[0], 1, hh[1], 0])
hh[0]-=1
break
elif (len(fchildren) > 0):
person = fchildren.pop()
hh[2].append([person[0], 0, hh[1], 0])
hh[0]-=1
break
if ((i in [5,6,7,8,9,10]) and (hh[0] > 0) and (hh[1] == 0)):
if ((len(madults) + len(fadults)) > 1):
r = np.random.randint(1, len(madults) + len(fadults))
if (r < len(madults)):
person = madults.pop()
hh[2].append([person[0], 1, hh[1], 0])
hh[0]-=1
break
else:
person = fadults.pop()
hh[2].append([person[0], 0, hh[1], 0])
hh[0]-=1
break
elif (len(madults) > 0):
person = madults.pop()
hh[2].append([person[0], 1, hh[1], 0])
hh[0]-=1
break
elif (len(fadults) > 0):
person = fadults.pop()
hh[2].append([person[0], 0, hh[1], 0])
hh[0]-=1
break
    # Final pass: fill the remaining capacity of every house, preferring
    # adults over children, until the house is full or nobody is left.
    for hh in allHouses:
        while hh[0] > 0:
            if (len(madults) + len(fadults)) > 1:
                r = np.random.randint(1, len(madults) + len(fadults))
                if r < len(madults):
                    person = madults.pop()
                    hh[2].append([person[0], 1, hh[1], 0])
                else:
                    person = fadults.pop()
                    hh[2].append([person[0], 0, hh[1], 0])
            elif len(madults) > 0:
                person = madults.pop()
                hh[2].append([person[0], 1, hh[1], 0])
            elif len(fadults) > 0:
                person = fadults.pop()
                hh[2].append([person[0], 0, hh[1], 0])
            elif (len(mchildren) + len(fchildren)) > 1:
                r = np.random.randint(1, len(mchildren) + len(fchildren))
                if r < len(mchildren):
                    person = mchildren.pop()
                    hh[2].append([person[0], 1, hh[1], 0])
                else:
                    person = fchildren.pop()
                    hh[2].append([person[0], 0, hh[1], 0])
            elif len(mchildren) > 0:
                person = mchildren.pop()
                hh[2].append([person[0], 1, hh[1], 0])
            elif len(fchildren) > 0:
                person = fchildren.pop()
                hh[2].append([person[0], 0, hh[1], 0])
            else:
                # Nobody is left to place anywhere.
                break
            hh[0] -= 1
    if len(allHouses) > 0:
        # Anyone still unplaced is dumped into the last house; its capacity
        # counter may go negative here.
        last = allHouses[-1]
        while len(madults) > 0:
            person = madults.pop()
            last[2].append([person[0], 1, 0, 0])
            last[0] -= 1
        while len(fadults) > 0:
            person = fadults.pop()
            last[2].append([person[0], 0, 0, 0])
            last[0] -= 1
        while len(fchildren) > 0:
            person = fchildren.pop()
            last[2].append([person[0], 0, 0, 0])
            last[0] -= 1
        while len(mchildren) > 0:
            person = mchildren.pop()
            last[2].append([person[0], 1, 0, 0])
            last[0] -= 1
return allHouses, madults, mchildren, fadults, fchildren
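# Household income brackets: code -> (lower bound, upper bound) in dollars.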
INCOME_BRACKETS = {
    1: (0, 9999),
    2: (10000, 14999),
    3: (15000, 24999),
    4: (25000, 34999),
    5: (35000, 49999),
    6: (50000, 74999),
    7: (75000, 99999),
    8: (100000, 149999),
    9: (150000, 199999),
    10: (200000, 1000000),
}
def get_hh_income(fam_inco, non_fam_inco, hht):
    # Draw a household income: pick an income bracket from the family or
    # non-family distribution, then draw a dollar amount inside the bracket.
    if hht:
        i = non_fam_inco
    else:
        i = fam_inco
    ie = expand_distribution(i)
    val, ie = select_one(ie)
    bracket = val + 1
    if bracket == 1:
        # Lowest bracket: triangular draw with mode 7500, so incomes are
        # not uniformly close to zero.
        amount = rd.triangular(2000, 10000, 7500)
    else:
        amount = rd.uniform(INCOME_BRACKETS[bracket][0],
                            INCOME_BRACKETS[bracket][1])
    return amount
def income_amount_to_code(income):
    # Map a dollar amount back onto its income bracket code (0 = no income,
    # None = outside every bracket).
    if income == 0:
        return 0
    for k, (low, high) in INCOME_BRACKETS.items():
        if low <= income <= high:
            return k
    return None
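# e.g. income_amount_to_code(12000) -> 2, income_amount_to_code(0) -> 0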
def add_individual_income_tt(hhi, h):
    # Distribute the household income hhi over the household members and
    # tag each member with [traveler_type, income_code, income_amount].
    hhinctt = []
    n_earners = 0
    for p in h[0]:
        tt = traveler_type(p[0], 0)
        if tt in [5, 6]:
            # Income earner: the amount is assigned below (-1 placeholder).
            hhinctt.append([tt, -1, 0])
            n_earners += 1
        elif tt in [0, 1, 3]:
            # No personal income.
            hhinctt.append([tt, 0, 0])
        elif tt in [2, 4]:
            # Student: a small income drawn from the lowest bracket.
            studentInc = rd.uniform(INCOME_BRACKETS[1][0],
                                    min(INCOME_BRACKETS[1][1], hhi))
            hhinctt.append([tt, 1, studentInc])
            hhi -= studentInc
    # Split the remaining household income among the earners using random
    # weights.
    coeffs = [rd.random() for _ in range(n_earners)]
    s = sum(coeffs)
    indincomes = [hhi * c / s for c in coeffs]
    for q in hhinctt:
        if q[1] == -1:
            inc = indincomes.pop()
            q[1] = income_amount_to_code(inc)
            q[2] = inc
    return hhinctt
def traveler_type(age, hht):
| true | true |
1c35979d53d9bc6a3421f7b64d03efca03b07692 | 382 | py | Python | authapp/urls.py | tum0xa/geekbrains-django2-homework | 55a7a0aa60da2978ab4abd5d2dacf7af21b301cc | ["MIT"] | null | null | null | authapp/urls.py | tum0xa/geekbrains-django2-homework | 55a7a0aa60da2978ab4abd5d2dacf7af21b301cc | ["MIT"] | null | null | null | authapp/urls.py | tum0xa/geekbrains-django2-homework | 55a7a0aa60da2978ab4abd5d2dacf7af21b301cc | ["MIT"] | null | null | null |
from django.urls import path
import authapp.views as authapp
app_name = 'authapp'
urlpatterns = [
path('login/', authapp.login, name='login'),
path('logout', authapp.logout, name='logout'),
path('register/', authapp.register, name='register'),
path('edit/', authapp.edit, name='edit'),
path('verify/<email>/<activation_key>/', authapp.verify, name='verify'),
]
| 31.833333 | 76 | 0.675393 | true | true |
1c3597d09aa6a31612e7f03092b8eca067dd16b3 | 8,226 | py | Python | Personalization/script_BolCom.py | CyrilShch/persona-training-scripts | 8f026fe29b35b7f217fbb58445181dc0569f3321 | ["MIT"] | null | null | null | Personalization/script_BolCom.py | CyrilShch/persona-training-scripts | 8f026fe29b35b7f217fbb58445181dc0569f3321 | ["MIT"] | null | null | null | Personalization/script_BolCom.py | CyrilShch/persona-training-scripts | 8f026fe29b35b7f217fbb58445181dc0569f3321 | ["MIT"] | null | null | null |
# imports
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
import pandas as pd
import numpy as np
import time
import re
from tqdm import tqdm
import argparse
import warnings
from user_agents import parse
warnings.simplefilter("ignore")
# SCRIPT USAGE:
### without user-agent:
# python Personalization/script_BolCom.py
# --exp_name BC_first_exp1
# --items_list sneakers parfum sandalen horloge rugzak zonnebril kostuum trainingspak badpak jurk overhemd mantel laarzen koptelefoon yogamat sjaal badjas halsketting portemonnee
# --web_page https://www.bol.com/
# --exec_path Personalization/geckodriver.exe
### with user-agent:
# python Personalization/script_BolCom.py
# --exp_name BC_second_exp2
# --items_list sneakers parfum sandalen horloge rugzak zonnebril kostuum trainingspak badpak jurk overhemd mantel laarzen koptelefoon yogamat sjaal badjas halsketting portemonnee
# --web_page https://www.bol.com/
# --exec_path Personalization/geckodriver.exe
# --ua_string "Mozilla/5.0 (Linux; U; Android 4.0.4; en-gb; GT-I9300 Build/IMM76D) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30"
# LIST OF UA STRING:
### iPhone's user agent string
# ua_string = 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3'
### Samsung Galaxy S3
# ua_string = 'Mozilla/5.0 (Linux; U; Android 4.0.4; en-gb; GT-I9300 Build/IMM76D) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30'
### non touch Blackberry device
# ua_string = 'BlackBerry9700/5.0.0.862 Profile/MIDP-2.1 Configuration/CLDC-1.1 VendorID/331 UNTRUSTED/1.0 3gpp-gba'
### iPad's user agent string
# ua_string = 'Mozilla/5.0(iPad; U; CPU iPhone OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B314 Safari/531.21.10'
### Kindle Fire's user agent string
# ua_string = 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_3; en-us; Silk/1.1.0-80) AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16 Silk-Accelerated=true'
### Touch capable Windows 8 device
# ua_string = 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0; Touch)'
def get_parser():
# parse parameters
parser = argparse.ArgumentParser(description='Scrape Lidl website')
parser.add_argument("--exp_name", type=str, default="", help="Experiment name")
parser.add_argument("--items_list", nargs='+', default="", help="List of products to search")
parser.add_argument("--web_page", type=str, default="", help="Website url")
parser.add_argument("--exec_path", type=str, default="", help="Path to execute the webdriver")
parser.add_argument("--ua_string", type=str, default="", help="User agent string to specify to identify/detect devices and browsers")
parser.add_argument("--proxy", type=str, default="", help="Proxy to mimic IP Address Geolocation")
return parser
def iteration(driver, item, delays, collected_data):
# banner button BolCom click to update the search bar
banner_button = driver.find_element_by_class_name('omniture_main_logo')
# randomly choose a delay and freeze the execution to mimic a person usage
delay = np.random.choice(delays)
time.sleep(delay)
    banner_button.click()  # click the logo to reset the search bar
delay = np.random.choice(delays)
time.sleep(delay)
# put a query in the search bar
search = driver.find_element_by_name("searchtext")
search.send_keys(item) # put it in the search field
search.submit() # press ENTER
time.sleep(5)
timeout = 30
try:
main = WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.ID, 'js_items_content')))
time.sleep(5)
articles = main.find_elements_by_class_name('product-item--row') # get all products from the page
for article in tqdm(articles):
price_header = article.find_elements_by_class_name('price-block__price') # get a price object
if len(price_header) != 0:
# process price text
price = re.sub(r'[\n\r]+', '.', price_header[0].text) # get a price text
price = re.sub("\-", "00", price)
product_header = article.find_elements_by_class_name('product-title') # get a product name
# get a seller name
try:
seller = article.find_elements_by_class_name('product-seller__name')
assert seller
except:
seller = article.find_elements_by_class_name('product-seller')
if len(seller) == 0: # case if there is no seller specified
_seller = 'NaN'
else:
_seller = seller[0].text # get a seller name text
# temporary dictionary of the product data
temp = {
'item': item,
'product': product_header[0].text,
'seller': _seller,
'price': price}
collected_data.append(temp) # append the data
except TimeoutException:
# driver.quit()
print("driver has not found products on the webpage")
def main(params):
# initialize a list of the possible delays to mimic user interaction with websites
delays = [1, 2, 3, 4, 5]
# initialize a list where we store all collected data
collected_data = []
# list of items to search
items_list = params.items_list
# initalize webdriver options
profile = webdriver.FirefoxProfile()
if params.ua_string != '':
# user agent string
ua_string = params.ua_string
# initialize user agent
user_agent = parse(ua_string)
print(f"Current user-agent: {user_agent}")
profile.set_preference("general.useragent.override", ua_string)
PROXY = params.proxy
if PROXY != '':
webdriver.DesiredCapabilities.FIREFOX['proxy'] = {
"httpProxy": PROXY,
"ftpProxy": PROXY,
"sslProxy": PROXY,
"proxyType": "MANUAL",
}
# initialize a webdriver
driver = webdriver.Firefox(profile, executable_path=params.exec_path)
# get the url
driver.get(params.web_page)
# time to wait a response from the page
timeout = 30
# press the button to accept cookies
try:
cookies = WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.CLASS_NAME, "js-confirm-button")))
delay = np.random.choice(delays)
time.sleep(delay)
cookies.send_keys(Keys.RETURN) # press ENTER
except TimeoutException:
print("Didn't found the button accept cookies.")
pass
# initialize a list with failed items
skipped_items = []
# collect the data
for item in tqdm(items_list):
print("================")
print(item)
print("================")
print("\n")
        # Retry the scrape up to four times before giving up on this item.
        for attempt in range(4):
            try:
                iteration(driver, item, delays, collected_data)
                break
            except Exception:
                continue
        else:
            print(f"{item} was skipped")
            skipped_items.append(item)
print("Writing csv file...")
df = pd.DataFrame(collected_data)
df.to_csv(f'{params.exp_name}.csv', index=False)
print("Writing finished.")
# close the driver
driver.quit()
if __name__ == '__main__':
parser = get_parser()
params, unknown = parser.parse_known_args()
# run the script
main(params)
| 38.260465 | 182 | 0.642232 | true | true |
1c3598a66c4040a3519509163bc6019d2f7f3d7a | 8,965 | py | Python | api/environments/views.py | SolidStateGroup/Bullet-Train-API | ea47ccbdadf665a806ae4e0eff6ad1a2f1b0ba19 | ["BSD-3-Clause"] | null | null | null | api/environments/views.py | SolidStateGroup/Bullet-Train-API | ea47ccbdadf665a806ae4e0eff6ad1a2f1b0ba19 | ["BSD-3-Clause"] | null | null | null | api/environments/views.py | SolidStateGroup/Bullet-Train-API | ea47ccbdadf665a806ae4e0eff6ad1a2f1b0ba19 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.utils.decorators import method_decorator
from drf_yasg2 import openapi
from drf_yasg2.utils import swagger_auto_schema
from flag_engine.api.document_builders import build_environment_document
from rest_framework import mixins, status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from environments.permissions.permissions import (
EnvironmentAdminPermission,
EnvironmentPermissions,
NestedEnvironmentPermissions,
)
from permissions.serializers import (
PermissionModelSerializer,
UserObjectPermissionsSerializer,
)
from projects.models import Project
from webhooks.mixins import TriggerSampleWebhookMixin
from webhooks.webhooks import WebhookType
from .identities.traits.models import Trait
from .identities.traits.serializers import (
DeleteAllTraitKeysSerializer,
TraitKeysSerializer,
)
from .models import Environment, EnvironmentAPIKey, Webhook
from .permissions.models import (
EnvironmentPermissionModel,
UserEnvironmentPermission,
UserPermissionGroupEnvironmentPermission,
)
from .serializers import (
CloneEnvironmentSerializer,
CreateUpdateEnvironmentSerializer,
EnvironmentAPIKeySerializer,
EnvironmentSerializerLight,
WebhookSerializer,
)
logger = logging.getLogger(__name__)
@method_decorator(
name="list",
decorator=swagger_auto_schema(
manual_parameters=[
openapi.Parameter(
"project",
openapi.IN_QUERY,
"ID of the project to filter by.",
required=False,
type=openapi.TYPE_INTEGER,
)
]
),
)
class EnvironmentViewSet(viewsets.ModelViewSet):
lookup_field = "api_key"
permission_classes = [IsAuthenticated, EnvironmentPermissions]
def get_serializer_class(self):
if self.action == "trait_keys":
return TraitKeysSerializer
if self.action == "delete_traits":
return DeleteAllTraitKeysSerializer
if self.action == "clone":
return CloneEnvironmentSerializer
elif self.action in ("create", "update", "partial_update"):
return CreateUpdateEnvironmentSerializer
return EnvironmentSerializerLight
def get_serializer_context(self):
context = super(EnvironmentViewSet, self).get_serializer_context()
if self.kwargs.get("api_key"):
context["environment"] = self.get_object()
return context
def get_queryset(self):
if self.action == "list":
project_id = self.request.query_params.get(
"project"
) or self.request.data.get("project")
try:
project = Project.objects.get(id=project_id)
except Project.DoesNotExist:
raise ValidationError("Invalid or missing value for project parameter.")
return self.request.user.get_permitted_environments(
"VIEW_ENVIRONMENT", project=project
)
# Permission class handles validation of permissions for other actions
return Environment.objects.all()
def perform_create(self, serializer):
environment = serializer.save()
UserEnvironmentPermission.objects.create(
user=self.request.user, environment=environment, admin=True
)
@action(detail=True, methods=["GET"], url_path="trait-keys")
def trait_keys(self, request, *args, **kwargs):
keys = [
trait_key
for trait_key in Trait.objects.filter(
identity__environment=self.get_object()
)
.order_by()
.values_list("trait_key", flat=True)
.distinct()
]
data = {"keys": keys}
serializer = self.get_serializer(data=data)
if serializer.is_valid():
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response(
{"detail": "Couldn't get trait keys"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
@action(detail=True, methods=["POST"])
def clone(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
clone = serializer.save(source_env=self.get_object())
UserEnvironmentPermission.objects.create(
user=self.request.user, environment=clone, admin=True
)
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=True, methods=["POST"], url_path="delete-traits")
def delete_traits(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
serializer.delete()
return Response(status=status.HTTP_200_OK)
else:
return Response(
{"detail": "Couldn't delete trait keys."},
status=status.HTTP_400_BAD_REQUEST,
)
@swagger_auto_schema(responses={200: PermissionModelSerializer})
@action(detail=False, methods=["GET"])
def permissions(self, *args, **kwargs):
return Response(
PermissionModelSerializer(
instance=EnvironmentPermissionModel.objects.all(), many=True
).data
)
@swagger_auto_schema(responses={200: UserObjectPermissionsSerializer})
@action(
detail=True,
methods=["GET"],
url_path="my-permissions",
url_name="my-permissions",
)
def user_permissions(self, request, *args, **kwargs):
# TODO: tidy this mess up
environment = self.get_object()
group_permissions = UserPermissionGroupEnvironmentPermission.objects.filter(
group__users=request.user, environment=environment
)
user_permissions = UserEnvironmentPermission.objects.filter(
user=request.user, environment=environment
)
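        # Aggregate the permission keys granted through the user's groups
        # and through direct user-environment permissions.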
permissions = set()
        for group_permission in group_permissions:
            permissions.update(
                permission.key
                for permission in group_permission.permissions.all()
                if permission.key
            )
        for user_permission in user_permissions:
            permissions.update(
                permission.key
                for permission in user_permission.permissions.all()
                if permission.key
            )
is_project_admin = request.user.is_project_admin(environment.project)
data = {
"admin": group_permissions.filter(admin=True).exists()
or user_permissions.filter(admin=True).exists()
or is_project_admin,
"permissions": permissions,
}
serializer = UserObjectPermissionsSerializer(data=data)
serializer.is_valid()
return Response(serializer.data)
@action(detail=True, methods=["GET"], url_path="document")
def get_document(self, request, api_key: str):
environment = Environment.objects.select_related(
"project", "project__organisation"
).get(api_key=api_key)
return Response(build_environment_document(environment))
class NestedEnvironmentViewSet(viewsets.GenericViewSet):
model_class = None
webhook_type = WebhookType.ENVIRONMENT
def get_queryset(self):
return self.model_class.objects.filter(
environment__api_key=self.kwargs.get("environment_api_key")
)
def perform_create(self, serializer):
serializer.save(environment=self._get_environment())
def perform_update(self, serializer):
serializer.save(environment=self._get_environment())
def _get_environment(self):
return Environment.objects.get(api_key=self.kwargs.get("environment_api_key"))
class WebhookViewSet(
NestedEnvironmentViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
TriggerSampleWebhookMixin,
):
serializer_class = WebhookSerializer
pagination_class = None
permission_classes = [IsAuthenticated, NestedEnvironmentPermissions]
model_class = Webhook
webhook_type = WebhookType.ENVIRONMENT
class EnvironmentAPIKeyViewSet(
NestedEnvironmentViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
):
serializer_class = EnvironmentAPIKeySerializer
pagination_class = None
permission_classes = [IsAuthenticated, EnvironmentAdminPermission]
model_class = EnvironmentAPIKey
| 33.451493 | 88 | 0.668265 | true | true |
1c35990dae6d9ff8d69a6e3ecdefe2a0bc11800f | 41,418 | py | Python | python/src/lib/python/pelix/ipopo/decorators.py | isandlaTech/cohorte-runtime | 686556cdde20beba77ae202de9969be46feed5e2 | ["Apache-2.0"] | 6 | 2015-04-28T16:51:08.000Z | 2017-07-12T11:29:00.000Z | pelix/src/main/python/pelix/ipopo/decorators.py | isandlaTech/cohorte-3rdparty | d39a1bf5d6d39550f8ee93770bcac55c5f098367 | ["Apache-2.0"] | 29 | 2015-02-24T11:11:26.000Z | 2017-08-25T08:30:18.000Z | python/src/lib/python/pelix/ipopo/decorators.py | isandlaTech/cohorte-runtime | 686556cdde20beba77ae202de9969be46feed5e2 | ["Apache-2.0"] | 1 | 2015-08-24T13:23:43.000Z | 2015-08-24T13:23:43.000Z |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Defines the iPOPO decorators classes to manipulate component factory classes
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.5.7
:status: Beta
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 5, 7)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Pelix modules
from pelix.utilities import is_string, to_iterable
from pelix.ipopo.contexts import FactoryContext, Requirement
import pelix.ipopo.constants as constants
# Standard library
import inspect
import logging
import threading
import types
# ------------------------------------------------------------------------------
# Prepare the module logger
_logger = logging.getLogger("ipopo.decorators")
# ------------------------------------------------------------------------------
def is_from_parent(cls, attribute_name, value=None):
"""
Tests if the current attribute value is shared by a parent of the given
class.
Returns None if the attribute value is None.
:param cls: Child class with the requested attribute
:param attribute_name: Name of the attribute to be tested
:param value: The exact value in the child class (optional)
:return: True if the attribute value is shared with a parent class
"""
if value is None:
try:
# Get the current value
value = getattr(cls, attribute_name)
except AttributeError:
# No need to go further: the attribute does not exist
return False
for base in cls.__bases__:
# Look for the value in each parent class
if getattr(base, attribute_name, None) is value:
# Found !
return True
# Attribute value not found in parent classes
return False
def get_factory_context(cls):
"""
Retrieves the factory context object associated to a factory. Creates it
if needed
:param cls: The factory class
:return: The factory class context
"""
context = getattr(cls, constants.IPOPO_FACTORY_CONTEXT, None)
if context is None:
# Class not yet manipulated
context = FactoryContext()
elif is_from_parent(cls, constants.IPOPO_FACTORY_CONTEXT):
# Create a copy the context
context = context.copy(True)
# * Manipulation has not been applied yet
context.completed = False
else:
# Nothing special to do
return context
# Context has been created or copied, inject the new bean
setattr(cls, constants.IPOPO_FACTORY_CONTEXT, context)
return context
def get_method_description(method):
"""
Retrieves a description of the given method. If possible, the description
contains the source file name and line.
:param method: A method
:return: A description of the method (at least its name)
"""
try:
try:
line_no = inspect.getsourcelines(method)[1]
except IOError:
# Error reading the source file
line_no = -1
return "'{method}' ({file}:{line})" \
.format(method=method.__name__,
file=inspect.getfile(method),
line=line_no)
except TypeError:
# Method can't be inspected
return "'{0}'".format(method.__name__)
def validate_method_arity(method, *needed_args):
"""
Tests if the decorated method has a sufficient number of parameters.
:param method: The method to be tested
:param needed_args: The name (for description only) of the needed
arguments, without "self".
:return: Nothing
:raise TypeError: Invalid number of parameter
"""
nb_needed_args = len(needed_args) + 1
# Test the number of parameters
argspec = inspect.getargspec(method)
method_args = argspec.args
if len(method_args) == 0:
# No argument at all
raise TypeError("Decorated method {0} must have at least the 'self' "
"parameter".format(get_method_description(method)))
if argspec.varargs is not None:
# Variable arguments
if len(method_args) != 1 or method_args[0] != "self":
# Other arguments detected
raise TypeError("When using '*args', the decorated {0} method must"
" only accept the 'self' argument"
.format(get_method_description(method)))
elif len(method_args) != nb_needed_args or method_args[0] != 'self':
# "Normal" arguments
raise TypeError("The decorated method {0} must accept exactly {1} "
"parameters : (self, {2})"
.format(get_method_description(method), nb_needed_args,
", ".join(needed_args)))
# ------------------------------------------------------------------------------
def _ipopo_setup_callback(cls, context):
"""
Sets up the class _callback dictionary
:param cls: The class to handle
:param context: The factory class context
"""
assert inspect.isclass(cls)
assert isinstance(context, FactoryContext)
if context.callbacks is not None:
callbacks = context.callbacks.copy()
else:
callbacks = {}
functions = inspect.getmembers(cls, inspect.isroutine)
for _, function in functions:
if not hasattr(function, constants.IPOPO_METHOD_CALLBACKS):
# No attribute, get the next member
continue
method_callbacks = getattr(function, constants.IPOPO_METHOD_CALLBACKS)
if not isinstance(method_callbacks, list):
# Invalid content
_logger.warning("Invalid callback information %s in %s",
constants.IPOPO_METHOD_CALLBACKS,
get_method_description(function))
continue
# Keeping it allows inheritance : by removing it, only the first
# child will see the attribute -> Don't remove it
# Store the call backs
for _callback in method_callbacks:
if _callback in callbacks and \
not is_from_parent(cls, callbacks[_callback].__name__,
callbacks[_callback]):
_logger.warning("Redefining the callback %s in class '%s'.\n"
"\tPrevious callback : %s\n"
"\tNew callback : %s", _callback, cls.__name__,
get_method_description(callbacks[_callback]),
get_method_description(function))
callbacks[_callback] = function
# Update the factory context
context.callbacks.clear()
context.callbacks.update(callbacks)
def _ipopo_setup_field_callback(cls, context):
"""
Sets up the class _field_callback dictionary
:param cls: The class to handle
:param context: The factory class context
"""
assert inspect.isclass(cls)
assert isinstance(context, FactoryContext)
if context.field_callbacks is not None:
callbacks = context.field_callbacks.copy()
else:
callbacks = {}
functions = inspect.getmembers(cls, inspect.isroutine)
for name, function in functions:
if not hasattr(function, constants.IPOPO_METHOD_FIELD_CALLBACKS):
# No attribute, get the next member
continue
method_callbacks = getattr(function,
constants.IPOPO_METHOD_FIELD_CALLBACKS)
if not isinstance(method_callbacks, list):
# Invalid content
_logger.warning("Invalid attribute %s in %s",
constants.IPOPO_METHOD_FIELD_CALLBACKS, name)
continue
# Keeping it allows inheritance : by removing it, only the first
# child will see the attribute -> Don't remove it
# Store the call backs
for kind, field, if_valid in method_callbacks:
fields_cbs = callbacks.setdefault(field, {})
if kind in fields_cbs and \
not is_from_parent(cls, fields_cbs[kind][0].__name__):
_logger.warning("Redefining the callback %s in '%s'. "
"Previous callback : '%s' (%s). "
"New callback : %s", kind, name,
fields_cbs[kind][0].__name__,
fields_cbs[kind][0], function)
fields_cbs[kind] = (function, if_valid)
# Update the factory context
context.field_callbacks.clear()
context.field_callbacks.update(callbacks)
# ------------------------------------------------------------------------------
def _append_object_entry(obj, list_name, entry):
"""
Appends the given entry in the given object list.
Creates the list field if needed.
:param obj: The object that contains the list
:param list_name: The name of the list member in *obj*
:param entry: The entry to be added to the list
:raise ValueError: Invalid attribute content
"""
# Get the list
obj_list = getattr(obj, list_name, None)
if obj_list is None:
# We'll have to create it
obj_list = []
setattr(obj, list_name, obj_list)
assert isinstance(obj_list, list)
# Set up the property, if needed
if entry not in obj_list:
obj_list.append(entry)
# ------------------------------------------------------------------------------
class Holder(object):
"""
Simple class that holds a value
"""
def __init__(self, value):
"""
Sets up the holder instance
"""
self.value = value
def _ipopo_class_field_property(name, value, methods_prefix):
"""
Sets up an iPOPO field property, using Python property() capabilities
:param name: The property name
:param value: The property default value
:param methods_prefix: The common prefix of the getter and setter injected
methods
:return: A generated Python property()
"""
# The property lock
lock = threading.RLock()
# Prepare the methods names
getter_name = "{0}{1}".format(methods_prefix,
constants.IPOPO_GETTER_SUFFIX)
setter_name = "{0}{1}".format(methods_prefix,
constants.IPOPO_SETTER_SUFFIX)
local_holder = Holder(value)
def get_value(self):
"""
Retrieves the property value, from the iPOPO dictionaries
"""
getter = getattr(self, getter_name, None)
if getter is not None:
# Use the component getter
with lock:
return getter(self, name)
else:
# Use the local holder
return local_holder.value
def set_value(self, new_value):
"""
Sets the property value and trigger an update event
:param new_value: The new property value
"""
setter = getattr(self, setter_name, None)
if setter is not None:
# Use the component setter
with lock:
setter(self, name, new_value)
else:
# Change the local holder
local_holder.value = new_value
return property(get_value, set_value)
# ------------------------------------------------------------------------------
class Instantiate(object):
"""
Decorator that sets up a future instance of a component
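
    Example, with illustrative factory and instance names:

    .. python::

       @ComponentFactory('hello-factory')
       @Instantiate('hello-instance')
       class Hello(object):
           pass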
"""
def __init__(self, name, properties=None):
"""
Sets up the decorator
:param name: Instance name
:param properties: Instance properties
"""
if not is_string(name):
raise TypeError("Instance name must be a string")
if properties is not None and not isinstance(properties, dict):
raise TypeError("Instance properties must be a dictionary or None")
name = name.strip()
if not name:
raise ValueError("Invalid instance name '{0}'".format(name))
self.__name = name
self.__properties = properties
def __call__(self, factory_class):
"""
Sets up and registers the instances descriptions
:param factory_class: The factory class to instantiate
:return: The decorated factory class
:raise TypeError: The given object is not a class
"""
if not inspect.isclass(factory_class):
raise TypeError("@Instantiate can decorate only classes, "
"not '{0}'".format(type(factory_class).__name__))
# Store the instance in the factory context
context = get_factory_context(factory_class)
try:
context.add_instance(self.__name, self.__properties)
except NameError:
_logger.warning("Component '%s' defined twice, new definition "
"ignored", self.__name)
return factory_class
# ------------------------------------------------------------------------------
class ComponentFactory(object):
"""
Decorator that sets up a component factory class
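
    Example, with an illustrative factory name (if no name is given, the
    class name suffixed with "Factory" is used):

    .. python::

       @ComponentFactory('my-factory')
       class MyComponent(object):
           pass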
"""
def __init__(self, name=None, excluded=None):
"""
Sets up the decorator
:param name: Name of the component factory
:param excluded: List of IDs of handlers which configuration must not
be inherited from the parent class
"""
self.__factory_name = name
self.__excluded_inheritance = to_iterable(excluded)
def __call__(self, factory_class):
"""
Sets up and registers the factory class
:param factory_class: The class to decorate
:return: The decorated class
:raise TypeError: The given object is not a class
"""
if not inspect.isclass(factory_class):
raise TypeError("@ComponentFactory can decorate only classes, "
"not '{0}'".format(type(factory_class).__name__))
# Get the factory context
context = get_factory_context(factory_class)
# Test if a manipulation has already been applied
if not context.completed:
# Set up the factory name
if not self.__factory_name:
self.__factory_name = factory_class.__name__ + "Factory"
# Manipulate the class...
# Update the factory context
context.name = self.__factory_name
context.inherit_handlers(self.__excluded_inheritance)
context.completed = True
# Find callbacks
_ipopo_setup_callback(factory_class, context)
_ipopo_setup_field_callback(factory_class, context)
# Store the factory context in its field
setattr(factory_class, constants.IPOPO_FACTORY_CONTEXT, context)
# Inject the properties getter and setter if needed
if context.properties_fields:
setattr(factory_class, constants.IPOPO_PROPERTY_PREFIX
+ constants.IPOPO_GETTER_SUFFIX, None)
setattr(factory_class, constants.IPOPO_PROPERTY_PREFIX
+ constants.IPOPO_SETTER_SUFFIX, None)
else:
# Manipulation already applied: do nothing more
_logger.error("%s has already been manipulated with the name '%s'."
" Keeping the old name.",
get_method_description(factory_class), context.name)
return factory_class
# ------------------------------------------------------------------------------
class Property(object):
"""
@Property decorator
Defines a component property.
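
    Example, with illustrative field and property names:

    .. python::

       @ComponentFactory()
       @Property('_answer', 'some.answer', 42)
       class SomeComponent(object):
           pass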
"""
HANDLER_ID = constants.HANDLER_PROPERTY
""" ID of the handler configured by this decorator """
def __init__(self, field=None, name=None, value=None):
"""
Sets up the property
:param field: The property field in the class (can't be None nor empty)
:param name: The property name (if None, this will be the field name)
:param value: The property value
:raise TypeError: Invalid argument type
:raise ValueError: If the name or the name is None or empty
"""
# Field validity test
if not is_string(field):
raise TypeError("Field name must be a string")
field = field.strip()
if not field or ' ' in field:
raise ValueError("Empty or invalid property field name '{0}'"
.format(field))
# Name validity test
if name is not None:
if not is_string(name):
raise TypeError("Property name must be a string")
name = name.strip()
if not name:
# No name given: use the field name
name = field
self.__field = field
self.__name = name
self.__value = value
def __call__(self, clazz):
"""
Adds the property to the class iPOPO properties field.
Creates the field if needed.
:param clazz: The class to decorate
:return: The decorated class
:raise TypeError: If *clazz* is not a type
"""
if not inspect.isclass(clazz):
raise TypeError("@Property can decorate only classes, not '{0}'"
.format(type(clazz).__name__))
# Get the factory context
context = get_factory_context(clazz)
if context.completed:
# Do nothing if the class has already been manipulated
_logger.warning("@Property: Already manipulated class: %s",
get_method_description(clazz))
return clazz
# Set up the property in the class
context.properties[self.__name] = self.__value
# Associate the field to the property name
context.properties_fields[self.__field] = self.__name
# Mark the handler in the factory context
context.set_handler(self.HANDLER_ID, None)
# Inject a property in the class. The property will call an instance
# level getter / setter, injected by iPOPO after the instance creation
setattr(clazz, self.__field,
_ipopo_class_field_property(self.__name, self.__value,
constants.IPOPO_PROPERTY_PREFIX))
return clazz
# ------------------------------------------------------------------------------
def _get_specifications(specifications):
"""
Computes the list of strings corresponding to the given specifications
:param specifications: A string, a class or a list of specifications
:return: A list of strings
:raise ValueError: Invalid specification found
"""
if not specifications:
raise ValueError("No specifications given")
if inspect.isclass(specifications):
# Get the name of the class
return [specifications.__name__]
elif is_string(specifications):
# Specification name
specifications = specifications.strip()
if not specifications:
raise ValueError("Empty specification given")
return [specifications]
elif isinstance(specifications, (list, tuple)):
# List given: normalize its content
results = []
for specification in specifications:
results.extend(_get_specifications(specification))
return results
else:
raise ValueError("Unhandled specifications type : {0}"
.format(type(specifications).__name__))
class Provides(object):
"""
@Provides decorator
Defines an interface exported by a component.
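
    Example, with an illustrative specification name:

    .. python::

       @ComponentFactory()
       @Provides('hello.svc')
       class HelloService(object):
           pass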
"""
HANDLER_ID = constants.HANDLER_PROVIDES
""" ID of the handler configured by this decorator """
def __init__(self, specifications, controller=None):
"""
Sets up a provided service.
A service controller can be defined to enable or disable the service.
:param specifications: A list of provided interface(s) name(s)
(can't be empty)
:param controller: Name of the service controller class field
(optional)
:raise ValueError: If the specifications are invalid
"""
if controller is not None:
if not is_string(controller):
raise ValueError("Controller name must be a string")
controller = controller.strip()
if not controller:
# Empty controller name
_logger.warning("Empty controller name given")
controller = None
elif ' ' in controller:
raise ValueError("Controller name contains spaces")
self.__specifications = _get_specifications(specifications)
self.__controller = controller
def __call__(self, clazz):
"""
Adds the provided service information to the class context iPOPO field.
Creates the field if needed.
:param clazz: The class to decorate
:return: The decorated class
:raise TypeError: If *clazz* is not a type
"""
if not inspect.isclass(clazz):
raise TypeError("@Provides can decorate only classes, not '{0}'"
.format(type(clazz).__name__))
# Get the factory context
context = get_factory_context(clazz)
if context.completed:
# Do nothing if the class has already been manipulated
_logger.warning("@Provides: Already manipulated class: %s",
get_method_description(clazz))
return clazz
# Avoid duplicates (but keep the order)
filtered_specs = []
for spec in self.__specifications:
if spec not in filtered_specs:
filtered_specs.append(spec)
# Store the service information
config = context.set_handler_default(self.HANDLER_ID, [])
config.append((filtered_specs, self.__controller))
if self.__controller:
# Inject a property in the class. The property will call an
# instance level getter / setter, injected by iPOPO after the
# instance creation
setattr(clazz, self.__controller,
_ipopo_class_field_property(
self.__controller, True,
constants.IPOPO_CONTROLLER_PREFIX))
# Inject the future controller methods
setattr(clazz, constants.IPOPO_CONTROLLER_PREFIX
+ constants.IPOPO_GETTER_SUFFIX, None)
setattr(clazz, constants.IPOPO_CONTROLLER_PREFIX
+ constants.IPOPO_SETTER_SUFFIX, None)
return clazz
# ------------------------------------------------------------------------------
class Requires(object):
"""
@Requires decorator
Defines a required service
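
    Example, with illustrative field and specification names:

    .. python::

       @ComponentFactory()
       @Requires('_hello', 'hello.svc', optional=True)
       class Consumer(object):
           # iPOPO injects the service (or None) into self._hello
           pass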
"""
HANDLER_ID = constants.HANDLER_REQUIRES
""" ID of the handler configured by this decorator """
def __init__(self, field, specification, aggregate=False, optional=False,
spec_filter=None):
"""
Sets up the requirement
:param field: The injected field
:param specification: The injected service specification
:param aggregate: If true, injects a list
:param optional: If true, this injection is optional
:param spec_filter: An LDAP query to filter injected services upon
their properties
:raise TypeError: A parameter has an invalid type
:raise ValueError: An error occurred while parsing the filter or an
argument is incorrect
"""
if not field:
raise ValueError("Empty field name.")
if not is_string(field):
raise TypeError("The field name must be a string, not {0}"
.format(type(field).__name__))
if ' ' in field:
raise ValueError("Field name can't contain spaces.")
self.__field = field
# Be sure that there is only one required specification
specifications = _get_specifications(specification)
self.__multi_specs = len(specifications) > 1
# Construct the requirement object
self.__requirement = Requirement(specifications[0],
aggregate, optional, spec_filter)
def __call__(self, clazz):
"""
Adds the requirement to the class iPOPO field
:param clazz: The class to decorate
:return: The decorated class
:raise TypeError: If *clazz* is not a type
"""
if not inspect.isclass(clazz):
raise TypeError("@Requires can decorate only classes, not '{0}'"
.format(type(clazz).__name__))
if self.__multi_specs:
_logger.warning("Only one specification can be required: %s -> %s",
clazz.__name__, self.__field)
# Set up the property in the class
context = get_factory_context(clazz)
if context.completed:
# Do nothing if the class has already been manipulated
_logger.warning("@Requires: Already manipulated class: %s",
get_method_description(clazz))
return clazz
# Store the requirement information
config = context.set_handler_default(self.HANDLER_ID, {})
config[self.__field] = self.__requirement
# Inject the field
setattr(clazz, self.__field, None)
return clazz
# ------------------------------------------------------------------------------
class RequiresMap(object):
"""
@RequiresMap decorator
Defines a required service, injected in a dictionary
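
    Example, with illustrative names, keying the injected services by their
    'instance.name' property:

    .. python::

       @ComponentFactory()
       @RequiresMap('_services', 'some.svc', 'instance.name')
       class Consumer(object):
           # iPOPO injects a {property value -> service} dictionary
           pass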
"""
HANDLER_ID = constants.HANDLER_REQUIRES_MAP
""" ID of the handler configured by this decorator """
def __init__(self, field, specification, key, allow_none=False,
aggregate=False, optional=False, spec_filter=None):
"""
Sets up the requirement
:param field: The injected field
:param specification: The injected service specification
:param key: Name of the service property to use as a dictionary key
:param allow_none: If True, inject services with a None property value
:param aggregate: If true, injects a list
:param optional: If true, this injection is optional
:param spec_filter: An LDAP query to filter injected services upon
their properties
:raise TypeError: A parameter has an invalid type
:raise ValueError: An error occurred while parsing the filter or an
argument is incorrect
"""
# Check if field is valid
if not field:
raise ValueError("Empty field name.")
if not is_string(field):
raise TypeError("The field name must be a string, not {0}"
.format(type(field).__name__))
if ' ' in field:
raise ValueError("Field name can't contain spaces.")
self.__field = field
# Be sure that there is only one required specification
specifications = _get_specifications(specification)
self.__multi_specs = len(specifications) > 1
# Check if key is valid
if not key:
raise ValueError("No property key given")
# Store the flags
self.__key = key
self.__allow_none = allow_none
# Construct the requirement object
self.__requirement = Requirement(specifications[0],
aggregate, optional, spec_filter)
def __call__(self, clazz):
"""
Adds the requirement to the class iPOPO field
:param clazz: The class to decorate
:return: The decorated class
:raise TypeError: If *clazz* is not a type
"""
if not inspect.isclass(clazz):
raise TypeError("@RequiresMap can decorate only classes, not '{0}'"
.format(type(clazz).__name__))
if self.__multi_specs:
_logger.warning("Only one specification can be required: %s -> %s",
get_method_description(clazz), self.__field)
# Set up the property in the class
context = get_factory_context(clazz)
if context.completed:
# Do nothing if the class has already been manipulated
_logger.warning("@RequiresMap: Already manipulated class: %s",
get_method_description(clazz))
return clazz
# Store the requirement information
config = context.set_handler_default(self.HANDLER_ID, {})
config[self.__field] = (self.__requirement,
self.__key, self.__allow_none)
# Inject the field
setattr(clazz, self.__field, None)
return clazz
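# A minimal usage sketch for @RequiresMap (hypothetical names): every
# service providing "sample.worker" is injected into ``_workers`` as a
# dictionary keyed by the value of its "worker.id" property.
#
#     @ComponentFactory("requiresmap-sketch-factory")
#     @RequiresMap("_workers", "sample.worker", "worker.id", optional=True)
#     class WorkerPool(object):
#         @Validate
#         def validate(self, bundle_context):
#             for worker_id, worker in self._workers.items():
#                 print(worker_id, "->", worker)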
# ------------------------------------------------------------------------------
class BindField(object):
"""
BindField callback decorator, called when a component is bound to a
dependency, injected in the given field.
The decorated method must have the following prototype :
.. python::
def bind_method(self, field, service, service_reference):
'''
Method called when a service is bound to the component
field: Field wherein the dependency is injected
service: The injected service instance.
service_reference: The injected service ServiceReference
'''
# ...
If the service is a required one, the bind callback is called **before**
the component is validated.
The bind field callback is called **after** the global bind method.
The service reference can be stored, *provided it is deleted on
unbind*.
Exceptions raised by a bind callback are ignored.
"""
def __init__(self, field, if_valid=False):
"""
Sets up the decorator
:param field: Field associated to the binding
:param if_valid: Call the method only if the component is valid
"""
self._field = field
self._if_valid = if_valid
def __call__(self, method):
"""
Updates the "field callback" list for this method
:param method: Method to decorate
:return: Decorated method
:raise TypeError: The decorated element is not a valid function
"""
if not inspect.isroutine(method):
raise TypeError("@BindField can only be applied on functions")
# Tests the number of parameters
validate_method_arity(method, "field", "service", "service_reference")
_append_object_entry(method, constants.IPOPO_METHOD_FIELD_CALLBACKS,
(constants.IPOPO_CALLBACK_BIND_FIELD,
self._field, self._if_valid))
return method
class UpdateField(object):
"""
UpdateField callback decorator, called when a component dependency property
has been modified.
The decorated method must have the following prototype :
.. python::
def update_method(self, service, service_reference, old_properties):
'''
Method called when the properties of a bound service have been updated
service: The injected service instance.
service_reference: The injected service ServiceReference
old_properties: Previous service properties
'''
# ...
Exceptions raised by an update callback are ignored.
"""
def __init__(self, field, if_valid=False):
"""
Sets up the decorator
:param field: Field associated to the binding
:param if_valid: Call the method only if the component is valid
"""
self._field = field
self._if_valid = if_valid
def __call__(self, method):
"""
Updates the "field callback" list for this method
:param method: Method to decorate
:return: Decorated method
:raise TypeError: The decorated element is not a valid function
"""
if not inspect.isroutine(method):
raise TypeError("@UnbindField can only be applied on functions")
# Tests the number of parameters
validate_method_arity(method, "field", "service", "service_reference",
"old_properties")
_append_object_entry(method, constants.IPOPO_METHOD_FIELD_CALLBACKS,
(constants.IPOPO_CALLBACK_UPDATE_FIELD,
self._field, self._if_valid))
return method
class UnbindField(object):
"""
UnbindField callback decorator, called when a component is unbound from a
dependency, removed from the given field.
The decorated method must have the following prototype :
.. python::
def unbind_method(self, field, service, service_reference):
'''
Method called when a service is unbound from the component
field: Field wherein the dependency is injected
service: The injected service instance.
service_reference: The injected service ServiceReference
'''
# ...
If the service is a required one, the unbind callback is called **after**
the component has been invalidated.
The unbind field callback is called **before** the global unbind method.
Exceptions raised by an unbind callback are ignored.
"""
def __init__(self, field, if_valid=False):
"""
Sets up the decorator
:param field: Field associated to the binding
:param if_valid: Call the method only if the component is valid
"""
self._field = field
self._if_valid = if_valid
def __call__(self, method):
"""
Updates the "field callback" list for this method
:param method: Method to decorate
:return: Decorated method
:raise TypeError: The decorated element is not a valid function
"""
if not inspect.isroutine(method):
raise TypeError("@UnbindField can only be applied on functions")
# Tests the number of parameters
validate_method_arity(method, "field", "service", "service_reference")
_append_object_entry(method, constants.IPOPO_METHOD_FIELD_CALLBACKS,
(constants.IPOPO_CALLBACK_UNBIND_FIELD,
self._field, self._if_valid))
return method
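# A usage sketch for the three field callbacks (hypothetical component);
# each callback signature matches the arity enforced by
# validate_method_arity in the decorators above.
#
#     @ComponentFactory("field-callback-sketch-factory")
#     @Requires("_listeners", "sample.listener", aggregate=True,
#               optional=True)
#     class ListenerTracker(object):
#         @BindField("_listeners")
#         def on_bind(self, field, service, service_reference):
#             print("bound:", service_reference)
#
#         @UpdateField("_listeners")
#         def on_update(self, field, service, service_reference,
#                       old_properties):
#             print("updated, was:", old_properties)
#
#         @UnbindField("_listeners")
#         def on_unbind(self, field, service, service_reference):
#             print("unbound:", service_reference)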
# ------------------------------------------------------------------------------
def Bind(method):
"""
Bind callback decorator, called when a component is bound to a dependency.
The decorated method must have the following prototype :
.. python::
def bind_method(self, service, service_reference):
'''
Method called when a service is bound to the component
service: The injected service instance.
service_reference: The injected service ServiceReference
'''
# ...
If the service is a required one, the bind callback is called **before**
the component is validated.
The service reference can be stored, *provided it is deleted on
unbind*.
Exceptions raised by a bind callback are ignored.
:param method: The decorated method
:raise TypeError: The decorated element is not a valid function
"""
if not inspect.isroutine(method):
raise TypeError("@Bind can only be applied on functions")
# Tests the number of parameters
validate_method_arity(method, "service", "service_reference")
_append_object_entry(method, constants.IPOPO_METHOD_CALLBACKS,
constants.IPOPO_CALLBACK_BIND)
return method
def Update(method):
"""
Update callback decorator, called when a component dependency property has
been modified.
The decorated method must have the following prototype :
.. python::
def update_method(self, service, service_reference, old_properties):
'''
Method called when the properties of a bound service have been updated
service: The injected service instance.
service_reference: The injected service ServiceReference
old_properties: Previous service properties
'''
# ...
Exceptions raised by an update callback are ignored.
:param method: The decorated method
:raise TypeError: The decorated element is not a valid function
"""
if not isinstance(method, types.FunctionType):
raise TypeError("@Update can only be applied on functions")
# Tests the number of parameters
validate_method_arity(method, "service", "service_reference",
"old_properties")
_append_object_entry(method, constants.IPOPO_METHOD_CALLBACKS,
constants.IPOPO_CALLBACK_UPDATE)
return method
def Unbind(method):
"""
Unbind callback decorator, called when a component dependency is unbound.
The decorated method must have the following prototype :
.. python::
def unbind_method(self, service, service_reference):
'''
Method called when a service is unbound from the component
service: The injected service instance.
service_reference: The injected service ServiceReference
'''
# ...
If the service is a required one, the unbind callback is called **after**
the component has been invalidated.
Exceptions raised by an unbind callback are ignored.
:param method: The decorated method
:raise TypeError: The decorated element is not a valid function
"""
if not isinstance(method, types.FunctionType):
raise TypeError("@Unbind can only be applied on functions")
# Tests the number of parameters
validate_method_arity(method, "service", "service_reference")
_append_object_entry(method, constants.IPOPO_METHOD_CALLBACKS,
constants.IPOPO_CALLBACK_UNBIND)
return method
def Validate(method):
"""
Validation callback decorator, called when a component becomes valid,
i.e. when all of its required dependencies have been injected.
The decorated method must have the following prototype :
.. python::
def validation_method(self, bundle_context):
'''
Method called when the component is validated
bundle_context: The component's bundle context
'''
# ...
If the validation callback raises an exception, the component is considered
not validated.
If the component provides a service, the validation method is called before
the provided service is registered to the framework.
:param method: The decorated method
:raise TypeError: The decorated element is not a valid function
"""
if not isinstance(method, types.FunctionType):
raise TypeError("@Validate can only be applied on functions")
# Tests the number of parameters
validate_method_arity(method, "bundle_context")
_append_object_entry(method, constants.IPOPO_METHOD_CALLBACKS,
constants.IPOPO_CALLBACK_VALIDATE)
return method
def Invalidate(method):
"""
Invalidation callback decorator, called when a component becomes invalid,
i.e. when one of its required dependencies disappears.
The decorated method must have the following prototype :
.. python::
def invalidation_method(self, bundle_context):
'''
Method called when the component is invalidated
bundle_context: The component's bundle context
'''
# ...
Exceptions raised by an invalidation callback are ignored.
If the component provides a service, the invalidation method is called
after the provided service has been unregistered from the framework.
:param method: The decorated method
:raise TypeError: The decorated element is not a function
"""
if not isinstance(method, types.FunctionType):
raise TypeError("@Invalidate can only be applied on functions")
# Tests the number of parameters
validate_method_arity(method, "bundle_context")
_append_object_entry(method, constants.IPOPO_METHOD_CALLBACKS,
constants.IPOPO_CALLBACK_INVALIDATE)
return method
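# An end-to-end sketch tying the lifecycle decorators together
# (hypothetical component; factory, specification and instance names are
# made up for the example):
#
#     @ComponentFactory("lifecycle-sketch-factory")
#     @Provides("sample.hello")
#     @Requires("_logger", "sample.log", optional=True)
#     @Property("_name", "instance.name", "hello")
#     @Instantiate("hello-instance")
#     class HelloComponent(object):
#         @Bind
#         def bind(self, service, service_reference):
#             pass  # called for each newly bound dependency
#
#         @Validate
#         def validate(self, bundle_context):
#             pass  # dependencies injected, service not yet registered
#
#         @Invalidate
#         def invalidate(self, bundle_context):
#             pass  # a dependency vanished, service already unregistered
#
#         @Unbind
#         def unbind(self, service, service_reference):
#             pass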
| 33.893617 | 80 | 0.608504 |
__version_info__ = (0, 5, 7)
__version__ = ".".join(str(x) for x in __version_info__)
__docformat__ = "restructuredtext en"
from pelix.utilities import is_string, to_iterable
from pelix.ipopo.contexts import FactoryContext, Requirement
import pelix.ipopo.constants as constants
import inspect
import logging
import threading
import types
_logger = logging.getLogger("ipopo.decorators")
def is_from_parent(cls, attribute_name, value=None):
if value is None:
try:
value = getattr(cls, attribute_name)
except AttributeError:
return False
for base in cls.__bases__:
if getattr(base, attribute_name, None) is value:
return True
return False
def get_factory_context(cls):
context = getattr(cls, constants.IPOPO_FACTORY_CONTEXT, None)
if context is None:
context = FactoryContext()
elif is_from_parent(cls, constants.IPOPO_FACTORY_CONTEXT):
context = context.copy(True)
context.completed = False
else:
return context
setattr(cls, constants.IPOPO_FACTORY_CONTEXT, context)
return context
def get_method_description(method):
try:
try:
line_no = inspect.getsourcelines(method)[1]
except IOError:
line_no = -1
return "'{method}' ({file}:{line})" \
.format(method=method.__name__,
file=inspect.getfile(method),
line=line_no)
except TypeError:
return "'{0}'".format(method.__name__)
def validate_method_arity(method, *needed_args):
nb_needed_args = len(needed_args) + 1
# Test the number of parameters
argspec = inspect.getargspec(method)
method_args = argspec.args
if len(method_args) == 0:
# No argument at all
raise TypeError("Decorated method {0} must have at least the 'self' "
"parameter".format(get_method_description(method)))
if argspec.varargs is not None:
# Variable arguments
if len(method_args) != 1 or method_args[0] != "self":
# Other arguments detected
raise TypeError("When using '*args', the decorated {0} method must"
" only accept the 'self' argument"
.format(get_method_description(method)))
elif len(method_args) != nb_needed_args or method_args[0] != 'self':
# "Normal" arguments
raise TypeError("The decorated method {0} must accept exactly {1} "
"parameters : (self, {2})"
.format(get_method_description(method), nb_needed_args,
", ".join(needed_args)))
# ------------------------------------------------------------------------------
def _ipopo_setup_callback(cls, context):
assert inspect.isclass(cls)
assert isinstance(context, FactoryContext)
if context.callbacks is not None:
callbacks = context.callbacks.copy()
else:
callbacks = {}
functions = inspect.getmembers(cls, inspect.isroutine)
for _, function in functions:
if not hasattr(function, constants.IPOPO_METHOD_CALLBACKS):
# No attribute, get the next member
continue
method_callbacks = getattr(function, constants.IPOPO_METHOD_CALLBACKS)
if not isinstance(method_callbacks, list):
# Invalid content
_logger.warning("Invalid callback information %s in %s",
constants.IPOPO_METHOD_CALLBACKS,
get_method_description(function))
continue
# Keeping it allows inheritance : by removing it, only the first
# child will see the attribute -> Don't remove it
for _callback in method_callbacks:
if _callback in callbacks and \
not is_from_parent(cls, callbacks[_callback].__name__,
callbacks[_callback]):
_logger.warning("Redefining the callback %s in class '%s'.\n"
"\tPrevious callback : %s\n"
"\tNew callback : %s", _callback, cls.__name__,
get_method_description(callbacks[_callback]),
get_method_description(function))
callbacks[_callback] = function
context.callbacks.clear()
context.callbacks.update(callbacks)
def _ipopo_setup_field_callback(cls, context):
assert inspect.isclass(cls)
assert isinstance(context, FactoryContext)
if context.field_callbacks is not None:
callbacks = context.field_callbacks.copy()
else:
callbacks = {}
functions = inspect.getmembers(cls, inspect.isroutine)
for name, function in functions:
if not hasattr(function, constants.IPOPO_METHOD_FIELD_CALLBACKS):
continue
method_callbacks = getattr(function,
constants.IPOPO_METHOD_FIELD_CALLBACKS)
if not isinstance(method_callbacks, list):
_logger.warning("Invalid attribute %s in %s",
constants.IPOPO_METHOD_FIELD_CALLBACKS, name)
continue
# Store the call backs
for kind, field, if_valid in method_callbacks:
fields_cbs = callbacks.setdefault(field, {})
if kind in fields_cbs and \
not is_from_parent(cls, fields_cbs[kind][0].__name__):
_logger.warning("Redefining the callback %s in '%s'. "
"Previous callback : '%s' (%s). "
"New callback : %s", kind, name,
fields_cbs[kind][0].__name__,
fields_cbs[kind][0], function)
fields_cbs[kind] = (function, if_valid)
# Update the factory context
context.field_callbacks.clear()
context.field_callbacks.update(callbacks)
# ------------------------------------------------------------------------------
def _append_object_entry(obj, list_name, entry):
# Get the list
obj_list = getattr(obj, list_name, None)
if obj_list is None:
# We'll have to create it
obj_list = []
setattr(obj, list_name, obj_list)
assert isinstance(obj_list, list)
if entry not in obj_list:
obj_list.append(entry)
class Holder(object):
def __init__(self, value):
self.value = value
def _ipopo_class_field_property(name, value, methods_prefix):
lock = threading.RLock()
getter_name = "{0}{1}".format(methods_prefix,
constants.IPOPO_GETTER_SUFFIX)
setter_name = "{0}{1}".format(methods_prefix,
constants.IPOPO_SETTER_SUFFIX)
local_holder = Holder(value)
def get_value(self):
getter = getattr(self, getter_name, None)
if getter is not None:
with lock:
return getter(self, name)
else:
return local_holder.value
def set_value(self, new_value):
setter = getattr(self, setter_name, None)
if setter is not None:
with lock:
setter(self, name, new_value)
else:
local_holder.value = new_value
return property(get_value, set_value)
class Instantiate(object):
def __init__(self, name, properties=None):
if not is_string(name):
raise TypeError("Instance name must be a string")
if properties is not None and not isinstance(properties, dict):
raise TypeError("Instance properties must be a dictionary or None")
name = name.strip()
if not name:
raise ValueError("Invalid instance name '{0}'".format(name))
self.__name = name
self.__properties = properties
def __call__(self, factory_class):
if not inspect.isclass(factory_class):
raise TypeError("@Instantiate can decorate only classes, "
"not '{0}'".format(type(factory_class).__name__))
context = get_factory_context(factory_class)
try:
context.add_instance(self.__name, self.__properties)
except NameError:
_logger.warning("Component '%s' defined twice, new definition "
"ignored", self.__name)
return factory_class
class ComponentFactory(object):
def __init__(self, name=None, excluded=None):
self.__factory_name = name
self.__excluded_inheritance = to_iterable(excluded)
def __call__(self, factory_class):
if not inspect.isclass(factory_class):
raise TypeError("@ComponentFactory can decorate only classes, "
"not '{0}'".format(type(factory_class).__name__))
context = get_factory_context(factory_class)
if not context.completed:
if not self.__factory_name:
self.__factory_name = factory_class.__name__ + "Factory"
context.name = self.__factory_name
context.inherit_handlers(self.__excluded_inheritance)
context.completed = True
_ipopo_setup_callback(factory_class, context)
_ipopo_setup_field_callback(factory_class, context)
setattr(factory_class, constants.IPOPO_FACTORY_CONTEXT, context)
if context.properties_fields:
setattr(factory_class, constants.IPOPO_PROPERTY_PREFIX
+ constants.IPOPO_GETTER_SUFFIX, None)
setattr(factory_class, constants.IPOPO_PROPERTY_PREFIX
+ constants.IPOPO_SETTER_SUFFIX, None)
else:
_logger.error("%s has already been manipulated with the name '%s'."
" Keeping the old name.",
get_method_description(factory_class), context.name)
return factory_class
class Property(object):
HANDLER_ID = constants.HANDLER_PROPERTY
def __init__(self, field=None, name=None, value=None):
if not is_string(field):
raise TypeError("Field name must be a string")
field = field.strip()
if not field or ' ' in field:
raise ValueError("Empty or invalid property field name '{0}'"
.format(field))
if name is not None:
if not is_string(name):
raise TypeError("Property name must be a string")
name = name.strip()
if not name:
name = field
self.__field = field
self.__name = name
self.__value = value
def __call__(self, clazz):
if not inspect.isclass(clazz):
raise TypeError("@Property can decorate only classes, not '{0}'"
.format(type(clazz).__name__))
context = get_factory_context(clazz)
if context.completed:
_logger.warning("@Property: Already manipulated class: %s",
get_method_description(clazz))
return clazz
context.properties[self.__name] = self.__value
context.properties_fields[self.__field] = self.__name
context.set_handler(self.HANDLER_ID, None)
setattr(clazz, self.__field,
_ipopo_class_field_property(self.__name, self.__value,
constants.IPOPO_PROPERTY_PREFIX))
return clazz
def _get_specifications(specifications):
if not specifications:
raise ValueError("No specifications given")
if inspect.isclass(specifications):
return [specifications.__name__]
elif is_string(specifications):
specifications = specifications.strip()
if not specifications:
raise ValueError("Empty specification given")
return [specifications]
elif isinstance(specifications, (list, tuple)):
results = []
for specification in specifications:
results.extend(_get_specifications(specification))
return results
else:
raise ValueError("Unhandled specifications type : {0}"
.format(type(specifications).__name__))
class Provides(object):
HANDLER_ID = constants.HANDLER_PROVIDES
def __init__(self, specifications, controller=None):
if controller is not None:
if not is_string(controller):
raise ValueError("Controller name must be a string")
controller = controller.strip()
if not controller:
_logger.warning("Empty controller name given")
controller = None
elif ' ' in controller:
raise ValueError("Controller name contains spaces")
self.__specifications = _get_specifications(specifications)
self.__controller = controller
def __call__(self, clazz):
if not inspect.isclass(clazz):
raise TypeError("@Provides can decorate only classes, not '{0}'"
.format(type(clazz).__name__))
context = get_factory_context(clazz)
if context.completed:
_logger.warning("@Provides: Already manipulated class: %s",
get_method_description(clazz))
return clazz
filtered_specs = []
for spec in self.__specifications:
if spec not in filtered_specs:
filtered_specs.append(spec)
config = context.set_handler_default(self.HANDLER_ID, [])
config.append((filtered_specs, self.__controller))
if self.__controller:
setattr(clazz, self.__controller,
_ipopo_class_field_property(
self.__controller, True,
constants.IPOPO_CONTROLLER_PREFIX))
setattr(clazz, constants.IPOPO_CONTROLLER_PREFIX
+ constants.IPOPO_GETTER_SUFFIX, None)
setattr(clazz, constants.IPOPO_CONTROLLER_PREFIX
+ constants.IPOPO_SETTER_SUFFIX, None)
return clazz
class Requires(object):
HANDLER_ID = constants.HANDLER_REQUIRES
def __init__(self, field, specification, aggregate=False, optional=False,
spec_filter=None):
if not field:
raise ValueError("Empty field name.")
if not is_string(field):
raise TypeError("The field name must be a string, not {0}"
.format(type(field).__name__))
if ' ' in field:
raise ValueError("Field name can't contain spaces.")
self.__field = field
# Be sure that there is only one required specification
specifications = _get_specifications(specification)
self.__multi_specs = len(specifications) > 1
# Construct the requirement object
self.__requirement = Requirement(specifications[0],
aggregate, optional, spec_filter)
def __call__(self, clazz):
if not inspect.isclass(clazz):
raise TypeError("@Requires can decorate only classes, not '{0}'"
.format(type(clazz).__name__))
if self.__multi_specs:
_logger.warning("Only one specification can be required: %s -> %s",
clazz.__name__, self.__field)
# Set up the property in the class
context = get_factory_context(clazz)
if context.completed:
# Do nothing if the class has already been manipulated
_logger.warning("@Requires: Already manipulated class: %s",
get_method_description(clazz))
return clazz
# Store the requirement information
config = context.set_handler_default(self.HANDLER_ID, {})
config[self.__field] = self.__requirement
# Inject the field
setattr(clazz, self.__field, None)
return clazz
# ------------------------------------------------------------------------------
class RequiresMap(object):
HANDLER_ID = constants.HANDLER_REQUIRES_MAP
def __init__(self, field, specification, key, allow_none=False,
aggregate=False, optional=False, spec_filter=None):
# Check if field is valid
if not field:
raise ValueError("Empty field name.")
if not is_string(field):
raise TypeError("The field name must be a string, not {0}"
.format(type(field).__name__))
if ' ' in field:
raise ValueError("Field name can't contain spaces.")
self.__field = field
specifications = _get_specifications(specification)
self.__multi_specs = len(specifications) > 1
if not key:
raise ValueError("No property key given")
self.__key = key
self.__allow_none = allow_none
self.__requirement = Requirement(specifications[0],
aggregate, optional, spec_filter)
def __call__(self, clazz):
if not inspect.isclass(clazz):
raise TypeError("@RequiresMap can decorate only classes, not '{0}'"
.format(type(clazz).__name__))
if self.__multi_specs:
_logger.warning("Only one specification can be required: %s -> %s",
get_method_description(clazz), self.__field)
context = get_factory_context(clazz)
if context.completed:
_logger.warning("@RequiresMap: Already manipulated class: %s",
get_method_description(clazz))
return clazz
config = context.set_handler_default(self.HANDLER_ID, {})
config[self.__field] = (self.__requirement,
self.__key, self.__allow_none)
setattr(clazz, self.__field, None)
return clazz
class BindField(object):
def __init__(self, field, if_valid=False):
self._field = field
self._if_valid = if_valid
def __call__(self, method):
if not inspect.isroutine(method):
raise TypeError("@BindField can only be applied on functions")
validate_method_arity(method, "field", "service", "service_reference")
_append_object_entry(method, constants.IPOPO_METHOD_FIELD_CALLBACKS,
(constants.IPOPO_CALLBACK_BIND_FIELD,
self._field, self._if_valid))
return method
class UpdateField(object):
def __init__(self, field, if_valid=False):
self._field = field
self._if_valid = if_valid
def __call__(self, method):
if not inspect.isroutine(method):
raise TypeError("@UnbindField can only be applied on functions")
validate_method_arity(method, "field", "service", "service_reference",
"old_properties")
_append_object_entry(method, constants.IPOPO_METHOD_FIELD_CALLBACKS,
(constants.IPOPO_CALLBACK_UPDATE_FIELD,
self._field, self._if_valid))
return method
class UnbindField(object):
def __init__(self, field, if_valid=False):
self._field = field
self._if_valid = if_valid
def __call__(self, method):
if not inspect.isroutine(method):
raise TypeError("@UnbindField can only be applied on functions")
validate_method_arity(method, "field", "service", "service_reference")
_append_object_entry(method, constants.IPOPO_METHOD_FIELD_CALLBACKS,
(constants.IPOPO_CALLBACK_UNBIND_FIELD,
self._field, self._if_valid))
return method
def Bind(method):
if not inspect.isroutine(method):
raise TypeError("@Bind can only be applied on functions")
validate_method_arity(method, "service", "service_reference")
_append_object_entry(method, constants.IPOPO_METHOD_CALLBACKS,
constants.IPOPO_CALLBACK_BIND)
return method
def Update(method):
if not isinstance(method, types.FunctionType):
raise TypeError("@Update can only be applied on functions")
validate_method_arity(method, "service", "service_reference",
"old_properties")
_append_object_entry(method, constants.IPOPO_METHOD_CALLBACKS,
constants.IPOPO_CALLBACK_UPDATE)
return method
def Unbind(method):
if not isinstance(method, types.FunctionType):
raise TypeError("@Unbind can only be applied on functions")
validate_method_arity(method, "service", "service_reference")
_append_object_entry(method, constants.IPOPO_METHOD_CALLBACKS,
constants.IPOPO_CALLBACK_UNBIND)
return method
def Validate(method):
if not isinstance(method, types.FunctionType):
raise TypeError("@Validate can only be applied on functions")
validate_method_arity(method, "bundle_context")
_append_object_entry(method, constants.IPOPO_METHOD_CALLBACKS,
constants.IPOPO_CALLBACK_VALIDATE)
return method
def Invalidate(method):
if not isinstance(method, types.FunctionType):
raise TypeError("@Invalidate can only be applied on functions")
validate_method_arity(method, "bundle_context")
_append_object_entry(method, constants.IPOPO_METHOD_CALLBACKS,
constants.IPOPO_CALLBACK_INVALIDATE)
return method
| true | true |
1c359b29600fc3f07ff3aa05035e35f61decb956 | 2,531 | py | Python | data/ghosts/ared_scatter.py | Vlad-Shcherbina/icfpc2014-tbd | 8169102307808a80801bf5ee55688e41287990bf | ["WTFPL"] | 4 | 2015-01-14T11:35:08.000Z | 2020-01-19T19:14:40.000Z | data/ghosts/ared_scatter.py | Vlad-Shcherbina/icfpc2014-tbd | 8169102307808a80801bf5ee55688e41287990bf | ["WTFPL"] | null | null | null | data/ghosts/ared_scatter.py | Vlad-Shcherbina/icfpc2014-tbd | 8169102307808a80801bf5ee55688e41287990bf | ["WTFPL"] | null | null | null |
# python aghost.py ../data/ghosts/ared_scatter.py >../data/ghosts/ared_scatter.ghc
import game
def run():
WALL = 0
EMPTY = 1
PILL = 2
POWER_PILL = 3
FRUIT = 4
LM_START = 5
GHOST_START = 6
mem.x, mem.y = get_ghost_coords(get_index())
mem.tx, mem.ty = get_lm_coords()
mem.vitality, mem.old_dir = get_ghost_status(get_index())
mem.best_closest = 0
mem.best_dist = 255
mem.d = 4
while mem.d:
join()
mem.d -= 1
# can't turn around
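# (directions are encoded UP=0, RIGHT=1, DOWN=2, LEFT=3 in the contest
# spec, so XOR-ing a direction with 2 yields its opposite)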
if mem.d ^ 2 == mem.old_dir:
continue
mem.x1 = mem.x
mem.y1 = mem.y
if mem.d == game.UP:
mem.y1 -= 1
elif mem.d == game.RIGHT:
mem.x1 += 1
elif mem.d == game.DOWN:
mem.y1 += 1
elif mem.d == game.LEFT:
mem.x1 -= 1
join()
if get_map_square(mem.x1, mem.y1) == WALL:
continue
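# Manhattan distance helper; branches are used instead of abs() because
# the GHC ghost CPU this compiles to has no absolute-value instruction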
def dist(x1, y1, x2, y2):
if x1 > x2:
mem.result = x1 - x2
else:
mem.result = x2 - x1
if y1 > y2:
mem.result += y1 - y2
else:
mem.result += y2 - y1
join()
return mem.result
mem.dist = dist(mem.x1, mem.y1, mem.tx, mem.ty)
if mem.vitality == game.FRIGHT:
mem.dist = 255 - mem.dist
mem.self_index = get_index()
mem.other_index = 0
mem.closest = 255
while mem.other_index < 5:
if mem.other_index == mem.self_index:
mem.other_index += 1
continue
mem.other_vitality, _ = get_ghost_status(mem.other_index)
if (mem.other_vitality == game.FRIGHT) != (mem.vitality == game.FRIGHT):
mem.other_index += 1
continue
join()
mem.other_x, mem.other_y = get_ghost_coords(mem.other_index)
if mem.other_x == 0:
break
mem.other_index += 1
mem.dist_to_other = dist(mem.x1, mem.y1, mem.other_x, mem.other_y)
if mem.closest > mem.dist_to_other:
mem.closest = mem.dist_to_other
# if distance to lm is the same, prefer to stay apart from closest ghost
if (mem.dist < mem.best_dist or
mem.dist == mem.best_dist and mem.best_closest < mem.closest):
mem.best_dist = mem.dist
mem.best_closest = mem.closest
set_dir(mem.d)
join()
inline('HLT')
| 27.215054 | 84 | 0.504939 |
import game
def run():
WALL = 0
EMPTY = 1
PILL = 2
POWER_PILL = 3
FRUIT = 4
LM_START = 5
GHOST_START = 6
mem.x, mem.y = get_ghost_coords(get_index())
mem.tx, mem.ty = get_lm_coords()
mem.vitality, mem.old_dir = get_ghost_status(get_index())
mem.best_closest = 0
mem.best_dist = 255
mem.d = 4
while mem.d:
join()
mem.d -= 1
if mem.d ^ 2 == mem.old_dir:
continue
mem.x1 = mem.x
mem.y1 = mem.y
if mem.d == game.UP:
mem.y1 -= 1
elif mem.d == game.RIGHT:
mem.x1 += 1
elif mem.d == game.DOWN:
mem.y1 += 1
elif mem.d == game.LEFT:
mem.x1 -= 1
join()
if get_map_square(mem.x1, mem.y1) == WALL:
continue
def dist(x1, y1, x2, y2):
if x1 > x2:
mem.result = x1 - x2
else:
mem.result = x2 - x1
if y1 > y2:
mem.result += y1 - y2
else:
mem.result += y2 - y1
join()
return mem.result
mem.dist = dist(mem.x1, mem.y1, mem.tx, mem.ty)
if mem.vitality == game.FRIGHT:
mem.dist = 255 - mem.dist
mem.self_index = get_index()
mem.other_index = 0
mem.closest = 255
while mem.other_index < 5:
if mem.other_index == mem.self_index:
mem.other_index += 1
continue
mem.other_vitality, _ = get_ghost_status(mem.other_index)
if (mem.other_vitality == game.FRIGHT) != (mem.vitality == game.FRIGHT):
mem.other_index += 1
continue
join()
mem.other_x, mem.other_y = get_ghost_coords(mem.other_index)
if mem.other_x == 0:
break
mem.other_index += 1
mem.dist_to_other = dist(mem.x1, mem.y1, mem.other_x, mem.other_y)
if mem.closest > mem.dist_to_other:
mem.closest = mem.dist_to_other
# if distance to lm is the same, prefer to stay apart from closest ghost
if (mem.dist < mem.best_dist or
mem.dist == mem.best_dist and mem.best_closest < mem.closest):
mem.best_dist = mem.dist
mem.best_closest = mem.closest
set_dir(mem.d)
join()
inline('HLT')
| true | true |
1c359c31473983caa5968f9bc90f6cd52f26c029 | 8,025 | py | Python | doc/conf.py | glhr/gammatone | 14fdcd37c0c3054e5c85ed8c53f2cdec6e5d2b99 | ["BSD-3-Clause"] | 176 | 2015-01-08T03:56:11.000Z | 2022-03-31T09:36:40.000Z | doc/conf.py | glhr/gammatone | 14fdcd37c0c3054e5c85ed8c53f2cdec6e5d2b99 | ["BSD-3-Clause"] | 9 | 2015-01-01T06:11:29.000Z | 2020-12-28T23:32:29.000Z | doc/conf.py | glhr/gammatone | 14fdcd37c0c3054e5c85ed8c53f2cdec6e5d2b99 | ["BSD-3-Clause"] | 64 | 2015-03-31T05:16:37.000Z | 2022-02-18T10:17:49.000Z |
# -*- coding: utf-8 -*-
#
# gammatone documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 8 23:21:49 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Gammatone Filterbank Toolkit'
copyright = u'2014, Jason Heeris'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = u"%s %s" % (project, release)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**' : [
'localtoc.html',
'globaltoc.html',
'relations.html',
'searchbox.html'
],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'gammatonedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'gammatone.tex', u'Gammatone Documentation',
u'Jason Heeris', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gammatone', u'Gammatone Documentation',
[u'Jason Heeris'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'gammatone', u'Gammatone Documentation',
u'Jason Heeris', 'gammatone', 'Gammatone filterbank construction tools.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Autodoc configuration -----------------------------------------------------
# autodoc_default_flags = ['members']
| 31.594488 | 80 | 0.708287 |
import sys, os
extensions = ['sphinx.ext.autodoc']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Gammatone Filterbank Toolkit'
copyright = u'2014, Jason Heeris'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = u"%s %s" % (project, release)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**' : [
'localtoc.html',
'globaltoc.html',
'relations.html',
'searchbox.html'
],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'gammatonedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'gammatone.tex', u'Gammatone Documentation',
u'Jason Heeris', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gammatone', u'Gammatone Documentation',
[u'Jason Heeris'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'gammatone', u'Gammatone Documentation',
u'Jason Heeris', 'gammatone', 'Gammatone filterbank construction tools.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Autodoc configuration -----------------------------------------------------
# autodoc_default_flags = ['members']
| true | true |
1c359d5e9ed626c478696e05e998f64373e2c26d | 5,087 | py | Python | dataset.py | aod321/new_train | 23bf0a64ac274433cbc372898d97ae9d1aa5f6cd | ["BSD-2-Clause"] | 16 | 2020-07-11T07:53:49.000Z | 2022-03-10T11:52:31.000Z | dataset.py | aod321/new_train | 23bf0a64ac274433cbc372898d97ae9d1aa5f6cd | ["BSD-2-Clause"] | 1 | 2020-08-12T07:57:47.000Z | 2021-08-31T15:08:23.000Z | dataset.py | aod321/new_train | 23bf0a64ac274433cbc372898d97ae9d1aa5f6cd | ["BSD-2-Clause"] | 1 | 2022-02-28T10:32:43.000Z | 2022-02-28T10:32:43.000Z |
import numpy as np
import os
from torch.utils.data import Dataset
from skimage import io
import cv2
import torch
class HelenDataset(Dataset):
# HelenDataset
def __init__(self, txt_file, root_dir, parts_root_dir=None, transform=None):
"""
Args:
txt_file (string): Path to the txt file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.name_list = np.loadtxt(os.path.join(root_dir, txt_file), dtype="str", delimiter=',')
self.mode = 'train'
if txt_file == "exemplars.txt":
self.mode = 'train'
elif txt_file == "testing.txt":
self.mode = 'test'
elif txt_file == "tuning.txt":
self.mode = 'val'
self.root_dir = root_dir
self.parts_root_dir = parts_root_dir
self.transform = transform
def __len__(self):
return len(self.name_list)
def __getitem__(self, idx):
img_name = self.name_list[idx, 1].strip()
img_path = os.path.join(self.root_dir, 'images',
img_name + '.jpg')
labels_path = [os.path.join(self.root_dir, 'labels',
img_name,
img_name + "_lbl%.2d.png") % i
for i in range(11)]
image = io.imread(img_path)
image = np.array(image)
labels = [io.imread(labels_path[i]) for i in range(11)]
labels = np.array(labels)
# bg = labels[0] + labels[1] + labels[10]
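# keep only the 8 facial-component maps (labels 2-9) and fold every
# other map into a freshly computed background channel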
bg = 255 - labels[2:10].sum(0)
labels = np.uint8(np.concatenate(([bg.clip(0, 255)], labels[2:10]), axis=0))
orig_size = image.shape
if self.parts_root_dir is not None:
parts, parts_mask = self.getparts(idx)
sample = {'image': image, 'labels': labels, 'orig': image, 'orig_label': labels, 'orig_size': orig_size,
'parts_gt': parts, 'parts_mask_gt': parts_mask, 'name': img_name, 'index': idx}
else:
sample = {'image': image, 'labels': labels, 'orig': image, 'orig_label': labels, 'orig_size': orig_size,
'name': img_name, 'index': idx}
if self.transform:
sample = self.transform(sample)
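# the transform may resample the masks, so the background channel is
# rebuilt below as the complement of the summed foreground channels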
new_label = sample['labels']
new_label_fg = torch.sum(new_label[1:], dim=0, keepdim=True) # 1 x 128 x 128
new_label[0] = 1. - new_label_fg
sample['labels'] = new_label
return sample
def getparts(self, idx):
name = self.name_list[idx, 1].strip()
name_list = ['eyebrow1', 'eyebrow2', 'eye1', 'eye2', 'nose', 'mouth']
path = {x: os.path.join(self.parts_root_dir, x, self.mode)
for x in name_list}
parts_path = {x: os.path.join(path[x], name + "_image.png")
for x in name_list}
parts_mask_path = {x: os.path.join(path[x], name + "_label.png")
for x in name_list}
parts = [io.imread(parts_path[x])
for x in name_list]
parts_mask = [cv2.imread(parts_mask_path[x], cv2.IMREAD_GRAYSCALE).astype(np.float32)
for x in name_list] # (H, W)
return parts, parts_mask
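# A usage sketch (paths are placeholders). Note that a ToTensor-style
# transform is effectively mandatory: the label post-processing in
# __getitem__ calls torch.sum on sample['labels'], which assumes the
# transform has already converted the masks to torch tensors.
#
#     dataset = HelenDataset(txt_file="exemplars.txt",
#                            root_dir="/path/to/helen",
#                            parts_root_dir="/path/to/parts",
#                            transform=my_transform)  # hypothetical transform
#     sample = dataset[0]   # dict with 'image', 'labels' (bg + 8 parts),
#                           # 'parts_gt', 'parts_mask_gt', 'name', 'index'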
class PartsDataset(Dataset):
def __init__(self, txt_file, root_dir, transform=None):
"""
Args:
txt_file (string): Path to the txt file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.name_list = np.loadtxt(os.path.join(root_dir, txt_file), dtype="str", delimiter=',')
self.mode = 'train'
if txt_file == "exemplars.txt":
self.mode = 'train'
elif txt_file == "testing.txt":
self.mode = 'test'
elif txt_file == "tuning.txt":
self.mode = 'val'
self.root_dir = root_dir
self.transform = transform
def __len__(self):
return len(self.name_list)
def __getitem__(self, idx):
img_name = self.name_list[idx, 1].strip()
name_list = ['eyebrow1', 'eyebrow2', 'eye1', 'eye2', 'nose', 'mouth']
path = {x: os.path.join(self.root_dir, x, self.mode)
for x in name_list}
parts_path = {x: os.path.join(path[x], img_name + "_image.png")
for x in name_list}
parts_mask_path = {x: os.path.join(path[x], img_name + "_label.png")
for x in name_list}
parts = [io.imread(parts_path[x])
for x in name_list]
parts_mask = [cv2.imread(parts_mask_path[x], cv2.IMREAD_GRAYSCALE).astype(np.int32)
for x in name_list] # (H, W)
sample = {'image': parts, 'labels': parts_mask}
if self.transform:
sample = self.transform(sample)
return sample
| 39.130769 | 116 | 0.553175 | import numpy as np
import os
from torch.utils.data import Dataset
from skimage import io
import cv2
import torch
class HelenDataset(Dataset):
def __init__(self, txt_file, root_dir, parts_root_dir=None, transform=None):
self.name_list = np.loadtxt(os.path.join(root_dir, txt_file), dtype="str", delimiter=',')
self.mode = 'train'
if txt_file == "exemplars.txt":
self.mode = 'train'
elif txt_file == "testing.txt":
self.mode = 'test'
elif txt_file == "tuning.txt":
self.mode = 'val'
self.root_dir = root_dir
self.parts_root_dir = parts_root_dir
self.transform = transform
def __len__(self):
return len(self.name_list)
def __getitem__(self, idx):
img_name = self.name_list[idx, 1].strip()
img_path = os.path.join(self.root_dir, 'images',
img_name + '.jpg')
labels_path = [os.path.join(self.root_dir, 'labels',
img_name,
img_name + "_lbl%.2d.png") % i
for i in range(11)]
image = io.imread(img_path)
image = np.array(image)
labels = [io.imread(labels_path[i]) for i in range(11)]
labels = np.array(labels)
bg = 255 - labels[2:10].sum(0)
labels = np.uint8(np.concatenate(([bg.clip(0, 255)], labels[2:10]), axis=0))
orig_size = image.shape
if self.parts_root_dir is not None:
parts, parts_mask = self.getparts(idx)
sample = {'image': image, 'labels': labels, 'orig': image, 'orig_label': labels, 'orig_size': orig_size,
'parts_gt': parts, 'parts_mask_gt': parts_mask, 'name': img_name, 'index': idx}
else:
sample = {'image': image, 'labels': labels, 'orig': image, 'orig_label': labels, 'orig_size': orig_size,
'name': img_name, 'index': idx}
if self.transform:
sample = self.transform(sample)
new_label = sample['labels']
new_label_fg = torch.sum(new_label[1:], dim=0, keepdim=True)
new_label[0] = 1. - new_label_fg
sample['labels'] = new_label
return sample
def getparts(self, idx):
name = self.name_list[idx, 1].strip()
name_list = ['eyebrow1', 'eyebrow2', 'eye1', 'eye2', 'nose', 'mouth']
path = {x: os.path.join(self.parts_root_dir, x, self.mode)
for x in name_list}
parts_path = {x: os.path.join(path[x], name + "_image.png")
for x in name_list}
parts_mask_path = {x: os.path.join(path[x], name + "_label.png")
for x in name_list}
parts = [io.imread(parts_path[x])
for x in name_list]
parts_mask = [cv2.imread(parts_mask_path[x], cv2.IMREAD_GRAYSCALE).astype(np.float32())
for x in name_list]
return parts, parts_mask
class PartsDataset(Dataset):
def __init__(self, txt_file, root_dir, transform=None):
self.name_list = np.loadtxt(os.path.join(root_dir, txt_file), dtype="str", delimiter=',')
self.mode = 'train'
if txt_file == "exemplars.txt":
self.mode = 'train'
elif txt_file == "testing.txt":
self.mode = 'test'
elif txt_file == "tuning.txt":
self.mode = 'val'
self.root_dir = root_dir
self.transform = transform
def __len__(self):
return len(self.name_list)
def __getitem__(self, idx):
img_name = self.name_list[idx, 1].strip()
name_list = ['eyebrow1', 'eyebrow2', 'eye1', 'eye2', 'nose', 'mouth']
path = {x: os.path.join(self.root_dir, x, self.mode)
for x in name_list}
parts_path = {x: os.path.join(path[x], img_name + "_image.png")
for x in name_list}
parts_mask_path = {x: os.path.join(path[x], img_name + "_label.png")
for x in name_list}
parts = [io.imread(parts_path[x])
for x in name_list]
parts_mask = [cv2.imread(parts_mask_path[x], cv2.IMREAD_GRAYSCALE).astype(np.int32)
for x in name_list]
sample = {'image': parts, 'labels': parts_mask}
if self.transform:
sample = self.transform(sample)
return sample
| true | true |
1c359ee05d301a0225cfaa3fa30c2d9d8f2e14e9 | 14,180 | py | Python | tests/components/mazda/test_config_flow.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | ["Apache-2.0"] | 1 | 2021-07-08T20:09:55.000Z | 2021-07-08T20:09:55.000Z | tests/components/mazda/test_config_flow.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | ["Apache-2.0"] | 47 | 2021-02-21T23:43:07.000Z | 2022-03-31T06:07:10.000Z | tests/components/mazda/test_config_flow.py | OpenPeerPower/core | f673dfac9f2d0c48fa30af37b0a99df9dd6640ee | ["Apache-2.0"] | null | null | null |
"""Test the Mazda Connected Services config flow."""
from unittest.mock import patch
import aiohttp
from openpeerpower import config_entries, data_entry_flow, setup
from openpeerpower.components.mazda.config_flow import (
MazdaAccountLockedException,
MazdaAuthenticationException,
)
from openpeerpower.components.mazda.const import DOMAIN
from openpeerpower.const import CONF_EMAIL, CONF_PASSWORD, CONF_REGION
from openpeerpower.core import OpenPeerPower
from tests.common import MockConfigEntry
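# "MNAO" in the fixtures below is the Mazda North American Operations
# region code; "MME" (Europe) and "MJO" (Japan) are the other codes the
# integration understands (an assumption based on the upstream Mazda API
# client, not something these tests assert).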
FIXTURE_USER_INPUT = {
CONF_EMAIL: "example@example.com",
CONF_PASSWORD: "password",
CONF_REGION: "MNAO",
}
FIXTURE_USER_INPUT_REAUTH = {
CONF_EMAIL: "example@example.com",
CONF_PASSWORD: "password_fixed",
CONF_REGION: "MNAO",
}
FIXTURE_USER_INPUT_REAUTH_CHANGED_EMAIL = {
CONF_EMAIL: "example2@example.com",
CONF_PASSWORD: "password_fixed",
CONF_REGION: "MNAO",
}
async def test_form(opp):
"""Test the entire flow."""
result = await opp.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {}
with patch(
"openpeerpower.components.mazda.config_flow.MazdaAPI.validate_credentials",
return_value=True,
), patch(
"openpeerpower.components.mazda.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
FIXTURE_USER_INPUT,
)
await opp.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == FIXTURE_USER_INPUT[CONF_EMAIL]
assert result2["data"] == FIXTURE_USER_INPUT
assert len(mock_setup_entry.mock_calls) == 1
async def test_account_already_exists(opp):
"""Test account already exists."""
mock_config = MockConfigEntry(
domain=DOMAIN,
unique_id=FIXTURE_USER_INPUT[CONF_EMAIL],
data=FIXTURE_USER_INPUT,
)
mock_config.add_to_opp(opp)
result = await opp.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {}
with patch(
"openpeerpower.components.mazda.config_flow.MazdaAPI.validate_credentials",
return_value=True,
):
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
FIXTURE_USER_INPUT,
)
await opp.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
async def test_form_invalid_auth(opp: OpenPeerPower) -> None:
"""Test we handle invalid auth."""
result = await opp.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {}
with patch(
"openpeerpower.components.mazda.config_flow.MazdaAPI.validate_credentials",
side_effect=MazdaAuthenticationException("Failed to authenticate"),
):
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
FIXTURE_USER_INPUT,
)
await opp.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_account_locked(opp: OpenPeerPower) -> None:
"""Test we handle account locked error."""
result = await opp.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {}
with patch(
"openpeerpower.components.mazda.config_flow.MazdaAPI.validate_credentials",
side_effect=MazdaAccountLockedException("Account locked"),
):
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
FIXTURE_USER_INPUT,
)
await opp.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "account_locked"}
async def test_form_cannot_connect(opp):
"""Test we handle cannot connect error."""
result = await opp.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"openpeerpower.components.mazda.config_flow.MazdaAPI.validate_credentials",
side_effect=aiohttp.ClientError,
):
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
FIXTURE_USER_INPUT,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_error(opp):
"""Test we handle unknown error."""
result = await opp.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"openpeerpower.components.mazda.config_flow.MazdaAPI.validate_credentials",
side_effect=Exception,
):
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
FIXTURE_USER_INPUT,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_reauth_flow(opp: OpenPeerPower) -> None:
"""Test reauth works."""
await setup.async_setup_component(opp, "persistent_notification", {})
mock_config = MockConfigEntry(
domain=DOMAIN,
unique_id=FIXTURE_USER_INPUT[CONF_EMAIL],
data=FIXTURE_USER_INPUT,
)
mock_config.add_to_opp(opp)
with patch(
"openpeerpower.components.mazda.config_flow.MazdaAPI.validate_credentials",
side_effect=MazdaAuthenticationException("Failed to authenticate"),
), patch(
"openpeerpower.components.mazda.async_setup_entry",
return_value=True,
):
await opp.config_entries.async_setup(mock_config.entry_id)
await opp.async_block_till_done()
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={
"source": config_entries.SOURCE_REAUTH,
"entry_id": mock_config.entry_id,
},
data=FIXTURE_USER_INPUT,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {}
with patch(
"openpeerpower.components.mazda.config_flow.MazdaAPI.validate_credentials",
return_value=True,
), patch("openpeerpower.components.mazda.async_setup_entry", return_value=True):
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
FIXTURE_USER_INPUT_REAUTH,
)
await opp.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result2["reason"] == "reauth_successful"
async def test_reauth_authorization_error(opp: OpenPeerPower) -> None:
"""Test we show user form on authorization error."""
mock_config = MockConfigEntry(
domain=DOMAIN,
unique_id=FIXTURE_USER_INPUT[CONF_EMAIL],
data=FIXTURE_USER_INPUT,
)
mock_config.add_to_opp(opp)
with patch(
"openpeerpower.components.mazda.config_flow.MazdaAPI.validate_credentials",
side_effect=MazdaAuthenticationException("Failed to authenticate"),
), patch(
"openpeerpower.components.mazda.async_setup_entry",
return_value=True,
):
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={
"source": config_entries.SOURCE_REAUTH,
"entry_id": mock_config.entry_id,
},
data=FIXTURE_USER_INPUT,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
FIXTURE_USER_INPUT_REAUTH,
)
await opp.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_reauth_account_locked(opp: OpenPeerPower) -> None:
"""Test we show user form on account_locked error."""
mock_config = MockConfigEntry(
domain=DOMAIN,
unique_id=FIXTURE_USER_INPUT[CONF_EMAIL],
data=FIXTURE_USER_INPUT,
)
mock_config.add_to_opp(opp)
with patch(
"openpeerpower.components.mazda.config_flow.MazdaAPI.validate_credentials",
side_effect=MazdaAccountLockedException("Account locked"),
), patch(
"openpeerpower.components.mazda.async_setup_entry",
return_value=True,
):
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={
"source": config_entries.SOURCE_REAUTH,
"entry_id": mock_config.entry_id,
},
data=FIXTURE_USER_INPUT,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
FIXTURE_USER_INPUT_REAUTH,
)
await opp.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "account_locked"}
async def test_reauth_connection_error(opp: OpenPeerPower) -> None:
"""Test we show user form on connection error."""
mock_config = MockConfigEntry(
domain=DOMAIN,
unique_id=FIXTURE_USER_INPUT[CONF_EMAIL],
data=FIXTURE_USER_INPUT,
)
mock_config.add_to_opp(opp)
with patch(
"openpeerpower.components.mazda.config_flow.MazdaAPI.validate_credentials",
side_effect=aiohttp.ClientError,
), patch(
"openpeerpower.components.mazda.async_setup_entry",
return_value=True,
):
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={
"source": config_entries.SOURCE_REAUTH,
"entry_id": mock_config.entry_id,
},
data=FIXTURE_USER_INPUT,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
FIXTURE_USER_INPUT_REAUTH,
)
await opp.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_reauth_unknown_error(opp: OpenPeerPower) -> None:
"""Test we show user form on unknown error."""
mock_config = MockConfigEntry(
domain=DOMAIN,
unique_id=FIXTURE_USER_INPUT[CONF_EMAIL],
data=FIXTURE_USER_INPUT,
)
mock_config.add_to_opp(opp)
with patch(
"openpeerpower.components.mazda.config_flow.MazdaAPI.validate_credentials",
side_effect=Exception,
), patch(
"openpeerpower.components.mazda.async_setup_entry",
return_value=True,
):
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={
"source": config_entries.SOURCE_REAUTH,
"entry_id": mock_config.entry_id,
},
data=FIXTURE_USER_INPUT,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
FIXTURE_USER_INPUT_REAUTH,
)
await opp.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "unknown"}
async def test_reauth_user_has_new_email_address(opp: OpenPeerPower) -> None:
"""Test reauth with a new email address but same account."""
mock_config = MockConfigEntry(
domain=DOMAIN,
unique_id=FIXTURE_USER_INPUT[CONF_EMAIL],
data=FIXTURE_USER_INPUT,
)
mock_config.add_to_opp(opp)
with patch(
"openpeerpower.components.mazda.config_flow.MazdaAPI.validate_credentials",
return_value=True,
), patch(
"openpeerpower.components.mazda.async_setup_entry",
return_value=True,
):
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={
"source": config_entries.SOURCE_REAUTH,
"entry_id": mock_config.entry_id,
},
data=FIXTURE_USER_INPUT,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# Change the email and ensure the entry and its unique id gets
# updated in the event the user has changed their email with mazda
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
FIXTURE_USER_INPUT_REAUTH_CHANGED_EMAIL,
)
await opp.async_block_till_done()
assert (
mock_config.unique_id == FIXTURE_USER_INPUT_REAUTH_CHANGED_EMAIL[CONF_EMAIL]
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result2["reason"] == "reauth_successful"
| 33.443396 | 88 | 0.656488 |
| true | true |
1c359f540801b70b7fd2d2cf16e53d8746b7b093 | 1101 | py | Python | app/main/lib/shared_models/indian_sbert.py | meedan/alegre | ad28736f53b8905882e196e90cac66d39db341a3 | [
"MIT"
] | 11 | 2018-02-07T00:16:54.000Z | 2021-05-13T22:47:07.000Z | app/main/lib/shared_models/indian_sbert.py | meedan/alegre | ad28736f53b8905882e196e90cac66d39db341a3 | [
"MIT"
] | 47 | 2018-11-26T23:17:37.000Z | 2022-03-25T16:12:05.000Z | app/main/lib/shared_models/indian_sbert.py | meedan/alegre | ad28736f53b8905882e196e90cac66d39db341a3 | [
"MIT"
] | 9 | 2019-05-23T22:06:03.000Z | 2020-10-27T20:45:04.000Z | import requests
from sentence_transformers import SentenceTransformer
from flask import current_app as app
from app.main.lib.shared_models.shared_model import SharedModel
from app.main.lib.similarity_measures import angular_similarity
class IndianSbert(SharedModel):
def load(self):
model_name = self.options.get('model_name', 'meedan/indian-sbert')
if self.options.get("model_url"):
try:
self.model = SentenceTransformer(self.options.get("model_url"))
except requests.exceptions.HTTPError as e:
app.logger.info('Attempting to load model by model name in lieu of broken URL')
self.model = SentenceTransformer(model_name)
else:
self.model = SentenceTransformer(model_name)
def respond(self, doc):
return self.vectorize(doc)
def similarity(self, vecA, vecB):
return angular_similarity(vecA, vecB)
def vectorize(self, doc):
"""
vectorize: Embed a text snippet in the vector space.
"""
return self.model.encode([doc])[0].tolist()
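# Editor's usage sketch (not part of the original file). It assumes the
# SharedModel base class can be constructed with an options dict and that
# load() must run before respond(); both are inferences from how self.options
# is read above, not a documented API. Running this downloads the model.
if __name__ == "__main__":
    sbert = IndianSbert({"model_name": "meedan/indian-sbert"})
    sbert.load()
    vec_a = sbert.respond("A short sentence.")
    vec_b = sbert.respond("Another short sentence.")
    # angular_similarity maps the angle between the two vectors to a score.
    print(sbert.similarity(vec_a, vec_b))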
| 35.516129 | 95 | 0.673933 |
| true | true |
1c35a1d2dbe619d8bc3b78661c7b4ad91e236806 | 2500 | py | Python | mmdet/core/post_processing/bbox_nms.py | marinarierav-uab/foveabox | 1f313fd14aaf018aadb0c6b3de163eb0a3b1fbd5 | [
"Apache-2.0"
] | 1 | 2021-01-14T12:04:34.000Z | 2021-01-14T12:04:34.000Z | mmdet/core/post_processing/bbox_nms.py | marinarierav-uab/foveabox | 1f313fd14aaf018aadb0c6b3de163eb0a3b1fbd5 | [
"Apache-2.0"
] | null | null | null | mmdet/core/post_processing/bbox_nms.py | marinarierav-uab/foveabox | 1f313fd14aaf018aadb0c6b3de163eb0a3b1fbd5 | [
"Apache-2.0"
] | null | null | null | import torch
from mmdet.ops.nms import nms_wrapper
def multiclass_nms(multi_bboxes,
multi_scores,
score_thr,
nms_cfg,
max_num=-1,
score_factors=None):
"""NMS for multi-class bboxes.
Args:
multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
multi_scores (Tensor): shape (n, #class)
score_thr (float): bbox threshold, bboxes with scores lower than it
will not be considered.
        nms_cfg (dict): NMS config passed through to the op selected from
            nms_wrapper, e.g. dict(type='nms', iou_thr=0.5)
max_num (int): if there are more than max_num bboxes after NMS,
only top max_num will be kept.
score_factors (Tensor): The factors multiplied to scores before
applying NMS
Returns:
        tuple: (bboxes, labels), tensors of shape (k, 5) and (k,). Labels
are 0-based.
"""
num_classes = multi_scores.shape[1]
bboxes, labels = [], []
nms_cfg_ = nms_cfg.copy()
nms_type = nms_cfg_.pop('type', 'nms')
nms_op = getattr(nms_wrapper, nms_type)
for i in range(1, num_classes):
cls_inds = multi_scores[:, i] > score_thr
if not cls_inds.any():
continue
# get bboxes and scores of this class
if multi_bboxes.shape[1] == 4:
_bboxes = multi_bboxes[cls_inds, :]
else:
_bboxes = multi_bboxes[cls_inds, i * 4:(i + 1) * 4]
_scores = multi_scores[cls_inds, i]
if score_factors is not None:
_scores *= score_factors[cls_inds]
cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1)
# cls_dets. shape: (num_of_det, 5) --> columns: [x1 y1 x2 y2 conf]
cls_dets, inds = nms_op(cls_dets, **nms_cfg_)
cls_labels = multi_bboxes.new_full((cls_dets.shape[0], ), # cls_dets.shape[0] = num_of_det
                                           i - 1,  # fill the tensor with the 0-based class id
dtype=torch.long)
bboxes.append(cls_dets)
labels.append(cls_labels)
if bboxes:
bboxes = torch.cat(bboxes)
labels = torch.cat(labels)
if bboxes.shape[0] > max_num:
_, inds = bboxes[:, -1].sort(descending=True)
inds = inds[:max_num]
bboxes = bboxes[inds]
labels = labels[inds]
else:
bboxes = multi_bboxes.new_zeros((0, 5))
labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)
return bboxes, labels
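# Editor's usage sketch (not part of mmdet). It requires mmdet's compiled nms
# ops, and the nms_cfg keys follow the v1-style dict(type='nms', iou_thr=...)
# convention assumed here; check both against your mmdet version.
if __name__ == '__main__':
    num_boxes, num_classes = 100, 4  # column 0 is the background class
    multi_bboxes = torch.rand(num_boxes, 4) * 100  # random, possibly degenerate boxes
    multi_scores = torch.rand(num_boxes, num_classes)
    dets, det_labels = multiclass_nms(
        multi_bboxes,
        multi_scores,
        score_thr=0.05,
        nms_cfg=dict(type='nms', iou_thr=0.5),
        max_num=50,
    )
    # dets: (k, 5) rows of [x1, y1, x2, y2, score]; det_labels: (k,) 0-based ids
    print(dets.shape, det_labels.shape)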
| 35.211268 | 99 | 0.5612 |
| true | true |
1c35a247b4bb1cb6218f6cb0c90d1d9a63a5f510 | 9603 | py | Python | tests/clvm/test_puzzles.py | AedgeCoin/aedge-blockchain2 | 8a690026e73b59572d6d40da5003bab1bbd71057 | [
"Apache-2.0"
] | 6 | 2021-10-12T03:51:57.000Z | 2022-02-09T04:28:48.000Z | tests/clvm/test_puzzles.py | AedgeCoin/aedge-blockchain2 | 8a690026e73b59572d6d40da5003bab1bbd71057 | [
"Apache-2.0"
] | 4 | 2021-10-11T18:36:46.000Z | 2021-10-17T18:18:16.000Z | tests/clvm/test_puzzles.py | AedgeCoin/aedge-blockchain2 | 8a690026e73b59572d6d40da5003bab1bbd71057 | [
"Apache-2.0"
] | 4 | 2021-11-05T17:20:37.000Z | 2022-03-16T02:59:05.000Z | from typing import Iterable, List, Tuple
from unittest import TestCase
from blspy import AugSchemeMPL, BasicSchemeMPL, G1Element, G2Element
from aedge.types.blockchain_format.program import Program
from aedge.types.blockchain_format.sized_bytes import bytes32
from aedge.types.coin_spend import CoinSpend
from aedge.types.spend_bundle import SpendBundle
from aedge.util.condition_tools import ConditionOpcode
from aedge.util.hash import std_hash
from aedge.wallet.puzzles import (
p2_conditions,
p2_delegated_conditions,
p2_delegated_puzzle,
p2_delegated_puzzle_or_hidden_puzzle,
p2_m_of_n_delegate_direct,
p2_puzzle_hash,
)
from tests.util.key_tool import KeyTool
from ..core.make_block_generator import int_to_public_key
from .coin_store import CoinStore, CoinTimestamp
T1 = CoinTimestamp(1, 10000000)
T2 = CoinTimestamp(5, 10003000)
MAX_BLOCK_COST_CLVM = int(1e18)
COST_PER_BYTE = int(12000)
def secret_exponent_for_index(index: int) -> int:
blob = index.to_bytes(32, "big")
hashed_blob = BasicSchemeMPL.key_gen(std_hash(b"foo" + blob))
r = int.from_bytes(hashed_blob, "big")
return r
def public_key_for_index(index: int, key_lookup: KeyTool) -> bytes:
secret_exponent = secret_exponent_for_index(index)
key_lookup.add_secret_exponents([secret_exponent])
return bytes(int_to_public_key(secret_exponent))
def throwaway_puzzle_hash(index: int, key_lookup: KeyTool) -> bytes32:
return p2_delegated_puzzle.puzzle_for_pk(public_key_for_index(index, key_lookup)).get_tree_hash()
def do_test_spend(
puzzle_reveal: Program,
solution: Program,
payments: Iterable[Tuple[bytes32, int]],
key_lookup: KeyTool,
farm_time: CoinTimestamp = T1,
spend_time: CoinTimestamp = T2,
) -> SpendBundle:
"""
This method will farm a coin paid to the hash of `puzzle_reveal`, then try to spend it
with `solution`, and verify that the created coins correspond to `payments`.
The `key_lookup` is used to create a signed version of the `SpendBundle`, although at
this time, signatures are not verified.
"""
coin_db = CoinStore()
puzzle_hash = puzzle_reveal.get_tree_hash()
# farm it
coin = coin_db.farm_coin(puzzle_hash, farm_time)
# spend it
coin_spend = CoinSpend(coin, puzzle_reveal, solution)
spend_bundle = SpendBundle([coin_spend], G2Element())
coin_db.update_coin_store_for_spend_bundle(spend_bundle, spend_time, MAX_BLOCK_COST_CLVM, COST_PER_BYTE)
# ensure all outputs are there
for puzzle_hash, amount in payments:
for coin in coin_db.coins_for_puzzle_hash(puzzle_hash):
if coin.amount == amount:
break
else:
assert 0
# make sure we can actually sign the solution
signatures = []
for coin_spend in spend_bundle.coin_spends:
signature = key_lookup.signature_for_solution(coin_spend, bytes([2] * 32))
signatures.append(signature)
return SpendBundle(spend_bundle.coin_spends, AugSchemeMPL.aggregate(signatures))
def default_payments_and_conditions(
initial_index: int, key_lookup: KeyTool
) -> Tuple[List[Tuple[bytes32, int]], Program]:
payments = [
(throwaway_puzzle_hash(initial_index + 1, key_lookup), initial_index * 1000),
(throwaway_puzzle_hash(initial_index + 2, key_lookup), (initial_index + 1) * 1000),
]
conditions = Program.to([make_create_coin_condition(ph, amount) for ph, amount in payments])
return payments, conditions
def make_create_coin_condition(puzzle_hash, amount):
return Program.to([ConditionOpcode.CREATE_COIN, puzzle_hash, amount])
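# Editor's sketch (not part of the original module): the conditions program
# built by default_payments_and_conditions is just a CLVM list of
# (CREATE_COIN puzzle_hash amount) triples, where CREATE_COIN is opcode 51.
# The 32-byte puzzle hash below is a made-up placeholder, used only to show
# the shape of the structure.
def example_conditions_program() -> Program:
    placeholder_puzzle_hash = bytes32(b"\x11" * 32)
    return Program.to(
        [make_create_coin_condition(placeholder_puzzle_hash, 1000)]
    )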
class TestPuzzles(TestCase):
def test_p2_conditions(self):
key_lookup = KeyTool()
payments, conditions = default_payments_and_conditions(1, key_lookup)
puzzle = p2_conditions.puzzle_for_conditions(conditions)
solution = p2_conditions.solution_for_conditions(conditions)
do_test_spend(puzzle, solution, payments, key_lookup)
def test_p2_delegated_conditions(self):
key_lookup = KeyTool()
payments, conditions = default_payments_and_conditions(1, key_lookup)
pk = public_key_for_index(1, key_lookup)
puzzle = p2_delegated_conditions.puzzle_for_pk(pk)
solution = p2_delegated_conditions.solution_for_conditions(conditions)
do_test_spend(puzzle, solution, payments, key_lookup)
def test_p2_delegated_puzzle_simple(self):
key_lookup = KeyTool()
payments, conditions = default_payments_and_conditions(1, key_lookup)
pk = public_key_for_index(1, key_lookup)
puzzle = p2_delegated_puzzle.puzzle_for_pk(pk)
solution = p2_delegated_puzzle.solution_for_conditions(conditions)
do_test_spend(puzzle, solution, payments, key_lookup)
def test_p2_delegated_puzzle_graftroot(self):
key_lookup = KeyTool()
payments, conditions = default_payments_and_conditions(1, key_lookup)
delegated_puzzle = p2_delegated_conditions.puzzle_for_pk(public_key_for_index(8, key_lookup))
delegated_solution = p2_delegated_conditions.solution_for_conditions(conditions)
puzzle_program = p2_delegated_puzzle.puzzle_for_pk(public_key_for_index(1, key_lookup))
solution = p2_delegated_puzzle.solution_for_delegated_puzzle(delegated_puzzle, delegated_solution)
do_test_spend(puzzle_program, solution, payments, key_lookup)
def test_p2_puzzle_hash(self):
key_lookup = KeyTool()
payments, conditions = default_payments_and_conditions(1, key_lookup)
inner_puzzle = p2_delegated_conditions.puzzle_for_pk(public_key_for_index(4, key_lookup))
inner_solution = p2_delegated_conditions.solution_for_conditions(conditions)
inner_puzzle_hash = inner_puzzle.get_tree_hash()
puzzle_program = p2_puzzle_hash.puzzle_for_inner_puzzle_hash(inner_puzzle_hash)
assert puzzle_program == p2_puzzle_hash.puzzle_for_inner_puzzle(inner_puzzle)
solution = p2_puzzle_hash.solution_for_inner_puzzle_and_inner_solution(inner_puzzle, inner_solution)
do_test_spend(puzzle_program, solution, payments, key_lookup)
def test_p2_m_of_n_delegated_puzzle(self):
key_lookup = KeyTool()
payments, conditions = default_payments_and_conditions(1, key_lookup)
pks = [public_key_for_index(_, key_lookup) for _ in range(1, 6)]
M = 3
delegated_puzzle = p2_conditions.puzzle_for_conditions(conditions)
delegated_solution = []
puzzle_program = p2_m_of_n_delegate_direct.puzzle_for_m_of_public_key_list(M, pks)
selectors = [1, [], [], 1, 1]
solution = p2_m_of_n_delegate_direct.solution_for_delegated_puzzle(
M, selectors, delegated_puzzle, delegated_solution
)
do_test_spend(puzzle_program, solution, payments, key_lookup)
def test_p2_delegated_puzzle_or_hidden_puzzle_with_hidden_puzzle(self):
key_lookup = KeyTool()
payments, conditions = default_payments_and_conditions(1, key_lookup)
hidden_puzzle = p2_conditions.puzzle_for_conditions(conditions)
hidden_public_key = public_key_for_index(10, key_lookup)
puzzle = p2_delegated_puzzle_or_hidden_puzzle.puzzle_for_public_key_and_hidden_puzzle(
hidden_public_key, hidden_puzzle
)
solution = p2_delegated_puzzle_or_hidden_puzzle.solution_for_hidden_puzzle(
hidden_public_key, hidden_puzzle, Program.to(0)
)
do_test_spend(puzzle, solution, payments, key_lookup)
def do_test_spend_p2_delegated_puzzle_or_hidden_puzzle_with_delegated_puzzle(self, hidden_pub_key_index):
key_lookup = KeyTool()
payments, conditions = default_payments_and_conditions(1, key_lookup)
hidden_puzzle = p2_conditions.puzzle_for_conditions(conditions)
hidden_public_key = public_key_for_index(hidden_pub_key_index, key_lookup)
puzzle = p2_delegated_puzzle_or_hidden_puzzle.puzzle_for_public_key_and_hidden_puzzle(
hidden_public_key, hidden_puzzle
)
payable_payments, payable_conditions = default_payments_and_conditions(5, key_lookup)
delegated_puzzle = p2_conditions.puzzle_for_conditions(payable_conditions)
delegated_solution = []
synthetic_public_key = p2_delegated_puzzle_or_hidden_puzzle.calculate_synthetic_public_key(
hidden_public_key, hidden_puzzle.get_tree_hash()
)
solution = p2_delegated_puzzle_or_hidden_puzzle.solution_for_delegated_puzzle(
delegated_puzzle, delegated_solution
)
hidden_puzzle_hash = hidden_puzzle.get_tree_hash()
synthetic_offset = p2_delegated_puzzle_or_hidden_puzzle.calculate_synthetic_offset(
hidden_public_key, hidden_puzzle_hash
)
hidden_pub_key_point = G1Element.from_bytes(hidden_public_key)
assert synthetic_public_key == int_to_public_key(synthetic_offset) + hidden_pub_key_point
secret_exponent = key_lookup.get(hidden_public_key)
assert int_to_public_key(secret_exponent) == hidden_pub_key_point
synthetic_secret_exponent = secret_exponent + synthetic_offset
key_lookup.add_secret_exponents([synthetic_secret_exponent])
do_test_spend(puzzle, solution, payable_payments, key_lookup)
def test_p2_delegated_puzzle_or_hidden_puzzle_with_delegated_puzzle(self):
for hidden_pub_key_index in range(1, 10):
self.do_test_spend_p2_delegated_puzzle_or_hidden_puzzle_with_delegated_puzzle(hidden_pub_key_index)
| 39.356557 | 111 | 0.75289 |
| true | true |
1c35a290382adab8e3e0c12672d1944211c3a49a | 2958 | py | Python | aliyun-python-sdk-live/aliyunsdklive/request/v20161101/AddCasterComponentRequest.py | xiaozhao1/aliyun-openapi-python-sdk | 7297b69619fbe18a053ce552df9ab378b7c5719f | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-live/aliyunsdklive/request/v20161101/AddCasterComponentRequest.py | xiaozhao1/aliyun-openapi-python-sdk | 7297b69619fbe18a053ce552df9ab378b7c5719f | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-live/aliyunsdklive/request/v20161101/AddCasterComponentRequest.py | xiaozhao1/aliyun-openapi-python-sdk | 7297b69619fbe18a053ce552df9ab378b7c5719f | [
"Apache-2.0"
] | 1 | 2021-01-26T05:01:42.000Z | 2021-01-26T05:01:42.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class AddCasterComponentRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'AddCasterComponent','live')
def get_ImageLayerContent(self):
return self.get_query_params().get('ImageLayerContent')
def set_ImageLayerContent(self,ImageLayerContent):
self.add_query_param('ImageLayerContent',ImageLayerContent)
def get_CasterId(self):
return self.get_query_params().get('CasterId')
def set_CasterId(self,CasterId):
self.add_query_param('CasterId',CasterId)
def get_ComponentLayer(self):
return self.get_query_params().get('ComponentLayer')
def set_ComponentLayer(self,ComponentLayer):
self.add_query_param('ComponentLayer',ComponentLayer)
def get_ComponentName(self):
return self.get_query_params().get('ComponentName')
def set_ComponentName(self,ComponentName):
self.add_query_param('ComponentName',ComponentName)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Version(self):
return self.get_query_params().get('Version')
def set_Version(self,Version):
self.add_query_param('Version',Version)
def get_ComponentType(self):
return self.get_query_params().get('ComponentType')
def set_ComponentType(self,ComponentType):
self.add_query_param('ComponentType',ComponentType)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_LocationId(self):
return self.get_query_params().get('LocationId')
def set_LocationId(self,LocationId):
self.add_query_param('LocationId',LocationId)
def get_Effect(self):
return self.get_query_params().get('Effect')
def set_Effect(self,Effect):
self.add_query_param('Effect',Effect)
def get_TextLayerContent(self):
return self.get_query_params().get('TextLayerContent')
def set_TextLayerContent(self,TextLayerContent):
self.add_query_param('TextLayerContent',TextLayerContent) | 32.866667 | 79 | 0.768087 |
| true | true |
1c35a2c55a9edc93f75b27cf557964534bc944c3 | 16462 | py | Python | codeformatter/lib/scssbeautifier/css/beautifier.py | ephenyxshop/sublimetext-codeformatter | f4af5682b3e28d7ec0b450808bc0c0ad6b017fa9 | [
"MIT"
] | 676 | 2015-01-01T03:56:14.000Z | 2022-03-31T18:20:47.000Z | python/cssbeautifier/css/beautifier.py | Houfeng/js-beautify | 0076b9f342875be32067725d61538086e902725e | [
"MIT"
] | 331 | 2015-01-02T19:31:30.000Z | 2022-03-19T03:24:29.000Z | python/cssbeautifier/css/beautifier.py | Houfeng/js-beautify | 0076b9f342875be32067725d61538086e902725e | [
"MIT"
] | 196 | 2015-01-02T20:48:12.000Z | 2022-03-13T06:48:19.000Z | from __future__ import print_function
import sys
import re
import copy
from .options import BeautifierOptions
from jsbeautifier.core.options import mergeOpts
from jsbeautifier.core.output import Output
from jsbeautifier.__version__ import __version__
#
# The MIT License (MIT)
# Copyright (c) 2007-2017 Einar Lielmanis, Liam Newman, and contributors.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def default_options():
return BeautifierOptions()
def beautify(string, opts=default_options()):
b = Beautifier(string, opts)
return b.beautify()
def beautify_file(file_name, opts=default_options()):
if file_name == '-': # stdin
stream = sys.stdin
else:
stream = open(file_name)
content = ''.join(stream.readlines())
b = Beautifier(content, opts)
return b.beautify()
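def _usage_sketch():
    # Editor's sketch (not part of the original module): a minimal beautify()
    # call. Only indent_size is changed here; all other BeautifierOptions
    # fields are assumed to keep their defaults. Defined but never invoked so
    # importing this module stays side-effect free.
    opts = default_options()
    opts.indent_size = 2
    return beautify(".a{color:red;margin:0}", opts)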
def usage(stream=sys.stdout):
print("cssbeautifier.py@" + __version__ + """
CSS beautifier (http://jsbeautifier.org/)
""", file=stream)
if stream == sys.stderr:
return 1
else:
return 0
WHITE_RE = re.compile(r"^\s+$")
WORD_RE = re.compile(r"[\w$\-_]")
class Printer:
def __init__(self, beautifier, indent_char, indent_size, default_indent=""):
self.beautifier = beautifier
self.newlines_from_last_ws_eat = 0
self.indentSize = indent_size
self.singleIndent = (indent_size) * indent_char
self.indentLevel = 0
self.nestedLevel = 0
self.baseIndentString = default_indent
self.output = Output(self.singleIndent, self.baseIndentString)
def indent(self):
self.indentLevel += 1
def outdent(self):
if self.indentLevel > 0:
self.indentLevel -= 1
def preserveSingleSpace(self,isAfterSpace):
if isAfterSpace:
self.output.space_before_token = True
def print_string(self, output_string):
if self.output.just_added_newline():
self.output.set_indent(self.indentLevel)
self.output.add_token(output_string)
class Beautifier:
def __init__(self, source_text, opts=default_options()):
import jsbeautifier.core.acorn as acorn
self.lineBreak = acorn.lineBreak
self.allLineBreaks = acorn.allLineBreaks
if not source_text:
source_text = ''
opts = mergeOpts(opts, 'css')
# Continue to accept deprecated option
opts.space_around_combinator = opts.space_around_combinator or opts.space_around_selector_separator
self.opts = opts
self.indentSize = opts.indent_size
self.indentChar = opts.indent_char
self.pos = -1
self.ch = None
if self.opts.indent_with_tabs:
self.indentChar = "\t"
self.indentSize = 1
if self.opts.eol == 'auto':
self.opts.eol = '\n'
if self.lineBreak.search(source_text or ''):
self.opts.eol = self.lineBreak.search(source_text).group()
self.opts.eol = self.opts.eol.replace('\\r', '\r').replace('\\n', '\n')
# HACK: newline parsing inconsistent. This brute force normalizes the input newlines.
self.source_text = re.sub(self.allLineBreaks, '\n', source_text)
# https://developer.mozilla.org/en-US/docs/Web/CSS/At-rule
# also in CONDITIONAL_GROUP_RULE below
self.NESTED_AT_RULE = [ \
"@page", \
"@font-face", \
"@keyframes", \
"@media", \
"@supports", \
"@document"]
self.CONDITIONAL_GROUP_RULE = [ \
"@media", \
"@supports", \
"@document"]
m = re.search("^[\t ]*", self.source_text)
self.baseIndentString = m.group(0)
def next(self):
self.pos = self.pos + 1
if self.pos < len(self.source_text):
self.ch = self.source_text[self.pos]
else:
self.ch = ''
return self.ch
def peek(self,skipWhitespace=False):
start = self.pos
if skipWhitespace:
self.eatWhitespace()
result = ""
if self.pos + 1 < len(self.source_text):
result = self.source_text[self.pos + 1]
if skipWhitespace:
self.pos = start - 1
self.next()
return result
def eatString(self, endChars):
start = self.pos
while self.next():
if self.ch == "\\":
self.next()
elif self.ch in endChars:
break
elif self.ch == "\n":
break
return self.source_text[start:self.pos] + self.ch
def peekString(self, endChar):
start = self.pos
st = self.eatString(endChar)
self.pos = start - 1
self.next()
return st
def eatWhitespace(self, preserve_newlines_local=False):
result = 0
while WHITE_RE.search(self.peek()) is not None:
self.next()
if self.ch == "\n" and preserve_newlines_local and self.opts.preserve_newlines:
self.output.add_new_line(True)
result += 1
self.newlines_from_last_ws_eat = result
return result
def skipWhitespace(self):
result = ''
if self.ch and WHITE_RE.search(self.ch):
result = self.ch
while WHITE_RE.search(self.next()) is not None:
result += self.ch
return result
def eatComment(self):
start = self.pos
singleLine = self.peek() == "/"
self.next()
while self.next():
if not singleLine and self.ch == "*" and self.peek() == "/":
self.next()
break
elif singleLine and self.ch == "\n":
return self.source_text[start:self.pos]
return self.source_text[start:self.pos] + self.ch
def lookBack(self, string):
past = self.source_text[self.pos - len(string):self.pos]
return past.lower() == string
# Nested pseudo-class if we are insideRule
# and the next special character found opens
# a new block
def foundNestedPseudoClass(self):
i = self.pos + 1
openParen = 0
while i < len(self.source_text):
ch = self.source_text[i]
if ch == "{":
return True
elif ch == "(":
# pseudoclasses can contain ()
openParen += 1
elif ch == ")":
if openParen == 0:
return False
openParen -= 1
elif ch == ";" or ch == "}":
return False
i += 1
return False
def beautify(self):
printer = Printer(self, self.indentChar, self.indentSize, self.baseIndentString)
self.output = printer.output
output = self.output
self.pos = -1
self.ch = None
insideRule = False
insidePropertyValue = False
enteringConditionalGroup = False
top_ch = ''
last_top_ch = ''
parenLevel = 0
while True:
whitespace = self.skipWhitespace()
isAfterSpace = whitespace != ''
isAfterNewline = '\n' in whitespace
last_top_ch = top_ch
top_ch = self.ch
if not self.ch:
break
elif self.ch == '/' and self.peek() == '*':
header = printer.indentLevel == 0
if not isAfterNewline or header:
output.add_new_line()
printer.print_string(self.eatComment())
output.add_new_line()
if header:
output.add_new_line(True)
elif self.ch == '/' and self.peek() == '/':
if not isAfterNewline and last_top_ch != '{':
output.trim(True)
output.space_before_token = True
printer.print_string(self.eatComment())
output.add_new_line()
elif self.ch == '@':
printer.preserveSingleSpace(isAfterSpace)
# deal with less propery mixins @{...}
if self.peek(True) == '{':
                    printer.print_string(self.eatString('}'))
else:
printer.print_string(self.ch)
# strip trailing space, if present, for hash property check
variableOrRule = self.peekString(": ,;{}()[]/='\"")
if variableOrRule[-1] in ": ":
                    # we have a variable or pseudo-class, add it and insert one space before continuing
self.next()
variableOrRule = self.eatString(": ")
if variableOrRule[-1].isspace():
variableOrRule = variableOrRule[:-1]
printer.print_string(variableOrRule)
output.space_before_token = True
if variableOrRule[-1].isspace():
variableOrRule = variableOrRule[:-1]
# might be a nesting at-rule
if variableOrRule in self.NESTED_AT_RULE:
printer.nestedLevel += 1
if variableOrRule in self.CONDITIONAL_GROUP_RULE:
enteringConditionalGroup = True
elif self.ch == '#' and self.peek() == '{':
printer.preserveSingleSpace(isAfterSpace)
                printer.print_string(self.eatString('}'))
elif self.ch == '{':
if self.peek(True) == '}':
self.eatWhitespace()
self.next()
output.space_before_token = True
printer.print_string("{}")
if self.eatWhitespace(True) == 0:
output.add_new_line()
if self.newlines_from_last_ws_eat < 2 and self.opts.newline_between_rules and printer.indentLevel == 0:
output.add_new_line(True)
else:
printer.indent()
output.space_before_token = True
printer.print_string(self.ch)
if self.eatWhitespace(True) == 0:
output.add_new_line()
# when entering conditional groups, only rulesets are allowed
if enteringConditionalGroup:
enteringConditionalGroup = False
insideRule = printer.indentLevel > printer.nestedLevel
else:
# otherwise, declarations are also allowed
insideRule = printer.indentLevel >= printer.nestedLevel
elif self.ch == '}':
printer.outdent()
output.add_new_line()
printer.print_string(self.ch)
insideRule = False
insidePropertyValue = False
if printer.nestedLevel:
printer.nestedLevel -= 1
if self.eatWhitespace(True) == 0:
output.add_new_line()
if self.newlines_from_last_ws_eat < 2 and self.opts.newline_between_rules and printer.indentLevel == 0:
output.add_new_line(True)
elif self.ch == ":":
self.eatWhitespace()
if (insideRule or enteringConditionalGroup) and \
not (self.lookBack('&') or self.foundNestedPseudoClass()) and \
not self.lookBack('('):
# 'property: value' delimiter
# which could be in a conditional group query
printer.print_string(":")
if not insidePropertyValue:
insidePropertyValue = True
output.space_before_token = True
else:
# sass/less parent reference don't use a space
# sass nested pseudo-class don't use a space
# preserve space before pseudoclasses/pseudoelements, as it means "in any child"
if self.lookBack(' '):
output.space_before_token = True
if self.peek() == ":":
# pseudo-element
self.next()
printer.print_string("::")
else:
                        # pseudo-class
printer.print_string(":")
elif self.ch == '"' or self.ch == '\'':
printer.preserveSingleSpace(isAfterSpace)
printer.print_string(self.eatString(self.ch))
elif self.ch == ';':
insidePropertyValue = False
printer.print_string(self.ch)
if self.eatWhitespace(True) == 0:
output.add_new_line()
elif self.ch == '(':
# may be a url
if self.lookBack("url"):
printer.print_string(self.ch)
self.eatWhitespace()
if self.next():
if self.ch is not ')' and self.ch is not '"' \
and self.ch is not '\'':
printer.print_string(self.eatString(')'))
else:
self.pos = self.pos - 1
else:
parenLevel += 1
printer.preserveSingleSpace(isAfterSpace)
printer.print_string(self.ch)
self.eatWhitespace()
elif self.ch == ')':
printer.print_string(self.ch)
parenLevel -= 1
elif self.ch == ',':
printer.print_string(self.ch)
if self.eatWhitespace(True) == 0 and not insidePropertyValue and self.opts.selector_separator_newline and parenLevel < 1:
output.add_new_line()
else:
output.space_before_token = True
elif (self.ch == '>' or self.ch == '+' or self.ch == '~') and \
not insidePropertyValue and parenLevel < 1:
# handle combinator spacing
if self.opts.space_around_combinator:
output.space_before_token = True
printer.print_string(self.ch)
output.space_before_token = True
else:
printer.print_string(self.ch)
self.eatWhitespace()
# squash extra whitespace
if self.ch and WHITE_RE.search(self.ch):
self.ch = ''
elif self.ch == ']':
printer.print_string(self.ch)
elif self.ch == '[':
printer.preserveSingleSpace(isAfterSpace)
printer.print_string(self.ch)
elif self.ch == '=':
# no whitespace before or after
self.eatWhitespace()
printer.print_string('=')
if WHITE_RE.search(self.ch):
self.ch = ''
else:
printer.preserveSingleSpace(isAfterSpace)
printer.print_string(self.ch)
sweet_code = output.get_code(self.opts.end_with_newline, self.opts.eol)
return sweet_code
# --- tensorflow_federated/python/core/impl/computation/function_utils.py ---
# --- repo: alessiomora/federated, license: Apache-2.0 ---
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Python functions, defuns, and other types of callables."""
import functools
import inspect
import types
import typing
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.impl.compiler import building_blocks
from tensorflow_federated.python.core.impl.context_stack import context_base
from tensorflow_federated.python.core.impl.context_stack import context_stack_base
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import type_analysis
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.impl.types import typed_object
from tensorflow_federated.python.tensorflow_libs import function
def get_signature(
fn: Union[types.FunctionType, types.MethodType]) -> inspect.Signature:
"""Returns the `inspect.Signature` structure for the given function or method.
Args:
    fn: The Python function or TensorFlow function to analyze.
Returns:
An `inspect.Signature`.
Raises:
TypeError: if the argument is not of a supported type.
"""
if isinstance(fn, (types.FunctionType, types.MethodType)):
return inspect.signature(fn)
elif function.is_tf_function(fn):
return inspect.signature(fn.python_function)
else:
raise TypeError('Expected a Python function or a defun, found {}.'.format(
py_typecheck.type_string(type(fn))))
def is_signature_compatible_with_types(signature: inspect.Signature, *args,
**kwargs) -> bool:
"""Determines if functions matching signature accept `args` and `kwargs`.
Args:
    signature: An instance of `inspect.Signature` to verify against the
arguments.
*args: Zero or more positional arguments, all of which must be instances of
computation_types.Type or something convertible to it by
computation_types.to_type().
**kwargs: Zero or more keyword arguments, all of which must be instances of
computation_types.Type or something convertible to it by
computation_types.to_type().
Returns:
`True` or `False`, depending on the outcome of the test.
Raises:
TypeError: if the arguments are of the wrong computation_types.
"""
try:
bound_args = signature.bind(*args, **kwargs)
except TypeError:
return False
# If we have no defaults then `bind` will have raised `TypeError` if the
# signature was not compatible with *args and **kwargs.
if all(p.default is inspect.Parameter.empty
for p in signature.parameters.values()):
return True
# Otherwise we need to check the defaults against the types that were given to
# ensure they are compatible.
for p in signature.parameters.values():
if p.default is inspect.Parameter.empty or p.default is None:
# No default value or optional.
continue
arg_value = bound_args.arguments.get(p.name, p.default)
if arg_value is p.default:
continue
arg_type = computation_types.to_type(arg_value)
default_type = type_conversions.infer_type(p.default)
if not arg_type.is_assignable_from(default_type):
return False
return True
def is_argument_struct(arg) -> bool:
"""Determines if 'arg' is interpretable as an argument struct.
Args:
arg: A value or type to test.
Returns:
True iff 'arg' is either a `Struct` in which all unnamed elements
precede named ones, or a `StructType` with this property, or something
that can be converted into the latter by computation_types.to_type().
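    For example, `Struct([(None, 1), ('a', 2)])` qualifies (the unnamed
    element precedes the named one), while `Struct([('a', 2), (None, 1)])`
    does not.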
Raises:
    TypeError: If the argument is neither a `structure.Struct` nor a type
      spec.
"""
if isinstance(arg, structure.Struct):
elements = structure.to_elements(arg)
elif isinstance(arg, typed_object.TypedObject):
return is_argument_struct(arg.type_signature)
else:
arg = computation_types.to_type(arg)
if arg.is_struct():
elements = structure.to_elements(arg)
else:
return False
max_unnamed = -1
min_named = len(elements)
for idx, element in enumerate(elements):
if element[0]:
min_named = min(min_named, idx)
else:
max_unnamed = idx
return max_unnamed < min_named
def unpack_args_from_struct(
struct_with_args) -> Tuple[List[Any], Dict[str, Any]]:
"""Extracts argument types from a struct.
Args:
    struct_with_args: An instance of either a `structure.Struct` or a
      `computation_types.StructType` (or something convertible to it by
      `computation_types.to_type()`), on which is_argument_struct() is True.
Returns:
A pair (args, kwargs) containing tuple elements from 'struct_with_args'.
Raises:
TypeError: if 'struct_with_args' is of a wrong type.
"""
if not is_argument_struct(struct_with_args):
raise TypeError('Not an argument struct: {}.'.format(struct_with_args))
if isinstance(struct_with_args, structure.Struct):
elements = structure.to_elements(struct_with_args)
elif isinstance(struct_with_args, typed_object.TypedObject):
elements = []
for index, (name, _) in enumerate(
structure.to_elements(struct_with_args.type_signature)):
if name is not None:
elements.append((name, getattr(struct_with_args, name)))
else:
elements.append((None, struct_with_args[index]))
else:
struct_with_args = computation_types.to_type(struct_with_args)
struct_with_args.check_struct()
elements = structure.to_elements(struct_with_args)
args = []
kwargs = {}
for name, value in elements:
if name is not None:
kwargs[name] = value
else:
args.append(value)
return args, kwargs
def pack_args_into_struct(
args: Sequence[Any],
kwargs: Mapping[str, Any],
type_spec=None,
context: Optional[context_base.Context] = None) -> structure.Struct:
"""Packs positional and keyword arguments into a `Struct`.
If 'type_spec' is not None, it must be a `StructType` or something that's
convertible to it by computation_types.to_type(). The assignment of arguments
to fields of the struct follows the same rule as during function calls. If
'type_spec' is None, the positional arguments precede any of the keyword
arguments, and the ordering of the keyword arguments matches the ordering in
which they appear in kwargs. If the latter is an OrderedDict, the ordering
will be preserved. On the other hand, if the latter is an ordinary unordered
dict, the ordering is arbitrary.
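  Example: with no `type_spec`, `pack_args_into_struct([1], {'y': 2})` returns
  `Struct([(None, 1), ('y', 2)])`.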
Args:
args: Positional arguments.
kwargs: Keyword arguments.
type_spec: The optional type specification (either an instance of
`computation_types.StructType` or something convertible to it), or None if
there's no type. Used to drive the arrangements of args into fields of the
constructed struct, as noted in the description.
context: The optional context (an instance of `context_base.Context`) in
which the arguments are being packed. Required if and only if the
`type_spec` is not `None`.
Returns:
An struct containing all the arguments.
Raises:
TypeError: if the arguments are of the wrong computation_types.
"""
type_spec = computation_types.to_type(type_spec)
if not type_spec:
return structure.Struct([(None, arg) for arg in args] +
list(kwargs.items()))
else:
py_typecheck.check_type(type_spec, computation_types.StructType)
py_typecheck.check_type(context, context_base.Context)
context = typing.cast(context_base.Context, context)
if not is_argument_struct(type_spec): # pylint: disable=attribute-error
raise TypeError(
'Parameter type {} does not have a structure of an argument struct, '
'and cannot be populated from multiple positional and keyword '
'arguments'.format(type_spec))
else:
result_elements = []
positions_used = set()
keywords_used = set()
for index, (name,
elem_type) in enumerate(structure.to_elements(type_spec)):
if index < len(args):
# This argument is present in `args`.
if name is not None and name in kwargs:
raise TypeError('Argument `{}` specified twice.'.format(name))
else:
arg_value = args[index]
result_elements.append((name, context.ingest(arg_value, elem_type)))
positions_used.add(index)
elif name is not None and name in kwargs:
# This argument is present in `kwargs`.
arg_value = kwargs[name]
result_elements.append((name, context.ingest(arg_value, elem_type)))
keywords_used.add(name)
elif name:
raise TypeError(f'Missing argument `{name}` of type {elem_type}.')
else:
raise TypeError(
f'Missing argument of type {elem_type} at position {index}.')
positions_missing = set(range(len(args))).difference(positions_used)
if positions_missing:
raise TypeError(
f'Positional arguments at {positions_missing} not used.')
keywords_missing = set(kwargs.keys()).difference(keywords_used)
if keywords_missing:
raise TypeError(f'Keyword arguments at {keywords_missing} not used.')
return structure.Struct(result_elements)
def pack_args(parameter_type, args: Sequence[Any], kwargs: Mapping[str, Any],
context: context_base.Context):
"""Pack arguments into a single one that matches the given parameter type.
The arguments may or may not be packed into a `Struct`, depending on the type
of the parameter, and how many arguments are present.
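  For example, with a struct parameter type `<x=int32, y=int32>`, both
  `fn(1, 2)` and `fn(x=1, y=2)` pack into the same two-element struct, while a
  single struct-typed positional argument is ingested as the whole parameter.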
Args:
parameter_type: The type of the single parameter expected by a computation,
an instance of computation_types.Type or something convertible to it, or
None if the computation is not expecting a parameter.
args: Positional arguments of a call.
kwargs: Keyword arguments of a call.
context: The context (an instance of `context_base.Context`) in which the
arguments are being packed.
Returns:
A single value object of type that matches 'parameter_type' that contains
all the arguments, or None if the 'parameter_type' is None.
Raises:
TypeError: if the args/kwargs do not match the given parameter type.
"""
py_typecheck.check_type(context, context_base.Context)
if parameter_type is None:
# If there's no parameter type, there should be no args of any kind.
if args or kwargs:
raise TypeError('Was not expecting any arguments.')
else:
return None
else:
parameter_type = computation_types.to_type(parameter_type)
if not args and not kwargs:
raise TypeError(
'Declared a parameter of type {}, but got no arguments.'.format(
parameter_type))
else:
single_positional_arg = (len(args) == 1) and not kwargs
if not parameter_type.is_struct():
# If not a `StructType`, a single positional argument is the only
# supported call style.
if not single_positional_arg:
raise TypeError(
'Parameter type {} is compatible only with a single positional '
'argument, but found {} positional and {} keyword args.'.format(
parameter_type, len(args), len(kwargs)))
else:
arg = args[0]
elif single_positional_arg:
arg = args[0]
elif not is_argument_struct(parameter_type):
raise TypeError(
'Parameter type {} does not have a structure of an argument '
'struct, and cannot be populated from multiple positional and '
'keyword arguments; please construct a struct before the '
'call.'.format(parameter_type))
else:
arg = pack_args_into_struct(args, kwargs, parameter_type, context)
return context.ingest(arg, parameter_type)
def _infer_unpack_needed(fn: types.FunctionType,
parameter_type: computation_types.Type,
should_unpack: Optional[bool] = None) -> bool:
"""Returns whether parameter_type must be unpacked when calling fn.
Args:
fn: The function to be invoked.
parameter_type: The TFF type of the parameter bundle to be accepted by the
returned callable.
should_unpack: Default or expected return value; None implies the inferred
value should be returned. If either unpacking or packing could work, and
should_unpack is not None, then should_unpack is returned.
Returns:
A `bool` indicating whether or not to unpack.
"""
# TODO(b/113112885): Revisit whether the 3-way 'unpack' knob is sufficient
# for our needs, or more options are needed.
if should_unpack not in [True, False, None]:
raise TypeError('The unpack argument has an unexpected value {!r}.'.format(
should_unpack))
py_typecheck.check_type(parameter_type, computation_types.Type)
unpack = should_unpack # Default return value.
signature = get_signature(fn)
unpack_required = not is_signature_compatible_with_types(
signature, parameter_type)
  # Boolean identity comparison because unpack can have a non-boolean value.
if unpack_required and should_unpack is False: # pylint: disable=g-bool-id-comparison
raise TypeError(
'The supplied function \'{}\' with signature {} cannot accept a '
'value of type \'{}\' as a single argument.'.format(
fn.__name__, signature, parameter_type))
if is_argument_struct(parameter_type):
arg_types, kwarg_types = unpack_args_from_struct(parameter_type)
unpack_possible = is_signature_compatible_with_types(
signature, *arg_types, **kwarg_types)
else:
unpack_possible = False
  # Boolean identity comparison because unpack can have a non-boolean value.
if not unpack_possible and should_unpack is True: # pylint: disable=g-bool-id-comparison
raise TypeError(
'The supplied function with signature {} cannot accept a value of type '
'{} as multiple positional and/or keyword arguments. That is, the '
'argument cannot be unpacked, but unpacking was requested.'.format(
signature, parameter_type))
if unpack_required and not unpack_possible:
raise TypeError(
'The supplied function "{}" with signature {} cannot accept a value of '
'type {} as either a single argument or multiple positional and/or '
'keyword arguments.'.format(fn.__name__, signature, parameter_type))
if not unpack_required and unpack_possible and should_unpack is None:
# The supplied function could accept a value as either a single argument,
# or as multiple positional and/or keyword arguments, and the caller did
# not specify any preference, leaving ambiguity in how to handle the
# mapping. We resolve the ambiguity by defaulting to capturing the entire
# argument, as that's the behavior suggested as expected by the users.
unpack = False
if unpack is None:
# Any ambiguity at this point has been resolved, so the following
# condition holds and need only be verified in tests.
assert unpack_required == unpack_possible, (unpack_required,
unpack_possible)
unpack = unpack_possible
return unpack
_Arguments = Tuple[List[Any], Dict[str, Any]]
def _unpack_arg(arg_types, kwarg_types, arg) -> _Arguments:
"""Unpacks 'arg' into an argument list based on types."""
args = []
for idx, expected_type in enumerate(arg_types):
element_value = arg[idx]
actual_type = type_conversions.infer_type(element_value)
if not expected_type.is_assignable_from(actual_type):
raise TypeError(
'Expected element at position {} to be of type {}, found {}.'.format(
idx, expected_type, actual_type))
if isinstance(element_value, structure.Struct):
element_value = type_conversions.type_to_py_container(
element_value, expected_type)
args.append(element_value)
kwargs = {}
for name, expected_type in kwarg_types.items():
element_value = getattr(arg, name)
actual_type = type_conversions.infer_type(element_value)
if not expected_type.is_assignable_from(actual_type):
raise TypeError(
'Expected element named {} to be of type {}, found {}.'.format(
name, expected_type, actual_type))
if type_analysis.is_struct_with_py_container(element_value, expected_type):
element_value = type_conversions.type_to_py_container(
element_value, expected_type)
kwargs[name] = element_value
return args, kwargs
def _ensure_arg_type(parameter_type, arg) -> _Arguments:
"""Ensures that `arg` matches `parameter_type` before returning it."""
arg_type = type_conversions.infer_type(arg)
if not parameter_type.is_assignable_from(arg_type):
raise TypeError('Expected an argument of type {}, found {}.'.format(
parameter_type, arg_type))
if type_analysis.is_struct_with_py_container(arg, parameter_type):
arg = type_conversions.type_to_py_container(arg, parameter_type)
return [arg], {}
def create_argument_unpacking_fn(
fn: types.FunctionType,
parameter_type: Optional[computation_types.Type],
unpack: Optional[bool] = None) -> Callable[[Any], _Arguments]:
"""Returns a function which converts TFF values into arguments to `fn`.
This function helps to simplify dealing with functions and defuns that might
have diverse and complex signatures, but that represent computations and as
such, conceptually only accept a single parameter.
The argument provided to the returned callable is expected to contain all
arguments required by `fn` and matching the supplied parameter type signature.
If `fn` takes multiple parameters, those should be represented by packing
the arguments to the returned callable into a `Struct`.
The callable unpacks that structure and returns its elements as an `Arguments`
structure containing both positional and keyword arguments.
Example usage:
@tf.function
def my_fn(x, y, z=10, name='bar', *p, **q):
return x + y
type_spec = (tf.int32, tf.int32)
argument_converter = create_argument_unpacking_fn(my_fn, type_spec)
arg = Struct([('x', 10), ('y', 20)])
args, kwargs = argument_converter(arg)
... = my_fn(*args, **kwargs)
Args:
fn: The function to unpack arguments for.
parameter_type: The TFF type of the parameter bundle to be accepted by the
returned callable.
    unpack: Whether to break the parameter down into constituent parts
      (`True`), leave the parameter as a single unit (`False`), or allow it to
      be inferred from the signature of `fn` (`None`). In the latter case
      (`None`), if any ambiguity arises, an exception is thrown.
Returns:
A callable accepting one argument to unpack.
Raises:
TypeError: if arguments to this call are of the wrong types, or if the
supplied 'parameter_type' is not compatible with `fn`.
"""
if parameter_type is None:
def _none_arg(arg):
if arg is not None:
raise RuntimeError(
'Unexpected non-`None` argument to no-arg function with '
f'parameter type `None`: {arg}')
return [], {}
return _none_arg
py_typecheck.check_type(parameter_type, computation_types.Type)
if _infer_unpack_needed(fn, parameter_type, unpack):
arg_types, kwarg_types = unpack_args_from_struct(parameter_type)
return functools.partial(_unpack_arg, arg_types, kwarg_types)
else:
return functools.partial(_ensure_arg_type, parameter_type)
class ConcreteFunction(computation_base.Computation):
"""A base class for concretely-typed (non-polymorphic) functions."""
def __init__(self, type_signature, context_stack):
"""Constructs this concrete function with the give type signature.
Args:
type_signature: An instance of computation_types.FunctionType.
context_stack: The context stack to use.
Raises:
TypeError: if the arguments are of the wrong computation_types.
"""
py_typecheck.check_type(type_signature, computation_types.FunctionType)
py_typecheck.check_type(context_stack, context_stack_base.ContextStack)
self._type_signature = type_signature
self._context_stack = context_stack
@property
def type_signature(self):
return self._type_signature
def to_building_block(self) -> building_blocks.ComputationBuildingBlock:
"""Constructs a new `building_blocks.ComputationBuildingBlock`."""
raise NotImplementedError
def __call__(self, *args, **kwargs):
context = self._context_stack.current
arg = pack_args(self._type_signature.parameter, args, kwargs, context)
return context.invoke(self, arg)
def __hash__(self):
raise NotImplementedError(
'Hash must be implemented by the subclasses of `ConcreteFunction`.')
class PolymorphicFunction(object):
"""A generic polymorphic function that accepts arguments of diverse types."""
def __init__(self, concrete_function_factory: Callable[
[computation_types.Type, Optional[bool]], ConcreteFunction]):
"""Crates a polymorphic function with a given function factory.
Args:
concrete_function_factory: A callable that accepts a (non-None) TFF type
as an argument, as well as an optional boolean `unpack` argument which
should be treated as documented in `create_argument_unpacking_fn`
above. The callable must return a ConcreteFunction instance that's been
created to accept a single positional argument of this TFF type (to be
reused for future calls with parameters of a matching type).
"""
self._concrete_function_factory = concrete_function_factory
self._concrete_function_cache = {}
def fn_for_argument_type(self,
arg_type: computation_types.Type,
unpack: Optional[bool] = None) -> ConcreteFunction:
"""Concretizes this function with the provided `arg_type`.
The first time this function is called with a particular type on a
given `PolymorphicFunction` (or this `PolymorphicFunction` is called
with an argument of the given type), the underlying function will be
traced using the provided argument type as input. Later calls will
return the cached computed concrete function.
Args:
arg_type: The argument type to use when concretizing this function.
unpack: Whether to force unpacking the arguments (`True`), never unpack
the arguments (`False`), or infer whether or not to unpack the arguments
(`None`).
Returns:
The `ConcreteFunction` that results from tracing this
      `PolymorphicFunction` with `arg_type`.
"""
key = repr(arg_type) + str(unpack)
concrete_fn = self._concrete_function_cache.get(key)
if not concrete_fn:
      concrete_fn = self._concrete_function_factory(arg_type, unpack)
py_typecheck.check_type(concrete_fn, ConcreteFunction,
'concrete function')
if concrete_fn.type_signature.parameter != arg_type:
raise TypeError(
'Expected a concrete function that takes parameter {}, got one '
'that takes {}.'.format(arg_type,
concrete_fn.type_signature.parameter))
self._concrete_function_cache[key] = concrete_fn
return concrete_fn
def __call__(self, *args, **kwargs):
"""Invokes this polymorphic function with a given set of arguments.
Args:
*args: Positional args.
**kwargs: Keyword args.
Returns:
The result of calling a concrete function, instantiated on demand based
on the argument types (and cached for future calls).
Raises:
TypeError: if the concrete functions created by the factory are of the
wrong computation_types.
"""
# TODO(b/113112885): We may need to normalize individuals args, such that
# the type is more predictable and uniform (e.g., if someone supplies an
# unordered dictionary), possibly by converting dict-like and tuple-like
# containers into `Struct`s.
packed_arg = pack_args_into_struct(args, kwargs)
arg_type = type_conversions.infer_type(packed_arg)
# We know the argument types have been packed, so force unpacking.
concrete_fn = self.fn_for_argument_type(arg_type, unpack=True)
return concrete_fn(packed_arg)
# --- train.py ---
# --- repo: sam1373/glow-tts, license: MIT ---
import os
import json
import argparse
import math
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.multiprocessing as mp
import torch.distributed as dist
from apex.parallel import DistributedDataParallel as DDP
from apex import amp
from data_utils import TextMelLoader, TextMelCollate
import models
import commons
import utils
from text.symbols import symbols
global_step = 0
def main():
"""Assume Single Node Multi GPUs Training Only"""
assert torch.cuda.is_available(), "CPU training is not allowed."
n_gpus = torch.cuda.device_count()
os.environ['MASTER_ADDR'] = 'localhost'
  os.environ['MASTER_PORT'] = '8000'  # any free TCP port (must be <= 65535)
hps = utils.get_hparams()
mp.spawn(train_and_eval, nprocs=n_gpus, args=(n_gpus, hps,))
def train_and_eval(rank, n_gpus, hps):
global global_step
if rank == 0:
logger = utils.get_logger(hps.model_dir)
logger.info(hps)
utils.check_git_hash(hps.model_dir)
writer = SummaryWriter(log_dir=hps.model_dir)
writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
torch.manual_seed(hps.train.seed)
torch.cuda.set_device(rank)
train_dataset = TextMelLoader(hps.data.training_files, hps.data)
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset,
num_replicas=n_gpus,
rank=rank,
shuffle=True)
collate_fn = TextMelCollate(1)
train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False,
batch_size=hps.train.batch_size, pin_memory=True,
drop_last=True, collate_fn=collate_fn, sampler=train_sampler)
if rank == 0:
val_dataset = TextMelLoader(hps.data.validation_files, hps.data)
val_loader = DataLoader(val_dataset, num_workers=8, shuffle=False,
batch_size=hps.train.batch_size, pin_memory=True,
drop_last=True, collate_fn=collate_fn)
#print(len(train_dataset))
#print(len(train_loader))
print(symbols)
print(len(symbols))
generator = models.FlowGenerator(
n_vocab=len(symbols),
out_channels=hps.data.n_mel_channels,
**hps.model).cuda(rank)
optimizer_g = commons.Adam(generator.parameters(), scheduler=hps.train.scheduler, dim_model=hps.model.hidden_channels, warmup_steps=hps.train.warmup_steps, lr=hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps)
if hps.train.fp16_run:
generator, optimizer_g._optim = amp.initialize(generator, optimizer_g._optim, opt_level="O1")
generator = DDP(generator)
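  # Resume from the latest checkpoint when one exists; otherwise fall back to
  # data-dependent initialization (DDI) weights, or start from scratch.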
try:
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), generator, optimizer_g)
epoch_str += 1
optimizer_g.step_num = (epoch_str - 1) * len(train_loader)
optimizer_g._update_learning_rate()
global_step = (epoch_str - 1) * len(train_loader)
  except Exception:
if hps.train.ddi and os.path.isfile(os.path.join(hps.model_dir, "ddi_G.pth")):
_ = utils.load_checkpoint(os.path.join(hps.model_dir, "ddi_G.pth"), generator, optimizer_g)
epoch_str = 1
global_step = 0
for epoch in range(epoch_str, hps.train.epochs + 1):
if rank==0:
train(rank, epoch, hps, generator, optimizer_g, train_loader, logger, writer)
evaluate(rank, epoch, hps, generator, optimizer_g, val_loader, logger, writer_eval)
if epoch % hps.train.save_every == 0:
utils.save_checkpoint(generator, optimizer_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(epoch)))
else:
train(rank, epoch, hps, generator, optimizer_g, train_loader, None, None)
def train(rank, epoch, hps, generator, optimizer_g, train_loader, logger, writer):
train_loader.sampler.set_epoch(epoch)
global global_step
generator.train()
for batch_idx, (x, x_lengths, y, y_lengths) in enumerate(train_loader):
x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
# Train Generator
optimizer_g.zero_grad()
(z, y_m, y_logs, logdet), attn, logw, logw_, x_m, x_logs = generator(x, x_lengths, y, y_lengths, gen=False)
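    # l_mle: negative log-likelihood of the mel frames under the Gaussian
    # prior predicted from the text, normalized per mel dimension, including
    # the flow's log-determinant term (as in evaluate() below).
    # l_length: MSE between predicted and alignment-derived log-durations.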
    l_mle = 0.5 * math.log(2 * math.pi) + (torch.sum(y_logs) + 0.5 * torch.sum(torch.exp(-2 * y_logs) * (z - y_m)**2) - torch.sum(logdet)) / (torch.sum(y_lengths // hps.model.n_sqz) * hps.model.n_sqz * hps.data.n_mel_channels)
l_length = torch.sum((logw - logw_)**2) / torch.sum(x_lengths)
loss_gs = [l_mle, l_length]
loss_g = sum(loss_gs)
if hps.train.fp16_run:
with amp.scale_loss(loss_g, optimizer_g._optim) as scaled_loss:
scaled_loss.backward()
grad_norm = commons.clip_grad_value_(amp.master_params(optimizer_g._optim), 5)
else:
loss_g.backward()
grad_norm = commons.clip_grad_value_(generator.parameters(), 5)
optimizer_g.step()
if rank==0:
if batch_idx % hps.train.log_interval == 0:
(y_gen, *_), *_ = generator.module(x[:1], x_lengths[:1], gen=True)
logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(x), len(train_loader.dataset),
100. * batch_idx / len(train_loader),
loss_g.item()))
logger.info([x.item() for x in loss_gs] + [global_step, optimizer_g.get_lr()])
scalar_dict = {"loss/g/total": loss_g, "learning_rate": optimizer_g.get_lr(), "grad_norm": grad_norm}
scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(loss_gs)})
utils.summarize(
writer=writer,
global_step=global_step,
images={"y_org": utils.plot_spectrogram_to_numpy(y[0].data.cpu().numpy()),
"y_gen": utils.plot_spectrogram_to_numpy(y_gen[0].data.cpu().numpy()),
"attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy()),
},
scalars=scalar_dict)
global_step += 1
if rank == 0:
logger.info('====> Epoch: {}'.format(epoch))
def evaluate(rank, epoch, hps, generator, optimizer_g, val_loader, logger, writer_eval):
if rank == 0:
global global_step
generator.eval()
losses_tot = []
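    # Accumulate per-batch losses; they are averaged over the loader below.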
with torch.no_grad():
for batch_idx, (x, x_lengths, y, y_lengths) in enumerate(val_loader):
x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
(z, y_m, y_logs, logdet), attn, logw, logw_, x_m, x_logs = generator(x, x_lengths, y, y_lengths, gen=False)
l_mle = 0.5 * math.log(2 * math.pi) + (torch.sum(y_logs) + 0.5 * torch.sum(torch.exp(-2 * y_logs) * (z - y_m)**2) - torch.sum(logdet)) / (torch.sum(y_lengths // hps.model.n_sqz) * hps.model.n_sqz * hps.data.n_mel_channels)
l_length = torch.sum((logw - logw_)**2) / torch.sum(x_lengths)
loss_gs = [l_mle, l_length]
loss_g = sum(loss_gs)
if batch_idx == 0:
losses_tot = loss_gs
else:
losses_tot = [x + y for (x, y) in zip(losses_tot, loss_gs)]
if batch_idx % hps.train.log_interval == 0:
logger.info('Eval Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(x), len(val_loader.dataset),
100. * batch_idx / len(val_loader),
loss_g.item()))
logger.info([x.item() for x in loss_gs])
losses_tot = [x/len(val_loader) for x in losses_tot]
loss_tot = sum(losses_tot)
scalar_dict = {"loss/g/total": loss_tot}
scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_tot)})
utils.summarize(
writer=writer_eval,
global_step=global_step,
scalars=scalar_dict)
logger.info('====> Epoch: {}'.format(epoch))
if __name__ == "__main__":
main()
# tests/test_forced_phot_inject.py (askap-vast/forced_phot, MIT license)
import time
import warnings
from astropy import units as u, constants as c
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.io.fits.verify import VerifyWarning
from astropy.table import Table
import astropy.wcs
from astropy.utils.exceptions import AstropyWarning
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import forced_phot
# suppress FITS verification warnings
warnings.simplefilter("ignore", category=AstropyWarning)
image = "image.i.SB9668.cont.VAST_0341-50A.linmos.taylor.0.restored.fits"
background = "meanMap.image.i.SB9668.cont.VAST_0341-50A.linmos.taylor.0.restored.fits"
noise = "noiseMap.image.i.SB9668.cont.VAST_0341-50A.linmos.taylor.0.restored.fits"
FP = forced_phot.ForcedPhot(image, background, noise)
n = 500
t = time.time()
x = (np.random.random_sample((n,)) - 0.5) * 8000 + 7046.5
y = (np.random.random_sample((n,)) - 0.5) * 8000 + 7046.5
P_inj = astropy.wcs.utils.pixel_to_skycoord(x, y, FP.w)
flux_inj = np.random.random_sample((n,)) * 100e-3 + 0.5e-3
# inject with a wider kernel than recovery
FP.inject(flux_inj, P_inj, nbeam=15)
flux_recover, flux_err_recover, *_ = FP.measure(P_inj, cluster_threshold=None)
print(time.time() - t)
plt.clf()
plt.errorbar(flux_inj, (flux_recover - flux_inj), yerr=flux_err_recover, fmt="o")
plt.plot([0, flux_inj.max()], [0, 0], "k--")
plt.xlabel("Injected flux density (Jy)")
plt.ylabel("Recovered - injected (Jy)")
plt.title(
r"$\chi^2=%.1f$ (%d DOF)"
% ((((flux_recover - flux_inj) / flux_err_recover) ** 2).sum(), n)
)
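# Quick numeric summary of the injection/recovery test (a sketch: the
# reduced chi-square threshold of 1.5 is an illustrative choice, not part
# of the original script)
chi2_dof = (((flux_recover - flux_inj) / flux_err_recover) ** 2).sum() / n
print("reduced chi^2 = %.2f -> %s" % (chi2_dof, "OK" if chi2_dof < 1.5 else "inspect beam/noise model"))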
1c35a4b6d41045fe785dbc29ec44ff85ef64509a | 4,575 | py | Python | PyQuM/ver(1.1)/pyqum/display.py | takehuge/PYQUM | bfc9d9b1c2f4246c7aac3a371baaf587c99f8069 | [
"MIT"
] | null | null | null | PyQuM/ver(1.1)/pyqum/display.py | takehuge/PYQUM | bfc9d9b1c2f4246c7aac3a371baaf587c99f8069 | [
"MIT"
] | null | null | null | PyQuM/ver(1.1)/pyqum/display.py | takehuge/PYQUM | bfc9d9b1c2f4246c7aac3a371baaf587c99f8069 | [
"MIT"
] | null | null | null | # Loading Basics
from colorama import init, Back, Fore
init(autoreset=True)  # convert ANSI color codes so they render on Windows terminals
from os.path import basename as bs
myname = bs(__file__).split('.')[0] # This py-script's name
from flask import Blueprint, render_template, request, redirect, Response, stream_with_context
import random, json, glob, time
import numpy as np
from pyqum import stream_template
bp = Blueprint(myname, __name__, url_prefix='/dsply')
@bp.route('/', methods=['POST', 'GET'])
def show():
return render_template('blog/dsply/display.html')
# Static
@bp.route('/figstatic', methods=['POST', 'GET'])
def figstatic():
def fetch():
datas = [0, 10, 5, 2, 20, 30, 45]
return datas
return render_template('blog/dsply/figstatic.html', datas=fetch()) #this is where it really goes
# Setting shared variables
x = np.arange(0, 12, 0.1)
lx = len(x)
yr = np.random.ranf(lx) - np.random.ranf(lx)
yr2 = np.random.ranf(lx) - np.random.ranf(lx)
ys = np.sin(3*x)
yc = np.cos(3 * x)
# Streaming
@bp.route('/dynamic', methods=['POST', 'GET'])
def dynamic():  # one of the methods called by base/layout
datagen, data = {}, {}
data['x'] = [x for x in x]
data['y'] = [y for y in yr]
if request.method == 'POST':
if request.form.get('analysis'):
def gen():
i = 1
while True:
data['y'][1:lx] = data['y'][0:lx - 1]
data['y'][0] = random.uniform(-1, 1)
yield i, data
time.sleep(0.03)
i += 1
datagen = gen()
# return Response(gen()) #Blank page with just data print
# return Response(stream_with_context(gen())) #SAME AS ABOVE
# return Response(stream_template('blog/analysis.html', data=rows)) #BLANK!!! WHY???
return Response(stream_with_context(stream_template('blog/dsply/figdynamic.html', data=datagen)))
# return render_template('blog/analysis.html', data=data) #NORMAL Display, No streaming!
@bp.route('/stream', methods=['POST', 'GET'])
def stream():
datad = []
def gen():
# datad = [] # only if += is used
for i in range(371):
a = np.sin(i * np.pi / 25 + 0.25 * np.pi) + 0.07 * random.uniform(-1, 1)
b = np.cos(i * np.pi / 25 + 0.25 * np.pi) + 0.13 * random.uniform(-1, 1)
book = dict(x=a, y=b)
datad.append(book)
# datad += [book] # equivalent to append but need to declare it inside def
yield i, datad
time.sleep(0.1)
data = gen()
return Response(stream_with_context(stream_template('blog/dsply/figstream.html', data=data)))
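# stream_template (imported from pyqum) is assumed to follow the standard
# Flask streaming-template recipe; a minimal sketch of such a helper:
#     def stream_template(template_name, **context):
#         app.update_template_context(context)
#         t = app.jinja_env.get_template(template_name)
#         rv = t.stream(context)
#         rv.enable_buffering(5)  # flush every 5 template events
#         return rv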
@bp.route('/concurrent', methods=['POST', 'GET'])
def concurrent():  # one of the methods called by base/layout
datad, data, chartop, chartopt = {}, {}, "", ""
data['x'] = [x for x in x]
data['yS'] = [y for y in ys]
data['yR'] = [y for y in yr]
data['yC'] = [y for y in yc]
data['xud'], data['yup'], data['ydn'] = [], [], []
# chartopt = request.form.get("chartopt")
if 'run' in request.form:
chartopt = request.form.get("chartopt") # selection picked for chart#1
chartop = request.form.get("chartop") # selection picked for chart#2
def gen():
for i in range(lx):
data['xud'].append(data['x'][i])
if str(chartopt) == "sinusoid":
data['yup'].append(data['yS'][i])
if str(chartopt) == "random":
data['yup'].append(data['yR'][i])
if str(chartopt) == "cosine":
data['yup'].append(data['yC'][i])
if str(chartop) == "0":
data['ydn'].append(data['yS'][i])
if str(chartop) == "1":
data['ydn'].append(data['yR'][i])
if str(chartop) == "2":
data['ydn'].append(data['yC'][i])
yield [data['xud'], data['yup'], data['ydn']]
time.sleep(0.03)
datad = gen()
return Response(stream_with_context(stream_template('blog/dsply/figconcurrent.html', datad=datad, chartopt=str(chartopt), chartop=str(chartop))))
# return render_template('blog/analysis.html', data=data) #NORMAL Display, No streaming!
@bp.route('/game01', methods=['POST', 'GET'])
def game01():
return render_template('blog/dsply/game01.html')
print(Back.BLUE + Fore.CYAN + myname + ".bp registered!") # leave 2 lines blank before this
# invest_scenario_generator_summary.py (phargogh/invest-natcap.invest-3, BSD-3-Clause)
import invest_natcap.iui.modelui
if __name__ == '__main__':
invest_natcap.iui.modelui.main('scenario-generator-summary.json')
1c35a5a8ef74d6c695b4741787fb1b953ad1bb5e | 4,016 | py | Python | MODEL/model_bag_classifier.py | quincy-125/DigiPath_CLAM_TF | 8b7ab50caaca13f666268b0f4e071d123e190978 | [
"MIT"
] | 5 | 2021-05-10T17:23:46.000Z | 2022-02-27T22:33:03.000Z | MODEL/model_bag_classifier.py | quincy-125/DigiPath_CLAM_TF | 8b7ab50caaca13f666268b0f4e071d123e190978 | [
"MIT"
] | null | null | null | MODEL/model_bag_classifier.py | quincy-125/DigiPath_CLAM_TF | 8b7ab50caaca13f666268b0f4e071d123e190978 | [
"MIT"
] | 2 | 2020-12-12T00:15:21.000Z | 2021-05-10T17:23:57.000Z | import tensorflow as tf
import numpy as np
class S_Bag(tf.keras.Model):
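    """Single-branch bag-level classifier (CLAM-style): pools the patch
    embeddings h with the attention scores A into one slide-level
    representation per class, then scores it with a shared linear layer."""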
def __init__(self, dim_compress_features=512, n_class=2):
super(S_Bag, self).__init__()
self.dim_compress_features = dim_compress_features
self.n_class = n_class
self.s_bag_model = tf.keras.models.Sequential()
self.s_bag_layer = tf.keras.layers.Dense(
units=1, activation='linear', input_shape=(self.n_class, self.dim_compress_features),
name='Bag_Classifier_Layer'
)
self.s_bag_model.add(self.s_bag_layer)
def bag_classifier(self):
return self.s_bag_model
def h_slide(self, A, h):
# compute the slide-level representation aggregated per the attention score distribution for the mth class
SAR = list()
for i in range(len(A)):
sar = tf.linalg.matmul(tf.transpose(A[i]), h[i])  # shape (n_class, dim_compress_features), e.g. (2, 512)
SAR.append(sar)
slide_agg_rep = tf.math.add_n(SAR)  # h_[slide,m]: per-class slide representation, shape (2, 512) by default
return slide_agg_rep
def call(self, bag_label, A, h):
slide_agg_rep = self.h_slide(A, h)
bag_classifier = self.bag_classifier()
slide_score_unnorm = bag_classifier(slide_agg_rep)
slide_score_unnorm = tf.reshape(slide_score_unnorm, (1, self.n_class))
Y_hat = tf.math.top_k(slide_score_unnorm, 1)[1][-1]
Y_prob = tf.math.softmax(slide_score_unnorm)  # (1, n_class): predicted class probabilities
predict_slide_label = np.argmax(Y_prob.numpy())
Y_true = tf.one_hot([bag_label], 2)
return slide_score_unnorm, Y_hat, Y_prob, predict_slide_label, Y_true
class M_Bag(tf.keras.Model):
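    """Multi-branch bag-level classifier: one linear classifier per class,
    each scoring its own attention-pooled slide-level representation."""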
def __init__(self, dim_compress_features=512, n_class=2):
super(M_Bag, self).__init__()
self.dim_compress_features = dim_compress_features
self.n_class = n_class
# build one independent linear classifier per class branch; appending the
# same Sequential n_class times would make every branch share its weights
self.m_bag_models = list()
for i in range(self.n_class):
    m_bag_model = tf.keras.models.Sequential()
    m_bag_model.add(tf.keras.layers.Dense(units=1, activation='linear',
                                          input_shape=(1, self.dim_compress_features),
                                          name='Bag_Classifier_Layer_{}'.format(i)))
    self.m_bag_models.append(m_bag_model)
def bag_classifier(self):
return self.m_bag_models
def h_slide(self, A, h):
# compute the slide-level representation aggregated per the attention score distribution for the mth class
SAR = list()
for i in range(len(A)):
sar = tf.linalg.matmul(tf.transpose(A[i]), h[i])  # shape (n_class, dim_compress_features), e.g. (2, 512)
SAR.append(sar)
SAR_Branch = list()
for i in range(self.n_class):
sar_branch = list()
for j in range(len(SAR)):
sar_c = tf.reshape(SAR[j][i], (1, self.dim_compress_features))
sar_branch.append(sar_c)
SAR_Branch.append(sar_branch)
slide_agg_rep = list()
for k in range(self.n_class):
slide_agg_rep.append(tf.math.add_n(SAR_Branch[k]))
return slide_agg_rep
def call(self, bag_label, A, h):
slide_agg_rep = self.h_slide(A, h)
# return s_[slide,m] (slide-level prediction scores)
ssus = list()
for i in range(self.n_class):
bag_classifier = self.bag_classifier()[i]
ssu = bag_classifier(slide_agg_rep[i])
ssus.append(ssu[0][0])
slide_score_unnorm = tf.convert_to_tensor(ssus)
slide_score_unnorm = tf.reshape(slide_score_unnorm, (1, self.n_class))
Y_hat = tf.math.top_k(slide_score_unnorm, 1)[1][-1]
Y_prob = tf.math.softmax(slide_score_unnorm)
predict_slide_label = np.argmax(Y_prob.numpy())
Y_true = tf.one_hot([bag_label], 2)
return slide_score_unnorm, Y_hat, Y_prob, predict_slide_label, Y_true | 38.615385 | 117 | 0.627241 | import tensorflow as tf
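# Minimal smoke test (a sketch; the chunk sizes and 512-d patch embeddings
# below are illustrative assumptions, not values from the original pipeline).
if __name__ == "__main__":
    A = [tf.random.uniform((4, 2)), tf.random.uniform((6, 2))]      # per-patch attention, one column per class
    h = [tf.random.uniform((4, 512)), tf.random.uniform((6, 512))]  # patch feature embeddings
    scores, y_hat, y_prob, pred, y_true = S_Bag().call(1, A, h)
    print(scores.shape, int(pred))  # (1, 2) and the predicted slide label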
import numpy as np
class S_Bag(tf.keras.Model):
def __init__(self, dim_compress_features=512, n_class=2):
super(S_Bag, self).__init__()
self.dim_compress_features = dim_compress_features
self.n_class = n_class
self.s_bag_model = tf.keras.models.Sequential()
self.s_bag_layer = tf.keras.layers.Dense(
units=1, activation='linear', input_shape=(self.n_class, self.dim_compress_features),
name='Bag_Classifier_Layer'
)
self.s_bag_model.add(self.s_bag_layer)
def bag_classifier(self):
return self.s_bag_model
def h_slide(self, A, h):
SAR = list()
for i in range(len(A)):
sar = tf.linalg.matmul(tf.transpose(A[i]), h[i])
SAR.append(sar)
slide_agg_rep = tf.math.add_n(SAR)
return slide_agg_rep
def call(self, bag_label, A, h):
slide_agg_rep = self.h_slide(A, h)
bag_classifier = self.bag_classifier()
slide_score_unnorm = bag_classifier(slide_agg_rep)
slide_score_unnorm = tf.reshape(slide_score_unnorm, (1, self.n_class))
Y_hat = tf.math.top_k(slide_score_unnorm, 1)[1][-1]
Y_prob = tf.math.softmax(
tf.reshape(slide_score_unnorm, (1, self.n_class)))
predict_slide_label = np.argmax(Y_prob.numpy())
Y_true = tf.one_hot([bag_label], 2)
return slide_score_unnorm, Y_hat, Y_prob, predict_slide_label, Y_true
class M_Bag(tf.keras.Model):
def __init__(self, dim_compress_features=512, n_class=2):
super(M_Bag, self).__init__()
self.dim_compress_features = dim_compress_features
self.n_class = n_class
self.m_bag_models = list()
self.m_bag_model = tf.keras.models.Sequential()
self.m_bag_layer = tf.keras.layers.Dense(units=1, activation='linear',
input_shape=(1, self.dim_compress_features),
name='Bag_Classifier_Layer')
self.m_bag_model.add(self.m_bag_layer)
for i in range(self.n_class):
self.m_bag_models.append(self.m_bag_model)
def bag_classifier(self):
return self.m_bag_models
def h_slide(self, A, h):
SAR = list()
for i in range(len(A)):
sar = tf.linalg.matmul(tf.transpose(A[i]), h[i])
SAR.append(sar)
SAR_Branch = list()
for i in range(self.n_class):
sar_branch = list()
for j in range(len(SAR)):
sar_c = tf.reshape(SAR[j][i], (1, self.dim_compress_features))
sar_branch.append(sar_c)
SAR_Branch.append(sar_branch)
slide_agg_rep = list()
for k in range(self.n_class):
slide_agg_rep.append(tf.math.add_n(SAR_Branch[k]))
return slide_agg_rep
def call(self, bag_label, A, h):
slide_agg_rep = self.h_slide(A, h)
ssus = list()
for i in range(self.n_class):
bag_classifier = self.bag_classifier()[i]
ssu = bag_classifier(slide_agg_rep[i])
ssus.append(ssu[0][0])
slide_score_unnorm = tf.convert_to_tensor(ssus)
slide_score_unnorm = tf.reshape(slide_score_unnorm, (1, self.n_class))
Y_hat = tf.math.top_k(slide_score_unnorm, 1)[1][-1]
Y_prob = tf.math.softmax(slide_score_unnorm)
predict_slide_label = np.argmax(Y_prob.numpy())
Y_true = tf.one_hot([bag_label], 2)
return slide_score_unnorm, Y_hat, Y_prob, predict_slide_label, Y_true | true | true |
# dmd/dmd.py (HaldexBrake/ReducedOrderModeling, Apache-2.0)
"""Dynamic mode decomposition (DMD) utilities for the damped dual-mass system:
snapshot generation from FMU simulations or the analytical solution, data-matrix
construction with optional input and time-delay embedding, DMD mode extraction,
and prediction."""
from pyfmi import load_fmu
import numpy as np
from scipy.linalg import eig
from numpy.linalg import svd, solve, inv, norm
import matplotlib.pyplot as plt
from sympy import symbols, lambdify
def create_input_vec(time_vec, inp_type='sin', amp=10.0, freq=1.0, delta_time=1.0, duration=1):
"""Constructs an input vector either as a sine wave, Dirac pulse or chirp signal.
Args:
time_vec (ndarray): Time vector.
inp_type (str): The kind of input wanted. Must be `sin`, `delta` or `inc_freq`.
amp (double): Amplitude of the sine wave or the Dirac pulse.
freq (double): Frequency of the sine wave.
delta_time (double): Time at which the pulse starts.
duration (int): Duration in time steps of the Dirac pulse.
Returns:
ndarray: The input vector.
Raises:
ValueError: If the specified input type does not exist.
"""
if amp == 0:
u = None
elif inp_type == 'sin':
u = amp*np.sin((freq*2*np.pi)*time_vec)
elif inp_type == 'delta':
u = np.zeros_like(time_vec)
idx = np.argmax(time_vec>delta_time)
u[idx:idx+duration] = np.array(duration*[amp])
elif inp_type == 'inc_freq':
freq = np.linspace(0,1,len(time_vec))
u = amp*np.sin((freq*2*np.pi)*time_vec)
else:
raise ValueError('inp must be either \'sin\', \'inc_freq\' or \'delta\'.')
return u
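# Example usage (a sketch; the sampling choices are illustrative):
#   t = np.linspace(0.0, 10.0, 1001)
#   u = create_input_vec(t, inp_type='sin', amp=5.0, freq=2.0)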
def get_snapshots_damped_dual_mass(t_start, t_stop, ncp, input_force=None, time_vec=None, states=['mass1.s','mass1.v','mass2.s','mass2.v']):
"""Simulates the FMU of the damped dual mass system and returns the
snapshots as a matrix.
Args:
t_start (double): Simulation start time.
t_stop (double): Simulation stop time.
ncp (int): Number of communication points, i.e. number of time steps
excluding the initial condition.
input_force (ndarray): Input signal of same length as `time_vec`.
time_vec (ndarray): Time vector of same length as `input_force`.
states (list): The states of the FMU that should be included in the
snapshots.
Returns:
ndarray: The matrix of snapshots, where row i corresponds to state i
in states and time evolves along the columns.
Raises:
ValueError: If `input_force` is given without time vector.
"""
# Load the FMU
model = load_fmu('../fmu/DampedDualMassSystem.fmu')
# Specify the number of communication points (ncp)
opts = model.simulate_options()
opts['ncp'] = ncp
# Create input object
if input_force is not None:
if time_vec is None:
raise ValueError('A time vector must be provided together with input_force.')
input_object = ('F', np.transpose(np.vstack((time_vec,input_force))))
else:
input_object = None
# Simulate the FMU
res = model.simulate(start_time=t_start, final_time=t_stop, input=input_object, options=opts)
# If no states are given, return all
if states is None or len(states) == 0:
states = res.keys()
# Extract simulation result
snapshots = np.zeros((len(states),ncp+1))
for i, state in enumerate(states):
snapshots[i,:] = res[state]
return snapshots
def get_snapshots_stop_friction(t_start, t_stop, ncp, input_force=None, time_vec=None, states=['mass1.s','mass1.v','mass2.s','mass2.v']):
"""Simulates the FMU of the damped dual mass system with stop and friction
and returns the snapshots as a matrix.
Args:
t_start (double): Simulation start time.
t_stop (double): Simulation stop time.
ncp (int): Number of communication points, i.e. number of time steps
excluding the initial condition.
input_force (ndarray): Input signal of same length as `time_vec`.
time_vec (ndarray): Time vector of same length as `input_force`.
states (list): The states of the FMU that should be included in the
snapshots.
Returns:
ndarray: The matrix of snapshots, where row i corresponds to state i
in states and time evolves along the columns.
Raises:
ValueError: If `input_force` is given without time vector.
"""
# Load the FMU
model = load_fmu('../fmu/DampedDualMassSystemStopFriction.fmu')
# Specify the number of communication points (ncp)
opts = model.simulate_options()
opts['ncp'] = ncp
# Create input object
if input_force is not None:
if time_vec is None:
raise ValueError('A time vector must be provided together with input_force.')
input_object = ('F', np.transpose(np.vstack((time_vec,input_force))))
else:
input_object = None
# Simulate the FMU
res = model.simulate(start_time=t_start, final_time=t_stop, input=input_object, options=opts)
# Find mask to extract the solution at the specified communication points, i.e. not at
# the additional state events points.
mask = len(res['time'])*[True]
i = 0
for el in time_vec:
while abs(el-res['time'][i])>1e-12:
mask[i] = False
i += 1
if i == len(res['time']):
break
i += 1
# If no states are given, return all
if states is None or len(states)==0:
states = res.keys()
# Extract simulation result
snapshots = np.zeros((len(states),ncp+1))
for i, state in enumerate(states):
snapshots[i,:] = res[state][mask]
return snapshots
def get_koopman_snapshots_stop_friction(t_start, t_stop, ncp, observables, input_force=None, time_vec=None):
"""Simulates the FMU of the damped dual mass system with stop and friction
and returns the snapshots as a matrix where observables have been applied
to the result.
Args:
t_start (double): Simulation start time.
t_stop (double): Simulation stop time.
ncp (int): Number of communication points, i.e. number of time steps
excluding the initial condition.
observables (list of SymPy expressions): The observable functions used
to extract states.
input_force (ndarray): Input signal of same length as `time_vec`.
time_vec (ndarray): Time vector of same length as `input_force`.
Returns:
ndarray: The matrix of snapshots, where row i corresponds to state i
in states and time evolves along the columns.
Raises:
ValueError: If `input_force` is given without time vector.
"""
# Wrapper function to be able to call the function `func` with
# arguments inside a list
def _wrapper(func, args):
return func(*args)
# Load the FMU
model = load_fmu('../fmu/DampedDualMassSystemStopFriction.fmu')
# Specify the number of communication points (ncp)
opts = model.simulate_options()
opts['ncp'] = ncp
# Create input object
if input_force is not None:
if time_vec is None:
raise ValueError('Please specify time vector.')
input_object = ('F', np.transpose(np.vstack((time_vec,input_force))))
else:
input_object = None
# Simulate the FMU
res = model.simulate(start_time=t_start, final_time=t_stop, input=input_object, options=opts)
# Find mask for extracting the solution at the correct points, i.e. not at
# the additional state events
mask = len(res['time'])*[True]
i = 0
for t in time_vec:
while abs(t-res['time'][i])>1e-12:
mask[i] = False
i += 1
if i == len(res['time']):
break
i += 1
# Extract simulation result
snapshots = np.zeros((len(observables),ncp+1))
for i, obs in enumerate(observables):
syms = obs.free_symbols # Get args in this observable
states = [sym.name for sym in list(syms)] # Get the names of the args, i.e. our states
f = lambdify(syms, obs, 'numpy') # Vectorize the observable function
values = [res[state][mask] for state in states] # Get simulation result for each state
val = _wrapper(f, values) # Computes g_i(x)
snapshots[i,:] = val
return snapshots
def get_analytical_snapshots_damped_dual_mass(t_start, t_stop, ncp, input_force=None):
"""Generates snapshots for the damped dual mass system from the analytical
solution to the system.
Args:
t_start (double): Simulation start time.
t_stop (double): Simulation stop time.
ncp (int): Number of communication points, i.e. number of time steps
excluding the initial condition.
input_force (ndarray): Input signal of same length as `time_vec`.
Returns:
ndarray: The matrix of snapshots, where row i corresponds to state i
in states and time evolves along the columns. The states are:
position and velocity of mass 1, position and velocity of mass 2.
"""
# Analytical solution to the damped dual mass system
# Set the values of constants
k1,k2,m1,m2,c_damp = 250, 1000, 3, 2, np.sqrt(500)
# Construct system matrix s.t. dot{x} = Ax
A = np.array([[0,1,0,0],
[-(k1+k2)/m1,-c_damp/m1,k2/m1,c_damp/m1],
[0,0,0,1],
[k2/m2,c_damp/m2,-k2/m2,-c_damp/m2]])
# Eigendecomposition of A
lam_A,V = eig(A) # eigenvals as elements of lam, eigenvecs as columns in V
# Exponential matrix of a for the given time step
dt = (t_stop-t_start)/ncp # step size
expAdt = V@np.diag(np.exp(dt*lam_A))@inv(V)
expAdt = np.real(expAdt)
# Setup for time-stepping
X = np.zeros((4,ncp+1)) # Construct matrix for storage
X[:,0] = np.array([0, 0, 0.1, -0.2]) # Set initial values
B = solve(A, expAdt-np.eye(4)) # Help matrix for more efficient calculations
b = B[:,-1] # Extract the needed column
# Iterate solution forward in time
# States at time k (given states up to k-1) are given by
# X[:,k] = expAdt@X[:,k-1] + A_inv@(expAdt - np.eye(4))@np.array([0,input_force[k-1]/m2,0,0])
if input_force is None:
for k in range(1,ncp+1):
X[:,k] = expAdt@X[:,k-1]
else:
for k in range(1,ncp+1):
X[:,k] = expAdt@X[:,k-1] + input_force[k-1]/m2*b
return X
def get_data_matrices(data, m_stop=None, u=None, q=0):
"""Creates the X and Y data matrices.
Args:
data (ndarray): Simulation result.
m_stop (int): Number of columns of the data matrices.
u (ndarray): Input vector.
q (int): Number of time-delay embeddings.
Returns:
tuple: (X,Y). The X and Y data matrices.
Raises:
ValueError: If `m_stop` is not valid.
"""
if m_stop is None:
m_stop = data.shape[1]
elif m_stop == 0:
raise ValueError('m_stop must be greater than zero')
elif m_stop <= q+1:
raise ValueError('m_stop must be at least q+2')
# Construct data matrices (the if statement is not strictly required; creating X directly is more efficient than stacking vectors)
if q>0:
if u is None:
X = data[:,:m_stop-(q+1)]
for i in range(q,0,-1):
X = np.vstack([X,data[:,(q+1-i):m_stop-i]])
Y = np.vstack([X[data.shape[0]:,:],data[:,(q+1):m_stop]])
else:
zero_vec = np.zeros(m_stop-(q+1))
X = np.vstack([data[:,:m_stop-(q+1)],u[:m_stop-(q+1)]])
Y = np.vstack([data[:,1:m_stop-q],zero_vec])
for i in range(q,0,-1):
X = np.vstack([X,data[:,(q+1-i):m_stop-i],u[(q+1-i):m_stop-i]])
Y = np.vstack([Y,data[:,(q+1-(i-1)):m_stop-(i-1)],zero_vec])
else:
if u is None:
X = data[:,:m_stop-1]
Y = data[:,1:m_stop]
else:
X = np.vstack([data[:,:m_stop-1],u[:m_stop-1]])
Y = np.vstack([data[:,1:m_stop],np.zeros((m_stop-1,))])
return X.astype(np.float64),Y.astype(np.float64)
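# Shape sketch: for n states and m snapshots, q=0 and no input give
# X = data[:, :m-1] and Y = data[:, 1:m], both (n, m-1), so each column of Y
# is the one-step-ahead counterpart of the matching column of X.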
def get_dmd_modes(X, Y, n_trunc=None, plot=False):
"""Computes the DMD modes `v` and eigenvalues `lam` of the data matrices `X`,`Y`.
Args:
X (ndarray): First data matrix.
Y (ndarray): Second data matrix.
n_trunc (int): Truncates `X` to rank `n_trunc`.
plot (bool): Plots the singular values, eigenvalues and some columns of V.
Returns:
tuple: (lam,w,v,A). Eigenvalues, left eigenvectors, right eigenvectors and the matrix A or A_tilde.
"""
U,S,VH = svd(X,full_matrices=False)
if n_trunc is None:
# Compute A_hat and make eigendecomposition
A = Y@VH.T@np.diag(1/S)@U.T
# Optionally damp the singular values (Tikhonov-style regularization) if needed:
# A = Y@VH.T@np.diag(S/(S*S + 1e-2))@U.T
lam,w,v = eig(A,left=True,right=True)
else:
# Truncate
U = U[:,0:n_trunc]
S_trunc = S[0:n_trunc]
VH = VH[0:n_trunc,:]
# Similarity transform to matrix A_tilde and eigendecomposition
A = np.conj(U.T)@Y@np.conj(VH.T)@np.diag(1/S_trunc)
lam,w_tilde,v_tilde = eig(A,left=True,right=True)
# Project eigenvectors so we get correct DMD modes
w = (w_tilde.T@np.conj(U.T)).T
v = U@v_tilde
if plot:
# Singular values
plt.figure()
nnz_s = S > 1e-12
x = np.arange(len(S))
plt.semilogy(x[nnz_s],S[nnz_s], 'bx')
plt.semilogy(x[nnz_s==False],S[nnz_s==False], 'rx')
plt.xlim([1,len(S)])
if n_trunc is not None:
plt.axvline(x=n_trunc, color='k', linestyle='-',linewidth=1)
plt.grid(True)
plt.xlabel('Index')
plt.ylabel('Singular value')
# Eigenvalues in complex plane
plt.figure()
mask = np.abs(lam) > 1
plt.plot(np.real(lam[mask==False]),np.imag(lam[mask==False]),'bx')
plt.plot(np.real(lam[mask]),np.imag(lam[mask]),'rx')
plt.plot(np.cos(np.linspace(0,2*np.pi)),np.sin(np.linspace(0,2*np.pi)),'--k')
plt.xlabel('Re($\lambda$)')
plt.ylabel('Im($\lambda$)')
plt.grid(True)
plt.axis('equal')
# Columns in V
plt.figure()
plt.plot(np.conj(VH.T)[:,0:6])
plt.xlim([0,VH.shape[1]])
plt.title('Columns in V')
plt.grid(True)
plt.plot()
return lam.astype(np.complex128),w.astype(np.complex128),v.astype(np.complex128),A
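# Note: with the normalization 1/(w_k^H v_k) applied in one_step_pred below,
# (lam, w, v) realize the spectral reconstruction A x = sum_k lam_k v_k (w_k^H x) / (w_k^H v_k).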
def one_step_pred(xk, lam, wH, v, norm_vec):
"""Performs a one-step prediction of the system represented by its DMD.
Args:
xk (ndarray): Solution (states) at time step k.
lam (ndarray): DMD eigenvalues.
wH (ndarray): Complex conjugated left eigenvectors from DMD.
v (ndarray): DMD modes.
norm_vec (ndarray): Normalization vector.
Returns:
ndarray: Prediction of the states of the system at time step k+1.
"""
return np.real(np.sum(((lam*(wH@xk))*norm_vec)*v,axis=1))
def predict(lam, w, v, X0, N, u=None, q=0):
"""Predict the future dynamics of the system given an initial value `X0`. Result is returned
as a matrix where rows correspond to states and columns to time.
Args:
lam (ndarray): DMD eigenvalues.
w (ndarray): Left eigenvectors from DMD.
v (ndarray): DMD modes.
X0 (ndarray): Initial value of the system.
N (int): Number of time steps to predict.
u (ndarray): Input signal.
q (int): Number of time-delay embeddings.
Returns:
ndarray: Prediction of the states of the system for N time steps into the future.
"""
# Construct matrix for predictions and set initial values
n = X0.shape[0]
Yhat = np.zeros((n,N+1-q),dtype=np.float64)
Yhat[:,0] = X0
# Add input in the correct rows and construct a mask for prediction
n_x = n//(q+1)
if u is not None:
if q>0:
mask = np.array((q+1)*((n_x-1)*[True]+[False]))
len_u = len(u)
Yhat[mask==False,:] = np.vstack([u[i:len_u-(q-i)] for i in range(0,q+1)])
else:
mask = (n-1)*[True] + [False]
Yhat[-1,:] = u # Add input
else:
mask = n*[True]
# For efficient calculations
wH = np.conj(w).T
norm_vec = 1/(np.diag(wH@v))
# Prediction
for i in range(1,N+1-q):
yhat = one_step_pred(Yhat[:,i-1],lam,wH,v,norm_vec)
Yhat[mask,i] = yhat[mask]
# Extract predictions
res = np.zeros((n_x,N+1),dtype=np.float64)
res[:,:N+1-q] = Yhat[:n_x,:]
for i in range(q):
res[:,N+1-q+i] = Yhat[(i+1)*n_x:(i+2)*n_x,-1]
return res
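if __name__ == '__main__':
    # End-to-end sketch on the analytical damped dual-mass model (no FMU
    # required); the horizon, forcing and truncation rank are illustrative.
    t_start, t_stop, ncp = 0.0, 10.0, 1000
    time_vec = np.linspace(t_start, t_stop, ncp + 1)
    u = create_input_vec(time_vec, inp_type='sin', amp=10.0, freq=1.0)
    data = get_analytical_snapshots_damped_dual_mass(t_start, t_stop, ncp, input_force=u)
    X, Y = get_data_matrices(data, u=u)
    lam, w, v, _ = get_dmd_modes(X, Y, n_trunc=4)
    X0 = np.append(data[:, 0], u[0])   # initial state stacked with the input row
    data_hat = predict(lam, w, v, X0, ncp, u=u)[:data.shape[0]]
    print('relative prediction error:', norm(data_hat - data) / norm(data))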
# fluid/DeepASR/train.py (phlrain/models, Apache-2.0)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import numpy as np
import argparse
import time
import paddle.fluid as fluid
import data_utils.augmentor.trans_mean_variance_norm as trans_mean_variance_norm
import data_utils.augmentor.trans_add_delta as trans_add_delta
import data_utils.augmentor.trans_splice as trans_splice
import data_utils.augmentor.trans_delay as trans_delay
import data_utils.async_data_reader as reader
from data_utils.util import lodtensor_to_ndarray
from model_utils.model import stacked_lstmp_model
def parse_args():
parser = argparse.ArgumentParser("Training for stacked LSTMP model.")
parser.add_argument(
'--batch_size',
type=int,
default=32,
help='The sequence number of a batch data. (default: %(default)d)')
parser.add_argument(
'--minimum_batch_size',
type=int,
default=1,
help='The minimum sequence number of a batch data. '
'(default: %(default)d)')
parser.add_argument(
'--frame_dim',
type=int,
default=80,
help='Frame dimension of feature data. (default: %(default)d)')
parser.add_argument(
'--stacked_num',
type=int,
default=5,
help='Number of lstmp layers to stack. (default: %(default)d)')
parser.add_argument(
'--proj_dim',
type=int,
default=512,
help='Project size of lstmp unit. (default: %(default)d)')
parser.add_argument(
'--hidden_dim',
type=int,
default=1024,
help='Hidden size of lstmp unit. (default: %(default)d)')
parser.add_argument(
'--class_num',
type=int,
default=3040,
help='Number of classes in label. (default: %(default)d)')
parser.add_argument(
'--pass_num',
type=int,
default=100,
help='Epoch number to train. (default: %(default)d)')
parser.add_argument(
'--print_per_batches',
type=int,
default=100,
help='Interval to print training accuracy. (default: %(default)d)')
parser.add_argument(
'--learning_rate',
type=float,
default=0.00016,
help='Learning rate used to train. (default: %(default)f)')
parser.add_argument(
'--device',
type=str,
default='GPU',
choices=['CPU', 'GPU'],
help='The device type. (default: %(default)s)')
parser.add_argument(
'--parallel', action='store_true', help='If set, run in parallel.')
parser.add_argument(
'--mean_var',
type=str,
default='data/global_mean_var_search26kHr',
help="The path for feature's global mean and variance. "
"(default: %(default)s)")
parser.add_argument(
'--train_feature_lst',
type=str,
default='data/feature.lst',
help='The feature list path for training. (default: %(default)s)')
parser.add_argument(
'--train_label_lst',
type=str,
default='data/label.lst',
help='The label list path for training. (default: %(default)s)')
parser.add_argument(
'--val_feature_lst',
type=str,
default='data/val_feature.lst',
help='The feature list path for validation. (default: %(default)s)')
parser.add_argument(
'--val_label_lst',
type=str,
default='data/val_label.lst',
help='The label list path for validation. (default: %(default)s)')
parser.add_argument(
'--init_model_path',
type=str,
default=None,
help="The model (checkpoint) path which the training resumes from. "
"If None, train the model from scratch. (default: %(default)s)")
parser.add_argument(
'--checkpoints',
type=str,
default='./checkpoints',
help="The directory for saving checkpoints. Do not save checkpoints "
"if set to ''. (default: %(default)s)")
parser.add_argument(
'--infer_models',
type=str,
default='./infer_models',
help="The directory for saving inference models. Do not save inference "
"models if set to ''. (default: %(default)s)")
args = parser.parse_args()
return args
def print_arguments(args):
print('----------- Configuration Arguments -----------')
for arg, value in sorted(vars(args).items()):  # items() is Python 2/3 compatible
print('%s: %s' % (arg, value))
print('------------------------------------------------')
def train(args):
"""train in loop.
"""
# paths check
if args.init_model_path is not None and \
not os.path.exists(args.init_model_path):
raise IOError("Invalid initial model path!")
if args.checkpoints != '' and not os.path.exists(args.checkpoints):
os.mkdir(args.checkpoints)
if args.infer_models != '' and not os.path.exists(args.infer_models):
os.mkdir(args.infer_models)
prediction, avg_cost, accuracy = stacked_lstmp_model(
frame_dim=args.frame_dim,
hidden_dim=args.hidden_dim,
proj_dim=args.proj_dim,
stacked_num=args.stacked_num,
class_num=args.class_num,
parallel=args.parallel)
# program for test
test_program = fluid.default_main_program().clone()
#optimizer = fluid.optimizer.Momentum(learning_rate=args.learning_rate, momentum=0.9)
optimizer = fluid.optimizer.Adam(
learning_rate=fluid.layers.exponential_decay(
learning_rate=args.learning_rate,
decay_steps=1879,
decay_rate=1 / 1.2,
staircase=True))
optimizer.minimize(avg_cost)
place = fluid.CPUPlace() if args.device == 'CPU' else fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# resume training if initial model provided.
if args.init_model_path is not None:
fluid.io.load_persistables(exe, args.init_model_path)
ltrans = [
trans_add_delta.TransAddDelta(2, 2),
trans_mean_variance_norm.TransMeanVarianceNorm(args.mean_var),
trans_splice.TransSplice(5, 5), trans_delay.TransDelay(5)
]
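# the transformers run on every utterance in order: append delta/acceleration
# features, apply the global mean-variance normalization, splice +/-5 context
# frames, then delay the label stream by 5 frames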
feature_t = fluid.LoDTensor()
label_t = fluid.LoDTensor()
# validation
def test(exe):
# If test data not found, return invalid cost and accuracy
if not (os.path.exists(args.val_feature_lst) and
os.path.exists(args.val_label_lst)):
return -1.0, -1.0
# test data reader
test_data_reader = reader.AsyncDataReader(
args.val_feature_lst,
args.val_label_lst,
-1,
split_sentence_threshold=1024)
test_data_reader.set_transformers(ltrans)
test_costs, test_accs = [], []
for batch_id, batch_data in enumerate(
test_data_reader.batch_iterator(args.batch_size,
args.minimum_batch_size)):
# load_data
(features, labels, lod, _) = batch_data
features = np.reshape(features, (-1, 11, 3, args.frame_dim))
features = np.transpose(features, (0, 2, 1, 3))
feature_t.set(features, place)
feature_t.set_lod([lod])
label_t.set(labels, place)
label_t.set_lod([lod])
cost, acc = exe.run(test_program,
feed={"feature": feature_t,
"label": label_t},
fetch_list=[avg_cost, accuracy],
return_numpy=False)
test_costs.append(lodtensor_to_ndarray(cost)[0])
test_accs.append(lodtensor_to_ndarray(acc)[0])
return np.mean(test_costs), np.mean(test_accs)
# train data reader
train_data_reader = reader.AsyncDataReader(
args.train_feature_lst,
args.train_label_lst,
-1,
split_sentence_threshold=1024)
train_data_reader.set_transformers(ltrans)
# train
for pass_id in range(args.pass_num):  # range works on both Python 2 and 3
pass_start_time = time.time()
for batch_id, batch_data in enumerate(
train_data_reader.batch_iterator(args.batch_size,
args.minimum_batch_size)):
# load_data
(features, labels, lod, name_lst) = batch_data
features = np.reshape(features, (-1, 11, 3, args.frame_dim))
features = np.transpose(features, (0, 2, 1, 3))
feature_t.set(features, place)
feature_t.set_lod([lod])
label_t.set(labels, place)
label_t.set_lod([lod])
to_print = batch_id > 0 and (batch_id % args.print_per_batches == 0)
outs = exe.run(fluid.default_main_program(),
feed={"feature": feature_t,
"label": label_t},
fetch_list=[avg_cost, accuracy] if to_print else [],
return_numpy=False)
if to_print:
print("\nBatch %d, train cost: %f, train acc: %f" %
(batch_id, lodtensor_to_ndarray(outs[0])[0],
lodtensor_to_ndarray(outs[1])[0]))
# save the latest checkpoint
if args.checkpoints != '':
model_path = os.path.join(args.checkpoints,
"deep_asr.latest.checkpoint")
fluid.io.save_persistables(exe, model_path)
else:
sys.stdout.write('.')
sys.stdout.flush()
# run test
val_cost, val_acc = test(exe)
# save checkpoint per pass
if args.checkpoints != '':
model_path = os.path.join(
args.checkpoints,
"deep_asr.pass_" + str(pass_id) + ".checkpoint")
fluid.io.save_persistables(exe, model_path)
# save inference model
if args.infer_models != '':
model_path = os.path.join(
args.infer_models,
"deep_asr.pass_" + str(pass_id) + ".infer.model")
fluid.io.save_inference_model(model_path, ["feature"],
[prediction], exe)
# cal pass time
pass_end_time = time.time()
time_consumed = pass_end_time - pass_start_time
# print info at pass end
print("\nPass %d, time consumed: %f s, val cost: %f, val acc: %f\n" %
(pass_id, time_consumed, val_cost, val_acc))
if __name__ == '__main__':
args = parse_args()
print_arguments(args)
    train(args)
 | 36.535836 | 89 | 0.58795 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import numpy as np
import argparse
import time
import paddle.fluid as fluid
import data_utils.augmentor.trans_mean_variance_norm as trans_mean_variance_norm
import data_utils.augmentor.trans_add_delta as trans_add_delta
import data_utils.augmentor.trans_splice as trans_splice
import data_utils.augmentor.trans_delay as trans_delay
import data_utils.async_data_reader as reader
from data_utils.util import lodtensor_to_ndarray
from model_utils.model import stacked_lstmp_model
def parse_args():
parser = argparse.ArgumentParser("Training for stacked LSTMP model.")
parser.add_argument(
'--batch_size',
type=int,
default=32,
help='The sequence number of a batch data. (default: %(default)d)')
parser.add_argument(
'--minimum_batch_size',
type=int,
default=1,
help='The minimum sequence number of a batch data. '
'(default: %(default)d)')
parser.add_argument(
'--frame_dim',
type=int,
default=80,
help='Frame dimension of feature data. (default: %(default)d)')
parser.add_argument(
'--stacked_num',
type=int,
default=5,
help='Number of lstmp layers to stack. (default: %(default)d)')
parser.add_argument(
'--proj_dim',
type=int,
default=512,
help='Project size of lstmp unit. (default: %(default)d)')
parser.add_argument(
'--hidden_dim',
type=int,
default=1024,
help='Hidden size of lstmp unit. (default: %(default)d)')
parser.add_argument(
'--class_num',
type=int,
default=3040,
help='Number of classes in label. (default: %(default)d)')
parser.add_argument(
'--pass_num',
type=int,
default=100,
help='Epoch number to train. (default: %(default)d)')
parser.add_argument(
'--print_per_batches',
type=int,
default=100,
help='Interval to print training accuracy. (default: %(default)d)')
parser.add_argument(
'--learning_rate',
type=float,
default=0.00016,
help='Learning rate used to train. (default: %(default)f)')
parser.add_argument(
'--device',
type=str,
default='GPU',
choices=['CPU', 'GPU'],
help='The device type. (default: %(default)s)')
parser.add_argument(
'--parallel', action='store_true', help='If set, run in parallel.')
parser.add_argument(
'--mean_var',
type=str,
default='data/global_mean_var_search26kHr',
help="The path for feature's global mean and variance. "
"(default: %(default)s)")
parser.add_argument(
'--train_feature_lst',
type=str,
default='data/feature.lst',
help='The feature list path for training. (default: %(default)s)')
parser.add_argument(
'--train_label_lst',
type=str,
default='data/label.lst',
help='The label list path for training. (default: %(default)s)')
parser.add_argument(
'--val_feature_lst',
type=str,
default='data/val_feature.lst',
help='The feature list path for validation. (default: %(default)s)')
parser.add_argument(
'--val_label_lst',
type=str,
default='data/val_label.lst',
help='The label list path for validation. (default: %(default)s)')
parser.add_argument(
'--init_model_path',
type=str,
default=None,
help="The model (checkpoint) path which the training resumes from. "
"If None, train the model from scratch. (default: %(default)s)")
parser.add_argument(
'--checkpoints',
type=str,
default='./checkpoints',
help="The directory for saving checkpoints. Do not save checkpoints "
"if set to ''. (default: %(default)s)")
parser.add_argument(
'--infer_models',
type=str,
default='./infer_models',
help="The directory for saving inference models. Do not save inference "
"models if set to ''. (default: %(default)s)")
args = parser.parse_args()
return args
def print_arguments(args):
print('----------- Configuration Arguments -----------')
for arg, value in sorted(vars(args).iteritems()):
print('%s: %s' % (arg, value))
print('------------------------------------------------')
def train(args):
# paths check
if args.init_model_path is not None and \
not os.path.exists(args.init_model_path):
raise IOError("Invalid initial model path!")
if args.checkpoints != '' and not os.path.exists(args.checkpoints):
os.mkdir(args.checkpoints)
if args.infer_models != '' and not os.path.exists(args.infer_models):
os.mkdir(args.infer_models)
prediction, avg_cost, accuracy = stacked_lstmp_model(
frame_dim=args.frame_dim,
hidden_dim=args.hidden_dim,
proj_dim=args.proj_dim,
stacked_num=args.stacked_num,
class_num=args.class_num,
parallel=args.parallel)
# program for test
test_program = fluid.default_main_program().clone()
#optimizer = fluid.optimizer.Momentum(learning_rate=args.learning_rate, momentum=0.9)
optimizer = fluid.optimizer.Adam(
learning_rate=fluid.layers.exponential_decay(
learning_rate=args.learning_rate,
decay_steps=1879,
decay_rate=1 / 1.2,
staircase=True))
optimizer.minimize(avg_cost)
place = fluid.CPUPlace() if args.device == 'CPU' else fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# resume training if initial model provided.
if args.init_model_path is not None:
fluid.io.load_persistables(exe, args.init_model_path)
ltrans = [
trans_add_delta.TransAddDelta(2, 2),
trans_mean_variance_norm.TransMeanVarianceNorm(args.mean_var),
trans_splice.TransSplice(5, 5), trans_delay.TransDelay(5)
]
feature_t = fluid.LoDTensor()
label_t = fluid.LoDTensor()
# validation
def test(exe):
# If test data not found, return invalid cost and accuracy
if not (os.path.exists(args.val_feature_lst) and
os.path.exists(args.val_label_lst)):
return -1.0, -1.0
# test data reader
test_data_reader = reader.AsyncDataReader(
args.val_feature_lst,
args.val_label_lst,
-1,
split_sentence_threshold=1024)
test_data_reader.set_transformers(ltrans)
test_costs, test_accs = [], []
for batch_id, batch_data in enumerate(
test_data_reader.batch_iterator(args.batch_size,
args.minimum_batch_size)):
# load_data
(features, labels, lod, _) = batch_data
features = np.reshape(features, (-1, 11, 3, args.frame_dim))
features = np.transpose(features, (0, 2, 1, 3))
feature_t.set(features, place)
feature_t.set_lod([lod])
label_t.set(labels, place)
label_t.set_lod([lod])
cost, acc = exe.run(test_program,
feed={"feature": feature_t,
"label": label_t},
fetch_list=[avg_cost, accuracy],
return_numpy=False)
test_costs.append(lodtensor_to_ndarray(cost)[0])
test_accs.append(lodtensor_to_ndarray(acc)[0])
return np.mean(test_costs), np.mean(test_accs)
# train data reader
train_data_reader = reader.AsyncDataReader(
args.train_feature_lst,
args.train_label_lst,
-1,
split_sentence_threshold=1024)
train_data_reader.set_transformers(ltrans)
# train
for pass_id in xrange(args.pass_num):
pass_start_time = time.time()
for batch_id, batch_data in enumerate(
train_data_reader.batch_iterator(args.batch_size,
args.minimum_batch_size)):
# load_data
(features, labels, lod, name_lst) = batch_data
features = np.reshape(features, (-1, 11, 3, args.frame_dim))
features = np.transpose(features, (0, 2, 1, 3))
feature_t.set(features, place)
feature_t.set_lod([lod])
label_t.set(labels, place)
label_t.set_lod([lod])
to_print = batch_id > 0 and (batch_id % args.print_per_batches == 0)
outs = exe.run(fluid.default_main_program(),
feed={"feature": feature_t,
"label": label_t},
fetch_list=[avg_cost, accuracy] if to_print else [],
return_numpy=False)
if to_print:
print("\nBatch %d, train cost: %f, train acc: %f" %
(batch_id, lodtensor_to_ndarray(outs[0])[0],
lodtensor_to_ndarray(outs[1])[0]))
# save the latest checkpoint
if args.checkpoints != '':
model_path = os.path.join(args.checkpoints,
"deep_asr.latest.checkpoint")
fluid.io.save_persistables(exe, model_path)
else:
sys.stdout.write('.')
sys.stdout.flush()
# run test
val_cost, val_acc = test(exe)
# save checkpoint per pass
if args.checkpoints != '':
model_path = os.path.join(
args.checkpoints,
"deep_asr.pass_" + str(pass_id) + ".checkpoint")
fluid.io.save_persistables(exe, model_path)
# save inference model
if args.infer_models != '':
model_path = os.path.join(
args.infer_models,
"deep_asr.pass_" + str(pass_id) + ".infer.model")
fluid.io.save_inference_model(model_path, ["feature"],
[prediction], exe)
# cal pass time
pass_end_time = time.time()
time_consumed = pass_end_time - pass_start_time
# print info at pass end
print("\nPass %d, time consumed: %f s, val cost: %f, val acc: %f\n" %
(pass_id, time_consumed, val_cost, val_acc))
if __name__ == '__main__':
args = parse_args()
print_arguments(args)
train(args)
| true | true |
1c35a6b02678f7ef62a5d7a45ee173d8bed8fcb8 | 3,308 | py | Python | Statstool-Desktop/SetupWindow.py | Declaminius/EU4-MP-Statstool | 2df7b7f08f1c97257dec325322a2e491ea856432 | ["MIT"] | 1 | 2020-10-06T14:48:32.000Z | 2021-04-20T17:35:24.000Z | Statstool-Desktop/SetupWindow.py | Declaminius/EU4-MP-Statstool | 2df7b7f08f1c97257dec325322a2e491ea856432 | ["MIT"] | 3 | 2021-09-08T02:36:13.000Z | 2022-03-12T00:50:09.000Z | Statstool-Desktop/SetupWindow.py | Declaminius/EU4-MP-Statstool | 2df7b7f08f1c97257dec325322a2e491ea856432 | ["MIT"] | 1 | 2020-09-26T15:31:24.000Z | 2020-09-26T15:31:24.000Z |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 25 02:14:51 2019
@author: Florian
"""
import PyQt5.QtWidgets as Widgets
import PyQt5.QtGui as Gui
import PyQt5.QtCore as Core
from parserfunctions import edit_parse
from Savegame import Savegame
from config import icon_dir, old_nations_list, new_nations_list
class SetupWindow(Widgets.QMainWindow):
switch_window = Core.pyqtSignal()
def __init__(self):
super().__init__()
self.savegame_list = [[],[]]
self.old_nations_list = old_nations_list
self.new_nations_list = new_nations_list
self.status = self.statusBar()
self.line1 = Widgets.QLineEdit()
self.line1.setReadOnly(True)
self.line1.setMinimumSize(350, 22)
self.line2 = Widgets.QLineEdit()
self.line2.setReadOnly(True)
self.line2.setMinimumSize(350, 22)
self.select_button1 = Widgets.QPushButton("Savegame 1", self)
self.select_button1.released.connect(self.get_playertags)
self.select_button2 = Widgets.QPushButton("Savegame 2", self)
self.select_button2.released.connect(self.get_playertags)
self.parse_button = Widgets.QPushButton("Parse")
self.parse_button.released.connect(self.parse)
self.parse_button.setEnabled(False)
self.init_ui()
def init_ui(self):
self.setGeometry(760,490,400,100)
self.setWindowTitle("Decla's Stats-Tool")
self.setWindowIcon(Gui.QIcon(icon_dir))
group_box = Widgets.QGroupBox()
vbox = Widgets.QVBoxLayout()
hbox = Widgets.QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(self.line1)
hbox.addWidget(self.select_button1)
hbox.addStretch(1)
vbox.addLayout(hbox)
hbox = Widgets.QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(self.line2)
hbox.addWidget(self.select_button2)
hbox.addStretch(1)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.parse_button)
group_box.setLayout(vbox)
self.setCentralWidget(group_box)
def get_playertags(self):
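		"""Let the user pick a savegame file, parse its player tags and store
		the resulting Savegame; enable parsing once both slots are filled."""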
sender = self.sender()
self.openFileNameDialog()
try:
self.playertags, self.tag_list, self.localisation_dict = edit_parse(self.FILEDIR)
self.FILENAME = self.FILEDIR.split("/")[-1]
if sender.text() == "Savegame 1":
self.line1.setText(self.FILEDIR)
if sender.text() == "Savegame 2":
self.line2.setText(self.FILEDIR)
self.status.showMessage("")
except AttributeError:
pass
except (IndexError, UnicodeDecodeError) as e:
print(e)
			self.status.showMessage("{} is not an EU4 savegame".format(self.FILEDIR))
try:
savegame = Savegame(self.playertags, self.tag_list, self.FILEDIR)
savegame.directory = "C:/Users/kunde/Desktop/{}-images".format(self.FILENAME.split(".")[0])
if sender.text() == "Savegame 1":
self.savegame_list[0] = savegame
if sender.text() == "Savegame 2":
self.savegame_list[1] = savegame
except (NameError, AttributeError):
pass
if self.savegame_list[0] and self.savegame_list[1]:
self.parse_button.setEnabled(True)
def openFileNameDialog(self):
options = Widgets.QFileDialog.Options()
fileName, _ = Widgets.QFileDialog.getOpenFileName(self, "Select Savegame", "", "All Files (*);;Python Files (*.py)",
options=options)
if fileName:
self.FILEDIR = fileName
def parse(self):
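		"""Merge the player tags of both savegames with the historical
		nations list, then signal the main window to take over."""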
self.playertags = sorted(list(set(self.savegame_list[0].playertags + self.savegame_list[1].playertags + self.old_nations_list)))
		self.switch_window.emit()
 | 33.414141 | 130 | 0.738513 | true | true
1c35a6b30dedb9e02bf92ea0967bf9cfc0bd983b | 1,793 | py | Python | otherUsefulScripts/compile_all_plots.py | MagnusHaughey/liverMitoDNAPipeline | 0d63a41ea626bca032473450e3d10d451744f175 | ["MIT"] | null | null | null | otherUsefulScripts/compile_all_plots.py | MagnusHaughey/liverMitoDNAPipeline | 0d63a41ea626bca032473450e3d10d451744f175 | ["MIT"] | null | null | null | otherUsefulScripts/compile_all_plots.py | MagnusHaughey/liverMitoDNAPipeline | 0d63a41ea626bca032473450e3d10d451744f175 | ["MIT"] | null | null | null |
import numpy as np
import glob
import subprocess
import sys
parent_dir = sys.argv[1]
all_files = []
for file in glob.glob(parent_dir + "/*scatterPlot.pdf"):
file = file.split("/")[-1]
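	# keep the common "<patient>_<sample>" stem by stripping the 20-character
	# suffix (e.g. "_M1A_scatterPlot.pdf")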
all_files.append(file[:-20])
all_files = sorted(set(all_files))
#print(all_files)
#exit(0)
f = open(parent_dir + "/all_SNV_plots.tex" , 'w')
f.write("\\documentclass[15pt]{article}\n \\usepackage[english]{babel}\n \\usepackage[utf8x]{inputenc}\n \\usepackage{graphicx}\n \\usepackage[margin=1in]{geometry}\n \\usepackage[font=Large]{caption}\n \\begin{document}\n\n")
for file in all_files:
patient = file.split("_")[-2]
sample = file.split("_")[-1]
f.write("\\centering\\section*{Patient = " + "{}".format(patient) + " ; sample = " + "{}".format(sample) + " ; primer = M1}\n \\vspace*{0.5in}\n\\centering\n \\begin{figure}[h]\n \\centering\n \\textbf{\\Large{Repeat 1 \\hspace*{1.9in} Repeat 2}}\\par\\medskip\n \\includegraphics[width=0.45\\textwidth]{./" + file + "_M1A_scatterPlot.pdf}\n \\includegraphics[width=0.45\\textwidth]{./" + file + "_M1B_scatterPlot.pdf}\n \\end{figure}\n\n \\includegraphics[width=0.8\\textwidth]{./" + file + "_M1_replicate_frequencies.png}\\\\ \n \\clearpage \n\n")
f.write("\\centering\\section*{Patient = " + "{}".format(patient) + " ; sample = " + "{}".format(sample) + " ; primer = M2}\n \\vspace*{0.5in}\n\\centering\n \\begin{figure}[h]\n \\centering\n \\textbf{\\Large{Repeat 1 \\hspace*{1.9in} Repeat 2}}\\par\\medskip\n \\includegraphics[width=0.45\\textwidth]{./" + file + "_M2A_scatterPlot.pdf}\n \\includegraphics[width=0.45\\textwidth]{./" + file + "_M2B_scatterPlot.pdf}\n \\end{figure}\n\n \\includegraphics[width=0.8\\textwidth]{./" + file + "_M2_replicate_frequencies.png}\\\\ \n \\clearpage \n\n")
f.write("\\end{document}\n")
f.close()
 | 51.228571 | 550 | 0.663134 | true | true
1c35a6e1e0a001f7ac58faac00836738f1d077db | 1,866 | py | Python | shap/benchmark/methods.py | JiechengZhao/shap | ec26a1e0ccdf0a3885943e63502cf479194c13d1 | ["MIT"] | null | null | null | shap/benchmark/methods.py | JiechengZhao/shap | ec26a1e0ccdf0a3885943e63502cf479194c13d1 | ["MIT"] | null | null | null | shap/benchmark/methods.py | JiechengZhao/shap | ec26a1e0ccdf0a3885943e63502cf479194c13d1 | ["MIT"] | null | null | null |
from .. import LinearExplainer
from .. import KernelExplainer
from .. import SamplingExplainer
from .. import TreeExplainer
from ..explainers import other
method_dict = {
"Linear SHAP (corr)": lambda model, X: LinearExplainer(model, X, nsamples=1000).shap_values,
"Linear SHAP (ind)": lambda model, X: LinearExplainer(model, X, feature_dependence="interventional").shap_values,
"Coef": lambda model, X: other.CoefficentExplainer(model).attributions,
"Random": lambda model, X: other.RandomExplainer().attributions,
"Kernel SHAP 1000 mean ref.": lambda model, Xt: lambda X: KernelExplainer(model.predict, Xt.mean(0)).shap_values(X, nsamples=1000, l1_reg=0),
"Kernel SHAP 100 mean ref.": lambda model, Xt: lambda X: KernelExplainer(model.predict, Xt.mean(0)).shap_values(X, nsamples=100, l1_reg=0),
"Sampling SHAP 10000": lambda model, Xt: lambda X: SamplingExplainer(model.predict, Xt).shap_values(X, nsamples=10000),
"Sampling SHAP 1000": lambda model, Xt: lambda X: SamplingExplainer(model.predict, Xt).shap_values(X, nsamples=1000),
"Sampling SHAP 100": lambda model, Xt: lambda X: SamplingExplainer(model.predict, Xt).shap_values(X, nsamples=100),
"Tree SHAP": lambda model, Xt: TreeExplainer(model).shap_values,
"Saabas": lambda model, Xt: lambda X: TreeExplainer(model).shap_values(X, approximate=True)
}
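# Each entry maps a display name to a factory: given a fitted model and the
# background/training data, it returns a callable that computes per-feature
# attributions for new samples.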
linear = [[m, method_dict[m]] for m in [
"Linear SHAP (corr)",
"Linear SHAP (ind)",
"Coef",
"Random",
##"Kernel SHAP 1000 mean ref.",
#"Kernel SHAP 100 mean ref.",
#"Sampling SHAP 10000",
##"Sampling SHAP 1000",
#"Sampling SHAP 100"
]]
tree = [[m, method_dict[m]] for m in [
"Tree SHAP",
"Saabas",
"Random"
##"Kernel SHAP 1000 mean ref.",
#"Kernel SHAP 100 mean ref.",
#"Sampling SHAP 10000",
##"Sampling SHAP 1000",
#"Sampling SHAP 100"
]]
 | 43.395349 | 145 | 0.690782 | true | true
1c35a6e7cc220a027ff5cd4f4aaa716267fd3830 | 18,310 | py | Python | private/templates/EVASS/config.py | hitesh96db/eden | 8e1b22d7d4b92c0bce5b6172d57298949a2f0582 | ["MIT"] | null | null | null | private/templates/EVASS/config.py | hitesh96db/eden | 8e1b22d7d4b92c0bce5b6172d57298949a2f0582 | ["MIT"] | null | null | null | private/templates/EVASS/config.py | hitesh96db/eden | 8e1b22d7d4b92c0bce5b6172d57298949a2f0582 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.storage import Storage
from gluon.validators import IS_NOT_EMPTY, IS_EMPTY_OR, IS_IN_SET
from s3 import s3_date, S3Represent
T = current.T
settings = current.deployment_settings
"""
Settings for the EVASS template:
http://eden.sahanafoundation.org/wiki/Deployments/Italy/EVASS
"""
# -----------------------------------------------------------------------------
# Pre-Populate
settings.base.prepopulate = ["EVASS", "demo/users"]
settings.base.system_name = T("EVASS - Sahana Eden for Italy")
settings.base.system_name_short = T("Sahana Eden for Italy")
# Theme (folder to use for views/layout.html)
settings.base.theme = "EVASS"
settings.ui.formstyle = "foundation"
settings.ui.filter_formstyle = "foundation_inline"
settings.ui.hierarchy_theme = "default"
# -----------------------------------------------------------------------------
# Email settings
settings.mail.default_email_subject = True
settings.mail.auth_user_in_email_subject = True
# -----------------------------------------------------------------------------
# Authentication settings
settings.auth.registration_requests_mobile_phone = True
settings.auth.registration_mobile_phone_mandatory = True
settings.auth.registration_requests_organisation = True
# Uncomment this to have the Organisation selection during registration be mandatory
#settings.auth.registration_organisation_required = True
settings.auth.always_notify_approver = False
settings.security.self_registration = False
# Security Policy
# http://eden.sahanafoundation.org/wiki/S3AAA#System-widePolicy
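# Policy 7 applies controller/function/table ACLs plus entity realms with
# realm hierarchy (see the wiki page above).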
settings.security.policy = 7
def evass_realm_entity(table, row):
"""
Assign a Realm Entity to records
"""
db = current.db
s3db = current.s3db
tablename = table._tablename
realm_entity = None
# Realm is the organization assigned during the record registration/update
if tablename in ("event_event",
"evr_case",
"cr_shelter",
"hrm_human_resource",
"org_facility",
"org_office",
):
otable = s3db.org_organisation
organisation_id = row.organisation_id
if organisation_id:
org = db(otable.id == organisation_id).select(otable.realm_entity,
limitby=(0, 1)).first()
realm_entity = org.realm_entity
elif tablename == "event_incident":
# Incident realm is the related event realm
# (assigned during incident registration/update
etable = db.event_event
try:
incident_id = row.id
query = (table.id == incident_id) & \
(etable.id == table.event_id)
event = db(query).select(etable.realm_entity,
limitby=(0, 1)).first()
realm_entity = event.realm_entity
except:
return
elif tablename == "pr_group":
# Group realm is the user's organisation
user = current.auth.user
if user:
realm_entity = s3db.pr_get_pe_id("org_organisation",
user.organisation_id)
elif tablename == "org_organisation":
realm_entity = row.pe_id
return realm_entity
settings.auth.realm_entity = evass_realm_entity
# -----------------------------------------------------------------------------
# L10n settings
settings.L10n.languages = OrderedDict([
("en", "English"),
("it", "Italiano"),
])
settings.L10n.default_language = "en"
settings.L10n.utc_offset = "UTC +0100"
settings.L10n.date_format = T("%d/%m/%Y")
settings.L10n.decimal_separator = ","
settings.L10n.thousands_separator = "."
settings.L10n.default_country_code = 39
settings.L10n.mandatory_lastname = True
settings.L10n.translate_gis_location = True
# Finance settings
settings.fin.currency_default = "EUR"
settings.fin.currencies = {
"EUR": T("Euros"),
"GBP": T("Great British Pounds"),
"USD": T("United States Dollars"),
}
# -----------------------------------------------------------------------------
# GIS (Map) settings
# GeoNames username
settings.gis.geonames_username = "geoname_username"
settings.gis.countries = ["IT"]
settings.gis.legend = "float"
settings.gis.nav_controls = False
# -----------------------------------------------------------------------------
# Shelters
settings.cr.shelter_population_dynamic = True
settings.cr.shelter_housing_unit_management = True
# -----------------------------------------------------------------------------
# Events
settings.event.types_hierarchical = True
# -----------------------------------------------------------------------------
# Evacuees
settings.evr.physical_description = False
settings.pr.show_emergency_contacts = False
settings.evr.link_to_organisation= True
# -----------------------------------------------------------------------------
# Organisations
settings.org.branches = True
settings.org.branches_tree_view = True
settings.org.facility_types_hierarchical = True
# -----------------------------------------------------------------------------
# Human Resource Management
settings.hrm.email_required = False
settings.hrm.org_required = False
settings.hrm.deletable = True
settings.hrm.multiple_job_titles = True
settings.hrm.staff_experience = False
settings.hrm.vol_active = True
settings.hrm.vol_experience = False
settings.hrm.show_organisation = True
settings.hrm.use_awards = False
settings.hrm.use_certificates = False
settings.hrm.use_skills = True
settings.hrm.use_trainings = False
#*****************************Frontpage settings*************************
# RSS feeds
settings.frontpage.rss = [
{"title": "RSS News - Dipartimento della Protezione Civile ",
"url": "http://www.protezionecivile.gov.it/jcms/do/jprss/Rss/Feed/show.action?id=12170&lang=it#"
},
{"title": "RSS Vigilanza Meteo - Dipartimento della Protezione Civile ",
"url": "http://www.protezionecivile.gov.it/jcms/do/jprss/Rss/Feed/show.action?id=23573&lang=it#"
},
{"title": "RSS Previsioni Meteo - Dipartimento della Protezione Civile ",
"url": "http://www.protezionecivile.gov.it/jcms/do/jprss/Rss/Feed/show.action?id=23575&lang=it#"
},
{"title": "RSS Comunicati Stampa - Dipartimento della Protezione Civile ",
"url": "http://www.protezionecivile.gov.it/jcms/do/jprss/Rss/Feed/show.action?id=23577&lang=it#"
},
{"title": "Twitter - Croce Rossa Italia",
# @crocerossa
#"url": "https://search.twitter.com/search.rss?q=from%3Acrocerossa" # API v1 deprecated, so doesn't work, need to use 3rd-party service, like:
"url": "http://www.rssitfor.me/getrss?name=@crocerossa"
# Hashtag
#url: "http://search.twitter.com/search.atom?q=%23eqnz" # API v1 deprecated, so doesn't work, need to use 3rd-party service, like:
#"url": "http://api2.socialmention.com/search?q=protezionecivile&t=all&f=rss"
},
# {"title": "Twitter - Dipartimento della Protezione Civile",
# # @protezionecivile
# "url": "http://www.rssitfor.me/getrss?name=@protezionecivile"
# # Hashtag
# #url: "http://search.twitter.com/search.atom?q=%23eqnz" # API v1 deprecated, so doesn't work, need to use 3rd-party service, like:
# "url": "http://api2.socialmention.com/search?q=protezionecivile&t=all&f=rss"
# }
]
# -----------------------------------------------------------------------------
def customise_pr_person_resource(r, tablename):
s3db = current.s3db
table = r.resource.table
    # Disallow "unknown" gender and default to "male"
evr_gender_opts = dict((k, v) for k, v in s3db.pr_gender_opts.items()
if k in (2, 3))
gender = table.gender
gender.requires = IS_IN_SET(evr_gender_opts, zero=None)
gender.default = 3
if r.controller == "evr":
# Hide evacuees emergency contacts
settings.pr.show_emergency_contacts = False
# Last name and date of birth mandatory in EVR module
table.last_name.requires = IS_NOT_EMPTY(error_message = T("Please enter a last name"))
dob_requires = s3_date("dob",
future = 0,
past = 1320,
empty = False).requires
dob_requires.error_message = T("Please enter a date of birth")
table.date_of_birth.requires = dob_requires
# Enable Location_id
from gluon import DIV
from s3.s3widgets import S3LocationSelectorWidget2
levels = ("L1","L2","L3",)
location_id = table.location_id
location_id.readable = location_id.writable = True
location_id.label = T("Place of Birth")
location_id.widget = S3LocationSelectorWidget2(levels=levels,
lines=True,
)
location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
# Enable place of birth
place_of_birth = s3db.pr_person_details.place_of_birth
        place_of_birth.label = T("Specify a Different Place of Birth")
place_of_birth.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Different Place of Birth"),
T("Specify a different place of birth (foreign country, village, hamlet)")))
place_of_birth.readable = place_of_birth.writable = True
# Disable religion selection
s3db.pr_person_details.religion.readable = False
s3db.pr_person_details.religion.writable = False
# Disable unneeded physical details
pdtable = s3db.pr_physical_description
hide_fields = [
"race",
"complexion",
"height",
"weight",
"hair_length",
"hair_style",
"hair_baldness",
"hair_comment",
"facial_hair_type",
"facial_hair_length",
"facial_hair_color",
"facial_hair_comment",
"body_hair",
"skin_marks",
"medical_conditions"
]
for fname in hide_fields:
field = pdtable[fname]
field.readable = field.writable = False
# This set is suitable for Italy
ethnicity_opts = ("Italian",
"Chinese",
"Albanese",
"Philippine",
"Pakistani",
"English",
"African",
"Other",
"Unknown",
)
ethnicity_opts = dict((v, T(v)) for v in ethnicity_opts)
ethnicity = pdtable.ethnicity
ethnicity.requires = IS_EMPTY_OR(IS_IN_SET(ethnicity_opts,
sort=True))
ethnicity.represent = S3Represent(options=ethnicity_opts,
translate=True)
settings.customise_pr_person_resource = customise_pr_person_resource
def customise_cr_shelter_resource(r, tablename):
s3db = current.s3db
from s3 import S3HierarchyWidget
s3db.cr_shelter.capacity_day.writable = s3db.cr_shelter.capacity_night.writable = False
s3db.cr_shelter.cr_shelter_environment_id.readable = s3db.cr_shelter.cr_shelter_environment_id.writable = True
organisation_represent = current.s3db.org_OrganisationRepresent
node_represent = organisation_represent(parent=False)
org_widget = S3HierarchyWidget(lookup="org_organisation",
represent=node_represent,
multiple=False,
leafonly=False,
)
s3db.cr_shelter.organisation_id.widget = org_widget
settings.customise_cr_shelter_resource = customise_cr_shelter_resource
def customise_pr_group_resource(r, tablename):
messages = current.messages
field = r.table.group_type
pr_group_types = {1 : T("Family"),
2 : T("Tourist Group"),
3 : T("Relief Team"),
4 : T("other"),
5 : T("Mailing Lists"),
6 : T("Society"),
}
field.represent = lambda opt: pr_group_types.get(opt, messages.UNKNOWN_OPT)
field.requires = IS_IN_SET(pr_group_types, zero=None)
settings.customise_pr_group_resource = customise_pr_group_resource
# -----------------------------------------------------------------------------
def customise_event_event_resource(r, tablename):
table = r.table
table.exercise.default = True
table.organisation_id.readable = table.organisation_id.writable = True
settings.customise_event_event_resource = customise_event_event_resource
def customise_event_incident_resource(r, tablename):
from s3 import IS_ONE_OF
db = current.db
table = r.table
table.exercise.default = True
table.event_id.readable = table.event_id.writable = True
represent = S3Represent(lookup=tablename)
table.event_id.requires = IS_ONE_OF(db, "event_event.id",
represent,
filterby="closed",
filter_opts=(False,),
orderby="event_event.name",
sort=True)
settings.customise_event_incident_resource = customise_event_incident_resource
# -----------------------------------------------------------------------------
def customise_project_location_resource(r, tablename):
field = current.s3db.project_location.status_id
field.readable = field.writable = True
settings.customise_project_location_resource = customise_project_location_resource
# -----------------------------------------------------------------------------
# Comment/uncomment modules here to disable/enable them
# @ToDo: Have the system automatically enable migrate if a module is enabled
# Modules menu is defined in modules/eden/menu.py
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("sync", Storage(
name_nice = T("Synchronization"),
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("translate", Storage(
name_nice = T("Translation Functionality"),
#description = "Selective translation of strings based on module.",
module_type = None,
)),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 1, # 6th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 10
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = T("Staff"),
#description = "Human Resources Management",
restricted = True,
module_type = 10,
)),
("vol", Storage(
name_nice = T("Volunteers"),
#description = "Human Resources Management",
restricted = True,
module_type = 10,
)),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = 10,
)),
("msg", Storage(
name_nice = T("Messaging"),
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
# The user-visible functionality of this module isn't normally required. Rather it's main purpose is to be accessed from other modules.
module_type = 2,
)),
("cr", Storage(
name_nice = T("Shelters"),
#description = "Tracks the location, capacity and breakdown of victims in Shelters",
restricted = True,
module_type = 10
)),
("evr", Storage(
name_nice = T("Evacuees"),
#description = "Evacuees Registry",
restricted = True, # use Access Control Lists to see this module
module_type = 7
)),
("event", Storage(
name_nice = T("Events"),
#description = "Activate Events (e.g. from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).",
restricted = True,
module_type = 10,
)),
])
 | 38.874735 | 147 | 0.593501 | true | true
1c35a8ea0984c9b012086dfb54aee13e6e258451 | 16,518 | py | Python | venv/Lib/site-packages/scipy/_lib/_util.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | ["MIT"] | 6 | 2019-12-21T21:15:54.000Z | 2021-04-20T17:35:24.000Z | venv/Lib/site-packages/scipy/_lib/_util.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | ["MIT"] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | venv/Lib/site-packages/scipy/_lib/_util.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | ["MIT"] | 3 | 2021-01-31T16:40:52.000Z | 2021-08-29T18:32:34.000Z |
import functools
import operator
import sys
import warnings
import numbers
from collections import namedtuple
import inspect
import math
import numpy as np
try:
from numpy.random import Generator as Generator
except ImportError:
class Generator(): # type: ignore[no-redef]
pass
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
"""
np.where(cond, x, fillvalue) always evaluates x even where cond is False.
This one only evaluates f(arr1[cond], arr2[cond], ...).
Examples
--------
>>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
>>> def f(a, b):
... return a*b
>>> _lazywhere(a > 2, (a, b), f, np.nan)
array([ nan, nan, 21., 32.])
Notice, it assumes that all `arrays` are of the same shape, or can be
broadcasted together.
"""
if fillvalue is None:
if f2 is None:
raise ValueError("One of (fillvalue, f2) must be given.")
else:
fillvalue = np.nan
else:
if f2 is not None:
raise ValueError("Only one of (fillvalue, f2) can be given.")
arrays = np.broadcast_arrays(*arrays)
temp = tuple(np.extract(cond, arr) for arr in arrays)
tcode = np.mintypecode([a.dtype.char for a in arrays])
out = np.full(np.shape(arrays[0]), fill_value=fillvalue, dtype=tcode)
np.place(out, cond, f(*temp))
if f2 is not None:
temp = tuple(np.extract(~cond, arr) for arr in arrays)
np.place(out, ~cond, f2(*temp))
return out
def _lazyselect(condlist, choicelist, arrays, default=0):
"""
Mimic `np.select(condlist, choicelist)`.
Notice, it assumes that all `arrays` are of the same shape or can be
broadcasted together.
All functions in `choicelist` must accept array arguments in the order
given in `arrays` and must return an array of the same shape as broadcasted
`arrays`.
Examples
--------
>>> x = np.arange(6)
>>> np.select([x <3, x > 3], [x**2, x**3], default=0)
array([ 0, 1, 4, 0, 64, 125])
>>> _lazyselect([x < 3, x > 3], [lambda x: x**2, lambda x: x**3], (x,))
array([ 0., 1., 4., 0., 64., 125.])
>>> a = -np.ones_like(x)
>>> _lazyselect([x < 3, x > 3],
... [lambda x, a: x**2, lambda x, a: a * x**3],
... (x, a), default=np.nan)
array([ 0., 1., 4., nan, -64., -125.])
"""
arrays = np.broadcast_arrays(*arrays)
tcode = np.mintypecode([a.dtype.char for a in arrays])
out = np.full(np.shape(arrays[0]), fill_value=default, dtype=tcode)
for index in range(len(condlist)):
func, cond = choicelist[index], condlist[index]
        # Skip conditions that select no elements at all
        if not np.any(cond):
            continue
cond, _ = np.broadcast_arrays(cond, arrays[0])
temp = tuple(np.extract(cond, arr) for arr in arrays)
np.place(out, cond, func(*temp))
return out
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""Allocate a new ndarray with aligned memory.
Primary use case for this currently is working around a f2py issue
in NumPy 1.9.1, where dtype.alignment is such that np.zeros() does
not necessarily create arrays aligned up to it.
"""
dtype = np.dtype(dtype)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + align + 1, np.uint8)
offset = buf.__array_interface__['data'][0] % align
if offset != 0:
offset = align - offset
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
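# Illustrative use: _aligned_zeros((3, 4), np.float64, align=64) returns a
# zeroed (3, 4) array whose data pointer is 64-byte aligned.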
def _prune_array(array):
"""Return an array equivalent to the input array. If the input
array is a view of a much larger array, copy its contents to a
newly allocated array. Otherwise, return the input unchanged.
"""
if array.base is not None and array.size < array.base.size // 2:
return array.copy()
return array
def prod(iterable):
"""
Product of a sequence of numbers.
Faster than np.prod for short lists like array shapes, and does
not overflow if using Python integers.
"""
product = 1
for x in iterable:
product *= x
return product
def float_factorial(n: int) -> float:
"""Compute the factorial and return as a float
Returns infinity when result is too large for a double
"""
return float(math.factorial(n)) if n < 171 else np.inf
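# Illustrative sketch (not part of the original source): 170! is the largest
# factorial representable as a finite IEEE-754 double, which is why 171 is
# the cutoff above.
#
#     >>> float_factorial(170) < np.inf
#     True
#     >>> float_factorial(171)
#     inf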
class DeprecatedImport(object):
"""
Deprecated import with redirection and warning.
Examples
--------
Suppose you previously had in some module::
from foo import spam
If this has to be deprecated, do::
spam = DeprecatedImport("foo.spam", "baz")
to redirect users to use "baz" module instead.
"""
def __init__(self, old_module_name, new_module_name):
self._old_name = old_module_name
self._new_name = new_module_name
__import__(self._new_name)
self._mod = sys.modules[self._new_name]
def __dir__(self):
return dir(self._mod)
def __getattr__(self, name):
warnings.warn("Module %s is deprecated, use %s instead"
% (self._old_name, self._new_name),
DeprecationWarning)
return getattr(self._mod, name)
# copy-pasted from scikit-learn utils/validation.py
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None (or np.random), return the RandomState singleton used
by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
If seed is a new-style np.random.Generator, return it.
Otherwise, raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
try:
# Generator is only available in numpy >= 1.17
if isinstance(seed, np.random.Generator):
return seed
except AttributeError:
pass
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
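# Illustrative usage sketch (not part of the original source), covering the
# seed types accepted by check_random_state above.
#
#     >>> check_random_state(None) is np.random.mtrand._rand
#     True
#     >>> isinstance(check_random_state(42), np.random.RandomState)
#     True
#     >>> rs = np.random.RandomState(0)
#     >>> check_random_state(rs) is rs
#     True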
def _asarray_validated(a, check_finite=True,
sparse_ok=False, objects_ok=False, mask_ok=False,
as_inexact=False):
"""
Helper function for SciPy argument validation.
Many SciPy linear algebra functions do support arbitrary array-like
input arguments. Examples of commonly unsupported inputs include
matrices containing inf/nan, sparse matrix representations, and
matrices with complicated elements.
Parameters
----------
a : array_like
The array-like input.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
sparse_ok : bool, optional
True if scipy sparse matrices are allowed.
objects_ok : bool, optional
        True if arrays with dtype('O') are allowed.
mask_ok : bool, optional
True if masked arrays are allowed.
as_inexact : bool, optional
True to convert the input array to a np.inexact dtype.
Returns
-------
ret : ndarray
The converted validated array.
"""
if not sparse_ok:
import scipy.sparse
if scipy.sparse.issparse(a):
msg = ('Sparse matrices are not supported by this function. '
'Perhaps one of the scipy.sparse.linalg functions '
'would work instead.')
raise ValueError(msg)
if not mask_ok:
if np.ma.isMaskedArray(a):
raise ValueError('masked arrays are not supported')
toarray = np.asarray_chkfinite if check_finite else np.asarray
a = toarray(a)
if not objects_ok:
if a.dtype is np.dtype('O'):
raise ValueError('object arrays are not supported')
if as_inexact:
if not np.issubdtype(a.dtype, np.inexact):
a = toarray(a, dtype=np.float_)
return a
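# Illustrative usage sketch (not part of the original source): a plain nested
# list converts to an ndarray, while non-finite input raises because
# check_finite defaults to True.
#
#     >>> _asarray_validated([[1.0, 2.0], [3.0, 4.0]]).shape
#     (2, 2)
#     >>> _asarray_validated([1.0, np.inf])
#     Traceback (most recent call last):
#         ...
#     ValueError: array must not contain infs or NaNs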
# Add a replacement for inspect.getfullargspec().
# The version below is borrowed from Django,
# https://github.com/django/django/pull/4846.
# Note an inconsistency between inspect.getfullargspec(func) and
# inspect.signature(func). If `func` is a bound method, the latter does *not*
# list `self` as a first argument, while the former *does*.
# Hence, cook up a common ground replacement: `getfullargspec_no_self` which
# mimics `inspect.getfullargspec` but does not list `self`.
#
# This way, the caller code does not need to know whether it uses a legacy
# .getfullargspec or a bright and shiny .signature.
FullArgSpec = namedtuple('FullArgSpec',
['args', 'varargs', 'varkw', 'defaults',
'kwonlyargs', 'kwonlydefaults', 'annotations'])
def getfullargspec_no_self(func):
"""inspect.getfullargspec replacement using inspect.signature.
If func is a bound method, do not list the 'self' parameter.
Parameters
----------
func : callable
A callable to inspect
Returns
-------
fullargspec : FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
kwonlydefaults, annotations)
NOTE: if the first argument of `func` is self, it is *not*, I repeat
*not*, included in fullargspec.args.
This is done for consistency between inspect.getargspec() under
Python 2.x, and inspect.signature() under Python 3.x.
"""
sig = inspect.signature(func)
args = [
p.name for p in sig.parameters.values()
if p.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.POSITIONAL_ONLY]
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
varkw = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
varkw = varkw[0] if varkw else None
defaults = tuple(
p.default for p in sig.parameters.values()
if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and
p.default is not p.empty)
) or None
kwonlyargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.KEYWORD_ONLY
]
kwdefaults = {p.name: p.default for p in sig.parameters.values()
if p.kind == inspect.Parameter.KEYWORD_ONLY and
p.default is not p.empty}
annotations = {p.name: p.annotation for p in sig.parameters.values()
if p.annotation is not p.empty}
return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
kwdefaults or None, annotations)
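# Illustrative usage sketch (not part of the original source): `self` is
# dropped when a bound method is inspected, as the docstring above promises.
#
#     >>> class Spam(object):
#     ...     def eggs(self, x, y=1):
#     ...         pass
#     >>> getfullargspec_no_self(Spam().eggs).args
#     ['x', 'y']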
class MapWrapper(object):
"""
Parallelisation wrapper for working with map-like callables, such as
`multiprocessing.Pool.map`.
Parameters
----------
pool : int or map-like callable
If `pool` is an integer, then it specifies the number of threads to
use for parallelization. If ``int(pool) == 1``, then no parallel
processing is used and the map builtin is used.
If ``pool == -1``, then the pool will utilize all available CPUs.
If `pool` is a map-like callable that follows the same
calling sequence as the built-in map function, then this callable is
used for parallelization.
"""
def __init__(self, pool=1):
self.pool = None
self._mapfunc = map
self._own_pool = False
if callable(pool):
self.pool = pool
self._mapfunc = self.pool
else:
from multiprocessing import Pool
# user supplies a number
if int(pool) == -1:
# use as many processors as possible
self.pool = Pool()
self._mapfunc = self.pool.map
self._own_pool = True
elif int(pool) == 1:
pass
elif int(pool) > 1:
# use the number of processors requested
self.pool = Pool(processes=int(pool))
self._mapfunc = self.pool.map
self._own_pool = True
else:
raise RuntimeError("Number of workers specified must be -1,"
" an int >= 1, or an object with a 'map' method")
def __enter__(self):
return self
def terminate(self):
if self._own_pool:
self.pool.terminate()
def join(self):
if self._own_pool:
self.pool.join()
def close(self):
if self._own_pool:
self.pool.close()
def __exit__(self, exc_type, exc_value, traceback):
if self._own_pool:
self.pool.close()
self.pool.terminate()
def __call__(self, func, iterable):
# only accept one iterable because that's all Pool.map accepts
try:
return self._mapfunc(func, iterable)
except TypeError as e:
# wrong number of arguments
raise TypeError("The map-like callable must be of the"
" form f(func, iterable)") from e
def rng_integers(gen, low, high=None, size=None, dtype='int64',
endpoint=False):
"""
Return random integers from low (inclusive) to high (exclusive), or if
endpoint=True, low (inclusive) to high (inclusive). Replaces
`RandomState.randint` (with endpoint=False) and
`RandomState.random_integers` (with endpoint=True).
Return random integers from the "discrete uniform" distribution of the
specified dtype. If high is None (the default), then results are from
0 to low.
Parameters
----------
gen: {None, np.random.RandomState, np.random.Generator}
Random number generator. If None, then the np.random.RandomState
singleton is used.
low: int or array-like of ints
Lowest (signed) integers to be drawn from the distribution (unless
high=None, in which case this parameter is 0 and this value is used
for high).
high: int or array-like of ints
If provided, one above the largest (signed) integer to be drawn from
the distribution (see above for behavior if high=None). If array-like,
must contain integer values.
    size: int or tuple of ints, optional
        Output shape. If the given shape is, e.g., (m, n, k), then m * n * k
        samples are drawn. Default is None, in which case a single value is
        returned.
    dtype: {str, dtype}, optional
        Desired dtype of the result. All dtypes are determined by their name,
        i.e., 'int64', 'int', etc., so byteorder is not available and a
        specific precision may have different C types depending on the
        platform. The default value is 'int64'.
endpoint: bool, optional
If True, sample from the interval [low, high] instead of the default
        [low, high). Defaults to False.
Returns
-------
out: int or ndarray of ints
size-shaped array of random integers from the appropriate distribution,
or a single such random int if size not provided.
"""
if isinstance(gen, Generator):
return gen.integers(low, high=high, size=size, dtype=dtype,
endpoint=endpoint)
else:
if gen is None:
# default is RandomState singleton used by np.random.
gen = np.random.mtrand._rand
if endpoint:
# inclusive of endpoint
# remember that low and high can be arrays, so don't modify in
# place
if high is None:
return gen.randint(low + 1, size=size, dtype=dtype)
if high is not None:
return gen.randint(low, high=high + 1, size=size, dtype=dtype)
# exclusive
return gen.randint(low, high=high, size=size, dtype=dtype)
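# Illustrative usage sketch (not part of the original source): one calling
# convention across the legacy RandomState and newer Generator APIs, here
# with an inclusive endpoint so values land in [1, 6].
#
#     >>> vals = rng_integers(np.random.RandomState(0), 1, 6, size=10,
#     ...                     endpoint=True)
#     >>> bool((vals >= 1).all() and (vals <= 6).all())
#     True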
1c35a915fceb0b31f4541e4a9cb30f32209280a0 | 2,441 | py | Python | task-example.py | EverAzureRest/batch_examples | 7daec97a468770c3d07cdb02f67951e5be75c153 | ["MIT"] | null | null | null | task-example.py | EverAzureRest/batch_examples | 7daec97a468770c3d07cdb02f67951e5be75c153 | ["MIT"] | null | null | null | task-example.py | EverAzureRest/batch_examples | 7daec97a468770c3d07cdb02f67951e5be75c153 | ["MIT"] | null | null | null |
from datetime import timedelta
import azure.batch.batch_auth as batchauth
import azure.batch._batch_service_client as batch
import uuid
import datetime
import time
# Batch account credentials
BATCH_ACCOUNT_NAME = ''
BATCH_ACCOUNT_URL = ''
BATCH_ACCOUNT_KEY = ''
# Create a Batch service client used for all subsequent calls to the
# Batch service.
credentials = batchauth.SharedKeyCredentials(BATCH_ACCOUNT_NAME,
BATCH_ACCOUNT_KEY)
batch_client = batch.BatchServiceClient(
credentials,
batch_url=BATCH_ACCOUNT_URL)
pool = batch_client.pool.get(
pool_id='testPool'
)
# TODO: Create nodes prior to run.
poolResizeParam = batch.models.PoolResizeParameter(
target_dedicated_nodes=1
)
batch_client.pool.resize(
pool_id=pool.id,
pool_resize_parameter=poolResizeParam
)
job = batch.models.JobAddParameter(
id=str(uuid.uuid1()),
display_name='myBatchJob',
pool_info=batch.models.PoolInformation(
pool_id=pool.id
),
uses_task_dependencies = 'true'
)
job1 = batch_client.job.add(job)
task1 = batch.models.TaskAddParameter(
id='task1',
command_line='cmd /c echo "Hello From Batch" >task.txt'
)
dependentTasks = list()
dependentTasks.append(task1.id)
task2 = batch.models.TaskAddParameter(
id='task2',
command_line = 'cmd /c echo "this is task2 - should execute after task 1" >task2.txt',
depends_on = batch.models.TaskDependencies(task_ids=dependentTasks)
)
tasks = list()
tasks.append(task1)
tasks.append(task2)
batch_client.task.add_collection(
job_id=job.id,
value=tasks
)
# List the jobs in the account with the batch_client
jobs = batch_client.job.list()
for j in jobs:
    print(j.id)
# Watch tasks for completion, then resize the pool back to zero
job_timeout = timedelta(minutes=30)
timeout_expiration = datetime.datetime.now() + job_timeout
while datetime.datetime.now() < timeout_expiration:
tasks = batch_client.task.list(job.id)
incomplete_tasks = [task for task in tasks if
task.state != batch.models.TaskState.completed]
    if not incomplete_tasks:
        # All tasks are done: give the nodes time to go idle, scale the
        # pool back down to zero, and stop polling.
        time.sleep(600)
        newpoolResizeParam = batch.models.PoolResizeParameter(
            target_dedicated_nodes=0
        )
        batch_client.pool.resize(
            pool_id=pool.id,
            pool_resize_parameter=newpoolResizeParam
        )
        break
    else:
        time.sleep(1)
1c35a9dccb3bab73f67b1b1fbe686f62f3c44b14 | 58,897 | py | Python | oscar/lib/python2.7/site-packages/django/test/testcases.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | ["BSD-3-Clause"] | null | null | null | oscar/lib/python2.7/site-packages/django/test/testcases.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | ["BSD-3-Clause"] | null | null | null | oscar/lib/python2.7/site-packages/django/test/testcases.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | ["BSD-3-Clause"] | null | null | null |
import difflib
import json
import posixpath
import sys
import threading
import unittest
import warnings
from collections import Counter
from contextlib import contextmanager
from copy import copy
from functools import wraps
from unittest.util import safe_repr
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ValidationError
from django.core.files import locks
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import WSGIRequestHandler, WSGIServer
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.http.request import split_domain_port, validate_host
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import setting_changed, template_rendered
from django.test.utils import (
CaptureQueriesContext, ContextList, compare_xml, modify_settings,
override_settings,
)
from django.utils import six
from django.utils.decorators import classproperty
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import (
unquote, urljoin, urlparse, urlsplit, urlunsplit,
)
from django.utils.six.moves.urllib.request import url2pathname
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
"""
Puts value into a list if it's not already one.
Returns an empty list if value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = '%s\n%s' % (msg, e)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super(_AssertNumQueriesContext, self).__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super(_AssertNumQueriesContext, self).__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed, self.num,
"%d queries executed, %d expected\nCaptured queries were:\n%s" % (
executed, self.num,
'\n'.join(
query['sql'] for query in self.captured_queries
)
)
)
class _AssertTemplateUsedContext(object):
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return '%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if len(self.rendered_templates) == 0:
message += ' No template was rendered.'
else:
message += ' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names))
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return '%s was rendered.' % self.template_name
class _CursorFailure(object):
def __init__(self, cls_name, wrapped):
self.cls_name = cls_name
self.wrapped = wrapped
def __call__(self):
raise AssertionError(
"Database queries aren't allowed in SimpleTestCase. "
"Either use TestCase or TransactionTestCase to ensure proper test isolation or "
"set %s.allow_database_queries to True to silence this failure." % self.cls_name
)
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
_overridden_settings = None
_modified_settings = None
# Tests shouldn't be allowed to query the database since
# this base class doesn't enforce any isolation.
allow_database_queries = False
@classmethod
def setUpClass(cls):
super(SimpleTestCase, cls).setUpClass()
if cls._overridden_settings:
cls._cls_overridden_context = override_settings(**cls._overridden_settings)
cls._cls_overridden_context.enable()
if cls._modified_settings:
cls._cls_modified_context = modify_settings(cls._modified_settings)
cls._cls_modified_context.enable()
if not cls.allow_database_queries:
for alias in connections:
connection = connections[alias]
connection.cursor = _CursorFailure(cls.__name__, connection.cursor)
connection.chunked_cursor = _CursorFailure(cls.__name__, connection.chunked_cursor)
@classmethod
def tearDownClass(cls):
if not cls.allow_database_queries:
for alias in connections:
connection = connections[alias]
connection.cursor = connection.cursor.wrapped
connection.chunked_cursor = connection.chunked_cursor.wrapped
if hasattr(cls, '_cls_modified_context'):
cls._cls_modified_context.disable()
delattr(cls, '_cls_modified_context')
if hasattr(cls, '_cls_overridden_context'):
cls._cls_overridden_context.disable()
delattr(cls, '_cls_overridden_context')
super(SimpleTestCase, cls).tearDownClass()
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
testMethod = getattr(self, self._testMethodName)
skipped = (
getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)
)
if not skipped:
try:
self._pre_setup()
except Exception:
result.addError(self, sys.exc_info())
return
super(SimpleTestCase, self).__call__(result)
if not skipped:
try:
self._post_teardown()
except Exception:
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* Creating a test client.
* Clearing the mail test outbox.
"""
self.client = self.client_class()
mail.outbox = []
def _post_teardown(self):
"""Perform any post-test things."""
pass
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts to the original value when exiting the context.
"""
return override_settings(**kwargs)
def modify_settings(self, **kwargs):
"""
        A context manager that temporarily applies changes to a list setting
        and reverts to the original value when exiting the context.
"""
return modify_settings(**kwargs)
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, host=None, msg_prefix='',
fetch_redirect_response=True):
"""Asserts that a response redirected to a specific URL, and that the
redirect URL can be loaded.
Note that assertRedirects won't work for external links since it uses
TestClient to do a request (use fetch_redirect_response=False to check
such links without fetching them).
"""
if host is not None:
warnings.warn(
"The host argument is deprecated and no longer used by assertRedirects",
RemovedInDjango20Warning, stacklevel=2
)
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(
len(response.redirect_chain) > 0,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
self.assertEqual(
response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected: Response code was %d (expected %d)"
% (response.redirect_chain[0][1], status_code)
)
url, status_code = response.redirect_chain[-1]
scheme, netloc, path, query, fragment = urlsplit(url)
self.assertEqual(
response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final Response code was %d (expected %d)"
% (response.status_code, target_status_code)
)
else:
# Not a followed redirect
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
# Prepend the request path to handle relative path redirects.
if not path.startswith('/'):
url = urljoin(response.request['PATH_INFO'], url)
path = urljoin(response.request['PATH_INFO'], path)
if fetch_redirect_response:
# netloc might be empty, or in cases where Django tests the
# HTTP scheme, the convention is for netloc to be 'testserver'.
# Trust both as "internal" URLs here.
domain, port = split_domain_port(netloc)
if domain and not validate_host(domain, settings.ALLOWED_HOSTS):
raise ValueError(
"The test client is unable to fetch remote URLs (got %s). "
"If the host is served by Django, add '%s' to ALLOWED_HOSTS. "
"Otherwise, use assertRedirects(..., fetch_redirect_response=False)."
% (url, domain)
)
redirect_response = response.client.get(path, QueryDict(query), secure=(scheme == 'https'))
# Get the redirection page, using the same client that was used
# to obtain the original response.
self.assertEqual(
redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s': response code was %d (expected %d)"
% (path, redirect_response.status_code, target_status_code)
)
if url != expected_url:
# For temporary backwards compatibility, try to compare with a relative url
e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)
relative_url = urlunsplit(('', '', e_path, e_query, e_fragment))
if url == relative_url:
warnings.warn(
"assertRedirects had to strip the scheme and domain from the "
"expected URL, as it was always added automatically to URLs "
"before Django 1.9. Please update your expected URLs by "
"removing the scheme and domain.",
RemovedInDjango20Warning, stacklevel=2)
expected_url = relative_url
self.assertEqual(
url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url)
)
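    # Illustrative usage sketch, added for this document (not part of the
    # original Django source); the '/protected/' and '/login/' URLs are
    # hypothetical.
    #
    #     response = self.client.get('/protected/')
    #     self.assertRedirects(response, '/login/?next=/protected/')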
def _assert_contains(self, response, text, status_code, msg_prefix, html):
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if hasattr(response, 'render') and callable(response.render) and not response.is_rendered:
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code)
)
if response.streaming:
content = b''.join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = force_text(text, encoding=response.charset)
content = content.decode(response.charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:")
text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:")
real_count = content.count(text)
return (text_repr, real_count, msg_prefix)
def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False):
"""
Asserts that a response indicates that some content was retrieved
successfully, (i.e., the HTTP status code was as expected), and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of %s in response (expected %d)" % (real_count, text_repr, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr)
def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False):
"""
Asserts that a response indicates that some content was retrieved
successfully, (i.e., the HTTP status code was as expected), and that
        ``text`` does not occur in the content of the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
self.assertEqual(real_count, 0, msg_prefix + "Response should not contain %s" % text_repr)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Asserts that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i, context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors))
)
elif field in context[form].fields:
self.fail(
msg_prefix + "The field '%s' on form '%s' in context %d contains no errors" %
(field, form, i)
)
else:
self.fail(
msg_prefix + "The form '%s' in context %d does not contain the field '%s'" %
(form, i, field)
)
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(
err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors)
)
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the response" % form)
def assertFormsetError(self, response, formset, form_index, field, errors,
msg_prefix=''):
"""
Asserts that a formset used to render the response has a specific error.
For field errors, specify the ``form_index`` and the ``field``.
For non-field errors, specify the ``form_index`` and the ``field`` as
None.
For non-form errors, specify ``form_index`` as None and the ``field``
as None.
"""
# Add punctuation to msg_prefix
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + 'Response did not use any contexts to '
'render the response')
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_formset = False
for i, context in enumerate(contexts):
if formset not in context:
continue
found_formset = True
for err in errors:
if field is not None:
if field in context[formset].forms[form_index].errors:
field_errors = context[formset].forms[form_index].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on formset '%s', "
"form %d in context %d does not contain the "
"error '%s' (actual errors: %s)" %
(field, formset, form_index, i, err, repr(field_errors))
)
elif field in context[formset].forms[form_index].fields:
self.fail(
msg_prefix + "The field '%s' on formset '%s', form %d in context %d contains no errors"
% (field, formset, form_index, i)
)
else:
self.fail(
msg_prefix + "The formset '%s', form %d in context %d does not contain the field '%s'"
% (formset, form_index, i, field)
)
elif form_index is not None:
non_field_errors = context[formset].forms[form_index].non_field_errors()
self.assertFalse(
len(non_field_errors) == 0,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain any non-field errors." % (formset, form_index, i)
)
self.assertTrue(
err in non_field_errors,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain the non-field error '%s' (actual errors: %s)"
% (formset, form_index, i, err, repr(non_field_errors))
)
else:
non_form_errors = context[formset].non_form_errors()
self.assertFalse(
len(non_form_errors) == 0,
msg_prefix + "The formset '%s' in context %d does not "
"contain any non-form errors." % (formset, i)
)
self.assertTrue(
err in non_form_errors,
msg_prefix + "The formset '%s' in context %d does not "
"contain the non-form error '%s' (actual errors: %s)"
% (formset, i, err, repr(non_form_errors))
)
if not found_formset:
self.fail(msg_prefix + "The formset '%s' was not used to render the response" % formset)
def _assert_template_used(self, response, template_name, msg_prefix):
if response is None and template_name is None:
raise TypeError('response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
if template_name is not None and response is not None and not hasattr(response, 'templates'):
raise ValueError(
"assertTemplateUsed() and assertTemplateNotUsed() are only "
"usable on responses fetched using the Django test Client."
)
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
# use this template with context manager
return template_name, None, msg_prefix
template_names = [t.name for t in response.templates if t.name is not None]
return None, template_names, msg_prefix
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None):
"""
Asserts that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix)
if context_mgr_template:
# Use assertTemplateUsed as context manager.
return _AssertTemplateUsedContext(self, context_mgr_template)
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(
template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s"
% (template_name, ', '.join(template_names))
)
if count is not None:
self.assertEqual(
template_names.count(template_name), count,
msg_prefix + "Template '%s' was expected to be rendered %d "
"time(s) but was actually rendered %d time(s)."
% (template_name, count, template_names.count(template_name))
)
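    # Illustrative usage sketch (not part of the original source): both the
    # response form and the context-manager form. The URL and template name
    # are hypothetical, and render_to_string is assumed imported from
    # django.template.loader.
    #
    #     response = self.client.get('/home/')
    #     self.assertTemplateUsed(response, 'home.html')
    #     with self.assertTemplateUsed('home.html'):
    #         render_to_string('home.html')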
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Asserts that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix
)
if context_mgr_template:
# Use assertTemplateNotUsed as context manager.
return _AssertTemplateNotUsedContext(self, context_mgr_template)
self.assertFalse(
template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering the response" % template_name
)
@contextmanager
def _assert_raises_message_cm(self, expected_exception, expected_message):
with self.assertRaises(expected_exception) as cm:
yield cm
self.assertIn(expected_message, str(cm.exception))
def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs):
"""
        Asserts that expected_message is found in the message of a raised
exception.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
args: Function to be called and extra positional args.
kwargs: Extra kwargs.
"""
# callable_obj was a documented kwarg in Django 1.8 and older.
callable_obj = kwargs.pop('callable_obj', None)
if callable_obj:
warnings.warn(
'The callable_obj kwarg is deprecated. Pass the callable '
'as a positional argument instead.', RemovedInDjango20Warning
)
elif len(args):
callable_obj = args[0]
args = args[1:]
cm = self._assert_raises_message_cm(expected_exception, expected_message)
# Assertion used in context manager fashion.
if callable_obj is None:
return cm
# Assertion was passed a callable.
with cm:
callable_obj(*args, **kwargs)
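    # Illustrative usage sketch (not part of the original source): the
    # context-manager form returned when no callable is passed, and the
    # positional-callable form.
    #
    #     with self.assertRaisesMessage(ValueError, 'invalid literal'):
    #         int('abc')
    #     self.assertRaisesMessage(ValueError, 'invalid literal', int, 'abc')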
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=''):
"""
Asserts that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args, **dict(field_kwargs, required=False))
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [force_text(required.error_messages['required'])]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages, error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({'min_length': 2, 'max_length': 20})
self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass)
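    # Illustrative usage sketch (not part of the original source): exercising
    # EmailField with one valid and one invalid input. The exact error text
    # may vary across Django versions.
    #
    #     from django.forms import EmailField
    #     self.assertFieldOutput(
    #         EmailField,
    #         valid={'a@a.com': 'a@a.com'},
    #         invalid={'aaa': ['Enter a valid email address.']},
    #     )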
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Asserts that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 != dom2:
standardMsg = '%s != %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
six.text_type(dom1).splitlines(),
six.text_type(dom2).splitlines(),
)))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Asserts that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:')
haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:')
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of '%s' in response (expected %d)" % (real_count, needle, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle)
def assertJSONEqual(self, raw, expected_data, msg=None):
"""
Asserts that the JSON fragments raw and expected_data are equal.
        Usual JSON non-significant whitespace rules apply, as the heavy
        lifting is delegated to the json library.
"""
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
def assertJSONNotEqual(self, raw, expected_data, msg=None):
"""
Asserts that the JSON fragments raw and expected_data are not equal.
        Usual JSON non-significant whitespace rules apply, as the heavy
        lifting is delegated to the json library.
"""
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertNotEqual(data, expected_data, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
diff = ('\n' + '\n'.join(
difflib.ndiff(
six.text_type(xml1).splitlines(),
six.text_type(xml2).splitlines(),
)
))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
if six.PY2:
assertCountEqual = unittest.TestCase.assertItemsEqual
assertNotRegex = unittest.TestCase.assertNotRegexpMatches
assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
assertRegex = unittest.TestCase.assertRegexpMatches
class TransactionTestCase(SimpleTestCase):
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
# Subclasses can define fixtures which will be automatically installed.
fixtures = None
# If transactions aren't available, Django will serialize the database
# contents into a fixture during setup and flush and reload them
# during teardown (as flush does not restore data from migrations).
# This can be slow; this flag allows enabling on a per-case basis.
serialized_rollback = False
# Since tests will be wrapped in a transaction, or serialized if they
# are not available, we allow queries to be run.
allow_database_queries = True
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* If the class has an 'available_apps' attribute, restricting the app
registry to these applications, then firing post_migrate -- it must
run with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, installing these fixtures.
"""
super(TransactionTestCase, self)._pre_setup()
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=self.available_apps,
enter=True,
)
for db_name in self._databases_names(include_mirrors=False):
emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False,
)
raise
@classmethod
def _databases_names(cls, include_mirrors=True):
# If the test case has a multi_db=True flag, act on all databases,
# including mirrors or not. Otherwise, just on the default DB.
if getattr(cls, 'multi_db', False):
return [
alias for alias in connections
if include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR']
]
else:
return [DEFAULT_DB_ALIAS]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = conn.ops.sequence_reset_by_name_sql(
no_style(), conn.introspection.sequence_list())
if sql_list:
with transaction.atomic(using=db_name):
cursor = conn.cursor()
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
# If we need to provide replica initial data from migrated apps,
# then do so.
if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"):
if self.available_apps is not None:
apps.unset_available_apps()
connections[db_name].creation.deserialize_db_from_string(
connections[db_name]._test_serialized_contents
)
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
if self.fixtures:
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures,
**{'verbosity': 0, 'database': db_name})
def _should_reload_connections(self):
return True
def _post_teardown(self):
"""Performs any post-test things. This includes:
* Flushing the contents of the database, to leave a clean slate. If
the class has an 'available_apps' attribute, post_migrate isn't fired.
* Force-closing the connection, so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super(TransactionTestCase, self)._post_teardown()
if self._should_reload_connections():
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does a rollback, the effect
# of these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't
# happen, get a clean connection at the start of every test.
for conn in connections.all():
conn.close()
finally:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False)
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
# Flush the database
inhibit_post_migrate = (
self.available_apps is not None or
( # Inhibit the post_migrate signal when using serialized
# rollback to avoid trying to recreate the serialized data.
self.serialized_rollback and
hasattr(connections[db_name], '_test_serialized_contents')
)
)
call_command('flush', verbosity=0, interactive=False,
database=db_name, reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_migrate=inhibit_post_migrate)
def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None):
items = six.moves.map(transform, qs)
if not ordered:
return self.assertEqual(Counter(items), Counter(values), msg=msg)
values = list(values)
        # For example, qs.iterator() could be passed as qs, but it does not
        # have an 'ordered' attribute.
if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
raise ValueError("Trying to compare non-ordered queryset "
"against more than one ordered values")
return self.assertEqual(list(items), values, msg=msg)
def assertNumQueries(self, num, func=None, *args, **kwargs):
using = kwargs.pop("using", DEFAULT_DB_ALIAS)
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
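    # Illustrative usage sketch (not part of the original source): both
    # calling conventions supported above. `Article` is a hypothetical model.
    #
    #     self.assertNumQueries(1, Article.objects.count)
    #     with self.assertNumQueries(2):
    #         Article.objects.create(title='a')
    #         Article.objects.create(title='b')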
def connections_support_transactions():
"""
Returns True if all connections support transactions.
"""
return all(conn.features.supports_transactions
for conn in connections.all())
class TestCase(TransactionTestCase):
"""
Similar to TransactionTestCase, but uses `transaction.atomic()` to achieve
test isolation.
In most situations, TestCase should be preferred to TransactionTestCase as
it allows faster execution. However, there are some situations where using
TransactionTestCase might be necessary (e.g. testing some transactional
behavior).
On database backends with no transaction support, TestCase behaves as
TransactionTestCase.
"""
@classmethod
def _enter_atomics(cls):
"""Helper method to open atomic blocks for multiple databases"""
atomics = {}
for db_name in cls._databases_names():
atomics[db_name] = transaction.atomic(using=db_name)
atomics[db_name].__enter__()
return atomics
@classmethod
def _rollback_atomics(cls, atomics):
"""Rollback atomic blocks opened through the previous method"""
for db_name in reversed(cls._databases_names()):
transaction.set_rollback(True, using=db_name)
atomics[db_name].__exit__(None, None, None)
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
if not connections_support_transactions():
return
cls.cls_atomics = cls._enter_atomics()
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
try:
call_command('loaddata', *cls.fixtures, **{
'verbosity': 0,
'commit': False,
'database': db_name,
})
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
try:
cls.setUpTestData()
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
@classmethod
def tearDownClass(cls):
if connections_support_transactions():
cls._rollback_atomics(cls.cls_atomics)
for conn in connections.all():
conn.close()
super(TestCase, cls).tearDownClass()
@classmethod
def setUpTestData(cls):
"""Load initial data for the TestCase"""
pass
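    # Illustrative usage sketch (not part of the original source): a subclass
    # creating shared fixtures once per class rather than once per test.
    # `Author` is a hypothetical model.
    #
    #     class AuthorTests(TestCase):
    #         @classmethod
    #         def setUpTestData(cls):
    #             cls.author = Author.objects.create(name='Jane')
    #
    #         def test_name(self):
    #             self.assertEqual(self.author.name, 'Jane')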
def _should_reload_connections(self):
if connections_support_transactions():
return False
return super(TestCase, self)._should_reload_connections()
def _fixture_setup(self):
if not connections_support_transactions():
# If the backend does not support transactions, we should reload
# class data before each test
self.setUpTestData()
return super(TestCase, self)._fixture_setup()
assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
self.atomics = self._enter_atomics()
def _fixture_teardown(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_teardown()
try:
for db_name in reversed(self._databases_names()):
if self._should_check_constraints(connections[db_name]):
connections[db_name].check_constraints()
finally:
self._rollback_atomics(self.atomics)
def _should_check_constraints(self, connection):
return (
connection.features.can_defer_constraint_checks and
not connection.needs_rollback and connection.is_usable()
)
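# Illustrative sketch (not part of Django): a TestCase using class-level
# fixtures plus setUpTestData(). 'authors.json' and ``Author`` are
# hypothetical; the per-class data is created once and then rolled back by
# the class-wide atomic blocks opened in setUpClass() above.
class _TestCaseDataSketch(TestCase):
    fixtures = ['authors.json']  # hypothetical fixture file

    @classmethod
    def setUpTestData(cls):
        cls.author = Author.objects.create(name='Ursula')  # hypothetical model

    def test_data_is_visible(self):
        self.assertEqual(self.author.name, 'Ursula')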
class CheckCondition(object):
"""Descriptor class for deferred condition checking"""
def __init__(self, *conditions):
self.conditions = conditions
def add_condition(self, condition, reason):
return self.__class__(*self.conditions + ((condition, reason),))
def __get__(self, instance, cls=None):
# Trigger access for all bases.
if any(getattr(base, '__unittest_skip__', False) for base in cls.__bases__):
return True
for condition, reason in self.conditions:
if condition():
# Override this descriptor's value and set the skip reason.
cls.__unittest_skip__ = True
cls.__unittest_skip_why__ = reason
return True
return False
def _deferredSkip(condition, reason):
def decorator(test_func):
if not (isinstance(test_func, type) and
issubclass(test_func, unittest.TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
# Assume a class is decorated
test_item = test_func
# Retrieve the possibly existing value from the class's dict to
# avoid triggering the descriptor.
skip = test_func.__dict__.get('__unittest_skip__')
if isinstance(skip, CheckCondition):
test_item.__unittest_skip__ = skip.add_condition(condition, reason)
elif skip is not True:
test_item.__unittest_skip__ = CheckCondition((condition, reason))
return test_item
return decorator
def skipIfDBFeature(*features):
"""
Skip a test if a database has at least one of the named features.
"""
return _deferredSkip(
lambda: any(getattr(connection.features, feature, False) for feature in features),
"Database has feature(s) %s" % ", ".join(features)
)
def skipUnlessDBFeature(*features):
"""
Skip a test unless a database has all the named features.
"""
return _deferredSkip(
lambda: not all(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support feature(s): %s" % ", ".join(features)
)
def skipUnlessAnyDBFeature(*features):
"""
Skip a test unless a database has any of the named features.
"""
return _deferredSkip(
lambda: not any(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support any of the feature(s): %s" % ", ".join(features)
)
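# Illustrative sketch (not part of Django): applying the deferred-skip
# decorators above to a class and to a method. The feature names here are
# hypothetical examples of DatabaseFeatures flags; the conditions are
# evaluated lazily (when the test runs or the class is inspected), not at
# import time.
@skipUnlessDBFeature('supports_transactions')
class _FeatureGatedSketch(TestCase):
    @skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_empty_string_round_trips(self):
        pass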
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
A regular WSGIRequestHandler except that it doesn't log any of the
requests it receives to standard output, so the request log doesn't
clutter the test results.
"""
def log_message(*args):
pass
class FSFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super(FSFilesHandler, self).__init__()
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""
Returns the relative path to the file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super(FSFilesHandler, self).get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
os_rel_path = posixpath.normpath(unquote(os_rel_path))
# Emulate behavior of django.contrib.staticfiles.views.serve() when it
# invokes staticfiles' finders functionality.
# TODO: Modify if/when that internal API is refactored
final_rel_path = os_rel_path.replace('\\', '/').lstrip('/')
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super(FSFilesHandler, self).__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
"""
Thread for running a live HTTP server while the tests are running.
"""
def __init__(self, host, static_handler, connections_override=None, port=0):
self.host = host
self.port = port
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super(LiveServerThread, self).__init__()
def run(self):
"""
Sets up the live server and databases, and then loops, handling
incoming HTTP requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
self.httpd = self._create_server()
# If binding to port zero, assign the port allocated by the OS.
if self.port == 0:
self.port = self.httpd.server_address[1]
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
finally:
connections.close_all()
def _create_server(self):
return WSGIServer((self.host, self.port), QuietWSGIRequestHandler, allow_reuse_address=False)
def terminate(self):
if hasattr(self, 'httpd'):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
self.join()
class LiveServerTestCase(TransactionTestCase):
"""
Does basically the same as TransactionTestCase but also launches a live
HTTP server in a separate thread so that the tests may use another testing
framework, such as Selenium, instead of the built-in dummy client.
Note that it inherits from TransactionTestCase instead of TestCase because
the threads don't share the same transactions (unless using in-memory
sqlite) and each thread needs to commit all its transactions so that the
other thread can see the changes.
"""
host = 'localhost'
port = 0
server_thread_class = LiveServerThread
static_handler = _StaticFilesHandler
@classproperty
def live_server_url(cls):
return 'http://%s:%s' % (cls.host, cls.server_thread.port)
@classmethod
def setUpClass(cls):
super(LiveServerTestCase, cls).setUpClass()
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == 'sqlite' and conn.is_in_memory_db():
# Explicitly enable thread-shareability for this connection
conn.allow_thread_sharing = True
connections_override[conn.alias] = conn
cls._live_server_modified_settings = modify_settings(
ALLOWED_HOSTS={'append': cls.host},
)
cls._live_server_modified_settings.enable()
cls.server_thread = cls._create_server_thread(connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
# Clean up behind ourselves, since tearDownClass won't get called in
# case of errors.
cls._tearDownClassInternal()
raise cls.server_thread.error
@classmethod
def _create_server_thread(cls, connections_override):
return cls.server_thread_class(
cls.host,
cls.static_handler,
connections_override=connections_override,
port=cls.port,
)
@classmethod
def _tearDownClassInternal(cls):
# There may not be a 'server_thread' attribute if setUpClass() raised an
# exception for some reason.
if hasattr(cls, 'server_thread'):
# Terminate the live server's thread
cls.server_thread.terminate()
# Restore sqlite in-memory database connections' non-shareability
for conn in connections.all():
if conn.vendor == 'sqlite' and conn.is_in_memory_db():
conn.allow_thread_sharing = False
@classmethod
def tearDownClass(cls):
cls._tearDownClassInternal()
cls._live_server_modified_settings.disable()
super(LiveServerTestCase, cls).tearDownClass()
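# Illustrative sketch (not part of Django): exercising the live server from a
# test. Any HTTP client works; urlopen keeps the sketch dependency-free, and
# live_server_url reflects the OS-assigned port when ``port`` is left at 0.
class _LiveServerSketch(LiveServerTestCase):
    def test_root_responds(self):
        from django.utils.six.moves.urllib.request import urlopen
        response = urlopen(self.live_server_url + '/')
        self.assertEqual(response.getcode(), 200)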
class SerializeMixin(object):
"""
Mixin to enforce serialization of TestCases that share a common resource.
Define a common 'lockfile' for each set of TestCases to serialize. This
file must exist on the filesystem.
Place this mixin early in the MRO in order to isolate setUpClass()/tearDownClass().
"""
lockfile = None
@classmethod
def setUpClass(cls):
if cls.lockfile is None:
raise ValueError(
"{}.lockfile isn't set. Set it to a unique value "
"in the base class.".format(cls.__name__))
cls._lockfile = open(cls.lockfile)
locks.lock(cls._lockfile, locks.LOCK_EX)
super(SerializeMixin, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(SerializeMixin, cls).tearDownClass()
cls._lockfile.close()
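# Illustrative sketch (not part of Django): serializing two TestCases that
# share a resource. The lockfile only has to be an existing file, so the
# module's own __file__ is a common choice; the mixin is listed first so its
# setUpClass()/tearDownClass() wrap the TestCase ones.
class _SharedResourceBaseSketch(SerializeMixin):
    lockfile = __file__

class _FirstSerializedSketch(_SharedResourceBaseSketch, TestCase):
    def test_uses_shared_resource(self):
        pass

class _SecondSerializedSketch(_SharedResourceBaseSketch, TestCase):
    def test_also_uses_shared_resource(self):
        pass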
1c35aa1277ffe802f90bac0cd78c1c4a49041400 | 69,352 | py | Python | hack/test_errata.py | Davoska/cincinnati-graph-data | 3bc79fdcefa72f570e0757c0bfd46d4302543264 | ["Apache-2.0"] | null | null | null | hack/test_errata.py | Davoska/cincinnati-graph-data | 3bc79fdcefa72f570e0757c0bfd46d4302543264 | ["Apache-2.0"] | null | null | null | hack/test_errata.py | Davoska/cincinnati-graph-data | 3bc79fdcefa72f570e0757c0bfd46d4302543264 | ["Apache-2.0"] | null | null | null |
import copy
import datetime
import os
import tempfile
import unittest
import urllib
from unittest.mock import MagicMock
from unittest.mock import patch
import errata
class GithubUserMock():
def __init__(self, login):
self.login = login
class GithubLabelMock():
def __init__(self, name):
self.name = name
class GithubPRMock:
def __init__(self, user, title, labels=[], number=0, body="", url="", html_url=""):
self.user = user
self.title = title
self.labels = labels
self.number = number
self.body = body
self.url = url
self.html_url = html_url
self.create_issue_comment = MagicMock()
def __eq__(self, other):
if not isinstance(other, GithubPRMock):
return False
return self.user == other.user \
and self.title == other.title \
and self.labels == other.labels \
and self.number == other.number \
and self.body == other.body \
and self.url == other.url \
and self.html_url == other.html_url
class ExtractErrataNumberFromBodyTest(unittest.TestCase):
def test_url_starting_with_valid_errata_marker(self):
"""
Test errata number extraction from valid URLs.
URLs starting with the corresponding ERRATA_MARKER defined in errata.py.
"""
param_list = [
('https://errata.devel.redhat.com/advisory/12345', 12345),
('https://errata.devel.redhat.com/advisory/67890', 67890),
('https://errata.devel.redhat.com/advisory/13579', 13579),
('https://errata.devel.redhat.com/advisory/24680', 24680),
('https://errata.devel.redhat.com/advisory/', None),
('https://errata.devel.redhat.com/advisory/invalid', None)
]
for (url, expected) in param_list:
with self.subTest(url=url):
self.assertEqual(errata.extract_errata_number_from_body(url), expected)
def test_invalid_url(self):
"""
Test errata number extraction from invalid URLs.
"""
param_list = [
'http://errata.devel.redhat.com/advisory/12345',
'https://errrata.devel.redhat.com/advisory/12345',
'https://errata.dvel.reddhat.com/advisori/12345',
'https://errata.devel.redhat.com/12345',
'https://errata.devel.com/advisory/12345',
'https://errata.redhat.com/advisory/12345',
'https://devel.redhat.com/advisory/12345',
'https://redhat.com/advisory/12345',
'https://errata.com/advisory/12345'
]
for url in param_list:
with self.subTest(url=url):
self.assertEqual(errata.extract_errata_number_from_body(url), None)
def test_missing_url(self):
"""
Test errata number extraction from bodies that are missing a URL.
"""
param_list = [
'errata',
'12345',
'errata is 12345'
]
for body in param_list:
with self.subTest(body=body):
self.assertEqual(errata.extract_errata_number_from_body(body), None)
def test_url_is_not_on_the_first_line(self):
"""
Test errata number extraction from valid URLs which are not located on the first line.
"""
param_list = [
'\nhttps://errata.devel.redhat.com/advisory/12345',
'\n\nhttps://errata.devel.redhat.com/advisory/12345'
]
for body in param_list:
with self.subTest(body=body):
self.assertEqual(errata.extract_errata_number_from_body(body), None)
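# Hedged sketch of the behavior these tests pin down; the real implementation
# lives in errata.py, and the exact regular expression there may differ. Only
# a marker URL at the very start of the body yields an advisory number.
import re

def _extract_errata_number_sketch(body):
    match = re.match(r'https://errata\.devel\.redhat\.com/advisory/(\d+)', body)
    return int(match.group(1)) if match else None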
class SaveAndLoadTest(unittest.TestCase):
def test_load_nonexisting_file(self):
"""
Test loading a nonexisting file.
"""
with tempfile.TemporaryDirectory() as tempdir:
cachepath = os.path.join(tempdir, "cache.json")
self.assertCountEqual(errata.load(cachepath), {})
def test_save_and_load_as_a_pair(self):
"""
Test using errata.save and errata.load as a pair to confirm their functionality.
"""
param_list = [
(),
({"foo": "bar"}),
({"value": "1234"}),
({"company": "Red Hat"}),
({"foo": "bar"}, {"value": "1234"}, {"errata": "1234"}),
({"value": "1234"}, {"foo": "bar"}, {"errata": "1234"})
]
for cache in param_list:
with self.subTest():
with tempfile.TemporaryDirectory() as tempdir:
cachepath = os.path.join(tempdir, "cache.json")
errata.save(cachepath, cache)
self.assertCountEqual(errata.load(cachepath), cache)
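# Hedged sketch of the cache helpers these tests treat as a pair; the real
# versions live in errata.py and may differ in detail. The only behavior the
# tests require is a JSON round-trip plus an empty cache when the file does
# not exist yet.
import json
import os.path

def _save_sketch(path, cache):
    with open(path, 'w') as f:
        json.dump(cache, f)

def _load_sketch(path):
    if not os.path.exists(path):
        return {}
    with open(path) as f:
        return json.load(f)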
class PollTest(unittest.TestCase):
def setUp(self):
self.raw_messages = [
(
True,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 11,
"product": "RHOSE",
"to": "SHIPPED_LIVE",
}
}
),
(
True,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 12,
"product": "RHOSE",
"to": "SHIPPED_LIVE",
}
}
),
(
False,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 21,
"product": "RHOSE",
"to": "QE",
}
}
),
(
False,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 22,
"product": "RHEL",
"to": "SHIPPED_LIVE",
}
}
),
(
False,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 23,
"product": "RHEL",
"to": "QE",
}
}
),
(
False,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 24,
"product": "SHIPPED_LIVE",
"to": "RHOSE",
}
}
)
]
self.valid_messages = [x[1] for x in self.raw_messages if x[0]]
self.invalid_messages = [x[1] for x in self.raw_messages if not x[0]]
@patch("json.load")
@patch("urllib.request.urlopen")
def test_params_of_urlopen_call(self, urlopen_mock, json_load_mock):
"""
Test the parameters of the datagrepper URL that is used for fetching raw messages.
"""
urlopen_mock.return_value = MagicMock()
json_load_mock.return_value = {
"raw_messages": [],
"pages": 1
}
polled_messages = []
for message in errata.poll(period=datetime.timedelta(seconds=3600)):
polled_messages.append(message)
# Get params of the url used in urlopen in errata.poll
parsed_url = urllib.parse.urlparse(urlopen_mock.call_args[0][0])
params = urllib.parse.parse_qs(parsed_url.query)
# Assert that the parameters comply with the datagrepper reference
self.assertGreater(int(params["page"][0]), 0) # Page must be greater than 0
self.assertLessEqual(int(params["rows_per_page"][0]), 100) # Must be less than or equal to 100
self.assertEqual(params["category"][0], "errata") # Should only look for errata category
self.assertEqual(params["contains"][0], "RHOSE") # Only messages containing RHOSE
@patch("json.load")
@patch("urllib.request.urlopen")
def test_number_of_returned_pages_is_zero(self, urlopen_mock, json_load_mock):
"""
Test poll's behavior when the returned data reports a page count of zero.
"""
urlopen_mock.return_value = MagicMock()
json_load_mock.return_value = {
"raw_messages": [],
"pages": 0
}
polled_messages = []
for message in errata.poll(period=datetime.timedelta(seconds=3600)):
polled_messages.append(message)
self.assertEqual(polled_messages, [])
@patch("json.load")
@patch("urllib.request.urlopen")
def test_no_raw_messages(self, urlopen_mock, json_load_mock):
"""
Test polling messages when the data doesn't contain any raw messages.
"""
urlopen_mock.return_value = MagicMock()
json_load_mock.return_value = {
"raw_messages": [],
"pages": 1
}
polled_messages = []
for message in errata.poll(period=datetime.timedelta(seconds=3600)):
polled_messages.append(message)
self.assertEqual(polled_messages, [])
@patch("json.load")
@patch("time.sleep")
@patch("urllib.request.urlopen")
def test_unresponsive_url_becomes_responsive(self, urlopen_mock, sleep_mock, json_load_mock):
"""
Test polling messages when request.urlopen raises an exception on the first try.
"""
urlopen_mock.side_effect = [
Exception("Unresponsive, request.urlopen has failed"),
MagicMock()
]
json_load_mock.return_value = {
"raw_messages": self.valid_messages,
"pages": 1
}
polled_messages = []
for message in errata.poll(period=datetime.timedelta(seconds=3600)):
polled_messages.append(message)
sleep_mock.assert_called_once() # The URL was unresponsive exactly once, so time.sleep should have been called exactly once
expected_msgs = [x['msg'] for x in self.valid_messages]
self.assertEqual(polled_messages, expected_msgs)
@patch("json.load")
@patch("urllib.request.urlopen")
def test_multiple_messages(self, urlopen_mock, json_load_mock):
"""
Test polling messages from raw messages that include both wanted and unwanted messages.
"""
urlopen_mock.return_value = MagicMock()
messages = self.valid_messages + self.invalid_messages
json_load_mock.return_value = {
"raw_messages": messages,
"pages": 1
}
polled_messages = []
for message in errata.poll(period=datetime.timedelta(seconds=3600)):
polled_messages.append(message)
expected_msgs = [x['msg'] for x in self.valid_messages]
self.assertEqual(polled_messages, expected_msgs)
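# Hedged sketch of the datagrepper query string these tests assert on. The
# four parameters are exactly the ones the assertions check; the datagrepper
# host and any extra parameters (such as a look-back delta derived from
# ``period``) are assumptions about errata.poll, not facts from this file.
import urllib.parse

def _datagrepper_query_sketch(page):
    return urllib.parse.urlencode({
        'page': page,            # must be greater than 0
        'rows_per_page': 100,    # datagrepper caps rows_per_page at 100
        'category': 'errata',    # only errata-category messages
        'contains': 'RHOSE',     # only messages mentioning RHOSE
    })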
class SynopsisMatchTest(unittest.TestCase):
def test_match(self):
"""
Ensure we match only the synopses that we want to match.
"""
for synopsis, expected in [
(
'Moderate: OpenShift Container Platform 4.7.13 bug fix and security update',
{
'impact': 'Moderate',
'version': '4.7.13',
'major': '4',
'minor': '7',
'patch': '13',
'prerelease': None,
'build': None,
'type': 'bug fix and security update',
},
),
(
'Moderate: OpenShift Container Platform 4.7.5 security and bug fix update',
{
'impact': 'Moderate',
'version': '4.7.5',
'major': '4',
'minor': '7',
'patch': '5',
'prerelease': None,
'build': None,
'type': 'security and bug fix update',
},
),
(
'OpenShift Container Platform 4.6 GA Images',
{
'impact': None,
'version': '4.6',
'major': '4',
'minor': '6',
'patch': None,
'prerelease': None,
'build': None,
'type': 'GA Images',
},
),
(
'OpenShift Container Platform 4.5.11 optional CSI driver Operators bug fix update',
None,
),
(
'Moderate: OpenShift Container Platform 4.5.20 bug fix and golang security update',
{
'impact': 'Moderate',
'version': '4.5.20',
'major': '4',
'minor': '5',
'patch': '20',
'prerelease': None,
'build': None,
'type': 'bug fix and golang security update',
},
),
(
'Low: OpenShift Container Platform 4.3.40 security and bug fix update',
{
'impact': 'Low',
'version': '4.3.40',
'major': '4',
'minor': '3',
'patch': '40',
'prerelease': None,
'build': None,
'type': 'security and bug fix update',
},
),
]:
with self.subTest(synopsis=synopsis):
actual = errata._SYNOPSIS_REGEXP.match(synopsis)
if actual:
self.assertEqual(actual.groupdict(), expected)
else:
self.assertEqual(actual, expected)
class AdvisoryPhrasingsTest(unittest.TestCase):
def test_phrasings(self):
"""
Ensure we can construct synonym phrasings.
"""
for advisory, expected in [
(
'RHBA-123',
['RHBA-123', 'RHSA-123'],
),
(
'RHSA-123',
['RHBA-123', 'RHSA-123'],
),
(
'https://example.com/RHBA-123',
['https://example.com/RHBA-123', 'https://example.com/RHSA-123'],
),
(
'https://example.com/RHBA-123/abc',
['https://example.com/RHBA-123/abc', 'https://example.com/RHSA-123/abc'],
),
]:
with self.subTest(advisory=advisory):
actual = list(errata.advisory_phrasings(advisory=advisory))
self.assertEqual(actual, expected)
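# Hedged sketch matching the phrasings expected above; the real helper lives
# in errata.py. It yields the RHBA and RHSA spellings of the same advisory,
# whether the input is a bare advisory ID or a URL containing one.
def _advisory_phrasings_sketch(advisory):
    for marker in ('RHBA', 'RHSA'):
        yield advisory.replace('RHBA', marker).replace('RHSA', marker)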
class NotifyTest(unittest.TestCase):
def setUp(self):
self.messages_including_approved_pr = [
(
{
"errata_id": 11,
"fulladvisory": "RHSA-2020:0000-00",
"product": "RHOSE",
"to": "SHIPPED_LIVE",
"synopsis": "OpenShift Container Platform 4.6 GA Images",
"when": "2021-01-01 12:00:00 UTC",
"uri": "Public_Errata_URI_11",
"approved_pr": "PR_HTML_URL_11"
},
'<!subteam^STE7S7ZU2>: '
'RHSA-2020:0000-00 shipped '
'2021-01-01 12:00:00 UTC: '
'OpenShift Container Platform 4.6 GA Images '
'Public_Errata_URI_11'
'\nPR PR_HTML_URL_11 has been approved'
),
(
{
"errata_id": 12,
"fulladvisory": "RHSA-2020:2000-20",
"product": "RHOSE",
"to": "SHIPPED_LIVE",
"synopsis": "Moderate: OpenShift Container Platform 4.5.20 bug fix and golang security update",
"when": "2021-01-02 13:00:00 UTC",
"uri": "Public_Errata_URI_12",
"approved_pr": "PR_HTML_URL_12"
},
'<!subteam^STE7S7ZU2>: '
'RHSA-2020:2000-20 shipped '
'2021-01-02 13:00:00 UTC: '
'Moderate: OpenShift Container Platform 4.5.20 bug fix and golang security update '
'Public_Errata_URI_12'
'\nPR PR_HTML_URL_12 has been approved'
)
]
self.messages_not_including_approved_pr = [
(
{
"errata_id": 21,
"fulladvisory": "RHSA-2020:0000-00",
"product": "RHOSE",
"to": "SHIPPED_LIVE",
"synopsis": "OpenShift Container Platform 4.6 GA Images",
"when": "2021-01-01 12:00:00 UTC",
"uri": "Public_Errata_URI_21",
},
'<!subteam^STE7S7ZU2>: '
'RHSA-2020:0000-00 shipped '
'2021-01-01 12:00:00 UTC: '
'OpenShift Container Platform 4.6 GA Images '
'Public_Errata_URI_21'
),
(
{
"errata_id": 22,
"fulladvisory": "RHSA-2020:2000-20",
"product": "RHOSE",
"to": "SHIPPED_LIVE",
"synopsis": "Moderate: OpenShift Container Platform 4.5.20 bug fix and golang security update",
"when": "2021-01-02 13:00:00 UTC",
"uri": "Public_Errata_URI_22",
},
'<!subteam^STE7S7ZU2>: '
'RHSA-2020:2000-20 shipped '
'2021-01-02 13:00:00 UTC: '
'Moderate: OpenShift Container Platform 4.5.20 bug fix and golang security update '
'Public_Errata_URI_22'
)
]
self.messages = \
self.messages_including_approved_pr + \
self.messages_not_including_approved_pr
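    # The expected texts above open with '<!subteam^STE7S7ZU2>', a Slack
    # user-group mention. When a webhook is given, notify is expected to post
    # roughly the following (a sketch inferred from the assertions below):
    #   data = urllib.parse.urlencode({'payload': {'text': text}}).encode('utf-8')
    #   urllib.request.urlopen(webhook, data=data)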
@patch("builtins.print")
@patch("urllib.request.urlopen")
def test_no_webhook(self, urlopen_mock, print_mock):
"""
        Test that notify prints the message when the webhook parameter is left at its default value.
"""
for message in self.messages:
with self.subTest(message=message):
errata.notify(message[0])
expected_message = message[0]
self.assertEqual(print_mock.call_args, unittest.mock.call(expected_message))
@patch("urllib.request.urlopen")
def test_format_of_message_not_including_approved_pr(self, urlopen_mock):
"""
        Test the format of the data that errata.notify passes to urllib.request.urlopen.
        This also covers the URL-encoded form of the message inside that data.
        Only messages without the approved_pr key are tested here.
"""
for (message, expected_message_in_data_to_be_uploaded) in self.messages_not_including_approved_pr:
with self.subTest(message=message):
expected_data_to_be_uploaded = urllib.parse.urlencode({
'payload': {
'text': expected_message_in_data_to_be_uploaded
}
}).encode('utf-8')
errata.notify(message, MagicMock())
uploaded_data = urlopen_mock.call_args[1]['data']
self.assertEqual(uploaded_data, expected_data_to_be_uploaded)
@patch("urllib.request.urlopen")
def test_format_of_message_including_approved_pr(self, urlopen_mock):
"""
        Test the format of the data that errata.notify passes to urllib.request.urlopen.
        This also covers the URL-encoded form of the message inside that data.
        Only messages including the approved_pr key are tested here.
"""
for (message, expected_message_in_data_to_be_uploaded) in self.messages_including_approved_pr:
with self.subTest(message=message):
expected_data_to_be_uploaded = urllib.parse.urlencode({
'payload': {
'text': expected_message_in_data_to_be_uploaded
}
}).encode('utf-8')
errata.notify(message, MagicMock())
uploaded_data = urlopen_mock.call_args[1]['data']
self.assertEqual(uploaded_data, expected_data_to_be_uploaded)
class GetOpenPRsToFastTest(unittest.TestCase):
def setUp(self):
self.repo = MagicMock()
self.labels_multiple_including_lgtm = [
[
GithubLabelMock('lgtm')
],
[
GithubLabelMock('bug'), GithubLabelMock('duplicate'), GithubLabelMock('lgtm'),
GithubLabelMock('documentation'), GithubLabelMock('invalid')
],
[
GithubLabelMock('wontfix'), GithubLabelMock('lgtm'),
GithubLabelMock('question'), GithubLabelMock('invalid')
],
[
GithubLabelMock('help wanted'), GithubLabelMock('lgtm'),
GithubLabelMock('good first issue'), GithubLabelMock('bug')
]
]
self.labels_multiple_not_including_lgtm = [
[
],
[
GithubLabelMock('wontfix'), GithubLabelMock('bug'),
GithubLabelMock('question'), GithubLabelMock('invalid')
],
[
GithubLabelMock('help wanted'), GithubLabelMock('invalid'),
GithubLabelMock('good first issue'), GithubLabelMock('duplicate')
],
[
GithubLabelMock('bug'), GithubLabelMock('duplicate'), GithubLabelMock('invalid'),
GithubLabelMock('documentation'), GithubLabelMock('enhancement')
]
]
self.prs_correct_and_expected_to_be_yielded = [
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.6.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_not_including_lgtm[0]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_not_including_lgtm[1]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_not_including_lgtm[2]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_not_including_lgtm[3]),
]
self.prs_including_the_lgtm_label = [
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_including_lgtm[0]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_including_lgtm[1]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_including_lgtm[2]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_including_lgtm[3])
]
self.prs_author_is_not_openshift_bot = [
GithubPRMock(GithubUserMock("user1234"), "Enable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("bot-openshift"), "Enable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("Openshift-Bot"), "Enable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("GitHubUser1234"), "Enable 4.0.0 in fast channel(s)")
]
self.prs_title_not_starting_with_Enable = [
GithubPRMock(GithubUserMock("openshift-bot"), ""),
GithubPRMock(GithubUserMock("openshift-bot"), "Fix component"),
GithubPRMock(GithubUserMock("openshift-bot"), "Add features in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "enable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Disable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enablee 4.0.0 in fast channel(s)")
]
self.prs_do_not_target_fast = [
GithubPRMock(GithubUserMock("openshift-bot"), "Enable "),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in FAST channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in faast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in stable channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in candidate channel(s)")
]
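    # The fixtures above cover each rejection reason separately:
    # get_open_prs_to_fast is expected to yield only open PRs authored by
    # openshift-bot whose title has the form "Enable <version> in fast
    # channel(s)" and which do not yet carry the 'lgtm' label.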
def test_prs_including_the_lgtm_label(self):
"""
Test retrieving PRs which include the LGTM label. These PRs should be skipped.
"""
self.repo.get_pulls = MagicMock(return_value=self.prs_including_the_lgtm_label)
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_prs = []
self.assertEqual(open_prs_to_fast, expected_prs)
def test_prs_author_is_not_openshift_bot(self):
"""
Test getting PRs whose author is not openshift-bot. These PRs should be skipped.
"""
self.repo.get_pulls = MagicMock(return_value=self.prs_author_is_not_openshift_bot)
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_prs = []
self.assertEqual(open_prs_to_fast, expected_prs)
def test_unknown_prs_should_be_skipped(self):
"""
        Test getting PRs with unrecognized titles (not starting with "Enable "). These PRs should be skipped.
"""
self.repo.get_pulls = MagicMock(return_value=self.prs_title_not_starting_with_Enable)
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_prs = []
self.assertEqual(open_prs_to_fast, expected_prs)
def test_ignore_prs_which_dont_target_fast(self):
"""
Test getting PRs which don't target fast. These PRs should be skipped.
"""
self.repo.get_pulls = MagicMock(return_value=self.prs_do_not_target_fast)
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_prs = []
self.assertEqual(open_prs_to_fast, expected_prs)
def test_correct_prs_should_be_yielded(self):
"""
Test getting PRs which are correct and should be yielded back.
"""
self.repo.get_pulls = MagicMock(return_value=self.prs_correct_and_expected_to_be_yielded)
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_prs = self.prs_correct_and_expected_to_be_yielded
self.assertEqual(open_prs_to_fast, expected_prs)
def test_get_pulls_query_params(self):
"""
Test query params used for getting the initial PRs from the repository.
"""
self.repo.get_pulls = MagicMock(return_value=[])
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_params = {
'state': 'open',
'base': 'master',
'sort': 'created',
}
self.assertEqual(self.repo.get_pulls.call_args, (unittest.mock.call(**expected_params)))
class LgtmFastPrForErrata(unittest.TestCase):
def setUp(self):
self.repo = MagicMock()
self.github_object_mock = MagicMock()
self.github_object_mock.get_repo.return_value = self.repo
self.prs_with_html_url_of_expected_pr = [
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 1, "https://errata.devel.redhat.com/advisory/1111", "PR_URL1", "PR_HTML_URL1"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 2, "https://errata.devel.redhat.com/advisory/1234", "PR_URL2", "PR_HTML_URL2"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 3, "https://errata.devel.redhat.com/advisory/5678", "PR_URL3", "PR_HTML_URL3"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 4, "https://errata.devel.redhat.com/advisory/1357", "PR_URL4", "PR_HTML_URL4")
],
{
"errata_id": 1357
},
"PR_HTML_URL4" # HTML url of a PR which body has the wanted errata id.
),
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 12345, "https://errata.devel.redhat.com/advisory/41", "PR_URL12345", "PR_HTML_URL12345"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 12354, "https://errata.devel.redhat.com/advisory/42", "PR_URL12354", "PR_HTML_URL12354"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 12340, "https://errata.devel.redhat.com/advisory/43", "PR_URL12340", "PR_HTML_URL12340"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 43215, "https://errata.devel.redhat.com/advisory/44", "PR_URL43215", "PR_HTML_URL43215")
],
{
"errata_id": 41
},
"PR_HTML_URL12345"
),
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 1111, "https://errata.devel.redhat.com/advisory/51", "PR_URL1111", "PR_HTML_URL1111"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 2222, "https://errata.devel.redhat.com/advisory/62", "PR_URL2222", "PR_HTML_URL2222"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 3333, "https://errata.devel.redhat.com/advisory/73", "PR_URL3333", "PR_HTML_URL3333"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 4444, "https://errata.devel.redhat.com/advisory/84", "PR_URL4444", "PR_HTML_URL4444")
],
{
"errata_id": 73
},
"PR_HTML_URL3333"
)
]
self.prs_with_index_of_expected_pr = [
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 1, "https://errata.devel.redhat.com/advisory/1111", "PR_URL1", "PR_HTML_URL1"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 2, "https://errata.devel.redhat.com/advisory/1234", "PR_URL2", "PR_HTML_URL2"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 3, "https://errata.devel.redhat.com/advisory/5678", "PR_URL3", "PR_HTML_URL3"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 4, "https://errata.devel.redhat.com/advisory/1357", "PR_URL4", "PR_HTML_URL4")
],
{
"errata_id": 1357
},
3 # Index of the PR which has the wanted errata id.
),
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 12345, "https://errata.devel.redhat.com/advisory/41", "PR_URL12345", "PR_HTML_URL12345"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 12354, "https://errata.devel.redhat.com/advisory/42", "PR_URL12354", "PR_HTML_URL12354"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 12340, "https://errata.devel.redhat.com/advisory/43", "PR_URL12340", "PR_HTML_URL12340"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 43215, "https://errata.devel.redhat.com/advisory/44", "PR_URL43215", "PR_HTML_URL43215")
],
{
"errata_id": 41
},
0
),
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 1111, "https://errata.devel.redhat.com/advisory/51", "PR_URL1111", "PR_HTML_URL1111"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 2222, "https://errata.devel.redhat.com/advisory/62", "PR_URL2222", "PR_HTML_URL2222"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 3333, "https://errata.devel.redhat.com/advisory/73", "PR_URL3333", "PR_HTML_URL3333"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 4444, "https://errata.devel.redhat.com/advisory/84", "PR_URL4444", "PR_HTML_URL4444")
],
{
"errata_id": 73
},
2
)
]
self.prs_with_invalid_errata_url = [
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 1, "", "PR_URL1", "PR_HTML_URL1"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 2, "https://errata", "PR_URL2", "PR_HTML_URL2"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 3, "https://redhat.com/advisory/84", "PR_URL3", "PR_HTML_URL3"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 4, "https://errata.devel.redhat.com", "PR_URL4", "PR_HTML_URL4")
],
{
"errata_id": 21
}
)
]
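    # Expected flow, inferred from the assertions below: lgtm_fast_pr_for_errata
    # scans the open fast PRs, picks the one whose body links
    # https://errata.devel.redhat.com/advisory/<errata_id>, comments
    # "/lgtm" on it, and returns its html_url (or None when no PR matches).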
@patch("github.Github")
def test_return_value_is_correct_for_specific_pr(self, Github_mock):
"""
        Test retrieving the HTML URL of the PR that is related to a specific errata id.
"""
githubrepo = MagicMock()
githubtoken = MagicMock()
Github_mock.return_value = self.github_object_mock
param_list = self.prs_with_html_url_of_expected_pr
for (prs, message, expected_pr_html_url) in param_list:
with self.subTest(prs_body=[x.body for x in prs], message=message):
self.repo.get_pulls = MagicMock(return_value=prs)
pr_html_url = errata.lgtm_fast_pr_for_errata(githubrepo, githubtoken, message)
self.assertEqual(pr_html_url, expected_pr_html_url)
@patch("github.Github")
def test_only_create_issue_on_the_expected_pr(self, Github_mock):
"""
Test creating an issue comment only on the PR which is related to the specific errata id.
"""
githubrepo = MagicMock()
githubtoken = MagicMock()
Github_mock.return_value = self.github_object_mock
param_list = self.prs_with_index_of_expected_pr
for (prs, message, expected_index_of_pr_to_create_issue) in param_list:
self.repo.get_pulls = MagicMock(return_value=prs)
errata.lgtm_fast_pr_for_errata(githubrepo, githubtoken, message)
for index, pr in enumerate(prs):
with self.subTest(prs_body=[x.body for x in prs], message=message):
if index == expected_index_of_pr_to_create_issue:
pr.create_issue_comment.assert_called_once()
else:
pr.create_issue_comment.assert_not_called()
@patch("github.Github")
def test_issue_comment_format(self, Github_mock):
"""
Test the format of the created issue comment on the PR which is related to the specific errata id.
"""
githubrepo = MagicMock()
githubtoken = MagicMock()
Github_mock.return_value = self.github_object_mock
param_list = self.prs_with_index_of_expected_pr
for (prs, message, expected_index_of_pr_to_create_issue) in param_list:
with self.subTest(prs_body=[x.body for x in prs], message=message):
self.repo.get_pulls = MagicMock(return_value=prs)
errata.lgtm_fast_pr_for_errata(githubrepo, githubtoken, message)
issue_comment = prs[expected_index_of_pr_to_create_issue].create_issue_comment.call_args
expected_issue_comment = "Autoapproving PR to fast after the errata has shipped\n/lgtm"
self.assertEqual(issue_comment, (unittest.mock.call(expected_issue_comment)))
@patch("github.Github")
def test_prs_include_invalid_errata_url(self, Github_mock):
"""
        Test PRs whose body includes an invalid errata URL.
        These PRs should be skipped.
"""
githubrepo = MagicMock()
githubtoken = MagicMock()
Github_mock.return_value = self.github_object_mock
param_list = self.prs_with_invalid_errata_url
for (prs, message) in param_list:
with self.subTest(body=[x.body for x in prs]):
self.repo.get_pulls = MagicMock(return_value=prs)
pr_html_url = errata.lgtm_fast_pr_for_errata(githubrepo, githubtoken, message)
self.assertEqual(pr_html_url, None)
class PublicErrataUriTest(unittest.TestCase):
def setUp(self):
self.nodes_valid = [
(
{ # nodes received via urlopen
"nodes": [
{
"version": "4.0.0",
"metadata": {
"url": "https://access.redhat.com/errata/RHBA-2020:0000"
}
}
]
},
                ( # Parameters for calling errata.public_errata_uri
"4.0.0",
"RHBA-2020:0000",
"candidate-4.0.0",
),
# Expected uri of the wanted node
"https://access.redhat.com/errata/RHBA-2020:0000",
),
(
{
"nodes": [
{
"version": "4.1.0",
"metadata": {
"url": "https://access.redhat.com/errata/RHBA-2020:1000"
}
}
]
},
(
"4.1.0",
"RHBA-2020:1000",
"candidate-4.1.0",
),
"https://access.redhat.com/errata/RHBA-2020:1000",
),
(
{
"nodes": [
{
"version": "4.2.0",
"metadata": {
"url": "https://access.redhat.com/errata/RHBA-2020:2000"
}
}
]
},
(
"4.2.0",
"RHBA-2020:2000",
"candidate-4.2.0",
),
"https://access.redhat.com/errata/RHBA-2020:2000",
),
]
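    # public_errata_uri is expected to fetch the channel's update graph (a
    # JSON document with a "nodes" list, as sketched above) and return the
    # metadata URL of the node whose version or advisory matches. A rough
    # sketch of the matching, assuming that shape:
    #   for node in data['nodes']:
    #       if node['version'] == version or advisory in node['metadata']['url']:
    #           return node['metadata']['url']
    #   return None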
@patch("json.load")
@patch("urllib.request.urlopen")
def test_should_return_uri_of_same_version(self, urlopen_mock, json_load_mock):
"""
        Test that the URI of the node whose version matches the parameter is returned.
"""
for (data, params, expected_errata_uri) in self.nodes_valid:
version = params[0]
channel = params[2]
json_load_mock.return_value = data
with self.subTest(version=version):
errata_uri = errata.public_errata_uri(version=version, advisory="", channel=channel)
self.assertEqual(errata_uri, expected_errata_uri)
@patch("json.load")
@patch("urllib.request.urlopen")
def test_should_return_uri_of_the_same_advisory(self, urlopen_mock, json_load_mock):
"""
        Test that the URI of the node whose advisory matches the parameter is returned.
"""
for (data, params, expected_errata_uri) in self.nodes_valid:
advisory = params[1]
channel = params[2]
json_load_mock.return_value = data
with self.subTest(advisory=advisory):
errata_uri = errata.public_errata_uri(version="", advisory=advisory, channel=channel)
self.assertEqual(errata_uri, expected_errata_uri)
@patch("json.load")
@patch("urllib.request.urlopen")
def test_zero_nodes_received(self, urlopen_mock, json_load_mock):
"""
Test if None is returned when zero nodes are received.
"""
json_load_mock.return_value = {
"nodes": []
}
for (_, params, _) in self.nodes_valid:
version = params[0]
advisory = params[1]
channel = params[2]
with self.subTest(version=version, advisory=advisory):
errata_uri = errata.public_errata_uri(version=version, advisory=advisory, channel=channel)
self.assertEqual(errata_uri, None)
@patch("json.load")
@patch("urllib.request.urlopen")
def test_zero_nodes_match(self, urlopen_mock, json_load_mock):
"""
        Test that None is returned when zero nodes match the wanted version or advisory.
"""
for (data, params, _) in self.nodes_valid:
version = params[0]
advisory = params[1]
channel = params[2]
json_load_mock.return_value = data
with self.subTest(version=version, advisory=advisory):
errata_uri = errata.public_errata_uri(version="", advisory="", channel=channel)
self.assertEqual(errata_uri, None)
@patch("time.sleep")
@patch("json.load")
@patch("urllib.request.urlopen")
def test_unresponsive_url_becomes_responsive(self, urlopen_mock, json_load_mock, sleep_mock):
"""
        Test that the request is retried when urllib.request.urlopen raises an exception on the first try.
"""
for (data, params, expected_errata_uri) in self.nodes_valid:
version = params[0]
advisory = params[1]
channel = params[2]
json_load_mock.return_value = data
urlopen_mock.side_effect = [
Exception("Unresponsive, request.urlopen has failed"),
MagicMock()
]
sleep_mock.reset_mock()
with self.subTest():
errata_uri = errata.public_errata_uri(version=version, advisory=advisory, channel=channel)
sleep_mock.assert_called_once()
self.assertEqual(errata_uri, expected_errata_uri)
class ProcessMessageTest(unittest.TestCase):
def setUp(self):
self.valid_params = [
(
"https://access.redhat.com/errata/RHBA-2020:0000",
{
"synopsis": "Moderate: OpenShift Container Platform 4.0.0 bug fix and golang security update",
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
),
(
"https://access.redhat.com/errata/RHBA-2021:0749",
{
"synopsis": "OpenShift Container Platform 4.7.2 bug fix update",
"fulladvisory": "RHBA-2021:0749-06",
"when": "2021-03-16 08:42:16 UTC",
}
)
]
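    # Contract encoded by the tests below: an unparseable synopsis raises
    # ValueError and is recorded in excluded_cache the first time, then is
    # silently skipped; a parseable synopsis whose public errata URI matches
    # the advisory is notified and its fast PR lgtm'ed exactly once, keyed by
    # fulladvisory in cache; a missing or mismatched errata URI means no action.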
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_raise_exception_when_new_invalid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test processing an invalid synopsis which is not in the excluded cache.
        Should raise ValueError.
"""
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {}
with self.assertRaises(ValueError):
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_content_of_cache_when_invalid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
        Test that the content of the cache remains unchanged when an invalid synopsis is received.
"""
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
cache = {
"RHBA-2020:0000-01":
{
"synopsis": "Moderate: OpenShift Container Platform 4.0.0 bug fix and golang security update",
"uri": "https://access.redhat.com/errata/RHBA-2020:0000",
"when": "2021-01-01 00:00:00 UTC",
}
}
cache_copy = copy.deepcopy(cache)
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
excluded_cache = {}
with self.assertRaises(ValueError):
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
self.assertDictEqual(cache, cache_copy)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_add_new_invalid_synopsis_to_the_excluded_cache(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
        Test processing an invalid synopsis which is not in the excluded cache.
Should add the synopsis and the fulladvisory to the excluded cache.
"""
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {}
with self.assertRaises(ValueError):
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
self.assertDictEqual(
excluded_cache,
{
invalid_synopsis: "RHBA-2020:0000-01",
}
)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_lgtm_fast_pr_when_new_invalid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
        Test that there is no attempt to lgtm the fast PR when a new invalid synopsis is received.
        The new invalid synopsis is not yet saved in the excluded cache.
"""
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {}
with self.assertRaises(ValueError):
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_notify_when_new_invalid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
        Test that there is no attempt to notify when a new invalid synopsis is received.
        The new invalid synopsis is not yet saved in the excluded cache.
"""
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {}
with self.assertRaises(ValueError):
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_content_of_excluded_cache_when_reprocessing_invalid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
        Test processing an invalid synopsis which is already in the excluded cache.
Should not change the content of the excluded cache.
"""
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
invalid_synopsis_2 = "Invalid 1.0.0"
excluded_cache = {
invalid_synopsis: "RHBA-2020:0000-01",
invalid_synopsis_2: "RHBA-2020:1111-01"
}
excluded_cache_copy = copy.deepcopy(excluded_cache)
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
self.assertDictEqual(excluded_cache, excluded_cache_copy)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_lgtm_fast_pr_when_reprocessing_invalid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
        Test that there is no attempt to lgtm the fast PR
        when an already processed invalid synopsis is received.
        The invalid synopsis is already saved in the excluded cache.
"""
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {
invalid_synopsis: "RHBA-2020:0000-01"
}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_notify_when_reprocessing_invalid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
        Test that there is no attempt to notify
        when an already processed invalid synopsis is received.
        The invalid synopsis is already saved in the excluded cache.
"""
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {
invalid_synopsis: "RHBA-2020:0000-01",
}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_add_new_valid_synopsis_to_the_cache(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
        Test processing a valid synopsis which is not in the cache.
        Should add the message's data to the cache.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
message_copy = copy.deepcopy(message)
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
self.assertDictEqual(
cache,
{
message_copy['fulladvisory']:
{
"when": message_copy['when'],
"synopsis": message_copy['synopsis'],
"uri": public_errata_uri,
}
}
)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_notify_when_new_valid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test if there is an attempt to notify when a new valid synopsis is received.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
notify_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_called_once()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_lgtm_fast_pr_when_new_valid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test if there is an attempt to lgtm fast pr when a new valid synopsis is received.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
lgtm_fast_pr_for_errata_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_called_once()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_content_of_cache_when_reprocessing_valid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
        Test processing a valid synopsis which is already in the cache.
Should not change the content of the cache.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
cache = {}
cache[message['fulladvisory']] = {
'when': message['when'],
'synopsis': message['synopsis'],
'uri': public_errata_uri,
}
cache_copy = copy.deepcopy(cache)
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
self.assertDictEqual(cache, cache_copy)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_notify_when_reprocessing_valid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
        Test that there is no attempt to notify when
        reprocessing a valid synopsis.
        The valid synopsis is already saved in the cache.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
notify_mock.reset_mock()
cache = {}
cache[message['fulladvisory']] = {
'when': message['when'],
'synopsis': message['synopsis'],
'uri': public_errata_uri,
}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_lgtm_fast_pr_when_reprocessing_valid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
        Test that there is no attempt to lgtm the fast PR when
        reprocessing a valid synopsis.
        The valid synopsis is already saved in the cache.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
lgtm_fast_pr_for_errata_mock.reset_mock()
cache = {}
cache[message['fulladvisory']] = {
'when': message['when'],
'synopsis': message['synopsis'],
'uri': public_errata_uri,
}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_notify_for_valid_synopsis_does_not_have_public_errata(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
        Test processing a new valid synopsis which does not have a public errata URI.
        Test that there is no attempt to notify.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = None
notify_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_lgtm_fast_pr_for_valid_synopsis_does_not_have_public_errata(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
        Test processing a new valid synopsis which does not have a public errata URI.
        Test that there is no attempt to lgtm the fast PR for the message's synopsis.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = None
lgtm_fast_pr_for_errata_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_notify_when_public_errata_does_not_match_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
        Test processing a new valid synopsis which does not have a matching public errata URI.
        Test that there is no attempt to notify
        when the public errata URI does not match the message's advisory.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = 'non_matching_errata_uri'
lgtm_fast_pr_for_errata_mock.reset_mock()
notify_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_lgtm_fast_pr_when_public_errata_does_not_match_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
        Test processing a new valid synopsis which does not have a matching public errata URI.
        Test that there is no attempt to lgtm the fast PR for the message's synopsis
        when the public errata URI does not match the message's advisory.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = 'non_matching_errata_uri'
lgtm_fast_pr_for_errata_mock.reset_mock()
notify_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_processing_valid_message_multiple_times(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
        Process the same valid message multiple times.
        Should attempt to notify and to lgtm the fast PR only once per message.
"""
for (public_errata_uri, message) in self.valid_params:
public_errata_uri_mock.return_value = public_errata_uri
lgtm_fast_pr_for_errata_mock.reset_mock()
notify_mock.reset_mock()
message_copy = copy.deepcopy(message)
cache = {}
excluded_cache = {}
for _ in range(10):
message = copy.deepcopy(message_copy)
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
with self.subTest(message=message, errata_uri=public_errata_uri):
lgtm_fast_pr_for_errata_mock.assert_called_once()
with self.subTest(message=message, errata_uri=public_errata_uri):
notify_mock.assert_called_once()
if __name__ == '__main__':
unittest.main()
| 40.509346 | 194 | 0.552818 | import copy
import datetime
import os
import tempfile
import unittest
import urllib
from unittest.mock import MagicMock
from unittest.mock import patch
import errata
class GithubUserMock():
def __init__(self, login):
self.login = login
class GithubLabelMock():
def __init__(self, name):
self.name = name
class GithubPRMock:
def __init__(self, user, title, labels=[], number=0, body="", url="", html_url=""):
self.user = user
self.title = title
self.labels = labels
self.number = number
self.body = body
self.url = url
self.html_url = html_url
self.create_issue_comment = MagicMock()
def __eq__(self, other):
if not isinstance(other, GithubPRMock):
return False
return self.user == other.user \
and self.title == other.title \
and self.labels == other.labels \
and self.number == other.number \
and self.body == other.body \
and self.url == other.url \
and self.html_url == other.html_url
class ExtractErrataNumberFromBodyTest(unittest.TestCase):
def test_url_starting_with_valid_errata_marker(self):
param_list = [
('https://errata.devel.redhat.com/advisory/12345', 12345),
('https://errata.devel.redhat.com/advisory/67890', 67890),
('https://errata.devel.redhat.com/advisory/13579', 13579),
('https://errata.devel.redhat.com/advisory/24680', 24680),
('https://errata.devel.redhat.com/advisory/', None),
('https://errata.devel.redhat.com/advisory/invalid', None)
]
for (url, expected) in param_list:
with self.subTest(url=url):
self.assertEqual(errata.extract_errata_number_from_body(url), expected)
def test_invalid_url(self):
param_list = [
'http://errata.devel.redhat.com/advisory/12345',
'https://errrata.devel.redhat.com/advisory/12345',
'https://errata.dvel.reddhat.com/advisori/12345',
'https://errata.devel.redhat.com/12345',
'https://errata.devel.com/advisory/12345',
'https://errata.redhat.com/advisory/12345',
'https://devel.redhat.com/advisory/12345',
'https://redhat.com/advisory/12345',
'https://errata.com/advisory/12345'
]
for url in param_list:
with self.subTest(url=url):
self.assertEqual(errata.extract_errata_number_from_body(url), None)
def test_missing_url(self):
param_list = [
'errata',
'12345',
'errata is 12345'
]
for body in param_list:
with self.subTest(body=body):
self.assertEqual(errata.extract_errata_number_from_body(body), None)
def test_url_is_not_on_the_first_line(self):
param_list = [
'\nhttps://errata.devel.redhat.com/advisory/12345',
'\n\nhttps://errata.devel.redhat.com/advisory/12345'
]
for body in param_list:
with self.subTest(body=body):
self.assertEqual(errata.extract_errata_number_from_body(body), None)
class SaveAndLoadTest(unittest.TestCase):
def test_load_nonexisting_file(self):
with tempfile.TemporaryDirectory() as tempdir:
cachepath = os.path.join(tempdir, "cache.json")
self.assertCountEqual(errata.load(cachepath), {})
def test_save_and_load_as_a_pair(self):
param_list = [
(),
({"foo": "bar"}),
({"value": "1234"}),
({"company": "Red Hat"}),
({"foo": "bar"}, {"value": "1234"}, {"errata": "1234"}),
({"value": "1234"}, {"foo": "bar"}, {"errata": "1234"})
]
for cache in param_list:
with self.subTest():
with tempfile.TemporaryDirectory() as tempdir:
cachepath = os.path.join(tempdir, "cache.json")
errata.save(cachepath, cache)
self.assertCountEqual(errata.load(cachepath), cache)
class PollTest(unittest.TestCase):
def setUp(self):
self.raw_messages = [
(
True,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 11,
"product": "RHOSE",
"to": "SHIPPED_LIVE",
}
}
),
(
True,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 12,
"product": "RHOSE",
"to": "SHIPPED_LIVE",
}
}
),
(
False,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 21,
"product": "RHOSE",
"to": "QE",
}
}
),
(
False,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 22,
"product": "RHEL",
"to": "SHIPPED_LIVE",
}
}
),
(
False,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 23,
"product": "RHEL",
"to": "QE",
}
}
),
(
False,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 24,
"product": "SHIPPED_LIVE",
"to": "RHOSE",
}
}
)
]
self.valid_messages = [x[1] for x in self.raw_messages if x[0]]
self.invalid_messages = [x[1] for x in self.raw_messages if not x[0]]
@patch("json.load")
@patch("urllib.request.urlopen")
def test_params_of_urlopen_call(self, urlopen_mock, json_load_mock):
urlopen_mock.return_value = MagicMock()
json_load_mock.return_value = {
"raw_messages": [],
"pages": 1
}
polled_messages = []
for message in errata.poll(period=datetime.timedelta(seconds=3600)):
polled_messages.append(message)
parsed_url = urllib.parse.urlparse(urlopen_mock.call_args[0][0])
params = urllib.parse.parse_qs(parsed_url.query)
self.assertGreater(int(params["page"][0]), 0)
self.assertLessEqual(int(params["rows_per_page"][0]), 100)
self.assertEqual(params["category"][0], "errata")
self.assertEqual(params["contains"][0], "RHOSE")
@patch("json.load")
@patch("urllib.request.urlopen")
def test_number_of_returned_pages_is_zero(self, urlopen_mock, json_load_mock):
urlopen_mock.return_value = MagicMock()
json_load_mock.return_value = {
"raw_messages": [],
"pages": 0
}
polled_messages = []
for message in errata.poll(period=datetime.timedelta(seconds=3600)):
polled_messages.append(message)
self.assertEqual(polled_messages, [])
@patch("json.load")
@patch("urllib.request.urlopen")
def test_no_raw_messages(self, urlopen_mock, json_load_mock):
urlopen_mock.return_value = MagicMock()
json_load_mock.return_value = {
"raw_messages": [],
"pages": 1
}
polled_messages = []
for message in errata.poll(period=datetime.timedelta(seconds=3600)):
polled_messages.append(message)
self.assertEqual(polled_messages, [])
@patch("json.load")
@patch("time.sleep")
@patch("urllib.request.urlopen")
def test_unresponsive_url_becomes_responsive(self, urlopen_mock, sleep_mock, json_load_mock):
urlopen_mock.side_effect = [
Exception("Unresponsive, request.urlopen has failed"),
MagicMock()
]
json_load_mock.return_value = {
"raw_messages": self.valid_messages,
"pages": 1
}
polled_messages = []
for message in errata.poll(period=datetime.timedelta(seconds=3600)):
polled_messages.append(message)
sleep_mock.assert_called_once()
expected_msgs = [x['msg'] for x in self.valid_messages]
self.assertEqual(polled_messages, expected_msgs)
@patch("json.load")
@patch("urllib.request.urlopen")
def test_multiple_messages(self, urlopen_mock, json_load_mock):
urlopen_mock.return_value = MagicMock()
messages = self.valid_messages + self.invalid_messages
json_load_mock.return_value = {
"raw_messages": messages,
"pages": 1
}
polled_messages = []
for message in errata.poll(period=datetime.timedelta(seconds=3600)):
polled_messages.append(message)
expected_msgs = [x['msg'] for x in self.valid_messages]
self.assertEqual(polled_messages, expected_msgs)
class SynopsisMatchTest(unittest.TestCase):
def test_match(self):
for synopsis, expected in [
(
'Moderate: OpenShift Container Platform 4.7.13 bug fix and security update',
{
'impact': 'Moderate',
'version': '4.7.13',
'major': '4',
'minor': '7',
'patch': '13',
'prerelease': None,
'build': None,
'type': 'bug fix and security update',
},
),
(
'Moderate: OpenShift Container Platform 4.7.5 security and bug fix update',
{
'impact': 'Moderate',
'version': '4.7.5',
'major': '4',
'minor': '7',
'patch': '5',
'prerelease': None,
'build': None,
'type': 'security and bug fix update',
},
),
(
'OpenShift Container Platform 4.6 GA Images',
{
'impact': None,
'version': '4.6',
'major': '4',
'minor': '6',
'patch': None,
'prerelease': None,
'build': None,
'type': 'GA Images',
},
),
(
'OpenShift Container Platform 4.5.11 optional CSI driver Operators bug fix update',
None,
),
(
'Moderate: OpenShift Container Platform 4.5.20 bug fix and golang security update',
{
'impact': 'Moderate',
'version': '4.5.20',
'major': '4',
'minor': '5',
'patch': '20',
'prerelease': None,
'build': None,
'type': 'bug fix and golang security update',
},
),
(
'Low: OpenShift Container Platform 4.3.40 security and bug fix update',
{
'impact': 'Low',
'version': '4.3.40',
'major': '4',
'minor': '3',
'patch': '40',
'prerelease': None,
'build': None,
'type': 'security and bug fix update',
},
),
]:
with self.subTest(synopsis=synopsis):
actual = errata._SYNOPSIS_REGEXP.match(synopsis)
if actual:
self.assertEqual(actual.groupdict(), expected)
else:
self.assertEqual(actual, expected)
class AdvisoryPhrasingsTest(unittest.TestCase):
def test_phrasings(self):
for advisory, expected in [
(
'RHBA-123',
['RHBA-123', 'RHSA-123'],
),
(
'RHSA-123',
['RHBA-123', 'RHSA-123'],
),
(
'https://example.com/RHBA-123',
['https://example.com/RHBA-123', 'https://example.com/RHSA-123'],
),
(
'https://example.com/RHBA-123/abc',
['https://example.com/RHBA-123/abc', 'https://example.com/RHSA-123/abc'],
),
]:
with self.subTest(advisory=advisory):
actual = list(errata.advisory_phrasings(advisory=advisory))
self.assertEqual(actual, expected)
class NotifyTest(unittest.TestCase):
def setUp(self):
self.messages_including_approved_pr = [
(
{
"errata_id": 11,
"fulladvisory": "RHSA-2020:0000-00",
"product": "RHOSE",
"to": "SHIPPED_LIVE",
"synopsis": "OpenShift Container Platform 4.6 GA Images",
"when": "2021-01-01 12:00:00 UTC",
"uri": "Public_Errata_URI_11",
"approved_pr": "PR_HTML_URL_11"
},
'<!subteam^STE7S7ZU2>: '
'RHSA-2020:0000-00 shipped '
'2021-01-01 12:00:00 UTC: '
'OpenShift Container Platform 4.6 GA Images '
'Public_Errata_URI_11'
'\nPR PR_HTML_URL_11 has been approved'
),
(
{
"errata_id": 12,
"fulladvisory": "RHSA-2020:2000-20",
"product": "RHOSE",
"to": "SHIPPED_LIVE",
"synopsis": "Moderate: OpenShift Container Platform 4.5.20 bug fix and golang security update",
"when": "2021-01-02 13:00:00 UTC",
"uri": "Public_Errata_URI_12",
"approved_pr": "PR_HTML_URL_12"
},
'<!subteam^STE7S7ZU2>: '
'RHSA-2020:2000-20 shipped '
'2021-01-02 13:00:00 UTC: '
'Moderate: OpenShift Container Platform 4.5.20 bug fix and golang security update '
'Public_Errata_URI_12'
'\nPR PR_HTML_URL_12 has been approved'
)
]
self.messages_not_including_approved_pr = [
(
{
"errata_id": 21,
"fulladvisory": "RHSA-2020:0000-00",
"product": "RHOSE",
"to": "SHIPPED_LIVE",
"synopsis": "OpenShift Container Platform 4.6 GA Images",
"when": "2021-01-01 12:00:00 UTC",
"uri": "Public_Errata_URI_21",
},
'<!subteam^STE7S7ZU2>: '
'RHSA-2020:0000-00 shipped '
'2021-01-01 12:00:00 UTC: '
'OpenShift Container Platform 4.6 GA Images '
'Public_Errata_URI_21'
),
(
{
"errata_id": 22,
"fulladvisory": "RHSA-2020:2000-20",
"product": "RHOSE",
"to": "SHIPPED_LIVE",
"synopsis": "Moderate: OpenShift Container Platform 4.5.20 bug fix and golang security update",
"when": "2021-01-02 13:00:00 UTC",
"uri": "Public_Errata_URI_22",
},
'<!subteam^STE7S7ZU2>: '
'RHSA-2020:2000-20 shipped '
'2021-01-02 13:00:00 UTC: '
'Moderate: OpenShift Container Platform 4.5.20 bug fix and golang security update '
'Public_Errata_URI_22'
)
]
self.messages = \
self.messages_including_approved_pr + \
self.messages_not_including_approved_pr
@patch("builtins.print")
@patch("urllib.request.urlopen")
def test_no_webhook(self, urlopen_mock, print_mock):
for message in self.messages:
with self.subTest(message=message):
errata.notify(message[0])
expected_message = message[0]
self.assertEqual(print_mock.call_args, unittest.mock.call(expected_message))
@patch("urllib.request.urlopen")
def test_format_of_message_not_including_approved_pr(self, urlopen_mock):
for (message, expected_message_in_data_to_be_uploaded) in self.messages_not_including_approved_pr:
with self.subTest(message=message):
expected_data_to_be_uploaded = urllib.parse.urlencode({
'payload': {
'text': expected_message_in_data_to_be_uploaded
}
}).encode('utf-8')
errata.notify(message, MagicMock())
uploaded_data = urlopen_mock.call_args[1]['data']
self.assertEqual(uploaded_data, expected_data_to_be_uploaded)
@patch("urllib.request.urlopen")
def test_format_of_message_including_approved_pr(self, urlopen_mock):
for (message, expected_message_in_data_to_be_uploaded) in self.messages_including_approved_pr:
with self.subTest(message=message):
expected_data_to_be_uploaded = urllib.parse.urlencode({
'payload': {
'text': expected_message_in_data_to_be_uploaded
}
}).encode('utf-8')
errata.notify(message, MagicMock())
uploaded_data = urlopen_mock.call_args[1]['data']
self.assertEqual(uploaded_data, expected_data_to_be_uploaded)
class GetOpenPRsToFastTest(unittest.TestCase):
def setUp(self):
self.repo = MagicMock()
self.labels_multiple_including_lgtm = [
[
GithubLabelMock('lgtm')
],
[
GithubLabelMock('bug'), GithubLabelMock('duplicate'), GithubLabelMock('lgtm'),
GithubLabelMock('documentation'), GithubLabelMock('invalid')
],
[
GithubLabelMock('wontfix'), GithubLabelMock('lgtm'),
GithubLabelMock('question'), GithubLabelMock('invalid')
],
[
GithubLabelMock('help wanted'), GithubLabelMock('lgtm'),
GithubLabelMock('good first issue'), GithubLabelMock('bug')
]
]
self.labels_multiple_not_including_lgtm = [
[
],
[
GithubLabelMock('wontfix'), GithubLabelMock('bug'),
GithubLabelMock('question'), GithubLabelMock('invalid')
],
[
GithubLabelMock('help wanted'), GithubLabelMock('invalid'),
GithubLabelMock('good first issue'), GithubLabelMock('duplicate')
],
[
GithubLabelMock('bug'), GithubLabelMock('duplicate'), GithubLabelMock('invalid'),
GithubLabelMock('documentation'), GithubLabelMock('enhancement')
]
]
self.prs_correct_and_expected_to_be_yielded = [
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.6.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_not_including_lgtm[0]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_not_including_lgtm[1]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_not_including_lgtm[2]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_not_including_lgtm[3]),
]
self.prs_including_the_lgtm_label = [
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_including_lgtm[0]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_including_lgtm[1]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_including_lgtm[2]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_including_lgtm[3])
]
self.prs_author_is_not_openshift_bot = [
GithubPRMock(GithubUserMock("user1234"), "Enable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("bot-openshift"), "Enable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("Openshift-Bot"), "Enable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("GitHubUser1234"), "Enable 4.0.0 in fast channel(s)")
]
self.prs_title_not_starting_with_Enable = [
GithubPRMock(GithubUserMock("openshift-bot"), ""),
GithubPRMock(GithubUserMock("openshift-bot"), "Fix component"),
GithubPRMock(GithubUserMock("openshift-bot"), "Add features in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "enable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Disable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enablee 4.0.0 in fast channel(s)")
]
self.prs_do_not_target_fast = [
GithubPRMock(GithubUserMock("openshift-bot"), "Enable "),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in FAST channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in faast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in stable channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in candidate channel(s)")
]
def test_prs_including_the_lgtm_label(self):
self.repo.get_pulls = MagicMock(return_value=self.prs_including_the_lgtm_label)
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_prs = []
self.assertEqual(open_prs_to_fast, expected_prs)
def test_prs_author_is_not_openshift_bot(self):
self.repo.get_pulls = MagicMock(return_value=self.prs_author_is_not_openshift_bot)
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_prs = []
self.assertEqual(open_prs_to_fast, expected_prs)
def test_unknown_prs_should_be_skipped(self):
self.repo.get_pulls = MagicMock(return_value=self.prs_title_not_starting_with_Enable)
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_prs = []
self.assertEqual(open_prs_to_fast, expected_prs)
def test_ignore_prs_which_dont_target_fast(self):
self.repo.get_pulls = MagicMock(return_value=self.prs_do_not_target_fast)
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_prs = []
self.assertEqual(open_prs_to_fast, expected_prs)
def test_correct_prs_should_be_yielded(self):
self.repo.get_pulls = MagicMock(return_value=self.prs_correct_and_expected_to_be_yielded)
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_prs = self.prs_correct_and_expected_to_be_yielded
self.assertEqual(open_prs_to_fast, expected_prs)
def test_get_pulls_query_params(self):
self.repo.get_pulls = MagicMock(return_value=[])
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_params = {
'state': 'open',
'base': 'master',
'sort': 'created',
}
self.assertEqual(self.repo.get_pulls.call_args, (unittest.mock.call(**expected_params)))
class LgtmFastPrForErrata(unittest.TestCase):
def setUp(self):
self.repo = MagicMock()
self.github_object_mock = MagicMock()
self.github_object_mock.get_repo.return_value = self.repo
self.prs_with_html_url_of_expected_pr = [
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 1, "https://errata.devel.redhat.com/advisory/1111", "PR_URL1", "PR_HTML_URL1"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 2, "https://errata.devel.redhat.com/advisory/1234", "PR_URL2", "PR_HTML_URL2"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 3, "https://errata.devel.redhat.com/advisory/5678", "PR_URL3", "PR_HTML_URL3"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 4, "https://errata.devel.redhat.com/advisory/1357", "PR_URL4", "PR_HTML_URL4")
],
{
"errata_id": 1357
},
"PR_HTML_URL4" # HTML url of a PR which body has the wanted errata id.
),
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 12345, "https://errata.devel.redhat.com/advisory/41", "PR_URL12345", "PR_HTML_URL12345"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 12354, "https://errata.devel.redhat.com/advisory/42", "PR_URL12354", "PR_HTML_URL12354"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 12340, "https://errata.devel.redhat.com/advisory/43", "PR_URL12340", "PR_HTML_URL12340"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 43215, "https://errata.devel.redhat.com/advisory/44", "PR_URL43215", "PR_HTML_URL43215")
],
{
"errata_id": 41
},
"PR_HTML_URL12345"
),
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 1111, "https://errata.devel.redhat.com/advisory/51", "PR_URL1111", "PR_HTML_URL1111"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 2222, "https://errata.devel.redhat.com/advisory/62", "PR_URL2222", "PR_HTML_URL2222"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 3333, "https://errata.devel.redhat.com/advisory/73", "PR_URL3333", "PR_HTML_URL3333"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 4444, "https://errata.devel.redhat.com/advisory/84", "PR_URL4444", "PR_HTML_URL4444")
],
{
"errata_id": 73
},
"PR_HTML_URL3333"
)
]
self.prs_with_index_of_expected_pr = [
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 1, "https://errata.devel.redhat.com/advisory/1111", "PR_URL1", "PR_HTML_URL1"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 2, "https://errata.devel.redhat.com/advisory/1234", "PR_URL2", "PR_HTML_URL2"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 3, "https://errata.devel.redhat.com/advisory/5678", "PR_URL3", "PR_HTML_URL3"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 4, "https://errata.devel.redhat.com/advisory/1357", "PR_URL4", "PR_HTML_URL4")
],
{
"errata_id": 1357
},
3 # Index of the PR whose body contains the wanted errata id.
),
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 12345, "https://errata.devel.redhat.com/advisory/41", "PR_URL12345", "PR_HTML_URL12345"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 12354, "https://errata.devel.redhat.com/advisory/42", "PR_URL12354", "PR_HTML_URL12354"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 12340, "https://errata.devel.redhat.com/advisory/43", "PR_URL12340", "PR_HTML_URL12340"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 43215, "https://errata.devel.redhat.com/advisory/44", "PR_URL43215", "PR_HTML_URL43215")
],
{
"errata_id": 41
},
0
),
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 1111, "https://errata.devel.redhat.com/advisory/51", "PR_URL1111", "PR_HTML_URL1111"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 2222, "https://errata.devel.redhat.com/advisory/62", "PR_URL2222", "PR_HTML_URL2222"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 3333, "https://errata.devel.redhat.com/advisory/73", "PR_URL3333", "PR_HTML_URL3333"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 4444, "https://errata.devel.redhat.com/advisory/84", "PR_URL4444", "PR_HTML_URL4444")
],
{
"errata_id": 73
},
2
)
]
self.prs_with_invalid_errata_url = [
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 1, "", "PR_URL1", "PR_HTML_URL1"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 2, "https://errata", "PR_URL2", "PR_HTML_URL2"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 3, "https://redhat.com/advisory/84", "PR_URL3", "PR_HTML_URL3"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 4, "https://errata.devel.redhat.com", "PR_URL4", "PR_HTML_URL4")
],
{
"errata_id": 21
}
)
]
@patch("github.Github")
def test_return_value_is_correct_for_specific_pr(self, Github_mock):
githubrepo = MagicMock()
githubtoken = MagicMock()
Github_mock.return_value = self.github_object_mock
param_list = self.prs_with_html_url_of_expected_pr
for (prs, message, expected_pr_html_url) in param_list:
with self.subTest(prs_body=[x.body for x in prs], message=message):
self.repo.get_pulls = MagicMock(return_value=prs)
pr_html_url = errata.lgtm_fast_pr_for_errata(githubrepo, githubtoken, message)
self.assertEqual(pr_html_url, expected_pr_html_url)
@patch("github.Github")
def test_only_create_issue_on_the_expected_pr(self, Github_mock):
githubrepo = MagicMock()
githubtoken = MagicMock()
Github_mock.return_value = self.github_object_mock
param_list = self.prs_with_index_of_expected_pr
for (prs, message, expected_index_of_pr_to_create_issue) in param_list:
self.repo.get_pulls = MagicMock(return_value=prs)
errata.lgtm_fast_pr_for_errata(githubrepo, githubtoken, message)
for index, pr in enumerate(prs):
with self.subTest(prs_body=[x.body for x in prs], message=message):
if index == expected_index_of_pr_to_create_issue:
pr.create_issue_comment.assert_called_once()
else:
pr.create_issue_comment.assert_not_called()
@patch("github.Github")
def test_issue_comment_format(self, Github_mock):
githubrepo = MagicMock()
githubtoken = MagicMock()
Github_mock.return_value = self.github_object_mock
param_list = self.prs_with_index_of_expected_pr
for (prs, message, expected_index_of_pr_to_create_issue) in param_list:
with self.subTest(prs_body=[x.body for x in prs], message=message):
self.repo.get_pulls = MagicMock(return_value=prs)
errata.lgtm_fast_pr_for_errata(githubrepo, githubtoken, message)
issue_comment = prs[expected_index_of_pr_to_create_issue].create_issue_comment.call_args
expected_issue_comment = "Autoapproving PR to fast after the errata has shipped\n/lgtm"
self.assertEqual(issue_comment, (unittest.mock.call(expected_issue_comment)))
@patch("github.Github")
def test_prs_include_invalid_errata_url(self, Github_mock):
githubrepo = MagicMock()
githubtoken = MagicMock()
Github_mock.return_value = self.github_object_mock
param_list = self.prs_with_invalid_errata_url
for (prs, message) in param_list:
with self.subTest(body=[x.body for x in prs]):
self.repo.get_pulls = MagicMock(return_value=prs)
pr_html_url = errata.lgtm_fast_pr_for_errata(githubrepo, githubtoken, message)
self.assertEqual(pr_html_url, None)
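# For reference, the behaviour pinned down above amounts to roughly the
# following. This is an illustrative sketch, not the project's implementation;
# the real errata.lgtm_fast_pr_for_errata takes (githubrepo, githubtoken,
# message) and builds the repo handle itself:
def _sketch_lgtm_fast_pr_for_errata(repo, message):
    import re
    for pr in repo.get_pulls(state='open', base='master', sort='created'):
        # the PR body is expected to link the advisory it enables
        match = re.search(r'errata\.devel\.redhat\.com/advisory/(\d+)', pr.body)
        if match and int(match.group(1)) == message['errata_id']:
            pr.create_issue_comment(
                'Autoapproving PR to fast after the errata has shipped\n/lgtm')
            return pr.html_url
    return None  # no PR referenced the shipped advisory, or the URL was malformed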
class PublicErrataUriTest(unittest.TestCase):
def setUp(self):
self.nodes_valid = [
(
{ # nodes received via urlopen
"nodes": [
{
"version": "4.0.0",
"metadata": {
"url": "https://access.redhat.com/errata/RHBA-2020:0000"
}
}
]
},
( # Parameters for calling errata.public_errata_uri
"4.0.0",
"RHBA-2020:0000",
"candidate-4.0.0",
),
# Expected URI of the wanted node
"https://access.redhat.com/errata/RHBA-2020:0000",
),
(
{
"nodes": [
{
"version": "4.1.0",
"metadata": {
"url": "https://access.redhat.com/errata/RHBA-2020:1000"
}
}
]
},
(
"4.1.0",
"RHBA-2020:1000",
"candidate-4.1.0",
),
"https://access.redhat.com/errata/RHBA-2020:1000",
),
(
{
"nodes": [
{
"version": "4.2.0",
"metadata": {
"url": "https://access.redhat.com/errata/RHBA-2020:2000"
}
}
]
},
(
"4.2.0",
"RHBA-2020:2000",
"candidate-4.2.0",
),
"https://access.redhat.com/errata/RHBA-2020:2000",
),
]
@patch("json.load")
@patch("urllib.request.urlopen")
def test_should_return_uri_of_same_version(self, urlopen_mock, json_load_mock):
for (data, params, expected_errata_uri) in self.nodes_valid:
version = params[0]
channel = params[2]
json_load_mock.return_value = data
with self.subTest(version=version):
errata_uri = errata.public_errata_uri(version=version, advisory="", channel=channel)
self.assertEqual(errata_uri, expected_errata_uri)
@patch("json.load")
@patch("urllib.request.urlopen")
def test_should_return_uri_of_the_same_advisory(self, urlopen_mock, json_load_mock):
for (data, params, expected_errata_uri) in self.nodes_valid:
advisory = params[1]
channel = params[2]
json_load_mock.return_value = data
with self.subTest(advisory=advisory):
errata_uri = errata.public_errata_uri(version="", advisory=advisory, channel=channel)
self.assertEqual(errata_uri, expected_errata_uri)
@patch("json.load")
@patch("urllib.request.urlopen")
def test_zero_nodes_received(self, urlopen_mock, json_load_mock):
json_load_mock.return_value = {
"nodes": []
}
for (_, params, _) in self.nodes_valid:
version = params[0]
advisory = params[1]
channel = params[2]
with self.subTest(version=version, advisory=advisory):
errata_uri = errata.public_errata_uri(version=version, advisory=advisory, channel=channel)
self.assertEqual(errata_uri, None)
@patch("json.load")
@patch("urllib.request.urlopen")
def test_zero_nodes_match(self, urlopen_mock, json_load_mock):
for (data, params, _) in self.nodes_valid:
version = params[0]
advisory = params[1]
channel = params[2]
json_load_mock.return_value = data
with self.subTest(version=version, advisory=advisory):
errata_uri = errata.public_errata_uri(version="", advisory="", channel=channel)
self.assertEqual(errata_uri, None)
@patch("time.sleep")
@patch("json.load")
@patch("urllib.request.urlopen")
def test_unresponsive_url_becomes_responsive(self, urlopen_mock, json_load_mock, sleep_mock):
for (data, params, expected_errata_uri) in self.nodes_valid:
version = params[0]
advisory = params[1]
channel = params[2]
json_load_mock.return_value = data
urlopen_mock.side_effect = [
Exception("Unresponsive, request.urlopen has failed"),
MagicMock()
]
sleep_mock.reset_mock()
with self.subTest():
errata_uri = errata.public_errata_uri(version=version, advisory=advisory, channel=channel)
sleep_mock.assert_called_once()
self.assertEqual(errata_uri, expected_errata_uri)
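# Shape of the lookup the tests above describe (an illustrative sketch; the
# graph endpoint URL and the retry delay are assumptions, not verified against
# the real errata.public_errata_uri):
def _sketch_public_errata_uri(version, advisory, channel):
    import json
    import time
    import urllib.request
    graph_uri = 'https://api.openshift.com/api/upgrades_info/v1/graph?channel=' + channel
    for _ in range(2):  # retry once if the endpoint is unresponsive
        try:
            response = urllib.request.urlopen(graph_uri)
            break
        except Exception:
            time.sleep(60)
    else:
        return None
    data = json.load(response)
    for node in data['nodes']:
        uri = node['metadata']['url']
        # a node matches on its exact version or on the advisory appearing in its URI
        if (version and node['version'] == version) or (advisory and advisory in uri):
            return uri
    return None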
class ProcessMessageTest(unittest.TestCase):
def setUp(self):
self.valid_params = [
(
"https://access.redhat.com/errata/RHBA-2020:0000",
{
"synopsis": "Moderate: OpenShift Container Platform 4.0.0 bug fix and golang security update",
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
),
(
"https://access.redhat.com/errata/RHBA-2021:0749",
{
"synopsis": "OpenShift Container Platform 4.7.2 bug fix update",
"fulladvisory": "RHBA-2021:0749-06",
"when": "2021-03-16 08:42:16 UTC",
}
)
]
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_raise_exception_when_new_invalid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {}
with self.assertRaises(ValueError):
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_content_of_cache_when_invalid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
cache = {
"RHBA-2020:0000-01":
{
"synopsis": "Moderate: OpenShift Container Platform 4.0.0 bug fix and golang security update",
"uri": "https://access.redhat.com/errata/RHBA-2020:0000",
"when": "2021-01-01 00:00:00 UTC",
}
}
cache_copy = copy.deepcopy(cache)
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
excluded_cache = {}
with self.assertRaises(ValueError):
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
self.assertDictEqual(cache, cache_copy)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_add_new_invalid_synopsis_to_the_excluded_cache(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {}
with self.assertRaises(ValueError):
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
self.assertDictEqual(
excluded_cache,
{
invalid_synopsis: "RHBA-2020:0000-01",
}
)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_lgtm_fast_pr_when_new_invalid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {}
with self.assertRaises(ValueError):
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_notify_when_new_invalid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {}
with self.assertRaises(ValueError):
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_content_of_excluded_cache_when_reprocessing_invalid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
invalid_synopsis_2 = "Invalid 1.0.0"
excluded_cache = {
invalid_synopsis: "RHBA-2020:0000-01",
invalid_synopsis_2: "RHBA-2020:1111-01"
}
excluded_cache_copy = copy.deepcopy(excluded_cache)
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
self.assertDictEqual(excluded_cache, excluded_cache_copy)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_lgtm_fast_pr_when_reprocessing_invalid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {
invalid_synopsis: "RHBA-2020:0000-01"
}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_notify_when_reprocessing_invalid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {
invalid_synopsis: "RHBA-2020:0000-01",
}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_add_new_valid_synopsis_to_the_cache(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
message_copy = copy.deepcopy(message)
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
self.assertDictEqual(
cache,
{
message_copy['fulladvisory']:
{
"when": message_copy['when'],
"synopsis": message_copy['synopsis'],
"uri": public_errata_uri,
}
}
)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_notify_when_new_valid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
notify_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_called_once()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_lgtm_fast_pr_when_new_valid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
lgtm_fast_pr_for_errata_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_called_once()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_content_of_cache_when_reprocessing_valid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
cache = {}
cache[message['fulladvisory']] = {
'when': message['when'],
'synopsis': message['synopsis'],
'uri': public_errata_uri,
}
cache_copy = copy.deepcopy(cache)
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
self.assertDictEqual(cache, cache_copy)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_notify_when_reprocessing_valid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
notify_mock.reset_mock()
cache = {}
cache[message['fulladvisory']] = {
'when': message['when'],
'synopsis': message['synopsis'],
'uri': public_errata_uri,
}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_lgtm_fast_pr_when_reprocessing_valid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
lgtm_fast_pr_for_errata_mock.reset_mock()
cache = {}
cache[message['fulladvisory']] = {
'when': message['when'],
'synopsis': message['synopsis'],
'uri': public_errata_uri,
}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_notify_for_valid_synopsis_does_not_have_public_errata(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = None
notify_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_lgtm_fast_pr_for_valid_synopsis_does_not_have_public_errata(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = None
lgtm_fast_pr_for_errata_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_notify_when_public_errata_does_not_match_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = 'non_matching_errata_uri'
lgtm_fast_pr_for_errata_mock.reset_mock()
notify_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_lgtm_fast_pr_when_public_errata_does_not_match_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = 'non_matching_errata_uri'
lgtm_fast_pr_for_errata_mock.reset_mock()
notify_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_processing_valid_message_multiple_times(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
for (public_errata_uri, message) in self.valid_params:
public_errata_uri_mock.return_value = public_errata_uri
lgtm_fast_pr_for_errata_mock.reset_mock()
notify_mock.reset_mock()
message_copy = copy.deepcopy(message)
cache = {}
excluded_cache = {}
for _ in range(10):
message = copy.deepcopy(message_copy)
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
with self.subTest(message=message, errata_uri=public_errata_uri):
lgtm_fast_pr_for_errata_mock.assert_called_once()
with self.subTest(message=message, errata_uri=public_errata_uri):
notify_mock.assert_called_once()
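# Putting the cases above together, the contract of errata.process_message is
# roughly the following. This is a sketch under stated assumptions: the
# synopsis regex, the channel derivation, and the notify() payload are guesses,
# not the real code.
def _sketch_process_message(message, cache, excluded_cache, webhook, githubrepo, githubtoken):
    import re
    synopsis = message['synopsis']
    match = re.search(r'OpenShift Container Platform (\d+\.\d+\.\d+)', synopsis)
    if not match:
        if synopsis in excluded_cache:  # already rejected once: stay silent
            return
        excluded_cache[synopsis] = message['fulladvisory']
        raise ValueError('unexpected synopsis: {}'.format(synopsis))
    if message['fulladvisory'] in cache:  # already announced: no-op
        return
    version = match.group(1)
    advisory = message['fulladvisory'].rsplit('-', 1)[0]  # "RHBA-2020:0000-01" -> "RHBA-2020:0000"
    uri = errata.public_errata_uri(version=version, advisory=advisory,
                                   channel='candidate-{}'.format(version))
    if not uri or advisory not in uri:  # not public yet, or a non-matching errata
        return
    cache[message['fulladvisory']] = {
        'when': message['when'],
        'synopsis': synopsis,
        'uri': uri,
    }
    errata.notify(message, webhook=webhook)  # payload shape is an assumption
    errata.lgtm_fast_pr_for_errata(githubrepo, githubtoken, message)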
if __name__ == '__main__':
unittest.main()
1c35aa5795469720903de6148dacc6c54b641b80 | 8,290 | py | Python | Tensorflow-master/experiments/2D_car/car_env.py | gustasvs/AI | 23360a8865e8211568594c2b2ced11dcdc9b0006 | ["MIT"] | 1 | 2022-02-03T18:21:28.000Z | 2022-02-03T18:21:28.000Z | Tensorflow-master/experiments/2D_car/car_env.py | gustasvs/AI | 23360a8865e8211568594c2b2ced11dcdc9b0006 | ["MIT"] | null | null | null | Tensorflow-master/experiments/2D_car/car_env.py | gustasvs/AI | 23360a8865e8211568594c2b2ced11dcdc9b0006 | ["MIT"] | null | null | null |
import numpy as np
import pyglet
pyglet.clock.set_fps_limit(10000)
class CarEnv(object):
n_sensor = 5
action_dim = 1
state_dim = n_sensor
viewer = None
viewer_xy = (1080, 720)
sensor_max = 150.
start_point = [100, 100]
speed = 50.
dt = 0.1
def __init__(self, discrete_action=False):
self.is_discrete_action = discrete_action
if discrete_action:
self.actions = [-1, 0, 1]
else:
self.action_bound = [-1, 1]
self.terminal = False
# car state: (x, y, rotation, width, length)
self.car_info = np.array([0, 0, 0, 20, 40], dtype=np.float64)
self.obstacles_coords = [
# np.array([
# [120, 120],
# [380, 120],
# [380, 380],
# [120, 380],]),
np.array([
[500, 100],
[200, 100],
[200, 200],
[100, 200],])# ,
# np.array([
# [300, 300],
# [400, 300],
# [400, 400],
# [300, 400],])
]
self.sensor_info = self.sensor_max + np.zeros((self.n_sensor, 3)) # n sensors, (distance, end_x, end_y)
def step(self, action):
if self.is_discrete_action:
action = self.actions[action]
else:
action = np.clip(action, *self.action_bound)[0]
self.car_info[2] += action * np.pi/30 # max steering change: pi/30 rad = 6 degrees per step
self.car_info[:2] = self.car_info[:2] + \
self.speed * self.dt * np.array([np.cos(self.car_info[2]), np.sin(self.car_info[2])])
self._update_sensor()
s = self._get_state()
r = -1 if self.terminal else 0
return s, r, self.terminal
def reset(self):
self.terminal = False
self.car_info[:3] = np.array([*self.start_point, -np.pi/2])
self._update_sensor()
return self._get_state()
def render(self):
if self.viewer is None:
self.viewer = Viewer(*self.viewer_xy, self.car_info, self.sensor_info,self.obstacles_coords)
self.viewer.render()
def sample_action(self):
if self.is_discrete_action:
a = np.random.choice(list(range(3)))
else:
a = np.random.uniform(*self.action_bound, size=self.action_dim)
return a
def set_fps(self, fps=30):
pyglet.clock.set_fps_limit(fps)
def _get_state(self):
state = self.sensor_info[:, 0].flatten()/self.sensor_max
return state
def obstacles_collision(self, obstacle, s, q):
for oi in range(len(obstacle)):
p = obstacle[oi]
r = obstacle[(oi + 1) % len(obstacle)] - obstacle[oi]
if np.cross(r, s) != 0: # segments are not parallel, so they may intersect
t = np.cross((q - p), s) / np.cross(r, s)
u = np.cross((q - p), r) / np.cross(r, s)
if 0 <= t <= 1 and 0 <= u <= 1:
intersection = q + u * s
self.possible_intersections.append(intersection)
self.possible_sensor_distance.append(np.linalg.norm(u*s))
def _update_sensor(self):
cx, cy, rotation = self.car_info[:3]
n_sensors = len(self.sensor_info)
sensor_theta = np.linspace(-np.pi / 2, np.pi / 2, n_sensors)
xs = cx + (np.zeros((n_sensors, ))+self.sensor_max) * np.cos(sensor_theta)
ys = cy + (np.zeros((n_sensors, ))+self.sensor_max) * np.sin(sensor_theta)
xys = np.array([[x, y] for x, y in zip(xs, ys)]) # shape (5 sensors, 2)
# sensors
tmp_x = xys[:, 0] - cx
tmp_y = xys[:, 1] - cy
# apply rotation
rotated_x = tmp_x * np.cos(rotation) - tmp_y * np.sin(rotation)
rotated_y = tmp_x * np.sin(rotation) + tmp_y * np.cos(rotation)
# rotated x y
self.sensor_info[:, -2:] = np.vstack([rotated_x+cx, rotated_y+cy]).T
q = np.array([cx, cy])
for si in range(len(self.sensor_info)):
s = self.sensor_info[si, -2:] - q
self.possible_sensor_distance = [self.sensor_max]
self.possible_intersections = [self.sensor_info[si, -2:]]
# obstacle collision
for ob in range(len(self.obstacles_coords)):
self.obstacles_collision(self.obstacles_coords[ob], s, q)
# window collision
win_coord = np.array([
[0, 0],
[self.viewer_xy[0], 0],
[*self.viewer_xy],
[0, self.viewer_xy[1]],
[0, 0],
])
for oi in range(4):
p = win_coord[oi]
r = win_coord[(oi + 1) % len(win_coord)] - win_coord[oi]
if np.cross(r, s) != 0: # segments are not parallel, so they may intersect
t = np.cross((q - p), s) / np.cross(r, s)
u = np.cross((q - p), r) / np.cross(r, s)
if 0 <= t <= 1 and 0 <= u <= 1:
intersection = p + t * r
self.possible_intersections.append(intersection)
self.possible_sensor_distance.append(np.linalg.norm(intersection - q))
distance = np.min(self.possible_sensor_distance)
distance_index = np.argmin(self.possible_sensor_distance)
self.sensor_info[si, 0] = distance
self.sensor_info[si, -2:] = self.possible_intersections[distance_index]
if distance < self.car_info[-1]/2:
self.terminal = True
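# Quick numeric illustration of the t/u intersection test used above: it solves
# p + t*r = q + u*s for two 2D segments, which intersect iff 0 <= t <= 1 and
# 0 <= u <= 1. (Hypothetical helper for the reader; the environment never calls it.)
def _segment_intersection_demo():
    p, r = np.array([0., 0.]), np.array([2., 0.])   # horizontal segment
    q, s = np.array([1., -1.]), np.array([0., 2.])  # vertical segment crossing it
    t = np.cross(q - p, s) / np.cross(r, s)
    u = np.cross(q - p, r) / np.cross(r, s)
    assert 0 <= t <= 1 and 0 <= u <= 1
    return p + t * r  # -> array([1., 0.])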
class Viewer(pyglet.window.Window):
color = {
'background': [1]*3 + [1]
}
fps_display = pyglet.clock.ClockDisplay()
bar_thc = 5
def __init__(self, width, height, car_info, sensor_info, obstacles_coords):
super(Viewer, self).__init__(width, height, resizable=False, caption='2D car', vsync=False) # vsync=False so drawing is not capped to the monitor refresh rate
self.set_location(x=80, y=10)
pyglet.gl.glClearColor(*self.color['background'])
self.car_info = car_info
self.sensor_info = sensor_info
self.batch = pyglet.graphics.Batch()
background = pyglet.graphics.OrderedGroup(0)
foreground = pyglet.graphics.OrderedGroup(1)
self.sensors = []
line_coord = [0, 0] * 2
c = (73, 73, 73) * 2
for i in range(len(self.sensor_info)):
self.sensors.append(self.batch.add(2, pyglet.gl.GL_LINES, foreground, ('v2f', line_coord), ('c3B', c)))
car_box = [0, 0] * 4
c = (249, 86, 86) * 4
self.car = self.batch.add(4, pyglet.gl.GL_QUADS, foreground, ('v2f', car_box), ('c3B', c))
c = (134, 181, 244) * 4
for ob in range(len(obstacles_coords)):
#self.obstacle =
self.batch.add(4, pyglet.gl.GL_QUADS, background, ('v2f', obstacles_coords[ob].flatten()), ('c3B', c))
def render(self):
pyglet.clock.tick()
self._update()
self.switch_to()
self.dispatch_events()
self.dispatch_event('on_draw')
self.flip()
def on_draw(self):
self.clear()
self.batch.draw()
# self.fps_display.draw()
def _update(self):
cx, cy, r, w, l = self.car_info
# sensors
for i, sensor in enumerate(self.sensors):
sensor.vertices = [cx, cy, *self.sensor_info[i, -2:]]
# car
xys = [
[cx + l / 2, cy + w / 2],
[cx - l / 2, cy + w / 2],
[cx - l / 2, cy - w / 2],
[cx + l / 2, cy - w / 2],
]
r_xys = []
for x, y in xys:
tempX = x - cx
tempY = y - cy
# apply rotation
rotatedX = tempX * np.cos(r) - tempY * np.sin(r)
rotatedY = tempX * np.sin(r) + tempY * np.cos(r)
# rotated x y
x = rotatedX + cx
y = rotatedY + cy
r_xys += [x, y]
self.car.vertices = r_xys
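# Sanity check for the 2D rotation formula used in _update and _update_sensor
# (illustrative helper, not called by the program):
def _rotation_demo():
    x, y, r = 1.0, 0.0, np.pi / 2  # rotate (1, 0) by 90 degrees about the origin
    rotated_x = x * np.cos(r) - y * np.sin(r)
    rotated_y = x * np.sin(r) + y * np.cos(r)
    return round(rotated_x, 6), round(rotated_y, 6)  # -> (0.0, 1.0)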
if __name__ == '__main__':
np.random.seed(1)
env = CarEnv()
env.set_fps(30)
for ep in range(20):
s = env.reset()
# for t in range(100):
while True:
env.render()
s, r, done = env.step(env.sample_action())
if done:
break
import pyglet
pyglet.clock.set_fps_limit(10000)
class CarEnv(object):
n_sensor = 5
action_dim = 1
state_dim = n_sensor
viewer = None
viewer_xy = (1080, 720)
sensor_max = 150.
start_point = [100, 100]
speed = 50.
dt = 0.1
def __init__(self, discrete_action=False):
self.is_discrete_action = discrete_action
if discrete_action:
self.actions = [-1, 0, 1]
else:
self.action_bound = [-1, 1]
self.terminal = False
self.car_info = np.array([0, 0, 0, 20, 40], dtype=np.float64)
self.obstacles_coords = [
np.array([
[500, 100],
[200, 100],
[200, 200],
[100, 200],])
]
self.sensor_info = self.sensor_max + np.zeros((self.n_sensor, 3))
def step(self, action):
if self.is_discrete_action:
action = self.actions[action]
else:
action = np.clip(action, *self.action_bound)[0]
self.car_info[2] += action * np.pi/30
self.car_info[:2] = self.car_info[:2] + \
self.speed * self.dt * np.array([np.cos(self.car_info[2]), np.sin(self.car_info[2])])
self._update_sensor()
s = self._get_state()
r = -1 if self.terminal else 0
return s, r, self.terminal
def reset(self):
self.terminal = False
self.car_info[:3] = np.array([*self.start_point, -np.pi/2])
self._update_sensor()
return self._get_state()
def render(self):
if self.viewer is None:
self.viewer = Viewer(*self.viewer_xy, self.car_info, self.sensor_info,self.obstacles_coords)
self.viewer.render()
def sample_action(self):
if self.is_discrete_action:
a = np.random.choice(list(range(3)))
else:
a = np.random.uniform(*self.action_bound, size=self.action_dim)
return a
def set_fps(self, fps=30):
pyglet.clock.set_fps_limit(fps)
def _get_state(self):
state = self.sensor_info[:, 0].flatten()/self.sensor_max
return state
def obstacles_collision(self, obstacle, s, q):
for oi in range(len(obstacle)):
p = obstacle[oi]
r = obstacle[(oi + 1) % len(obstacle)] - obstacle[oi]
if np.cross(r, s) != 0:
t = np.cross((q - p), s) / np.cross(r, s)
u = np.cross((q - p), r) / np.cross(r, s)
if 0 <= t <= 1 and 0 <= u <= 1:
intersection = q + u * s
self.possible_intersections.append(intersection)
self.possible_sensor_distance.append(np.linalg.norm(u*s))
def _update_sensor(self):
cx, cy, rotation = self.car_info[:3]
n_sensors = len(self.sensor_info)
sensor_theta = np.linspace(-np.pi / 2, np.pi / 2, n_sensors)
xs = cx + (np.zeros((n_sensors, ))+self.sensor_max) * np.cos(sensor_theta)
ys = cy + (np.zeros((n_sensors, ))+self.sensor_max) * np.sin(sensor_theta)
xys = np.array([[x, y] for x, y in zip(xs, ys)])
tmp_x = xys[:, 0] - cx
tmp_y = xys[:, 1] - cy
rotated_x = tmp_x * np.cos(rotation) - tmp_y * np.sin(rotation)
rotated_y = tmp_x * np.sin(rotation) + tmp_y * np.cos(rotation)
self.sensor_info[:, -2:] = np.vstack([rotated_x+cx, rotated_y+cy]).T
q = np.array([cx, cy])
for si in range(len(self.sensor_info)):
s = self.sensor_info[si, -2:] - q
self.possible_sensor_distance = [self.sensor_max]
self.possible_intersections = [self.sensor_info[si, -2:]]
for ob in range(len(self.obstacles_coords)):
self.obstacles_collision(self.obstacles_coords[ob], s, q)
win_coord = np.array([
[0, 0],
[self.viewer_xy[0], 0],
[*self.viewer_xy],
[0, self.viewer_xy[1]],
[0, 0],
])
for oi in range(4):
p = win_coord[oi]
r = win_coord[(oi + 1) % len(win_coord)] - win_coord[oi]
if np.cross(r, s) != 0:
t = np.cross((q - p), s) / np.cross(r, s)
u = np.cross((q - p), r) / np.cross(r, s)
if 0 <= t <= 1 and 0 <= u <= 1:
intersection = p + t * r
self.possible_intersections.append(intersection)
self.possible_sensor_distance.append(np.linalg.norm(intersection - q))
distance = np.min(self.possible_sensor_distance)
distance_index = np.argmin(self.possible_sensor_distance)
self.sensor_info[si, 0] = distance
self.sensor_info[si, -2:] = self.possible_intersections[distance_index]
if distance < self.car_info[-1]/2:
self.terminal = True
class Viewer(pyglet.window.Window):
color = {
'background': [1]*3 + [1]
}
fps_display = pyglet.clock.ClockDisplay()
bar_thc = 5
def __init__(self, width, height, car_info, sensor_info, obstacles_coords):
super(Viewer, self).__init__(width, height, resizable=False, caption='2D car', vsync=False)
self.set_location(x=80, y=10)
pyglet.gl.glClearColor(*self.color['background'])
self.car_info = car_info
self.sensor_info = sensor_info
self.batch = pyglet.graphics.Batch()
background = pyglet.graphics.OrderedGroup(0)
foreground = pyglet.graphics.OrderedGroup(1)
self.sensors = []
line_coord = [0, 0] * 2
c = (73, 73, 73) * 2
for i in range(len(self.sensor_info)):
self.sensors.append(self.batch.add(2, pyglet.gl.GL_LINES, foreground, ('v2f', line_coord), ('c3B', c)))
car_box = [0, 0] * 4
c = (249, 86, 86) * 4
self.car = self.batch.add(4, pyglet.gl.GL_QUADS, foreground, ('v2f', car_box), ('c3B', c))
c = (134, 181, 244) * 4
for ob in range(len(obstacles_coords)):
self.batch.add(4, pyglet.gl.GL_QUADS, background, ('v2f', obstacles_coords[ob].flatten()), ('c3B', c))
def render(self):
pyglet.clock.tick()
self._update()
self.switch_to()
self.dispatch_events()
self.dispatch_event('on_draw')
self.flip()
def on_draw(self):
self.clear()
self.batch.draw()
def _update(self):
cx, cy, r, w, l = self.car_info
for i, sensor in enumerate(self.sensors):
sensor.vertices = [cx, cy, *self.sensor_info[i, -2:]]
xys = [
[cx + l / 2, cy + w / 2],
[cx - l / 2, cy + w / 2],
[cx - l / 2, cy - w / 2],
[cx + l / 2, cy - w / 2],
]
r_xys = []
for x, y in xys:
tempX = x - cx
tempY = y - cy
rotatedX = tempX * np.cos(r) - tempY * np.sin(r)
rotatedY = tempX * np.sin(r) + tempY * np.cos(r)
x = rotatedX + cx
y = rotatedY + cy
r_xys += [x, y]
self.car.vertices = r_xys
if __name__ == '__main__':
np.random.seed(1)
env = CarEnv()
env.set_fps(30)
for ep in range(20):
s = env.reset()
while True:
env.render()
s, r, done = env.step(env.sample_action())
if done:
break | true | true |
1c35acc60445d40021308b66c56037df70001c8a | 1,341 | py | Python | tests/test_0806-empty-lists-cartesian-fix.py | BioGeek/awkward-1.0 | 0cfb4e43c41d5c7d9830cc7b1d750485c0a93eb2 | ["BSD-3-Clause"] | 519 | 2019-10-17T12:36:22.000Z | 2022-03-26T23:28:19.000Z | tests/test_0806-empty-lists-cartesian-fix.py | BioGeek/awkward-1.0 | 0cfb4e43c41d5c7d9830cc7b1d750485c0a93eb2 | ["BSD-3-Clause"] | 924 | 2019-11-03T21:05:01.000Z | 2022-03-31T22:44:30.000Z | tests/test_0806-empty-lists-cartesian-fix.py | BioGeek/awkward-1.0 | 0cfb4e43c41d5c7d9830cc7b1d750485c0a93eb2 | ["BSD-3-Clause"] | 56 | 2019-12-17T15:49:22.000Z | 2022-03-09T20:34:06.000Z |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test_empty_arrays_cartesian():
one = ak.Array([])
two = ak.Array([])
with pytest.raises(ValueError) as err:
ak.to_list(ak.cartesian([one, two]))
assert isinstance(err.value, ValueError)
ak.to_list(ak.concatenate([one, two], axis=0))
def test_cartesian():
muon = ak.Array([[{"pt": 1.0}], []], with_name="muon")
electron = ak.Array([[], [{"pt": 1.0}]], with_name="electron")
muon = muon[muon.pt > 5]
electron = electron[electron.pt > 5]
leptons = ak.concatenate([muon, electron], axis=1)
candidate = ak.firsts(leptons)
assert ak.to_list(ak.Array(candidate)) == [None, None]
result = ak.cartesian([candidate, candidate], axis=0)
assert ak.to_list(result) == [
(None, None),
(None, None),
(None, None),
(None, None),
]
result = ak.cartesian([candidate, ak.Array([[1, 2, 3], []])], axis=1)
assert ak.to_list(result) == [None, None]
one, two = ak.broadcast_arrays(candidate, ak.Array([[1, 2, 3], []]))
assert ak.to_list(one) == [None, None]
assert ak.to_list(two) == [None, None]
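# For contrast with the empty/None cases above, a non-empty cartesian pairs
# elements per sublist (illustrative; behavior as of awkward 1.x):
# ak.to_list(ak.cartesian([ak.Array([[1, 2]]), ak.Array([["a"]])]))
# -> [[(1, "a"), (2, "a")]]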
1c35ad0d92514753f02b80b801f52e4c875bc666 | 678 | py | Python | publichealth/home/migrations/0006_auto_20170308_2025.py | pcoder/public-health-ch | cebc4849653560c54238b67814074353ff7c01f3 | ["MIT"] | 2 | 2020-10-29T16:27:21.000Z | 2021-06-07T12:47:46.000Z | publichealth/home/migrations/0006_auto_20170308_2025.py | pcoder/public-health-ch | cebc4849653560c54238b67814074353ff7c01f3 | ["MIT"] | 11 | 2017-05-09T10:50:28.000Z | 2021-12-15T17:01:23.000Z | publichealth/home/migrations/0006_auto_20170308_2025.py | pcoder/public-health-ch | cebc4849653560c54238b67814074353ff7c01f3 | ["MIT"] | 4 | 2017-04-24T13:06:55.000Z | 2021-06-04T02:18:32.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-08 19:25
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('home', '0005_auto_20170308_2023'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='body_de',
field=wagtail.core.fields.RichTextField(blank=True, default=''),
),
migrations.AlterField(
model_name='homepage',
name='body_fr',
field=wagtail.core.fields.RichTextField(blank=True, default=''),
),
]
1c35ad55636c2686b513661ccc358f40c7bd6bba | 1,571 | py | Python | tests/test_featurizers/test_fasttext_featurizer.py | tienhoang1994/rasa-nlu-examples | fe12dbc814d992382c1ca1d926b340139200928f | ["Apache-2.0"] | 1 | 2022-03-31T17:00:38.000Z | 2022-03-31T17:00:38.000Z | tests/test_featurizers/test_fasttext_featurizer.py | tienhoang1994/rasa-nlu-examples | fe12dbc814d992382c1ca1d926b340139200928f | ["Apache-2.0"] | null | null | null | tests/test_featurizers/test_fasttext_featurizer.py | tienhoang1994/rasa-nlu-examples | fe12dbc814d992382c1ca1d926b340139200928f | ["Apache-2.0"] | null | null | null |
import pathlib
import pytest
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from .dense_featurizer_checks import dense_standard_test_combinations
from rasa_nlu_examples.featurizers.dense.fasttext_featurizer import FastTextFeaturizer
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.local_model_storage import LocalModelStorage
from rasa.engine.graph import ExecutionContext
test_folder = pathlib.Path(__file__).parent.parent.absolute()
cache_path = str(test_folder / "data" / "fasttext" / "custom_fasttext_model.bin")
node_storage = LocalModelStorage("tmp/storage")
node_resource = Resource("tokenizer")
context = ExecutionContext(node_storage, node_resource)
config = {"cache_path": cache_path}
tokenizer = WhitespaceTokenizer(config=WhitespaceTokenizer.get_default_config())
featurizer = FastTextFeaturizer(config=config, name=context.node_name)
@pytest.mark.fasttext
def test_model_loaded():
assert featurizer
@pytest.mark.fasttext
@pytest.mark.parametrize(
"test_fn,tok,feat,msg",
dense_standard_test_combinations(tokenizer=tokenizer, featurizer=featurizer),
)
def test_featurizer_checks(test_fn, tok, feat, msg):
test_fn(tok, feat, msg)
@pytest.mark.fasttext
def test_raise_cachedir_not_exists():
with pytest.raises(FileNotFoundError):
FastTextFeaturizer(config={"cache_path": "foobar.kv"}, name=context.node_name)
@pytest.mark.fasttext
def test_raise_cachedir_not_given():
with pytest.raises(ValueError):
FastTextFeaturizer(config={}, name=context.node_name)
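# For reference, a fixture like custom_fasttext_model.bin can be produced with
# a snippet along these lines (illustrative; the corpus path and hyperparameters
# are assumptions, not necessarily what this repo used):
#
# import fasttext
# model = fasttext.train_unsupervised("tests/data/fasttext/corpus.txt", dim=10, minCount=1)
# model.save_model("tests/data/fasttext/custom_fasttext_model.bin")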
1c35ae89ea08ad34a1f9ffe529f3f9ee74d3d51c | 1,203 | py | Python | venv/lib/python3.8/site-packages/test/test_api_service_out.py | akshitgoyal/csc398nlp | 6adf80cb7fa3737f88faf73a6e818da495b95ab4 | ["MIT"] | 1 | 2020-09-28T10:09:25.000Z | 2020-09-28T10:09:25.000Z | venv/lib/python3.8/site-packages/test/test_api_service_out.py | akshitgoyal/NLP-Research-Project | 6adf80cb7fa3737f88faf73a6e818da495b95ab4 | ["MIT"] | null | null | null | venv/lib/python3.8/site-packages/test/test_api_service_out.py | akshitgoyal/NLP-Research-Project | 6adf80cb7fa3737f88faf73a6e818da495b95ab4 | ["MIT"] | 1 | 2020-07-01T18:46:20.000Z | 2020-07-01T18:46:20.000Z |
# coding: utf-8
"""
NamSor API v2
NamSor API v2 : endpoints to process personal names (gender, cultural origin or ethnicity) in all alphabets or languages. Use GET methods for small tests, but prefer POST methods for higher throughput (batch processing of up to 100 names at a time). Need something you can't find here? We have many more features coming soon. Let us know, we'll do our best to add it! # noqa: E501
OpenAPI spec version: 2.0.10
Contact: contact@namsor.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.api_service_out import APIServiceOut # noqa: E501
from openapi_client.rest import ApiException
class TestAPIServiceOut(unittest.TestCase):
"""APIServiceOut unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAPIServiceOut(self):
"""Test APIServiceOut"""
# FIXME: construct object with mandatory attributes with example values
# model = openapi_client.models.api_service_out.APIServiceOut() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
1c35aecec3f373c15ba7cd460b115cc89022eb60 | 111 | py | Python | datatrans/fooddata/__init__.py | KooCook/datatrans | 65c80da4d8a1ed67963b9d704b361c864cb1151b | ["BSD-3-Clause"] | 1 | 2020-10-24T04:07:42.000Z | 2020-10-24T04:07:42.000Z | datatrans/fooddata/__init__.py | KooCook/datatrans | 65c80da4d8a1ed67963b9d704b361c864cb1151b | ["BSD-3-Clause"] | null | null | null | datatrans/fooddata/__init__.py | KooCook/datatrans | 65c80da4d8a1ed67963b9d704b361c864cb1151b | ["BSD-3-Clause"] | null | null | null |
from datatrans.fooddata import api
from datatrans.fooddata import detail
from datatrans.fooddata import search
| 27.75 | 37 | 0.864865 | from datatrans.fooddata import api
from datatrans.fooddata import detail
from datatrans.fooddata import search
| true | true |
1c35afab8a78f4681bf577dc3bbd6a8f18a92c36 | 657 | py | Python | kospeech/checkpoint/__init__.py | daiyaanarfeen/kospeech | 5aff5c7647e5cceceddf7b22c991777fc3792400 | ["Apache-2.0"] | 257 | 2020-06-06T14:20:47.000Z | 2021-08-12T05:01:39.000Z | kospeech/checkpoint/__init__.py | daiyaanarfeen/kospeech | 5aff5c7647e5cceceddf7b22c991777fc3792400 | ["Apache-2.0"] | 100 | 2020-06-08T00:39:28.000Z | 2021-08-04T11:22:02.000Z | kospeech/checkpoint/__init__.py | daiyaanarfeen/kospeech | 5aff5c7647e5cceceddf7b22c991777fc3792400 | ["Apache-2.0"] | 96 | 2020-06-10T06:12:52.000Z | 2021-08-09T14:40:01.000Z |
# Copyright (c) 2020, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kospeech.checkpoint.checkpoint import Checkpoint
from kospeech.checkpoint.checkpoint import Checkpoint
1c35b01d681087d3df62b6a76903aa79019ea58d | 509 | py | Python | magPi_02_pygameGraphicsWindow.py | oniMoNaku/thePit | f82d2dc70346e6188fca493a4b9373aa99ccfa32 | ["Unlicense"] | null | null | null | magPi_02_pygameGraphicsWindow.py | oniMoNaku/thePit | f82d2dc70346e6188fca493a4b9373aa99ccfa32 | ["Unlicense"] | null | null | null | magPi_02_pygameGraphicsWindow.py | oniMoNaku/thePit | f82d2dc70346e6188fca493a4b9373aa99ccfa32 | ["Unlicense"] | null | null | null |
# today is 389e
# the python pit
# magPi - 02
# OPEN A PYGAME GRAPHICS WINDOW
import os, pygame
from pygame.locals import *
pygame.init()
clock = pygame.time.Clock()
os.environ['SDL_VIDEO_WINDOW_POS'] = 'center'
# This title appears along the top of the graphics window
pygame.display.set_caption("The Title Of My Program")
# Opens a graphics window called 'screen' with width 400 height 200
screen = pygame.display.set_mode([400,200],0,32)
pygame.time.wait(5000) # A 5 second pause before ending the program
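# A typical next step (illustrative, not part of the original listing) would
# replace the fixed wait with an event loop that keeps the window open until
# the user closes it:
# running = True
# while running:
#     for event in pygame.event.get():
#         if event.type == QUIT: # QUIT comes from pygame.locals
#             running = False
#     clock.tick(30) # limit the loop to 30 frames per second
# pygame.quit()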
1c35b0a824439fa3f3b63575ab92750ffcf360c6 | 7,628 | py | Python | tests/unit/test_opentelemetry_tracing.py | jprice-quizlet/python-bigquery | dcfbac267fbf66d189b0cc7e76f4712122a74b7b | ["Apache-2.0"] | null | null | null | tests/unit/test_opentelemetry_tracing.py | jprice-quizlet/python-bigquery | dcfbac267fbf66d189b0cc7e76f4712122a74b7b | ["Apache-2.0"] | null | null | null | tests/unit/test_opentelemetry_tracing.py | jprice-quizlet/python-bigquery | dcfbac267fbf66d189b0cc7e76f4712122a74b7b | ["Apache-2.0"] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import sys
import mock
try:
import opentelemetry
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleExportSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
InMemorySpanExporter,
)
except ImportError:
opentelemetry = None
import pytest
from six.moves import reload_module
from google.cloud.bigquery import opentelemetry_tracing
TEST_SPAN_NAME = "bar"
TEST_SPAN_ATTRIBUTES = {"foo": "baz"}
@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
@pytest.fixture
def setup():
reload_module(opentelemetry_tracing)
tracer_provider = TracerProvider()
memory_exporter = InMemorySpanExporter()
span_processor = SimpleExportSpanProcessor(memory_exporter)
tracer_provider.add_span_processor(span_processor)
trace.set_tracer_provider(tracer_provider)
yield memory_exporter
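# The yielded InMemorySpanExporter would also allow asserting on exported
# spans, e.g. memory_exporter.get_finished_spans() (illustrative; these tests
# instead assert on the live span object returned by create_span).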
@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
def test_opentelemetry_not_installed(setup, monkeypatch):
monkeypatch.setitem(sys.modules, "opentelemetry", None)
reload_module(opentelemetry_tracing)
with opentelemetry_tracing.create_span("No-op for opentelemetry") as span:
assert span is None
@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
def test_opentelemetry_success(setup):
expected_attributes = {"foo": "baz", "db.system": "BigQuery"}
with opentelemetry_tracing.create_span(
TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, client=None, job_ref=None
) as span:
assert span is not None
assert span.name == TEST_SPAN_NAME
assert span.attributes == expected_attributes
@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
def test_default_client_attributes(setup):
expected_attributes = {
"foo": "baz",
"db.system": "BigQuery",
"db.name": "test_project",
"location": "test_location",
}
with mock.patch("google.cloud.bigquery.client.Client") as test_client:
test_client.project = "test_project"
test_client.location = "test_location"
with opentelemetry_tracing.create_span(
TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, client=test_client
) as span:
assert span is not None
assert span.name == TEST_SPAN_NAME
assert span.attributes == expected_attributes
@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
def test_default_job_attributes(setup):
import google.cloud._helpers
time_created = datetime.datetime(
2010, 5, 19, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
)
started_time = datetime.datetime(
2011, 10, 1, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
)
ended_time = datetime.datetime(
2011, 10, 2, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
)
error_result = [
{"errorResult1": "some_error_result1", "errorResult2": "some_error_result2"}
]
expected_attributes = {
"db.system": "BigQuery",
"db.name": "test_project_id",
"location": "test_location",
"num_child_jobs": "0",
"job_id": "test_job_id",
"foo": "baz",
"parent_job_id": "parent_job_id",
"timeCreated": time_created.isoformat(),
"timeStarted": started_time.isoformat(),
"timeEnded": ended_time.isoformat(),
"hasErrors": True,
"state": "some_job_state",
}
with mock.patch("google.cloud.bigquery.job._AsyncJob") as test_job_ref:
test_job_ref.job_id = "test_job_id"
test_job_ref.location = "test_location"
test_job_ref.project = "test_project_id"
test_job_ref.num_child_jobs = "0"
test_job_ref.parent_job_id = "parent_job_id"
test_job_ref.created = time_created
test_job_ref.started = started_time
test_job_ref.ended = ended_time
test_job_ref.error_result = error_result
test_job_ref.state = "some_job_state"
with opentelemetry_tracing.create_span(
TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, job_ref=test_job_ref
) as span:
assert span is not None
assert span.name == TEST_SPAN_NAME
assert span.attributes == expected_attributes
@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
def test_default_no_data_leakage(setup):
import google.auth.credentials
from google.cloud.bigquery import client
from google.cloud.bigquery import job
mock_credentials = mock.Mock(spec=google.auth.credentials.Credentials)
test_client = client.Client(
project="test_project", credentials=mock_credentials, location="test_location"
)
expected_attributes = {
"foo": "baz",
"db.system": "BigQuery",
"db.name": "test_project",
"location": "test_location",
}
with opentelemetry_tracing.create_span(
TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, client=test_client
) as span:
assert span.name == TEST_SPAN_NAME
assert span.attributes == expected_attributes
test_job_reference = job._JobReference(
job_id="test_job_id", project="test_project_id", location="test_location"
)
test_client = client.Client(
project="test_project", credentials=mock_credentials, location="test_location"
)
test_job = job._AsyncJob(job_id=test_job_reference, client=test_client)
expected_attributes = {
"db.system": "BigQuery",
"db.name": "test_project_id",
"location": "test_location",
"num_child_jobs": 0,
"job_id": "test_job_id",
"foo": "baz",
"hasErrors": False,
}
with opentelemetry_tracing.create_span(
TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, job_ref=test_job
) as span:
assert span.name == TEST_SPAN_NAME
assert span.attributes == expected_attributes
@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
def test_span_creation_error(setup):
import google.auth.credentials
from google.cloud.bigquery import client
from google.api_core.exceptions import GoogleAPICallError, InvalidArgument
mock_credentials = mock.Mock(spec=google.auth.credentials.Credentials)
test_client = client.Client(
project="test_project", credentials=mock_credentials, location="test_location"
)
expected_attributes = {
"foo": "baz",
"db.system": "BigQuery",
"db.name": "test_project",
"location": "test_location",
}
with pytest.raises(GoogleAPICallError):
with opentelemetry_tracing.create_span(
TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, client=test_client
) as span:
assert span.name == TEST_SPAN_NAME
assert span.attributes == expected_attributes
raise InvalidArgument("test_error")
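# --- Hedged usage sketch (not part of the original test module) ---
# The pattern these tests exercise: callers wrap work in create_span,
# which yields None (a no-op) when opentelemetry is not installed.
#
#   with opentelemetry_tracing.create_span(
#       "my_span", attributes={"foo": "baz"}
#   ) as span:
#       pass  # span is None without opentelemetry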
| 35.812207 | 86 | 0.700708 |
import datetime
import sys
import mock
try:
import opentelemetry
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleExportSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
InMemorySpanExporter,
)
except ImportError:
opentelemetry = None
import pytest
from six.moves import reload_module
from google.cloud.bigquery import opentelemetry_tracing
TEST_SPAN_NAME = "bar"
TEST_SPAN_ATTRIBUTES = {"foo": "baz"}
@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
@pytest.fixture
def setup():
reload_module(opentelemetry_tracing)
tracer_provider = TracerProvider()
memory_exporter = InMemorySpanExporter()
span_processor = SimpleExportSpanProcessor(memory_exporter)
tracer_provider.add_span_processor(span_processor)
trace.set_tracer_provider(tracer_provider)
yield memory_exporter
@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
def test_opentelemetry_not_installed(setup, monkeypatch):
monkeypatch.setitem(sys.modules, "opentelemetry", None)
reload_module(opentelemetry_tracing)
with opentelemetry_tracing.create_span("No-op for opentelemetry") as span:
assert span is None
@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
def test_opentelemetry_success(setup):
expected_attributes = {"foo": "baz", "db.system": "BigQuery"}
with opentelemetry_tracing.create_span(
TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, client=None, job_ref=None
) as span:
assert span is not None
assert span.name == TEST_SPAN_NAME
assert span.attributes == expected_attributes
@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
def test_default_client_attributes(setup):
expected_attributes = {
"foo": "baz",
"db.system": "BigQuery",
"db.name": "test_project",
"location": "test_location",
}
with mock.patch("google.cloud.bigquery.client.Client") as test_client:
test_client.project = "test_project"
test_client.location = "test_location"
with opentelemetry_tracing.create_span(
TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, client=test_client
) as span:
assert span is not None
assert span.name == TEST_SPAN_NAME
assert span.attributes == expected_attributes
@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
def test_default_job_attributes(setup):
import google.cloud._helpers
time_created = datetime.datetime(
2010, 5, 19, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
)
started_time = datetime.datetime(
2011, 10, 1, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
)
ended_time = datetime.datetime(
2011, 10, 2, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
)
error_result = [
{"errorResult1": "some_error_result1", "errorResult2": "some_error_result2"}
]
expected_attributes = {
"db.system": "BigQuery",
"db.name": "test_project_id",
"location": "test_location",
"num_child_jobs": "0",
"job_id": "test_job_id",
"foo": "baz",
"parent_job_id": "parent_job_id",
"timeCreated": time_created.isoformat(),
"timeStarted": started_time.isoformat(),
"timeEnded": ended_time.isoformat(),
"hasErrors": True,
"state": "some_job_state",
}
with mock.patch("google.cloud.bigquery.job._AsyncJob") as test_job_ref:
test_job_ref.job_id = "test_job_id"
test_job_ref.location = "test_location"
test_job_ref.project = "test_project_id"
test_job_ref.num_child_jobs = "0"
test_job_ref.parent_job_id = "parent_job_id"
test_job_ref.created = time_created
test_job_ref.started = started_time
test_job_ref.ended = ended_time
test_job_ref.error_result = error_result
test_job_ref.state = "some_job_state"
with opentelemetry_tracing.create_span(
TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, job_ref=test_job_ref
) as span:
assert span is not None
assert span.name == TEST_SPAN_NAME
assert span.attributes == expected_attributes
@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
def test_default_no_data_leakage(setup):
import google.auth.credentials
from google.cloud.bigquery import client
from google.cloud.bigquery import job
mock_credentials = mock.Mock(spec=google.auth.credentials.Credentials)
test_client = client.Client(
project="test_project", credentials=mock_credentials, location="test_location"
)
expected_attributes = {
"foo": "baz",
"db.system": "BigQuery",
"db.name": "test_project",
"location": "test_location",
}
with opentelemetry_tracing.create_span(
TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, client=test_client
) as span:
assert span.name == TEST_SPAN_NAME
assert span.attributes == expected_attributes
test_job_reference = job._JobReference(
job_id="test_job_id", project="test_project_id", location="test_location"
)
test_client = client.Client(
project="test_project", credentials=mock_credentials, location="test_location"
)
test_job = job._AsyncJob(job_id=test_job_reference, client=test_client)
expected_attributes = {
"db.system": "BigQuery",
"db.name": "test_project_id",
"location": "test_location",
"num_child_jobs": 0,
"job_id": "test_job_id",
"foo": "baz",
"hasErrors": False,
}
with opentelemetry_tracing.create_span(
TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, job_ref=test_job
) as span:
assert span.name == TEST_SPAN_NAME
assert span.attributes == expected_attributes
@pytest.mark.skipif(opentelemetry is None, reason="Require `opentelemetry`")
def test_span_creation_error(setup):
import google.auth.credentials
from google.cloud.bigquery import client
from google.api_core.exceptions import GoogleAPICallError, InvalidArgument
mock_credentials = mock.Mock(spec=google.auth.credentials.Credentials)
test_client = client.Client(
project="test_project", credentials=mock_credentials, location="test_location"
)
expected_attributes = {
"foo": "baz",
"db.system": "BigQuery",
"db.name": "test_project",
"location": "test_location",
}
with pytest.raises(GoogleAPICallError):
with opentelemetry_tracing.create_span(
TEST_SPAN_NAME, attributes=TEST_SPAN_ATTRIBUTES, client=test_client
) as span:
assert span.name == TEST_SPAN_NAME
assert span.attributes == expected_attributes
raise InvalidArgument("test_error")
| true | true |
1c35b178ec8c60bfe97e723609f7a883c6a014de | 2,808 | py | Python | sensorflow/console.py | maxpowel/sensorflow-python | 7c6f23087fbff085c43dd6d3bc00ce8dae884484 | [
"MIT"
] | null | null | null | sensorflow/console.py | maxpowel/sensorflow-python | 7c6f23087fbff085c43dd6d3bc00ce8dae884484 | [
"MIT"
] | null | null | null | sensorflow/console.py | maxpowel/sensorflow-python | 7c6f23087fbff085c43dd6d3bc00ce8dae884484 | [
"MIT"
] | null | null | null | import sensorflow
import cmd
# example of config: ds18b20 0x28 0xFF 0x10 0x93 0x6F 0x14 0x4 0x11
# example of config: dht 11 14
# 28ff5d216d1404cd
# 28FF608D6F140451
# 28FF10936F140411
# Robohuerto
# dht 11 9
# dht 21 6
# ina219
print("Initializing...")
source = sensorflow.SerialSource()
serializer = sensorflow.JsonSerializer()
sf = sensorflow.Sensorflow(source=source, serializer=serializer)
sf.ping()
def ds18b20(params):
return sensorflow.DS18B20Sensor([int(i, 0) for i in params])
def dht(params):
return sensorflow.DHTSensor(*[int(i) for i in params])
def ina219(params):
return sensorflow.INA219Sensor()
configs = {
"ds18b20": ds18b20,
"dht": dht,
"ina219": ina219
}
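# Hedged sketch (not in the original file): a console config line such as
# "dht 11 14" is split into sensor_type "dht" and params ["11", "14"], then
# dispatched as configs["dht"](["11", "14"]) -> sensorflow.DHTSensor(11, 14).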
class SensorflowCommands(cmd.Cmd):
def do_read(self, line):
print(sf.sensor_read())
def do_status(self, line):
print(sf.status())
def do_ping(self, line):
sf.ping()
def do_config(self, line):
if line == "help":
print("Available sensors:")
print([i for i in configs.keys()])
else:
i = 0
read = True
configuration_list = []
while read:
sensor_config = input("Sensor {i}: ".format(i=i))
params = sensor_config.split()
if len(params) > 0:
sensor_type = params.pop(0)
sensor_type = sensor_type.lower()
if sensor_type in configs:
try:
configuration_list.append(configs[sensor_type](params))
i += 1
except Exception as e:
print("Error")
print(str(e))
else:
print("{sensor_type} is not available, available are:")
print([i for i in configs.keys()])
else:
read = False
response = None
while response is None:
response = input("Will be written the configuration for {n}, sensors. Continue with it? (y/n)".format(n=len(configuration_list)))
if response == "y":
print(sf.configure(configuration_list))
elif response != "n":
response = None
def do_exit(self, line):
sf.close()
exit()
# def do_greet(self, line):
# from roams.fonio.kernel_dev_command import greet
# try:
# greet()
# except:
# print("Exception in user code:")
# print('-' * 60)
# traceback.print_exc(file=sys.stdout)
# print('-' * 60)
try:
SensorflowCommands().cmdloop()
except KeyboardInterrupt:
sf.close()
| 26.742857 | 145 | 0.530627 | import sensorflow
import cmd
print("Initializing...")
source = sensorflow.SerialSource()
serializer = sensorflow.JsonSerializer()
sf = sensorflow.Sensorflow(source=source, serializer=serializer)
sf.ping()
def ds18b20(params):
return sensorflow.DS18B20Sensor([int(i, 0) for i in params])
def dht(params):
return sensorflow.DHTSensor(*[int(i) for i in params])
def ina219(params):
return sensorflow.INA219Sensor()
configs = {
"ds18b20": ds18b20,
"dht": dht,
"ina219": ina219
}
class SensorflowCommands(cmd.Cmd):
def do_read(self, line):
print(sf.sensor_read())
def do_status(self, line):
print(sf.status())
def do_ping(self, line):
sf.ping()
def do_config(self, line):
if line == "help":
print("Available sensors:")
print([i for i in configs.keys()])
else:
i = 0
read = True
configuration_list = []
while read:
sensor_config = input("Sensor {i}: ".format(i=i))
params = sensor_config.split()
if len(params) > 0:
sensor_type = params.pop(0)
sensor_type = sensor_type.lower()
if sensor_type in configs:
try:
configuration_list.append(configs[sensor_type](params))
i += 1
except Exception as e:
print("Error")
print(str(e))
else:
print("{sensor_type} is not available, available are:")
print([i for i in configs.keys()])
else:
read = False
response = None
while response is None:
response = input("Will be written the configuration for {n}, sensors. Continue with it? (y/n)".format(n=len(configuration_list)))
if response == "y":
print(sf.configure(configuration_list))
elif response != "n":
response = None
def do_exit(self, line):
sf.close()
exit()
try:
SensorflowCommands().cmdloop()
except KeyboardInterrupt:
sf.close()
| true | true |
1c35b191a748d9f7c658e46f2120c2f6153782c5 | 1,247 | py | Python | html2md/commands/KeepTag.py | IstvanOri/HTML2MD | f358a25135f9ca28266c774dafc4948cb8df33e6 | [
"Beerware"
] | null | null | null | html2md/commands/KeepTag.py | IstvanOri/HTML2MD | f358a25135f9ca28266c774dafc4948cb8df33e6 | [
"Beerware"
] | null | null | null | html2md/commands/KeepTag.py | IstvanOri/HTML2MD | f358a25135f9ca28266c774dafc4948cb8df33e6 | [
"Beerware"
] | 1 | 2021-11-08T01:53:55.000Z | 2021-11-08T01:53:55.000Z | import re
from html2md.commands.Command import Command
class KeepTag(Command):
"""
Command that keeps the original HTML tag. Any number of parameters can be passed for this Command. If no
parameters are passed, all attributes will be kept. If at least one parameter is passed, then only those
attributes will be kept that are in the parameter list.
"""
SHORT_TAGS = ["img", "br"]
def __init__(self, args):
super().__init__()
self._whitelist = []
for key, value in args.items():
self._whitelist.append(value)
def __copy__(self):
return KeepTag({i: self._whitelist[i] for i in range(0, len(self._whitelist))})
def execute(self) -> str:
"""
Returns the content linearized
:return: "The content without linebrakes"
"""
result = "<" + self.tag
for attr in self._attrs:
if len(self._whitelist) == 0 or attr[0] in self._whitelist:
result += " "+attr[0] + "=\"" + attr[1] + "\""
if self.tag in self.SHORT_TAGS:
result += "/>"
else:
result += ">"
result += super().execute()
result += "</" + self.tag + ">"
return result
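# --- Hedged usage sketch (not part of the original module); assumes the
# base Command populates `tag` and `_attrs` the way execute() reads them ---
#
#   cmd = KeepTag({0: "href"})                        # keep only `href`
#   cmd.tag = "a"                                     # hypothetical setup
#   cmd._attrs = [("href", "/x"), ("style", "color:red")]
#   # execute() emits '<a href="/x">' + super().execute() + '</a>'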
| 31.175 | 108 | 0.57097 | import re
from html2md.commands.Command import Command
class KeepTag(Command):
SHORT_TAGS = ["img", "br"]
def __init__(self, args):
super().__init__()
self._whitelist = []
for key, value in args.items():
self._whitelist.append(value)
def __copy__(self):
return KeepTag({i: self._whitelist[i] for i in range(0, len(self._whitelist))})
def execute(self) -> str:
result = "<" + self.tag
for attr in self._attrs:
if len(self._whitelist) == 0 or attr[0] in self._whitelist:
result += " "+attr[0] + "=\"" + attr[1] + "\""
if self.tag in self.SHORT_TAGS:
result += "/>"
else:
result += ">"
result += super().execute()
result += "</" + self.tag + ">"
return result
| true | true |
1c35b406fe80a3f1d3c20b084895811eb57aef56 | 1,704 | py | Python | cluster/silhouette.py | thomas-mazumder/project5 | b8f2eda71dcfb550d030a2ee2d9b136005198aca | [
"MIT"
] | null | null | null | cluster/silhouette.py | thomas-mazumder/project5 | b8f2eda71dcfb550d030a2ee2d9b136005198aca | [
"MIT"
] | null | null | null | cluster/silhouette.py | thomas-mazumder/project5 | b8f2eda71dcfb550d030a2ee2d9b136005198aca | [
"MIT"
] | null | null | null | import numpy as np
from scipy.spatial.distance import cdist
class Silhouette:
def __init__(self, metric: str = "euclidean"):
"""
inputs:
metric: str
the name of the distance metric to use
"""
self._metric = metric
def score(self, X: np.ndarray, y: np.ndarray) -> np.ndarray:
"""
calculates the silhouette score for each of the observations
inputs:
X: np.ndarray
A 2D matrix where the rows are observations and columns are features.
y: np.ndarray
a 1D array representing the cluster labels for each of the observations in `X`
outputs:
np.ndarray
a 1D array with the silhouette scores for each of the observations in `X`
"""
s = np.zeros(X.shape[0])
distances = cdist(X, X, self._metric)
for i in range(X.shape[0]):
a = self._calculate_a(distances, y, i)
b = self._calculate_b(distances, y, i)
s[i] = (b - a)/np.max([a, b])
return s
def _calculate_a(self, distances, y, i):
"""
Calculate the intra cluster distance for a data point
"""
distances = distances[i,y == y[i]]
return np.sum(distances)/(np.sum(y == y[i]) - 1)
def _calculate_b(self, distances, y, i):
"""
Calculate the inter cluster distance for a data point
"""
        # Labels run 0..np.max(y), so there are np.max(y) + 1 clusters to consider.
        inter_distances = np.ones(np.max(y) + 1) * np.inf
        for j in range(np.max(y) + 1):
            if j != y[i]:
inter_distances[j] = np.sum(distances[i,y == j])/np.sum(y == j)
return np.min(inter_distances)
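# --- Hedged usage sketch (not part of the original module) ---
# Two well-separated clusters should score close to 1:
#
#   X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
#   y = np.array([0, 0, 1, 1])
#   Silhouette().score(X, y)   # -> four values near 1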
| 32.150943 | 94 | 0.545188 | import numpy as np
from scipy.spatial.distance import cdist
class Silhouette:
def __init__(self, metric: str = "euclidean"):
self._metric = metric
def score(self, X: np.ndarray, y: np.ndarray) -> np.ndarray:
s = np.zeros(X.shape[0])
distances = cdist(X, X, self._metric)
for i in range(X.shape[0]):
a = self._calculate_a(distances, y, i)
b = self._calculate_b(distances, y, i)
s[i] = (b - a)/np.max([a, b])
return s
def _calculate_a(self, distances, y, i):
distances = distances[i,y == y[i]]
return np.sum(distances)/(np.sum(y == y[i]) - 1)
def _calculate_b(self, distances, y, i):
        inter_distances = np.ones(np.max(y) + 1) * np.inf
        for j in range(np.max(y) + 1):
            if j != y[i]:
inter_distances[j] = np.sum(distances[i,y == j])/np.sum(y == j)
return np.min(inter_distances)
| true | true |
1c35b41d2b552f2d8aeffa311e0ce09792ebbbc7 | 1,486 | py | Python | lingobarter/core/app.py | LeightonStreet/LingoBarter | 3fffd95c38973ca9b9ce284070522ba758efe489 | [
"Apache-2.0"
] | 7 | 2016-01-22T05:01:52.000Z | 2019-02-07T10:23:12.000Z | lingobarter/core/app.py | LeightonStreet/LeightonStreet | 3fffd95c38973ca9b9ce284070522ba758efe489 | [
"Apache-2.0"
] | 6 | 2016-03-26T23:32:47.000Z | 2016-04-01T07:10:42.000Z | lingobarter/core/app.py | LeightonStreet/LeightonStreet | 3fffd95c38973ca9b9ce284070522ba758efe489 | [
"Apache-2.0"
] | 1 | 2016-03-26T23:31:00.000Z | 2016-03-26T23:31:00.000Z | from flask import Flask, Blueprint
# noinspection PyProtectedMember
from flask.helpers import _endpoint_from_view_func
from lingobarter.core.config import LingobarterConfig
from lingobarter.utils.aliases import dispatch_aliases
class LingobarterApp(Flask):
"""
Implements customizations on Flask
- Config handler
- Aliases dispatching before request
"""
config_class = LingobarterConfig
def make_config(self, instance_relative=False):
"""This method should be removed when Flask is >=0.11
:param instance_relative:
"""
root_path = self.root_path
if instance_relative:
root_path = self.instance_path
return self.config_class(root_path, self.default_config)
def preprocess_request(self):
return dispatch_aliases() or super(LingobarterApp, self).preprocess_request()
def add_lingobarter_url_rule(self, rule, endpoint=None, view_func=None, **options):
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
if not endpoint.startswith('lingobarter.'):
endpoint = 'lingobarter.core.' + endpoint
self.add_url_rule(rule, endpoint, view_func, **options)
class LingobarterModule(Blueprint):
"""Overwrite blueprint namespace to lingobarter.modules.name"""
def __init__(self, name, *args, **kwargs):
name = "lingobarter.modules." + name
super(LingobarterModule, self).__init__(name, *args, **kwargs)
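# --- Hedged usage sketch (not part of the original module) ---
#   module = LingobarterModule("posts", __name__)
#   module.name   # -> "lingobarter.modules.posts"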
| 34.55814 | 87 | 0.709287 | from flask import Flask, Blueprint
from flask.helpers import _endpoint_from_view_func
from lingobarter.core.config import LingobarterConfig
from lingobarter.utils.aliases import dispatch_aliases
class LingobarterApp(Flask):
config_class = LingobarterConfig
def make_config(self, instance_relative=False):
root_path = self.root_path
if instance_relative:
root_path = self.instance_path
return self.config_class(root_path, self.default_config)
def preprocess_request(self):
return dispatch_aliases() or super(LingobarterApp, self).preprocess_request()
def add_lingobarter_url_rule(self, rule, endpoint=None, view_func=None, **options):
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
if not endpoint.startswith('lingobarter.'):
endpoint = 'lingobarter.core.' + endpoint
self.add_url_rule(rule, endpoint, view_func, **options)
class LingobarterModule(Blueprint):
def __init__(self, name, *args, **kwargs):
name = "lingobarter.modules." + name
super(LingobarterModule, self).__init__(name, *args, **kwargs)
| true | true |
1c35b4d5930fa5ad8ea2fa1ce9f5dcc19f8380d7 | 3,523 | py | Python | MARS/test_single_stream.py | zzz2010/Contrib | d351d83da718145cef9f6c98598f7fedc027efe5 | [
"Apache-2.0"
] | 20 | 2020-03-13T13:40:32.000Z | 2022-03-10T07:31:48.000Z | MARS/test_single_stream.py | zzz2010/Contrib | d351d83da718145cef9f6c98598f7fedc027efe5 | [
"Apache-2.0"
] | 34 | 2020-02-20T11:04:58.000Z | 2022-03-12T00:54:26.000Z | MARS/test_single_stream.py | zzz2010/Contrib | d351d83da718145cef9f6c98598f7fedc027efe5 | [
"Apache-2.0"
] | 41 | 2020-02-14T09:34:39.000Z | 2022-03-10T07:31:42.000Z | #coding=utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import getpass
import os
import socket
import numpy as np
from PIL import Image, ImageFilter
import argparse
import time
import sys
import pdb
import math
from utils import *
from dataset.dataset import *
from dataset.preprocess_data import *
from models.model import generate_model
from opts import parse_opts
import paddle
import paddle.fluid as fluid
if __name__=="__main__":
opt = parse_opts()
print(opt)
opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
with fluid.dygraph.guard(place = fluid.CUDAPlace(0)):
print("Preprocessing validation data ...")
test_data = globals()['{}_test'.format(opt.dataset)](split = opt.split, train = 0, opt = opt)
test_dataloader = paddle.batch(test_data, batch_size=opt.batch_size, drop_last=False)
if opt.modality=='Flow': opt.input_channels = 2
else: opt.input_channels = 3
# Loading model and checkpoint
model,_ = generate_model(opt)
if opt.modality=='RGB' and opt.RGB_resume_path!='':
para_dict, _ = fluid.dygraph.load_dygraph(opt.RGB_resume_path)
model.set_dict(para_dict)
if opt.modality=='Flow' and opt.Flow_resume_path!='':
para_dict, _ = fluid.dygraph.load_dygraph(opt.Flow_resume_path)
model.set_dict(para_dict)
model.eval()
accuracies = AverageMeter()
clip_accuracies = AverageMeter()
#Path to store results
result_path = "{}/{}/".format(opt.result_path, opt.dataset)
if not os.path.exists(result_path):
os.makedirs(result_path)
for i, data in enumerate(test_dataloader()):
        # Input: video frames / optical flow
# pdb.set_trace()
clip = np.array([x[0] for x in data]).astype('float32')
        # Labels for the input video frames / optical flow
targets = np.array([x[1] for x in data]).astype('int')
clip = np.squeeze(clip)
if opt.modality == 'Flow':
inputs = np.zeros((int(clip.shape[1]/opt.sample_duration), 2, opt.sample_duration, opt.sample_size, opt.sample_size),dtype=np.float32)
else:
inputs = np.zeros((int(clip.shape[1]/opt.sample_duration), 3, opt.sample_duration, opt.sample_size, opt.sample_size),dtype=np.float32)
for k in range(inputs.shape[0]):
inputs[k,:,:,:,:] = clip[:,k*opt.sample_duration:(k+1)*opt.sample_duration,:,:]
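        # e.g. a squeezed clip of shape (3, 64, H, W) with sample_duration=16
        # yields inputs of shape (4, 3, 16, H, W): four 16-frame sub-clips.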
        # Separate the video frames and the optical flow
inputs = fluid.dygraph.base.to_variable(inputs)
targets = fluid.dygraph.base.to_variable(targets)
outputs= model(inputs)
preds = fluid.layers.reduce_mean(outputs, dim=0, keep_dim=True)
# pdb.set_trace()
acc = calculate_accuracy(preds, targets)
accuracies.update(acc[0], targets.shape[0])
print("Video accuracy = ", accuracies.avg) | 39.58427 | 150 | 0.644905 |
import getpass
import os
import socket
import numpy as np
from PIL import Image, ImageFilter
import argparse
import time
import sys
import pdb
import math
from utils import *
from dataset.dataset import *
from dataset.preprocess_data import *
from models.model import generate_model
from opts import parse_opts
import paddle
import paddle.fluid as fluid
if __name__=="__main__":
opt = parse_opts()
print(opt)
opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
with fluid.dygraph.guard(place = fluid.CUDAPlace(0)):
print("Preprocessing validation data ...")
test_data = globals()['{}_test'.format(opt.dataset)](split = opt.split, train = 0, opt = opt)
test_dataloader = paddle.batch(test_data, batch_size=opt.batch_size, drop_last=False)
if opt.modality=='Flow': opt.input_channels = 2
else: opt.input_channels = 3
model,_ = generate_model(opt)
if opt.modality=='RGB' and opt.RGB_resume_path!='':
para_dict, _ = fluid.dygraph.load_dygraph(opt.RGB_resume_path)
model.set_dict(para_dict)
if opt.modality=='Flow' and opt.Flow_resume_path!='':
para_dict, _ = fluid.dygraph.load_dygraph(opt.Flow_resume_path)
model.set_dict(para_dict)
model.eval()
accuracies = AverageMeter()
clip_accuracies = AverageMeter()
result_path = "{}/{}/".format(opt.result_path, opt.dataset)
if not os.path.exists(result_path):
os.makedirs(result_path)
for i, data in enumerate(test_dataloader()):
clip = np.array([x[0] for x in data]).astype('float32')
        targets = np.array([x[1] for x in data]).astype('int')
clip = np.squeeze(clip)
if opt.modality == 'Flow':
inputs = np.zeros((int(clip.shape[1]/opt.sample_duration), 2, opt.sample_duration, opt.sample_size, opt.sample_size),dtype=np.float32)
else:
inputs = np.zeros((int(clip.shape[1]/opt.sample_duration), 3, opt.sample_duration, opt.sample_size, opt.sample_size),dtype=np.float32)
for k in range(inputs.shape[0]):
inputs[k,:,:,:,:] = clip[:,k*opt.sample_duration:(k+1)*opt.sample_duration,:,:]
inputs = fluid.dygraph.base.to_variable(inputs)
targets = fluid.dygraph.base.to_variable(targets)
outputs= model(inputs)
preds = fluid.layers.reduce_mean(outputs, dim=0, keep_dim=True)
acc = calculate_accuracy(preds, targets)
accuracies.update(acc[0], targets.shape[0])
print("Video accuracy = ", accuracies.avg) | true | true |
1c35b4de14d7b520bf863c15dc70dc198786a1fb | 1,443 | py | Python | components/forms.py | alexdeathway/Gecom | 2a0fc87887d73d15eba183625dc8a429defe851f | [
"MIT"
] | 7 | 2021-11-15T06:28:05.000Z | 2022-02-22T11:36:00.000Z | components/forms.py | alexdeathway/Gecom | 2a0fc87887d73d15eba183625dc8a429defe851f | [
"MIT"
] | 3 | 2021-11-02T16:10:49.000Z | 2022-02-01T08:30:38.000Z | components/forms.py | alexdeathway/Gecom | 2a0fc87887d73d15eba183625dc8a429defe851f | [
"MIT"
] | null | null | null | from django import forms
from .models import ComponentsModel
from games.models import OrganisationModel
class ComponentCreationForm(forms.ModelForm):
def __init__(self,*args, **kwargs):
request=kwargs.pop("request")
vendor=OrganisationModel.objects.filter(owner=request.user)
super(ComponentCreationForm,self).__init__(*args,**kwargs)
self.fields["vendor"]=forms.ModelChoiceField(queryset=vendor)
class Meta:
model=ComponentsModel
labels={
"vendor": "Vendor or your organisation",
}
fields=[
"name",
"category",
"cover",
"price",
"description",
"vendor"
]
class ComponentUpdateForm(forms.ModelForm):
def __init__(self,*args, **kwargs):
request=kwargs.pop("request")
vendor=OrganisationModel.objects.filter(owner=request.user)
super(ComponentUpdateForm,self).__init__(*args,**kwargs)
self.fields["vendor"]=forms.ModelChoiceField(queryset=vendor)
class Meta:
model=ComponentsModel
labels={
"vendor": "Vendor or your organisation",
}
fields=[
"name",
"category",
"cover",
"price",
"description",
"vendor"
]
| 29.44898 | 73 | 0.543313 | from django import forms
from .models import ComponentsModel
from games.models import OrganisationModel
class ComponentCreationForm(forms.ModelForm):
def __init__(self,*args, **kwargs):
request=kwargs.pop("request")
vendor=OrganisationModel.objects.filter(owner=request.user)
super(ComponentCreationForm,self).__init__(*args,**kwargs)
self.fields["vendor"]=forms.ModelChoiceField(queryset=vendor)
class Meta:
model=ComponentsModel
labels={
"vendor": "Vendor or your organisation",
}
fields=[
"name",
"category",
"cover",
"price",
"description",
"vendor"
]
class ComponentUpdateForm(forms.ModelForm):
def __init__(self,*args, **kwargs):
request=kwargs.pop("request")
vendor=OrganisationModel.objects.filter(owner=request.user)
super(ComponentUpdateForm,self).__init__(*args,**kwargs)
self.fields["vendor"]=forms.ModelChoiceField(queryset=vendor)
class Meta:
model=ComponentsModel
labels={
"vendor": "Vendor or your organisation",
}
fields=[
"name",
"category",
"cover",
"price",
"description",
"vendor"
]
| true | true |
1c35b57c2f4cfea93ddbdf6894d8b6e1688954c3 | 2,276 | py | Python | brl_gym/scripts/maze/run.py | gilwoolee/brl_gym | 9c0784e9928f12d2ee0528c79a533202d3afb640 | [
"BSD-3-Clause"
] | 2 | 2020-08-07T05:50:44.000Z | 2022-03-03T08:46:10.000Z | brl_gym/scripts/maze/run.py | gilwoolee/brl_gym | 9c0784e9928f12d2ee0528c79a533202d3afb640 | [
"BSD-3-Clause"
] | null | null | null | brl_gym/scripts/maze/run.py | gilwoolee/brl_gym | 9c0784e9928f12d2ee0528c79a533202d3afb640 | [
"BSD-3-Clause"
] | null | null | null | import os
import glob
#os.system('source ~/venv/brl/bin/activate')
rootdir = "/home/gilwoo/models/maze/"
algos = [x[0] for x in os.walk(rootdir) if "checkpoints" in x[0]]
num_trials = 500
dry_run = False
algo_to_alg = {
# "single_expert_rbpo": ["bppo2_expert", "Maze-entropy-hidden-no-reward-v0"],
# "entropy_hidden_rbpo": ["bppo2_expert", "Maze-entropy-hidden-no-reward-v0"],
#"rbpo_stronger_expert": ["bppo2_expert", "Maze-no-entropy-v0"],
# "entropy_rbpo": ["bppo2_expert", "Maze-entropy-only-no-reward-v0"],
"bpo_noent": ["ppo2","Maze-no-entropy-v0", 0.0],
# "upmle": ["ppo2", "Maze-upmle-no-reward-v0"],
# "expert_no_residual": ["bpo_expert_no_residual", "Maze-no-entropy-v0"],
# "noentropy_rbpo": ["bppo2_expert", "Maze-no-entropy-v0"],
# "rbpo_hidden_belief_no_ent_reward": ["bppo2_expert", "Maze-entropy-hidden-no-reward-v0"],
# "rbpo-noent-alpha-1.0":["bppo2_expert", "Maze-no-entropy-v0", 1.0]
}
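# Each entry maps a model directory name to
# [baselines alg id, gym env id, residual weight alpha].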
for algo in algos:
algname = algo.split("/")[-2]
if algname not in algo_to_alg:
continue
print("--------------------")
alg, env, alpha = algo_to_alg[algname]
print(algo, alg, alpha)
checkpoints = glob.glob(os.path.join(algo, "*"))
checkpoints.sort()
last = int(checkpoints[-1].split("/")[-1])
outputdir = "/home/gilwoo/output/maze/"+algname
if not os.path.exists(outputdir):
print("Make ", outputdir)
os.makedirs(outputdir)
for i in [1] + list(range(100, last, 100)):
outputfile = "{}/{}.txt".format(outputdir, str(i).zfill(5))
if os.path.exists(outputfile):
continue
if alg.startswith("ppo2"):
cmd = "python -m brl_baselines.run --alg={} --env={} --num_timesteps=0 --play --load_path={}/{} --num_env=1 --num_trials={} --output={}/{}.txt".format(alg, env, algo, str(i).zfill(5), num_trials, outputdir, str(i).zfill(5))
else:
cmd = "python -m brl_baselines.run --alg={} --env={} --num_timesteps=0 --play --load_path={}/{} --num_env=1 --num_trials={} --output={}/{}.txt --residual_weight={}".format(alg, env, algo, str(i).zfill(5), num_trials, outputdir, str(i).zfill(5),alpha)
print(cmd)
if not dry_run:
os.system(cmd)
# import sys; sys.exit(0)
| 38.576271 | 263 | 0.612039 | import os
import glob
rootdir = "/home/gilwoo/models/maze/"
algos = [x[0] for x in os.walk(rootdir) if "checkpoints" in x[0]]
num_trials = 500
dry_run = False
algo_to_alg = {
"bpo_noent": ["ppo2","Maze-no-entropy-v0", 0.0],
}
for algo in algos:
algname = algo.split("/")[-2]
if algname not in algo_to_alg:
continue
print("--------------------")
alg, env, alpha = algo_to_alg[algname]
print(algo, alg, alpha)
checkpoints = glob.glob(os.path.join(algo, "*"))
checkpoints.sort()
last = int(checkpoints[-1].split("/")[-1])
outputdir = "/home/gilwoo/output/maze/"+algname
if not os.path.exists(outputdir):
print("Make ", outputdir)
os.makedirs(outputdir)
for i in [1] + list(range(100, last, 100)):
outputfile = "{}/{}.txt".format(outputdir, str(i).zfill(5))
if os.path.exists(outputfile):
continue
if alg.startswith("ppo2"):
cmd = "python -m brl_baselines.run --alg={} --env={} --num_timesteps=0 --play --load_path={}/{} --num_env=1 --num_trials={} --output={}/{}.txt".format(alg, env, algo, str(i).zfill(5), num_trials, outputdir, str(i).zfill(5))
else:
cmd = "python -m brl_baselines.run --alg={} --env={} --num_timesteps=0 --play --load_path={}/{} --num_env=1 --num_trials={} --output={}/{}.txt --residual_weight={}".format(alg, env, algo, str(i).zfill(5), num_trials, outputdir, str(i).zfill(5),alpha)
print(cmd)
if not dry_run:
os.system(cmd)
| true | true |
1c35b69db59c65edc0e17a2718112c7f160758e4 | 2,810 | py | Python | NewTests/test2LevelReconstruction.py | Yuval-H/iclr_17_compression | f9b04a6cb93e32d17c2f2548614690dee8840d78 | [
"MIT"
] | null | null | null | NewTests/test2LevelReconstruction.py | Yuval-H/iclr_17_compression | f9b04a6cb93e32d17c2f2548614690dee8840d78 | [
"MIT"
] | null | null | null | NewTests/test2LevelReconstruction.py | Yuval-H/iclr_17_compression | f9b04a6cb93e32d17c2f2548614690dee8840d78 | [
"MIT"
] | null | null | null |
import torch.nn.functional as F
import torch
from torchvision import transforms
import matplotlib.pyplot as plt
import os
from PIL import Image, ImageChops
import glob
import numpy as np
from model_new import *
from model import *
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Load the small images AE model
model_orig_weights = '/home/access/dev/iclr_17_compression/checkpoints_new/new loss - L2 before binarize/rec+hamm/iter_3.pth.tar'
#model = ImageCompressor_new()
model_orig = ImageCompressor_new(out_channel_N=256)
global_step_ignore = load_model(model_orig, model_orig_weights)
model_orig = model_orig.to(device)
model_orig.eval()
# Load the small images AE model
model_diff_weights = '/home/access/dev/iclr_17_compression/checkpoints/iter_117600.pth.tar'
#model_diff = ImageCompressor_new()
#model_diff = ImageCompressor_new(out_channel_N=32)
model_diff = ImageCompressor()
global_step_ignore = load_model(model_diff, model_diff_weights)
model_diff = model_diff.to(device)
model_diff.eval()
# Define transform for small(trained model) and original image size.
tsfm_original = transforms.Compose([transforms.Resize((384, 1248), interpolation=Image.BICUBIC)])
tsfm_original_tensor = transforms.Compose([transforms.Resize((384, 1248), interpolation=Image.BICUBIC), transforms.ToTensor()])
tsfm_tensor = transforms.Compose([transforms.ToTensor()])
#path = '/home/access/dev/data_sets/kitti/flow_2015/data_scene_flow/training/image_2'
path = '/home/access/dev/data_sets/kitti/data_stereo_flow_multiview/train_small_set_8/image_02'
files = os.listdir(path)
avg_psnr = 0
for i in range(len(files)):
file_name = os.path.join(path, files[i])
image = Image.open(file_name)#.convert('RGB')
# Get rec image from model_orig:
img_input = tsfm_original_tensor(image)[None, ...].to(device)
clipped_recon_image, z_cam1, _ = model_orig(img_input)
img_original_recon = torch.squeeze(clipped_recon_image).permute(1, 2, 0).cpu().detach().numpy()
# Get diff image from model_diff:
## Calc diff
img_original_np = np.array(tsfm_original(image))
diff = np.clip((127 + (img_original_np - img_original_recon * 255)), 0, 255).astype(np.uint8)
diff_pil = Image.fromarray(diff)
## send through model_diff
diff_input = tsfm_tensor(diff_pil)[None, ...].to(device)
clipped_recon_image, z_cam1, _ = model_diff(diff_input)
diff_recon = torch.squeeze(clipped_recon_image).permute(1, 2, 0).cpu().detach().numpy()
# Combine two image to get final output:
final_rec = (img_original_recon + diff_recon - 127/255)
mse = np.mean(np.square(final_rec - img_original_np/255))
rms = np.sqrt(mse)
psnr = -20 * np.log10(rms)
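    # PSNR for images scaled to [0, 1]: MAX_I = 1, so
    # PSNR = 20 * log10(MAX_I / rms) = -20 * log10(rms).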
avg_psnr += psnr
print(psnr)
avg_psnr = avg_psnr/len(files)
print('avg psnr = ', avg_psnr)
print('done') | 38.493151 | 129 | 0.753737 |
import torch.nn.functional as F
import torch
from torchvision import transforms
import matplotlib.pyplot as plt
import os
from PIL import Image, ImageChops
import glob
import numpy as np
from model_new import *
from model import *
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model_orig_weights = '/home/access/dev/iclr_17_compression/checkpoints_new/new loss - L2 before binarize/rec+hamm/iter_3.pth.tar'
model_orig = ImageCompressor_new(out_channel_N=256)
global_step_ignore = load_model(model_orig, model_orig_weights)
model_orig = model_orig.to(device)
model_orig.eval()
model_diff_weights = '/home/access/dev/iclr_17_compression/checkpoints/iter_117600.pth.tar'
model_diff = ImageCompressor()
global_step_ignore = load_model(model_diff, model_diff_weights)
model_diff = model_diff.to(device)
model_diff.eval()
tsfm_original = transforms.Compose([transforms.Resize((384, 1248), interpolation=Image.BICUBIC)])
tsfm_original_tensor = transforms.Compose([transforms.Resize((384, 1248), interpolation=Image.BICUBIC), transforms.ToTensor()])
tsfm_tensor = transforms.Compose([transforms.ToTensor()])
path = '/home/access/dev/data_sets/kitti/data_stereo_flow_multiview/train_small_set_8/image_02'
files = os.listdir(path)
avg_psnr = 0
for i in range(len(files)):
file_name = os.path.join(path, files[i])
image = Image.open(file_name)
img_input = tsfm_original_tensor(image)[None, ...].to(device)
clipped_recon_image, z_cam1, _ = model_orig(img_input)
img_original_recon = torch.squeeze(clipped_recon_image).permute(1, 2, 0).cpu().detach().numpy()
    img_original_np = np.array(tsfm_original(image))
diff = np.clip((127 + (img_original_np - img_original_recon * 255)), 0, 255).astype(np.uint8)
diff_pil = Image.fromarray(diff)
    diff_input = tsfm_tensor(diff_pil)[None, ...].to(device)
clipped_recon_image, z_cam1, _ = model_diff(diff_input)
diff_recon = torch.squeeze(clipped_recon_image).permute(1, 2, 0).cpu().detach().numpy()
final_rec = (img_original_recon + diff_recon - 127/255)
mse = np.mean(np.square(final_rec - img_original_np/255))
rms = np.sqrt(mse)
psnr = -20 * np.log10(rms)
avg_psnr += psnr
print(psnr)
avg_psnr = avg_psnr/len(files)
print('avg psnr = ', avg_psnr)
print('done') | true | true |
1c35b6f97058cc0c4330c60686dccc5d255a7f0c | 780 | py | Python | tests/wallet/test_taproot.py | zcomputerwiz/profit-blockchain | d6d4337ea7c418c66f05f22a263e94190452aed6 | [
"Apache-2.0"
] | 7 | 2022-03-15T01:33:35.000Z | 2022-03-26T21:29:45.000Z | tests/wallet/test_taproot.py | zcomputerwiz/profit-blockchain | d6d4337ea7c418c66f05f22a263e94190452aed6 | [
"Apache-2.0"
] | 3 | 2022-03-19T23:02:18.000Z | 2022-03-19T23:02:19.000Z | tests/wallet/test_taproot.py | zcomputerwiz/profit-blockchain | d6d4337ea7c418c66f05f22a263e94190452aed6 | [
"Apache-2.0"
] | null | null | null | from profit.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import (
DEFAULT_HIDDEN_PUZZLE,
calculate_synthetic_offset,
calculate_synthetic_public_key,
)
from tests.core.make_block_generator import int_to_public_key
class TestTaproot:
def test_1(self):
for main_secret_exponent in range(500, 600):
hidden_puzzle_hash = DEFAULT_HIDDEN_PUZZLE.get_tree_hash()
main_pubkey = int_to_public_key(main_secret_exponent)
offset = calculate_synthetic_offset(main_pubkey, hidden_puzzle_hash)
offset_pubkey = int_to_public_key(offset)
spk1 = main_pubkey + offset_pubkey
spk2 = calculate_synthetic_public_key(main_pubkey, hidden_puzzle_hash)
assert spk1 == spk2
return 0
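# Hedged note (not part of the original test): the invariant exercised above
# is synthetic_pubkey == main_pubkey + int_to_public_key(offset), with
# offset = calculate_synthetic_offset(main_pubkey, hidden_puzzle_hash).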
| 37.142857 | 82 | 0.733333 | from profit.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import (
DEFAULT_HIDDEN_PUZZLE,
calculate_synthetic_offset,
calculate_synthetic_public_key,
)
from tests.core.make_block_generator import int_to_public_key
class TestTaproot:
def test_1(self):
for main_secret_exponent in range(500, 600):
hidden_puzzle_hash = DEFAULT_HIDDEN_PUZZLE.get_tree_hash()
main_pubkey = int_to_public_key(main_secret_exponent)
offset = calculate_synthetic_offset(main_pubkey, hidden_puzzle_hash)
offset_pubkey = int_to_public_key(offset)
spk1 = main_pubkey + offset_pubkey
spk2 = calculate_synthetic_public_key(main_pubkey, hidden_puzzle_hash)
assert spk1 == spk2
return 0
| true | true |
1c35b7241c7dbc5dfd5192653c881704f6539a0f | 474 | py | Python | src/identity.py | Neotoxic-off/Obit | a1ecab8e1b49f3c65cdb0ab09d7b366712fb5c86 | [
"BSL-1.0"
] | 1 | 2021-12-31T15:46:45.000Z | 2021-12-31T15:46:45.000Z | src/identity.py | Neotoxic-off/Obit | a1ecab8e1b49f3c65cdb0ab09d7b366712fb5c86 | [
"BSL-1.0"
] | null | null | null | src/identity.py | Neotoxic-off/Obit | a1ecab8e1b49f3c65cdb0ab09d7b366712fb5c86 | [
"BSL-1.0"
] | null | null | null | from src.request import Request
class Identity:
def __init__(self):
self.request = Request()
def get(self, proxies):
result = None
print("[wait] checking identity")
result = self.request.get("https://ident.me", proxies)
if (result.status_code == 200):
print("[done] identity checked")
return (result.text)
print("[fail] something went wrong: {}".format(result.text))
return (result)
| 27.882353 | 68 | 0.592827 | from src.request import Request
class Identity:
def __init__(self):
self.request = Request()
def get(self, proxies):
result = None
print("[wait] checking identity")
result = self.request.get("https://ident.me", proxies)
if (result.status_code == 200):
print("[done] identity checked")
return (result.text)
print("[fail] something went wrong: {}".format(result.text))
return (result)
| true | true |
1c35b92ff10a96e3a19c0e13cad7b453da696748 | 6,910 | py | Python | recognize-from-microphone.py | ArpanBose11/Music_Recogniser_Omega | 584ca1e77436a54ac2589bb9be839ec392b8b2c2 | [
"MIT"
] | null | null | null | recognize-from-microphone.py | ArpanBose11/Music_Recogniser_Omega | 584ca1e77436a54ac2589bb9be839ec392b8b2c2 | [
"MIT"
] | null | null | null | recognize-from-microphone.py | ArpanBose11/Music_Recogniser_Omega | 584ca1e77436a54ac2589bb9be839ec392b8b2c2 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import argparse
import sys
from argparse import RawTextHelpFormatter
from itertools import zip_longest as izip_longest
import numpy as np
from termcolor import colored
import libs.fingerprint as fingerprint
from libs.config import get_config
from libs.db_sqlite import SqliteDatabase
from libs.reader_microphone import MicrophoneReader
from libs.visualiser_console import VisualiserConsole as visual_peak
from libs.visualiser_plot import VisualiserPlot as visual_plot
from contextlib import redirect_stdout
# from libs.db_mongo import MongoDatabase
def writeTofile(data, filename):
with open(filename, 'wb') as file:
file.write(data)
print("Stored blob data into: ", filename, "\n")
def align_matches(matches):
diff_counter = {}
largest = 0
largest_count = 0
song_id = -1
for tup in matches:
sid, diff = tup
if diff not in diff_counter:
diff_counter[diff] = {}
if sid not in diff_counter[diff]:
diff_counter[diff][sid] = 0
diff_counter[diff][sid] += 1
if diff_counter[diff][sid] > largest_count:
largest = diff
largest_count = diff_counter[diff][sid]
song_id = sid
songM = db.get_song_by_id(song_id)
#genreM= db.get_song_by_id(song_id)
#artistM=db.get_song_by_id(song_id)
nseconds = round(float(largest) / fingerprint.DEFAULT_FS *
fingerprint.DEFAULT_WINDOW_SIZE *
fingerprint.DEFAULT_OVERLAP_RATIO, 5)
return {
"SONG_ID": song_id,
"SONG_NAME": songM[1],
"CONFIDENCE": largest_count,
"OFFSET": int(largest),
"OFFSET_SECS": nseconds,
"GENRE": songM[3],
"ARTIST":songM[4],
"ART":songM[5],
"ALBUM": songM[6]
}
def grouper(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return (filter(None, values)
for values in izip_longest(fillvalue=fillvalue, *args))
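# e.g. list(map(list, grouper([1, 2, 3, 4, 5], 2))) -> [[1, 2], [3, 4], [5]];
# filter(None, ...) drops the None padding added by izip_longest.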
def find_matches(samples, Fs=fingerprint.DEFAULT_FS):
hashes = fingerprint.fingerprint(1,samples, Fs=Fs)
return return_matches(hashes)
def return_matches(hashes):
mapper = {}
for hash, offset in hashes:
mapper[hash.upper()] = offset
values = mapper.keys()
# https://www.sqlite.org/limits.html
# To prevent excessive memory allocations,
# the maximum value of a host parameter number is SQLITE_MAX_VARIABLE_NUMBER, which defaults to 999 for SQLites
for split_values in map(list, grouper(values, 999)):
# @todo move to db related files
query = """
SELECT upper(hash), song_fk, offset
FROM fingerprints
WHERE upper(hash) IN (%s)
"""
query = query % ', '.join('?' * len(split_values))
x = db.executeAll(query, split_values)
matches_found = len(x)
if matches_found > 0:
msg = ' ** found %d hash matches (step %d/%d)'
#print(colored(msg, 'green') % (
#matches_found,
#len(split_values),
#len(values)
#))
pass
else:
msg = ' ** not matches found (step %d/%d)'
#print(colored(msg, 'red') % (len(split_values), len(values)))
for hash_code, sid, offset in x:
# (sid, db_offset - song_sampled_offset)
if isinstance(offset, bytes):
# offset come from fingerprint.py and numpy extraction/processing
offset = np.frombuffer(offset, dtype=np.int)[0]
yield sid, offset - mapper[hash_code]
if __name__ == '__main__':
sys.stdout = open("out.txt", "w")
config = get_config()
db = SqliteDatabase()
seconds = 6
chunksize = 2 ** 12 # 4096
channels = 1 # int(config['channels']) # 1=mono, 2=stereo
record_forever = False
visualise_console = bool(config['mic.visualise_console'])
visualise_plot = bool(config['mic.visualise_plot'])
reader = MicrophoneReader(None)
reader.start_recording(seconds=seconds,
chunksize=chunksize,
channels=channels)
msg = ' * started recording..'
#print(colored(msg, attrs=['dark']))
while True:
bufferSize = int(reader.rate / reader.chunksize * seconds)
for i in range(0, bufferSize):
nums = reader.process_recording()
if visualise_console:
msg = colored(' %05d', attrs=['dark']) + colored(' %s', 'green')
#print(msg % visual_peak.calc(nums))
else:
msg = ' processing %d of %d..' % (i, bufferSize)
#print(colored(msg, attrs=['dark']))
if not record_forever:
break
if visualise_plot:
data = reader.get_recorded_data()[0]
visual_plot.show(data)
reader.stop_recording()
msg = ' * recording has been stopped'
#print(colored(msg, attrs=['dark']))
data = reader.get_recorded_data()
msg = ' * recorded %d samples'
#print(colored(msg, attrs=['dark']) % len(data[0]))
# reader.save_recorded('test.wav')
Fs = fingerprint.DEFAULT_FS
channel_amount = len(data)
result = set()
matches = []
for channeln, channel in enumerate(data):
# TODO: Remove prints or change them into optional logging.
msg = ' fingerprinting channel %d/%d'
#print(colored(msg, attrs=['dark']) % (channeln + 1, channel_amount))
matches.extend(find_matches(channel))
msg = ' finished channel %d/%d, got %d hashes'
#print(colored(msg, attrs=['dark']) % (channeln + 1,
# channel_amount, len(matches)))
total_matches_found = len(matches)
#print('')
if total_matches_found > 0:
msg = ' ** totally found %d hash matches'
#print(colored(msg, 'green') % total_matches_found)
song = align_matches(matches)
msg = ' \n=> Song: %s \n'
#msg += ' offset: %d (%d secs)\n'
#msg += ' confidence: %d\n'
msg += ' Genre: %s\n'
msg += ' Artist: %s\n'
msg += ' Album:%s\n'
msg += '%s\n'
print(colored(msg, 'green') % (song['SONG_NAME'],
#song['SONG_ID'],
#song['OFFSET'], song['OFFSET_SECS'],
#song['CONFIDENCE'],
song['GENRE'],
song['ARTIST'],
song['ALBUM'],
song['SONG_NAME'] + song['ARTIST']))
photo=song['ART']
photoPath = "example" + ".jpg"
writeTofile(photo, photoPath)
else:
msg = ' \n\nNo matches found\n\n\n '
print(colored(msg, 'red'))
sys.stdout.close()
| 28.089431 | 115 | 0.568596 |
import argparse
import sys
from argparse import RawTextHelpFormatter
from itertools import zip_longest as izip_longest
import numpy as np
from termcolor import colored
import libs.fingerprint as fingerprint
from libs.config import get_config
from libs.db_sqlite import SqliteDatabase
from libs.reader_microphone import MicrophoneReader
from libs.visualiser_console import VisualiserConsole as visual_peak
from libs.visualiser_plot import VisualiserPlot as visual_plot
from contextlib import redirect_stdout
def writeTofile(data, filename):
with open(filename, 'wb') as file:
file.write(data)
print("Stored blob data into: ", filename, "\n")
def align_matches(matches):
diff_counter = {}
largest = 0
largest_count = 0
song_id = -1
for tup in matches:
sid, diff = tup
if diff not in diff_counter:
diff_counter[diff] = {}
if sid not in diff_counter[diff]:
diff_counter[diff][sid] = 0
diff_counter[diff][sid] += 1
if diff_counter[diff][sid] > largest_count:
largest = diff
largest_count = diff_counter[diff][sid]
song_id = sid
songM = db.get_song_by_id(song_id)
nseconds = round(float(largest) / fingerprint.DEFAULT_FS *
fingerprint.DEFAULT_WINDOW_SIZE *
fingerprint.DEFAULT_OVERLAP_RATIO, 5)
return {
"SONG_ID": song_id,
"SONG_NAME": songM[1],
"CONFIDENCE": largest_count,
"OFFSET": int(largest),
"OFFSET_SECS": nseconds,
"GENRE": songM[3],
"ARTIST":songM[4],
"ART":songM[5],
"ALBUM": songM[6]
}
def grouper(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return (filter(None, values)
for values in izip_longest(fillvalue=fillvalue, *args))
def find_matches(samples, Fs=fingerprint.DEFAULT_FS):
hashes = fingerprint.fingerprint(1,samples, Fs=Fs)
return return_matches(hashes)
def return_matches(hashes):
mapper = {}
for hash, offset in hashes:
mapper[hash.upper()] = offset
values = mapper.keys()
for split_values in map(list, grouper(values, 999)):
query = """
SELECT upper(hash), song_fk, offset
FROM fingerprints
WHERE upper(hash) IN (%s)
"""
query = query % ', '.join('?' * len(split_values))
x = db.executeAll(query, split_values)
matches_found = len(x)
if matches_found > 0:
msg = ' ** found %d hash matches (step %d/%d)'
pass
else:
msg = ' ** not matches found (step %d/%d)'
for hash_code, sid, offset in x:
if isinstance(offset, bytes):
offset = np.frombuffer(offset, dtype=np.int)[0]
yield sid, offset - mapper[hash_code]
if __name__ == '__main__':
sys.stdout = open("out.txt", "w")
config = get_config()
db = SqliteDatabase()
seconds = 6
chunksize = 2 ** 12
channels = 1 er = False
visualise_console = bool(config['mic.visualise_console'])
visualise_plot = bool(config['mic.visualise_plot'])
reader = MicrophoneReader(None)
reader.start_recording(seconds=seconds,
chunksize=chunksize,
channels=channels)
msg = ' * started recording..'
while True:
bufferSize = int(reader.rate / reader.chunksize * seconds)
for i in range(0, bufferSize):
nums = reader.process_recording()
if visualise_console:
msg = colored(' %05d', attrs=['dark']) + colored(' %s', 'green')
else:
msg = ' processing %d of %d..' % (i, bufferSize)
if not record_forever:
break
if visualise_plot:
data = reader.get_recorded_data()[0]
visual_plot.show(data)
reader.stop_recording()
msg = ' * recording has been stopped'
data = reader.get_recorded_data()
msg = ' * recorded %d samples'
Fs = fingerprint.DEFAULT_FS
channel_amount = len(data)
result = set()
matches = []
for channeln, channel in enumerate(data):
msg = ' fingerprinting channel %d/%d'
matches.extend(find_matches(channel))
msg = ' finished channel %d/%d, got %d hashes'
total_matches_found = len(matches)
if total_matches_found > 0:
msg = ' ** totally found %d hash matches'
song = align_matches(matches)
msg = ' \n=> Song: %s \n'
msg += ' Genre: %s\n'
msg += ' Artist: %s\n'
msg += ' Album:%s\n'
msg += '%s\n'
print(colored(msg, 'green') % (song['SONG_NAME'],
song['GENRE'],
song['ARTIST'],
song['ALBUM'],
song['SONG_NAME'] + song['ARTIST']))
photo=song['ART']
photoPath = "example" + ".jpg"
writeTofile(photo, photoPath)
else:
msg = ' \n\nNo matches found\n\n\n '
print(colored(msg, 'red'))
sys.stdout.close()
| true | true |
1c35ba0c374f56cfcfb07200f010c3f7ffe0a64f | 3,736 | py | Python | src/ralph/ui/reports.py | quamilek/ralph | bf7231ea096924332b874718b33cd1f43f9c783b | [
"Apache-2.0"
] | null | null | null | src/ralph/ui/reports.py | quamilek/ralph | bf7231ea096924332b874718b33cd1f43f9c783b | [
"Apache-2.0"
] | null | null | null | src/ralph/ui/reports.py | quamilek/ralph | bf7231ea096924332b874718b33cd1f43f9c783b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from django.db.models.sql.aggregates import Aggregate
from ralph.discovery.models import HistoryCost, DeviceType
class SpanSum(Aggregate):
sql_function = "SUM"
sql_template = ("%(function)s(GREATEST(0, "
"DATEDIFF(LEAST(end, DATE('%(end)s')),"
"GREATEST(start, DATE('%(start)s')))) * %(field)s)")
default_alias = 'spansum'
def __init__(self, lookup, **extra):
self.lookup = lookup
self.extra = extra
def add_to_query(self, query, alias, col, source, is_summary):
super(SpanSum, self).__init__(col, source, is_summary, **self.extra)
query.aggregate_select[alias] = self
class SpanCount(Aggregate):
sql_function = "SUM"
sql_template = ("%(function)s(GREATEST(0, "
"DATEDIFF(LEAST(end, DATE('%(end)s')),"
"GREATEST(start, DATE('%(start)s')))))")
default_alias = 'spansum'
def __init__(self, **extra):
self.lookup = 'id'
self.extra = extra
def add_to_query(self, query, alias, col, source, is_summary):
super(SpanCount, self).__init__(col, source, is_summary, **self.extra)
query.aggregate_select[alias] = self
def get_total_cost(query, start, end):
"""
Calculate a total cost of the HistoryCost query in the specified time span.
"""
return query.aggregate(
SpanSum(
'daily_cost',
start=start.strftime('%Y-%m-%d'),
end=end.strftime('%Y-%m-%d'),
),
)['spansum']
def get_total_count(query, start, end):
"""
Count the devices in the given HistoryCost query in the specified time span.
The devices that are not in the query for the whole time are counted as a
fraction.
Additionally, the function returns the count of devices at the current date
time span, and a query with all the devices from the query.
"""
days = (end - start).days or 1
devices = HistoryCost.filter_span(start, end, query).values_list('device')
today = datetime.date.today()
count_now = query.filter(
end__gte=today
).values_list(
'device'
).distinct().count()
count = float(query.aggregate(
SpanCount(
start=start.strftime('%Y-%m-%d'),
end=end.strftime('%Y-%m-%d'),
),
)['spansum'] or 0) / days
return count, count_now, devices
def get_total_cores(query, start, end):
"""
Calculate the number of cores in the given HistoryCost query. Devices that
are not in the query for the whole time span are counted as a fraction.
Only the physical servers are included.
"""
days = (end - start).days or 1
query = query.exclude(device__model__type=DeviceType.virtual_server.id)
return float(query.aggregate(
SpanSum(
'cores',
start=start.strftime('%Y-%m-%d'),
end=end.strftime('%Y-%m-%d'),
),
)['spansum'] or 0) / days
def get_total_virtual_cores(query, start, end):
"""
Calculate the number of cores in the given HistoryCost query. Devices that
are not in the query for the whole time span are counted as a fraction.
Only the virtual servers are included.
"""
days = (end - start).days or 1
query = query.filter(device__model__type=DeviceType.virtual_server.id)
return float(query.aggregate(
SpanSum(
'cores',
start=start.strftime('%Y-%m-%d'),
end=end.strftime('%Y-%m-%d'),
),
)['spansum'] or 0)/ days
| 32.206897 | 80 | 0.625 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from django.db.models.sql.aggregates import Aggregate
from ralph.discovery.models import HistoryCost, DeviceType
class SpanSum(Aggregate):
sql_function = "SUM"
sql_template = ("%(function)s(GREATEST(0, "
"DATEDIFF(LEAST(end, DATE('%(end)s')),"
"GREATEST(start, DATE('%(start)s')))) * %(field)s)")
default_alias = 'spansum'
def __init__(self, lookup, **extra):
self.lookup = lookup
self.extra = extra
def add_to_query(self, query, alias, col, source, is_summary):
super(SpanSum, self).__init__(col, source, is_summary, **self.extra)
query.aggregate_select[alias] = self
class SpanCount(Aggregate):
sql_function = "SUM"
sql_template = ("%(function)s(GREATEST(0, "
"DATEDIFF(LEAST(end, DATE('%(end)s')),"
"GREATEST(start, DATE('%(start)s')))))")
default_alias = 'spansum'
def __init__(self, **extra):
self.lookup = 'id'
self.extra = extra
def add_to_query(self, query, alias, col, source, is_summary):
super(SpanCount, self).__init__(col, source, is_summary, **self.extra)
query.aggregate_select[alias] = self
def get_total_cost(query, start, end):
return query.aggregate(
SpanSum(
'daily_cost',
start=start.strftime('%Y-%m-%d'),
end=end.strftime('%Y-%m-%d'),
),
)['spansum']
def get_total_count(query, start, end):
days = (end - start).days or 1
devices = HistoryCost.filter_span(start, end, query).values_list('device')
today = datetime.date.today()
count_now = query.filter(
end__gte=today
).values_list(
'device'
).distinct().count()
count = float(query.aggregate(
SpanCount(
start=start.strftime('%Y-%m-%d'),
end=end.strftime('%Y-%m-%d'),
),
)['spansum'] or 0) / days
return count, count_now, devices
def get_total_cores(query, start, end):
days = (end - start).days or 1
query = query.exclude(device__model__type=DeviceType.virtual_server.id)
return float(query.aggregate(
SpanSum(
'cores',
start=start.strftime('%Y-%m-%d'),
end=end.strftime('%Y-%m-%d'),
),
)['spansum'] or 0) / days
def get_total_virtual_cores(query, start, end):
days = (end - start).days or 1
query = query.filter(device__model__type=DeviceType.virtual_server.id)
return float(query.aggregate(
SpanSum(
'cores',
start=start.strftime('%Y-%m-%d'),
end=end.strftime('%Y-%m-%d'),
),
)['spansum'] or 0)/ days
| true | true |
1c35bae7b1f6110d35946c875695eb3d2011b0e3 | 4,496 | py | Python | bootcamp/articles/views.py | suhailvs/bootcamp | 23295a99085a843566367b73c134a83eb520c24d | [
"MIT"
] | null | null | null | bootcamp/articles/views.py | suhailvs/bootcamp | 23295a99085a843566367b73c134a83eb520c24d | [
"MIT"
] | null | null | null | bootcamp/articles/views.py | suhailvs/bootcamp | 23295a99085a843566367b73c134a83eb520c24d | [
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponseForbidden, HttpResponseBadRequest, HttpResponse
from bootcamp.articles.models import Article, Tag, ArticleComment
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from bootcamp.articles.forms import ArticleForm
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from bootcamp.decorators import ajax_required
import markdown
from django.template.loader import render_to_string
def _articles(request, articles):
paginator = Paginator(articles, 10)
page = request.GET.get('page')
try:
articles = paginator.page(page)
except PageNotAnInteger:
articles = paginator.page(1)
except EmptyPage:
articles = paginator.page(paginator.num_pages)
popular_tags = Tag.get_popular_tags()
return render(request, 'articles/articles.html', {
'articles': articles,
'popular_tags': popular_tags
})
@login_required
def articles(request):
all_articles = Article.get_published()
return _articles(request, all_articles)
@login_required
def article(request, slug):
article = get_object_or_404(Article, slug=slug, status=Article.PUBLISHED)
return render(request, 'articles/article.html', {'article': article})
@login_required
def tag(request, tag_name):
tags = Tag.objects.filter(tag=tag_name)
articles = []
for tag in tags:
if tag.article.status == Article.PUBLISHED:
articles.append(tag.article)
return _articles(request, articles)
@login_required
def write(request):
if request.method == 'POST':
form = ArticleForm(request.POST)
if form.is_valid():
article = Article()
article.create_user = request.user
article.title = form.cleaned_data.get('title')
article.content = form.cleaned_data.get('content')
status = form.cleaned_data.get('status')
if status in [Article.PUBLISHED, Article.DRAFT]:
article.status = form.cleaned_data.get('status')
article.save()
tags = form.cleaned_data.get('tags')
article.create_tags(tags)
return redirect('/articles/')
else:
form = ArticleForm()
return render(request, 'articles/write.html', {'form': form})
@login_required
def drafts(request):
drafts = Article.objects.filter(create_user=request.user, status=Article.DRAFT)
return render(request, 'articles/drafts.html', {'drafts': drafts})
@login_required
def edit(request, id):
tags = ''
if id:
article = get_object_or_404(Article, pk=id)
for tag in article.get_tags():
tags = u'{0} {1}'.format(tags, tag.tag)
tags = tags.strip()
else:
article = Article(create_user=request.user)
if request.POST:
form = ArticleForm(request.POST, instance=article)
if form.is_valid():
form.save()
return redirect('/articles/')
else:
form = ArticleForm(instance=article, initial={'tags': tags})
return render(request, 'articles/edit.html', {'form': form})
@login_required
@ajax_required
def preview(request):
try:
if request.method == 'POST':
content = request.POST.get('content')
html = 'Nothing to display :('
if len(content.strip()) > 0:
html = markdown.markdown(content, safe_mode='escape')
return HttpResponse(html)
else:
return HttpResponseBadRequest()
except Exception:
return HttpResponseBadRequest()
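# Illustrative sketch (added, not part of the original view): what the markdown
# call above produces. safe_mode='escape' HTML-escapes raw markup; note that
# safe_mode is deprecated in Python-Markdown 3.x, so this assumes the 2.x API
# used here.
#   >>> import markdown
#   >>> markdown.markdown('**hi** <b>raw</b>', safe_mode='escape')
#   '<p><strong>hi</strong> &lt;b&gt;raw&lt;/b&gt;</p>'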
@login_required
@ajax_required
def comment(request):
try:
if request.method == 'POST':
article_id = request.POST.get('article')
article = Article.objects.get(pk=article_id)
comment = request.POST.get('comment')
comment = comment.strip()
if len(comment) > 0:
article_comment = ArticleComment(user=request.user, article=article, comment=comment)
article_comment.save()
html = u''
for comment in article.get_comments():
html = u'{0}{1}'.format(html, render_to_string('articles/partial_article_comment.html', {'comment': comment}))
return HttpResponse(html)
else:
return HttpResponseBadRequest()
except Exception:
        return HttpResponseBadRequest()
 | 35.68254 | 126 | 0.657028 |
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponseForbidden, HttpResponseBadRequest, HttpResponse
from bootcamp.articles.models import Article, Tag, ArticleComment
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from bootcamp.articles.forms import ArticleForm
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from bootcamp.decorators import ajax_required
import markdown
from django.template.loader import render_to_string
def _articles(request, articles):
paginator = Paginator(articles, 10)
page = request.GET.get('page')
try:
articles = paginator.page(page)
except PageNotAnInteger:
articles = paginator.page(1)
except EmptyPage:
articles = paginator.page(paginator.num_pages)
popular_tags = Tag.get_popular_tags()
return render(request, 'articles/articles.html', {
'articles': articles,
'popular_tags': popular_tags
})
@login_required
def articles(request):
all_articles = Article.get_published()
return _articles(request, all_articles)
@login_required
def article(request, slug):
article = get_object_or_404(Article, slug=slug, status=Article.PUBLISHED)
return render(request, 'articles/article.html', {'article': article})
@login_required
def tag(request, tag_name):
tags = Tag.objects.filter(tag=tag_name)
articles = []
for tag in tags:
if tag.article.status == Article.PUBLISHED:
articles.append(tag.article)
return _articles(request, articles)
@login_required
def write(request):
if request.method == 'POST':
form = ArticleForm(request.POST)
if form.is_valid():
article = Article()
article.create_user = request.user
article.title = form.cleaned_data.get('title')
article.content = form.cleaned_data.get('content')
status = form.cleaned_data.get('status')
if status in [Article.PUBLISHED, Article.DRAFT]:
article.status = form.cleaned_data.get('status')
article.save()
tags = form.cleaned_data.get('tags')
article.create_tags(tags)
return redirect('/articles/')
else:
form = ArticleForm()
return render(request, 'articles/write.html', {'form': form})
@login_required
def drafts(request):
drafts = Article.objects.filter(create_user=request.user, status=Article.DRAFT)
return render(request, 'articles/drafts.html', {'drafts': drafts})
@login_required
def edit(request, id):
tags = ''
if id:
article = get_object_or_404(Article, pk=id)
for tag in article.get_tags():
tags = u'{0} {1}'.format(tags, tag.tag)
tags = tags.strip()
else:
article = Article(create_user=request.user)
if request.POST:
form = ArticleForm(request.POST, instance=article)
if form.is_valid():
form.save()
return redirect('/articles/')
else:
form = ArticleForm(instance=article, initial={'tags': tags})
return render(request, 'articles/edit.html', {'form': form})
@login_required
@ajax_required
def preview(request):
try:
if request.method == 'POST':
content = request.POST.get('content')
html = 'Nothing to display :('
if len(content.strip()) > 0:
html = markdown.markdown(content, safe_mode='escape')
return HttpResponse(html)
else:
return HttpResponseBadRequest()
except Exception:
return HttpResponseBadRequest()
@login_required
@ajax_required
def comment(request):
try:
if request.method == 'POST':
article_id = request.POST.get('article')
article = Article.objects.get(pk=article_id)
comment = request.POST.get('comment')
comment = comment.strip()
if len(comment) > 0:
article_comment = ArticleComment(user=request.user, article=article, comment=comment)
article_comment.save()
html = u''
for comment in article.get_comments():
html = u'{0}{1}'.format(html, render_to_string('articles/partial_article_comment.html', {'comment': comment}))
return HttpResponse(html)
else:
return HttpResponseBadRequest()
except Exception:
        return HttpResponseBadRequest()
 | true | true |
1c35bcf3fe4ea9bab6b8ad290a59a43bd504079a | 11,508 | py | Python | BookReviewsSentimentAnalyzer.py | hoossainalik/goodreads-reviewer | b4f47856b5c0e88f9bd5bc55b91f2cba8909ef27 | [
"MIT"
] | null | null | null | BookReviewsSentimentAnalyzer.py | hoossainalik/goodreads-reviewer | b4f47856b5c0e88f9bd5bc55b91f2cba8909ef27 | [
"MIT"
] | null | null | null | BookReviewsSentimentAnalyzer.py | hoossainalik/goodreads-reviewer | b4f47856b5c0e88f9bd5bc55b91f2cba8909ef27 | [
"MIT"
] | null | null | null | """-------------------------------------------
Package Name: BookReviewsSentimentAnalyzer
Author: Hussain Ali Khan
Version: 1.0.1
Last Modified: 12/02/2018
-------------------------------------------
"""
import sys
from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QMessageBox, QDesktopWidget
from PyQt5.uic import loadUi
from PyQt5.QtCore import pyqtSlot, QTimer
import goodreads_api_client as gr
from PyQt5 import QtWidgets, QtGui
import requests
from requests import HTTPError
from bs4 import BeautifulSoup
import re
import pandas as pd
class BookProfiler(QMainWindow):
def __init__(self):
super(BookProfiler, self).__init__()
loadUi('book.ui', self)
qt_rectangle = self.frameGeometry()
center_point = QDesktopWidget().availableGeometry().center()
qt_rectangle.moveCenter(center_point)
self.move(qt_rectangle.topLeft())
self.search_btn.clicked.connect(self.search_book)
self.export_to_csv_btn.clicked.connect(self.export)
self.search_txt.setText("")
self.client = gr.Client(developer_key='NqaQK91zheH4ofJYuTmpA')
self.search_tbl.resizeRowsToContents()
self.search_tbl.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.label_default_style = 'color: red; font-size: 16px; background-color: none; text-align: justify;'
self.book_data = {}
self.book_review_data = {}
self.clear_fields()
@pyqtSlot()
def search_book(self):
self.clear_fields()
book_isbn = self.search_txt.text()
if book_isbn != "":
if (len(book_isbn) == 10 or len(book_isbn) == 13) and book_isbn.isnumeric():
try:
book = self.get_book(book_isbn)
keys_wanted = ['id', 'title', 'isbn', 'isbn13', 'num_pages', 'authors', 'format',
'edition_information',
'publisher', 'publication_day', 'publication_month', 'publication_year',
'description']
reduced_book = {k: v for k, v in book.items() if k in keys_wanted}
book_id = ""
if reduced_book["id"] is not None:
book_id = reduced_book["id"]
authors = "N/A"
if len(reduced_book["authors"]["author"]) > 0:
try:
authors = reduced_book["authors"]["author"]["name"]
except TypeError:
author_names = []
for author in reduced_book["authors"]["author"]:
if author is not None:
author_names.append(author["name"])
authors = ', '.join(author_names)
date_published = "N/A"
if reduced_book["publication_day"] is not None:
if reduced_book["publication_month"] is not None:
if reduced_book["publication_year"] is not None:
date_published = reduced_book["publication_day"] + "/" + reduced_book[
"publication_month"] + "/" + \
reduced_book["publication_year"]
elif reduced_book["publication_month"] is not None:
if reduced_book["publication_year"] is not None:
date_published = reduced_book["publication_month"] + "/" + reduced_book["publication_year"]
isbn = "N/A"
if reduced_book["isbn"] is not None:
isbn = reduced_book["isbn"]
reviews = self.get_reviews(isbn)
self.book_review_data = reviews
isbn13 = "N/A"
if reduced_book["isbn13"] is not None:
isbn13 = reduced_book["isbn13"]
edition = "N/A"
if reduced_book["edition_information"] is not None:
edition = reduced_book["edition_information"]
book_format = "N/A"
if reduced_book["format"] is not None:
book_format = reduced_book["format"]
publisher = "N/A"
if reduced_book["publisher"] is not None:
publisher = reduced_book["publisher"]
pages = "N/A"
if reduced_book["num_pages"] is not None:
pages = reduced_book["num_pages"]
title = "N/A"
if reduced_book["title"] is not None:
title = reduced_book["title"]
description = "N/A"
if reduced_book["description"] is not None:
description = reduced_book["description"]
book_info = {
"isbn": isbn,
"isbn13": isbn13,
"title": title,
"authors": authors,
"pages": pages,
"date_published": date_published,
"edition": edition,
"format": book_format,
"publisher": publisher,
"description": description
}
self.book_data = book_info
self.show_information(book_info, reviews)
except HTTPError:
print("ISBN isn't Valid")
self.show_message("ISBN Not Found On Goodreads.com", "Error! ISBN Not Found!!")
else:
self.show_message("Please Enter A Valid ISBN Number", "Error! Invalid ISBN Entered!!")
else:
self.show_message("Please Enter ISBN Number To Search", "Error! Empty ISBN")
def get_book(self, isbn):
book = self.client.Book.show_by_isbn(str(isbn))
return book
def clear_fields(self):
self.book_isbn.setText("")
self.book_isbn13.setText("")
self.book_title.setText("")
self.book_authors.setText("")
self.book_pages.setText("")
self.book_date_published.setText("")
self.book_edition.setText("")
self.book_format.setText("")
self.book_publisher.setText("")
self.book_description.setText("")
self.search_tbl.setRowCount(0)
def get_reviews(self, isbn):
key = "NqaQK91zheH4ofJYuTmpA"
endpoint = "https://www.goodreads.com/api/reviews_widget_iframe?did=" + key +\
"&format=html&isbn=" + isbn + \
"&links=660&review_back=fff&stars=000&text=000"
r = requests.get(url=endpoint)
soup = BeautifulSoup(r.content, "html.parser")
reviews = soup.find_all('div', attrs={"itemprop": "reviews"})
review_by = []
review_rating = []
review_text = []
for review in reviews:
reviewer = review.find("span", attrs={"class": "gr_review_by"})
if reviewer is not None:
reviewer = reviewer.a
if reviewer is not None:
review_by.append(reviewer.text)
else:
review_by.append("N/A")
rating = review.find("span", attrs={"class": "gr_rating"})
if rating is not None:
review_rating.append(self.get_rating(rating.text))
else:
review_rating.append("N/A")
rev = review.find("div", attrs={"class": "gr_review_text"})
if rev is not None:
review_text.append(self.clean_text(rev.text))
else:
review_text.append("N/A")
revs = {"reviewer": review_by, "rating": review_rating, "review": review_text}
return revs
def show_information(self, book_info, reviews):
if reviews is not None:
reviewers = reviews["reviewer"]
ratings = reviews["rating"]
reviews_text = reviews["review"]
for rev in range(len(reviewers)):
pos = self.search_tbl.rowCount()
self.search_tbl.insertRow(pos)
self.search_tbl.setItem(pos, 0, QtWidgets.QTableWidgetItem(reviewers[rev]))
self.search_tbl.setItem(pos, 1, QtWidgets.QTableWidgetItem(str(ratings[rev])+"/5"))
self.search_tbl.setItem(pos, 2, QtWidgets.QTableWidgetItem(reviews_text[rev]))
self.search_tbl.resizeColumnsToContents()
self.book_isbn.setText(book_info["isbn"])
self.book_isbn.setStyleSheet(self.label_default_style)
self.book_isbn13.setText(book_info["isbn13"])
self.book_isbn13.setStyleSheet(self.label_default_style)
self.book_title.setText(book_info["title"])
self.book_title.setStyleSheet(self.label_default_style)
self.book_authors.setText(book_info["authors"])
self.book_authors.setStyleSheet(self.label_default_style)
self.book_pages.setText(book_info["pages"])
self.book_pages.setStyleSheet(self.label_default_style)
self.book_date_published.setText(book_info["date_published"])
self.book_date_published.setStyleSheet(self.label_default_style)
self.book_edition.setText(book_info["edition"])
self.book_edition.setStyleSheet(self.label_default_style)
self.book_format.setText(book_info["format"])
self.book_format.setStyleSheet(self.label_default_style)
self.book_publisher.setText(book_info["publisher"])
self.book_publisher.setStyleSheet(self.label_default_style)
self.book_description.setText(book_info["description"])
self.book_description.setStyleSheet(self.label_default_style)
def show_message(self, message, title):
choice = QMessageBox.question(self, title, message, QMessageBox.Ok)
if choice == QMessageBox.Ok:
print("OK")
else:
pass
def clean_text(self, review):
review = review.replace("\n", "")
review = review.replace("...", " ")
review = review.replace("more", " ")
        review = re.sub(r'\s+', ' ', review).strip()
return review
def get_rating(self, stars):
rating_scale = {"★★★★★": 5, "★★★★☆": 4, "★★★☆☆": 3, "★★☆☆☆": 2, "★☆☆☆☆": 1}
return rating_scale[stars]
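    # Illustrative sketch (added): expected behaviour of the two helpers above.
    #   get_rating("★★★★☆")                 -> 4
    #   clean_text("A good read. ...more")  -> "A good read."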
@pyqtSlot()
def export(self):
self.export_as_csv()
def export_as_csv(self):
book_df = pd.DataFrame(self.book_data, index=[0])
book_df.to_csv("Books/"+self.book_data["isbn"]+"_details.csv")
review_df = pd.DataFrame(self.book_review_data)
review_df.to_csv("Reviews/"+self.book_data["isbn"]+"_reviews.csv")
self.show_message("Exported Book And Review Details To CSV", "Data Exported!!")
if __name__ == "__main__":
app = QApplication(sys.argv)
window = BookProfiler()
window.show()
sys.exit(app.exec_())
| 39.010169 | 120 | 0.539277 |
import sys
from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QMessageBox, QDesktopWidget
from PyQt5.uic import loadUi
from PyQt5.QtCore import pyqtSlot, QTimer
import goodreads_api_client as gr
from PyQt5 import QtWidgets, QtGui
import requests
from requests import HTTPError
from bs4 import BeautifulSoup
import re
import pandas as pd
class BookProfiler(QMainWindow):
def __init__(self):
super(BookProfiler, self).__init__()
loadUi('book.ui', self)
qt_rectangle = self.frameGeometry()
center_point = QDesktopWidget().availableGeometry().center()
qt_rectangle.moveCenter(center_point)
self.move(qt_rectangle.topLeft())
self.search_btn.clicked.connect(self.search_book)
self.export_to_csv_btn.clicked.connect(self.export)
self.search_txt.setText("")
self.client = gr.Client(developer_key='NqaQK91zheH4ofJYuTmpA')
self.search_tbl.resizeRowsToContents()
self.search_tbl.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.label_default_style = 'color: red; font-size: 16px; background-color: none; text-align: justify;'
self.book_data = {}
self.book_review_data = {}
self.clear_fields()
@pyqtSlot()
def search_book(self):
self.clear_fields()
book_isbn = self.search_txt.text()
if book_isbn != "":
if (len(book_isbn) == 10 or len(book_isbn) == 13) and book_isbn.isnumeric():
try:
book = self.get_book(book_isbn)
keys_wanted = ['id', 'title', 'isbn', 'isbn13', 'num_pages', 'authors', 'format',
'edition_information',
'publisher', 'publication_day', 'publication_month', 'publication_year',
'description']
reduced_book = {k: v for k, v in book.items() if k in keys_wanted}
book_id = ""
if reduced_book["id"] is not None:
book_id = reduced_book["id"]
authors = "N/A"
if len(reduced_book["authors"]["author"]) > 0:
try:
authors = reduced_book["authors"]["author"]["name"]
except TypeError:
author_names = []
for author in reduced_book["authors"]["author"]:
if author is not None:
author_names.append(author["name"])
authors = ', '.join(author_names)
date_published = "N/A"
if reduced_book["publication_day"] is not None:
if reduced_book["publication_month"] is not None:
if reduced_book["publication_year"] is not None:
date_published = reduced_book["publication_day"] + "/" + reduced_book[
"publication_month"] + "/" + \
reduced_book["publication_year"]
elif reduced_book["publication_month"] is not None:
if reduced_book["publication_year"] is not None:
date_published = reduced_book["publication_month"] + "/" + reduced_book["publication_year"]
isbn = "N/A"
if reduced_book["isbn"] is not None:
isbn = reduced_book["isbn"]
reviews = self.get_reviews(isbn)
self.book_review_data = reviews
isbn13 = "N/A"
if reduced_book["isbn13"] is not None:
isbn13 = reduced_book["isbn13"]
edition = "N/A"
if reduced_book["edition_information"] is not None:
edition = reduced_book["edition_information"]
book_format = "N/A"
if reduced_book["format"] is not None:
book_format = reduced_book["format"]
publisher = "N/A"
if reduced_book["publisher"] is not None:
publisher = reduced_book["publisher"]
pages = "N/A"
if reduced_book["num_pages"] is not None:
pages = reduced_book["num_pages"]
title = "N/A"
if reduced_book["title"] is not None:
title = reduced_book["title"]
description = "N/A"
if reduced_book["description"] is not None:
description = reduced_book["description"]
book_info = {
"isbn": isbn,
"isbn13": isbn13,
"title": title,
"authors": authors,
"pages": pages,
"date_published": date_published,
"edition": edition,
"format": book_format,
"publisher": publisher,
"description": description
}
self.book_data = book_info
self.show_information(book_info, reviews)
except HTTPError:
print("ISBN isn't Valid")
self.show_message("ISBN Not Found On Goodreads.com", "Error! ISBN Not Found!!")
else:
self.show_message("Please Enter A Valid ISBN Number", "Error! Invalid ISBN Entered!!")
else:
self.show_message("Please Enter ISBN Number To Search", "Error! Empty ISBN")
def get_book(self, isbn):
book = self.client.Book.show_by_isbn(str(isbn))
return book
def clear_fields(self):
self.book_isbn.setText("")
self.book_isbn13.setText("")
self.book_title.setText("")
self.book_authors.setText("")
self.book_pages.setText("")
self.book_date_published.setText("")
self.book_edition.setText("")
self.book_format.setText("")
self.book_publisher.setText("")
self.book_description.setText("")
self.search_tbl.setRowCount(0)
def get_reviews(self, isbn):
key = "NqaQK91zheH4ofJYuTmpA"
endpoint = "https://www.goodreads.com/api/reviews_widget_iframe?did=" + key +\
"&format=html&isbn=" + isbn + \
"&links=660&review_back=fff&stars=000&text=000"
r = requests.get(url=endpoint)
soup = BeautifulSoup(r.content, "html.parser")
reviews = soup.find_all('div', attrs={"itemprop": "reviews"})
review_by = []
review_rating = []
review_text = []
for review in reviews:
reviewer = review.find("span", attrs={"class": "gr_review_by"})
if reviewer is not None:
reviewer = reviewer.a
if reviewer is not None:
review_by.append(reviewer.text)
else:
review_by.append("N/A")
rating = review.find("span", attrs={"class": "gr_rating"})
if rating is not None:
review_rating.append(self.get_rating(rating.text))
else:
review_rating.append("N/A")
rev = review.find("div", attrs={"class": "gr_review_text"})
if rev is not None:
review_text.append(self.clean_text(rev.text))
else:
review_text.append("N/A")
revs = {"reviewer": review_by, "rating": review_rating, "review": review_text}
return revs
def show_information(self, book_info, reviews):
if reviews is not None:
reviewers = reviews["reviewer"]
ratings = reviews["rating"]
reviews_text = reviews["review"]
for rev in range(len(reviewers)):
pos = self.search_tbl.rowCount()
self.search_tbl.insertRow(pos)
self.search_tbl.setItem(pos, 0, QtWidgets.QTableWidgetItem(reviewers[rev]))
self.search_tbl.setItem(pos, 1, QtWidgets.QTableWidgetItem(str(ratings[rev])+"/5"))
self.search_tbl.setItem(pos, 2, QtWidgets.QTableWidgetItem(reviews_text[rev]))
self.search_tbl.resizeColumnsToContents()
self.book_isbn.setText(book_info["isbn"])
self.book_isbn.setStyleSheet(self.label_default_style)
self.book_isbn13.setText(book_info["isbn13"])
self.book_isbn13.setStyleSheet(self.label_default_style)
self.book_title.setText(book_info["title"])
self.book_title.setStyleSheet(self.label_default_style)
self.book_authors.setText(book_info["authors"])
self.book_authors.setStyleSheet(self.label_default_style)
self.book_pages.setText(book_info["pages"])
self.book_pages.setStyleSheet(self.label_default_style)
self.book_date_published.setText(book_info["date_published"])
self.book_date_published.setStyleSheet(self.label_default_style)
self.book_edition.setText(book_info["edition"])
self.book_edition.setStyleSheet(self.label_default_style)
self.book_format.setText(book_info["format"])
self.book_format.setStyleSheet(self.label_default_style)
self.book_publisher.setText(book_info["publisher"])
self.book_publisher.setStyleSheet(self.label_default_style)
self.book_description.setText(book_info["description"])
self.book_description.setStyleSheet(self.label_default_style)
def show_message(self, message, title):
choice = QMessageBox.question(self, title, message, QMessageBox.Ok)
if choice == QMessageBox.Ok:
print("OK")
else:
pass
def clean_text(self, review):
review = review.replace("\n", "")
review = review.replace("...", " ")
review = review.replace("more", " ")
        review = re.sub(r'\s+', ' ', review).strip()
return review
def get_rating(self, stars):
rating_scale = {"★★★★★": 5, "★★★★☆": 4, "★★★☆☆": 3, "★★☆☆☆": 2, "★☆☆☆☆": 1}
return rating_scale[stars]
@pyqtSlot()
def export(self):
self.export_as_csv()
def export_as_csv(self):
book_df = pd.DataFrame(self.book_data, index=[0])
book_df.to_csv("Books/"+self.book_data["isbn"]+"_details.csv")
review_df = pd.DataFrame(self.book_review_data)
review_df.to_csv("Reviews/"+self.book_data["isbn"]+"_reviews.csv")
self.show_message("Exported Book And Review Details To CSV", "Data Exported!!")
if __name__ == "__main__":
app = QApplication(sys.argv)
window = BookProfiler()
window.show()
sys.exit(app.exec_())
| true | true |
1c35bdf289ce9f23ba9e64b3a7ab60587588ed9e | 10,238 | py | Python | search/DrNAS/nb201space_progressive.py | MAC-AutoML/XNAS | 2c54ceb09b255cbcabd67f3c39fc777c4b2403f4 | [
"MIT"
] | 9 | 2021-04-21T08:14:03.000Z | 2021-11-26T11:52:40.000Z | search/DrNAS/nb201space_progressive.py | MAC-AutoML/XNAS | 2c54ceb09b255cbcabd67f3c39fc777c4b2403f4 | [
"MIT"
] | null | null | null | search/DrNAS/nb201space_progressive.py | MAC-AutoML/XNAS | 2c54ceb09b255cbcabd67f3c39fc777c4b2403f4 | [
"MIT"
] | 6 | 2021-05-19T02:36:43.000Z | 2021-12-03T07:21:37.000Z |
import os
import torch
import torch.nn as nn
import torch.utils
import torch.backends.cudnn as cudnn
import xnas.core.logging as logging
import xnas.core.config as config
import xnas.core.meters as meters
import xnas.search_space.DrNAS.utils as utils
from xnas.core.builders import build_loss_fun, DrNAS_builder
from xnas.core.config import cfg
from xnas.core.timer import Timer
from xnas.core.trainer import setup_env, test_epoch
from xnas.datasets.loader import construct_loader
from xnas.search_algorithm.DrNAS import Architect
from torch.utils.tensorboard import SummaryWriter
from nas_201_api import NASBench201API as API
# Load config and check
config.load_cfg_fom_args()
config.assert_and_infer_cfg()
cfg.freeze()
# Tensorboard supplement
writer = SummaryWriter(log_dir=os.path.join(cfg.OUT_DIR, "tb"))
logger = logging.get_logger(__name__)
def distill(result):
result = result.split("\n")
cifar10 = result[5].replace(" ", "").split(":")
cifar100 = result[7].replace(" ", "").split(":")
imagenet16 = result[9].replace(" ", "").split(":")
cifar10_train = float(cifar10[1].strip(",test")[-7:-2].strip("="))
cifar10_test = float(cifar10[2][-7:-2].strip("="))
cifar100_train = float(cifar100[1].strip(",valid")[-7:-2].strip("="))
cifar100_valid = float(cifar100[2].strip(",test")[-7:-2].strip("="))
cifar100_test = float(cifar100[3][-7:-2].strip("="))
imagenet16_train = float(imagenet16[1].strip(",valid")[-7:-2].strip("="))
imagenet16_valid = float(imagenet16[2].strip(",test")[-7:-2].strip("="))
imagenet16_test = float(imagenet16[3][-7:-2].strip("="))
return (
cifar10_train,
cifar10_test,
cifar100_train,
cifar100_valid,
cifar100_test,
imagenet16_train,
imagenet16_valid,
imagenet16_test,
)
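# Illustrative note (added): distill() slices accuracies out of the textual
# result of api.query_by_arch() purely by position; the [-7:-2] slices assume
# fragments shaped like "=93.51%" in the NAS-Bench-201 summary string, which
# is an assumption about that API's human-readable format.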
def main():
setup_env()
# follow DrNAS settings.
torch.set_num_threads(3)
cudnn.benchmark = True
if not "debug" in cfg.OUT_DIR:
api = API("./data/NAS-Bench-201-v1_1-096897.pth")
criterion = build_loss_fun().cuda()
assert cfg.DRNAS.METHOD in ["snas", "dirichlet", "darts"], "method not supported."
if cfg.DRNAS.METHOD == "snas":
# Create the decrease step for the gumbel softmax temperature
# cfg.OPTIM.MAX_EPOCH = 100
[tau_min, tau_max] = cfg.DRNAS.TAU
# Create the decrease step for the gumbel softmax temperature
tau_step = (tau_min - tau_max) / cfg.OPTIM.MAX_EPOCH
tau_epoch = tau_max
model = DrNAS_builder().cuda()
logger.info("param size = %fMB", utils.count_parameters_in_MB(model))
optimizer = torch.optim.SGD(
model.get_weights(),
cfg.OPTIM.BASE_LR,
momentum=cfg.OPTIM.MOMENTUM,
weight_decay=cfg.OPTIM.WEIGHT_DECAY,
)
train_loader, valid_loader = construct_loader(
cfg.SEARCH.DATASET,
cfg.SEARCH.SPLIT,
cfg.SEARCH.BATCH_SIZE,
cfg.SEARCH.DATAPATH,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
)
architect = Architect(model, cfg)
# configure progressive parameter
epoch = 0
ks = [4, 2]
num_keeps = [5, 3]
train_epochs = [2, 2] if "debug" in cfg.OUT_DIR else [50, 50]
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, float(sum(train_epochs)), eta_min=cfg.OPTIM.MIN_LR
)
train_meter = meters.TrainMeter(len(train_loader))
val_meter = meters.TestMeter(len(valid_loader))
# train_timer = Timer()
for i, current_epochs in enumerate(train_epochs):
for e in range(current_epochs):
lr = scheduler.get_lr()[0]
logger.info("epoch %d lr %e", epoch, lr)
genotype = model.genotype()
logger.info("genotype = %s", genotype)
model.show_arch_parameters(logger)
# training
# train_timer.tic()
top1err = train_epoch(
train_loader,
valid_loader,
model,
architect,
criterion,
optimizer,
lr,
train_meter,
e,
)
logger.info("Top1 err:%f", top1err)
# train_timer.toc()
# print("epoch time:{}".format(train_timer.diff))
# validation
test_epoch(valid_loader, model, val_meter, epoch, writer)
if not "debug" in cfg.OUT_DIR:
# nasbench201
result = api.query_by_arch(model.genotype())
logger.info("{:}".format(result))
(
cifar10_train,
cifar10_test,
cifar100_train,
cifar100_valid,
cifar100_test,
imagenet16_train,
imagenet16_valid,
imagenet16_test,
) = distill(result)
logger.info("cifar10 train %f test %f", cifar10_train, cifar10_test)
logger.info(
"cifar100 train %f valid %f test %f",
cifar100_train,
cifar100_valid,
cifar100_test,
)
logger.info(
"imagenet16 train %f valid %f test %f",
imagenet16_train,
imagenet16_valid,
imagenet16_test,
)
# tensorboard
writer.add_scalars(
"nasbench201/cifar10",
{"train": cifar10_train, "test": cifar10_test},
epoch,
)
writer.add_scalars(
"nasbench201/cifar100",
{
"train": cifar100_train,
"valid": cifar100_valid,
"test": cifar100_test,
},
epoch,
)
writer.add_scalars(
"nasbench201/imagenet16",
{
"train": imagenet16_train,
"valid": imagenet16_valid,
"test": imagenet16_test,
},
epoch,
)
utils.save_checkpoint(
{
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
"alpha": model.arch_parameters(),
},
False,
cfg.OUT_DIR,
)
epoch += 1
scheduler.step()
if cfg.DRNAS.METHOD == "snas":
# Decrease the temperature for the gumbel softmax linearly
tau_epoch += tau_step
logger.info("tau %f", tau_epoch)
model.set_tau(tau_epoch)
if not i == len(train_epochs) - 1:
model.pruning(num_keeps[i + 1])
# architect.pruning([model._mask])
model.wider(ks[i + 1])
optimizer = utils.configure_optimizer(
optimizer,
torch.optim.SGD(
model.get_weights(),
cfg.OPTIM.BASE_LR,
momentum=cfg.OPTIM.MOMENTUM,
weight_decay=cfg.OPTIM.WEIGHT_DECAY,
),
)
scheduler = utils.configure_scheduler(
scheduler,
torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, float(sum(train_epochs)), eta_min=cfg.OPTIM.MIN_LR
),
)
logger.info("pruning finish, %d ops left per edge", num_keeps[i + 1])
logger.info("network wider finish, current pc parameter %d", ks[i + 1])
genotype = model.genotype()
logger.info("genotype = %s", genotype)
model.show_arch_parameters(logger)
writer.close()
def train_epoch(
train_loader,
valid_loader,
model,
architect,
criterion,
optimizer,
lr,
train_meter,
cur_epoch,
):
train_meter.iter_tic()
cur_step = cur_epoch * len(train_loader)
writer.add_scalar("train/lr", lr, cur_step)
valid_loader_iter = iter(valid_loader)
for cur_iter, (trn_X, trn_y) in enumerate(train_loader):
model.train()
try:
(val_X, val_y) = next(valid_loader_iter)
except StopIteration:
valid_loader_iter = iter(valid_loader)
(val_X, val_y) = next(valid_loader_iter)
# Transfer the data to the current GPU device
trn_X, trn_y = trn_X.cuda(), trn_y.cuda(non_blocking=True)
val_X, val_y = val_X.cuda(), val_y.cuda(non_blocking=True)
if cur_epoch >= 10:
architect.step(
trn_X, trn_y, val_X, val_y, lr, optimizer, unrolled=cfg.DRNAS.UNROLLED
)
optimizer.zero_grad()
architect.optimizer.zero_grad()
logits = model(trn_X)
loss = criterion(logits, trn_y)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), cfg.OPTIM.GRAD_CLIP)
optimizer.step()
optimizer.zero_grad()
architect.optimizer.zero_grad()
top1_err, top5_err = meters.topk_errors(logits, trn_y, [1, 5])
loss, top1_err, top5_err = loss.item(), top1_err.item(), top5_err.item()
train_meter.iter_toc()
# Update and log stats
# TODO: multiply with NUM_GPUS are disabled before appling parallel
# mb_size = trn_X.size(0) * cfg.NUM_GPUS
mb_size = trn_X.size(0)
train_meter.update_stats(top1_err, top5_err, loss, lr, mb_size)
train_meter.log_iter_stats(cur_epoch, cur_iter)
train_meter.iter_tic()
# write to tensorboard
writer.add_scalar("train/loss", loss, cur_step)
writer.add_scalar("train/top1_error", top1_err, cur_step)
writer.add_scalar("train/top5_error", top5_err, cur_step)
cur_step += 1
# Log epoch stats
train_meter.log_epoch_stats(cur_epoch)
train_meter.reset()
return top1_err
if __name__ == "__main__":
main()
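# Illustrative sketch (added): the linear Gumbel-softmax temperature schedule
# applied above when cfg.DRNAS.METHOD == "snas". The numbers are assumptions
# for the example only:
#   tau_max, tau_min, max_epoch = 10.0, 0.1, 100
#   tau_step = (tau_min - tau_max) / max_epoch   # negative increment
#   taus = [tau_max + e * tau_step for e in range(max_epoch)]
#   # taus[0] == 10.0, falling linearly towards tau_min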
 | 32.814103 | 86 | 0.558312 |
import os
import torch
import torch.nn as nn
import torch.utils
import torch.backends.cudnn as cudnn
import xnas.core.logging as logging
import xnas.core.config as config
import xnas.core.meters as meters
import xnas.search_space.DrNAS.utils as utils
from xnas.core.builders import build_loss_fun, DrNAS_builder
from xnas.core.config import cfg
from xnas.core.timer import Timer
from xnas.core.trainer import setup_env, test_epoch
from xnas.datasets.loader import construct_loader
from xnas.search_algorithm.DrNAS import Architect
from torch.utils.tensorboard import SummaryWriter
from nas_201_api import NASBench201API as API
config.load_cfg_fom_args()
config.assert_and_infer_cfg()
cfg.freeze()
writer = SummaryWriter(log_dir=os.path.join(cfg.OUT_DIR, "tb"))
logger = logging.get_logger(__name__)
def distill(result):
result = result.split("\n")
cifar10 = result[5].replace(" ", "").split(":")
cifar100 = result[7].replace(" ", "").split(":")
imagenet16 = result[9].replace(" ", "").split(":")
cifar10_train = float(cifar10[1].strip(",test")[-7:-2].strip("="))
cifar10_test = float(cifar10[2][-7:-2].strip("="))
cifar100_train = float(cifar100[1].strip(",valid")[-7:-2].strip("="))
cifar100_valid = float(cifar100[2].strip(",test")[-7:-2].strip("="))
cifar100_test = float(cifar100[3][-7:-2].strip("="))
imagenet16_train = float(imagenet16[1].strip(",valid")[-7:-2].strip("="))
imagenet16_valid = float(imagenet16[2].strip(",test")[-7:-2].strip("="))
imagenet16_test = float(imagenet16[3][-7:-2].strip("="))
return (
cifar10_train,
cifar10_test,
cifar100_train,
cifar100_valid,
cifar100_test,
imagenet16_train,
imagenet16_valid,
imagenet16_test,
)
def main():
setup_env()
torch.set_num_threads(3)
cudnn.benchmark = True
if not "debug" in cfg.OUT_DIR:
api = API("./data/NAS-Bench-201-v1_1-096897.pth")
criterion = build_loss_fun().cuda()
assert cfg.DRNAS.METHOD in ["snas", "dirichlet", "darts"], "method not supported."
if cfg.DRNAS.METHOD == "snas":
[tau_min, tau_max] = cfg.DRNAS.TAU
tau_step = (tau_min - tau_max) / cfg.OPTIM.MAX_EPOCH
tau_epoch = tau_max
model = DrNAS_builder().cuda()
logger.info("param size = %fMB", utils.count_parameters_in_MB(model))
optimizer = torch.optim.SGD(
model.get_weights(),
cfg.OPTIM.BASE_LR,
momentum=cfg.OPTIM.MOMENTUM,
weight_decay=cfg.OPTIM.WEIGHT_DECAY,
)
train_loader, valid_loader = construct_loader(
cfg.SEARCH.DATASET,
cfg.SEARCH.SPLIT,
cfg.SEARCH.BATCH_SIZE,
cfg.SEARCH.DATAPATH,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
)
architect = Architect(model, cfg)
epoch = 0
ks = [4, 2]
num_keeps = [5, 3]
train_epochs = [2, 2] if "debug" in cfg.OUT_DIR else [50, 50]
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, float(sum(train_epochs)), eta_min=cfg.OPTIM.MIN_LR
)
train_meter = meters.TrainMeter(len(train_loader))
val_meter = meters.TestMeter(len(valid_loader))
for i, current_epochs in enumerate(train_epochs):
for e in range(current_epochs):
lr = scheduler.get_lr()[0]
logger.info("epoch %d lr %e", epoch, lr)
genotype = model.genotype()
logger.info("genotype = %s", genotype)
model.show_arch_parameters(logger)
top1err = train_epoch(
train_loader,
valid_loader,
model,
architect,
criterion,
optimizer,
lr,
train_meter,
e,
)
logger.info("Top1 err:%f", top1err)
test_epoch(valid_loader, model, val_meter, epoch, writer)
if not "debug" in cfg.OUT_DIR:
result = api.query_by_arch(model.genotype())
logger.info("{:}".format(result))
(
cifar10_train,
cifar10_test,
cifar100_train,
cifar100_valid,
cifar100_test,
imagenet16_train,
imagenet16_valid,
imagenet16_test,
) = distill(result)
logger.info("cifar10 train %f test %f", cifar10_train, cifar10_test)
logger.info(
"cifar100 train %f valid %f test %f",
cifar100_train,
cifar100_valid,
cifar100_test,
)
logger.info(
"imagenet16 train %f valid %f test %f",
imagenet16_train,
imagenet16_valid,
imagenet16_test,
)
writer.add_scalars(
"nasbench201/cifar10",
{"train": cifar10_train, "test": cifar10_test},
epoch,
)
writer.add_scalars(
"nasbench201/cifar100",
{
"train": cifar100_train,
"valid": cifar100_valid,
"test": cifar100_test,
},
epoch,
)
writer.add_scalars(
"nasbench201/imagenet16",
{
"train": imagenet16_train,
"valid": imagenet16_valid,
"test": imagenet16_test,
},
epoch,
)
utils.save_checkpoint(
{
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
"alpha": model.arch_parameters(),
},
False,
cfg.OUT_DIR,
)
epoch += 1
scheduler.step()
if cfg.DRNAS.METHOD == "snas":
tau_epoch += tau_step
logger.info("tau %f", tau_epoch)
model.set_tau(tau_epoch)
if not i == len(train_epochs) - 1:
model.pruning(num_keeps[i + 1])
model.wider(ks[i + 1])
optimizer = utils.configure_optimizer(
optimizer,
torch.optim.SGD(
model.get_weights(),
cfg.OPTIM.BASE_LR,
momentum=cfg.OPTIM.MOMENTUM,
weight_decay=cfg.OPTIM.WEIGHT_DECAY,
),
)
scheduler = utils.configure_scheduler(
scheduler,
torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, float(sum(train_epochs)), eta_min=cfg.OPTIM.MIN_LR
),
)
logger.info("pruning finish, %d ops left per edge", num_keeps[i + 1])
logger.info("network wider finish, current pc parameter %d", ks[i + 1])
genotype = model.genotype()
logger.info("genotype = %s", genotype)
model.show_arch_parameters(logger)
writer.close()
def train_epoch(
train_loader,
valid_loader,
model,
architect,
criterion,
optimizer,
lr,
train_meter,
cur_epoch,
):
train_meter.iter_tic()
cur_step = cur_epoch * len(train_loader)
writer.add_scalar("train/lr", lr, cur_step)
valid_loader_iter = iter(valid_loader)
for cur_iter, (trn_X, trn_y) in enumerate(train_loader):
model.train()
try:
(val_X, val_y) = next(valid_loader_iter)
except StopIteration:
valid_loader_iter = iter(valid_loader)
(val_X, val_y) = next(valid_loader_iter)
trn_X, trn_y = trn_X.cuda(), trn_y.cuda(non_blocking=True)
val_X, val_y = val_X.cuda(), val_y.cuda(non_blocking=True)
if cur_epoch >= 10:
architect.step(
trn_X, trn_y, val_X, val_y, lr, optimizer, unrolled=cfg.DRNAS.UNROLLED
)
optimizer.zero_grad()
architect.optimizer.zero_grad()
logits = model(trn_X)
loss = criterion(logits, trn_y)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), cfg.OPTIM.GRAD_CLIP)
optimizer.step()
optimizer.zero_grad()
architect.optimizer.zero_grad()
top1_err, top5_err = meters.topk_errors(logits, trn_y, [1, 5])
loss, top1_err, top5_err = loss.item(), top1_err.item(), top5_err.item()
train_meter.iter_toc()
mb_size = trn_X.size(0)
train_meter.update_stats(top1_err, top5_err, loss, lr, mb_size)
train_meter.log_iter_stats(cur_epoch, cur_iter)
train_meter.iter_tic()
writer.add_scalar("train/loss", loss, cur_step)
writer.add_scalar("train/top1_error", top1_err, cur_step)
writer.add_scalar("train/top5_error", top5_err, cur_step)
cur_step += 1
train_meter.log_epoch_stats(cur_epoch)
train_meter.reset()
return top1_err
if __name__ == "__main__":
main()
| true | true |
1c35be6009f1f3b90929631d9c03a85e7c351068 | 3,913 | py | Python | netbox/extras/admin.py | Netnod/netbox | 24344ccfafe6a6f6e71099fa2593a4eb8e737d5f | [
"Apache-2.0"
] | 1 | 2022-01-11T10:33:15.000Z | 2022-01-11T10:33:15.000Z | netbox/extras/admin.py | Netnod/netbox | 24344ccfafe6a6f6e71099fa2593a4eb8e737d5f | [
"Apache-2.0"
] | null | null | null | netbox/extras/admin.py | Netnod/netbox | 24344ccfafe6a6f6e71099fa2593a4eb8e737d5f | [
"Apache-2.0"
] | null | null | null |
from django import forms
from django.contrib import admin
from netbox.admin import admin_site
from utilities.forms import LaxURLField
from .models import CustomField, CustomFieldChoice, CustomLink, Graph, ExportTemplate, TopologyMap, Webhook
def order_content_types(field):
"""
Order the list of available ContentTypes by application
"""
queryset = field.queryset.order_by('app_label', 'model')
field.choices = [(ct.pk, '{} > {}'.format(ct.app_label, ct.name)) for ct in queryset]
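# Illustrative sketch (added): with hypothetical ContentTypes for dcim.device
# and ipam.prefix, order_content_types() leaves field.choices looking like:
#   [(12, 'dcim > device'), (34, 'ipam > prefix')]
# (the primary keys are invented for the example).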
#
# Webhooks
#
class WebhookForm(forms.ModelForm):
payload_url = LaxURLField(
label='URL'
)
class Meta:
model = Webhook
exclude = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if 'obj_type' in self.fields:
order_content_types(self.fields['obj_type'])
@admin.register(Webhook, site=admin_site)
class WebhookAdmin(admin.ModelAdmin):
list_display = [
'name', 'models', 'payload_url', 'http_content_type', 'enabled', 'type_create', 'type_update',
'type_delete', 'ssl_verification',
]
form = WebhookForm
def models(self, obj):
return ', '.join([ct.name for ct in obj.obj_type.all()])
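    # Illustrative sketch (added): models() joins the webhook's registered
    # object types into one display string, e.g. (hypothetical ContentTypes):
    #   'device, rack, site'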
#
# Custom fields
#
class CustomFieldForm(forms.ModelForm):
class Meta:
model = CustomField
exclude = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
order_content_types(self.fields['obj_type'])
class CustomFieldChoiceAdmin(admin.TabularInline):
model = CustomFieldChoice
extra = 5
@admin.register(CustomField, site=admin_site)
class CustomFieldAdmin(admin.ModelAdmin):
inlines = [CustomFieldChoiceAdmin]
list_display = ['name', 'models', 'type', 'required', 'filter_logic', 'default', 'weight', 'description']
form = CustomFieldForm
def models(self, obj):
return ', '.join([ct.name for ct in obj.obj_type.all()])
#
# Custom links
#
class CustomLinkForm(forms.ModelForm):
class Meta:
model = CustomLink
exclude = []
widgets = {
'text': forms.Textarea,
'url': forms.Textarea,
}
help_texts = {
'text': 'Jinja2 template code for the link text. Reference the object as <code>{{ obj }}</code>. Links '
'which render as empty text will not be displayed.',
'url': 'Jinja2 template code for the link URL. Reference the object as <code>{{ obj }}</code>.',
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Format ContentType choices
order_content_types(self.fields['content_type'])
self.fields['content_type'].choices.insert(0, ('', '---------'))
@admin.register(CustomLink, site=admin_site)
class CustomLinkAdmin(admin.ModelAdmin):
list_display = ['name', 'content_type', 'group_name', 'weight']
form = CustomLinkForm
#
# Graphs
#
@admin.register(Graph, site=admin_site)
class GraphAdmin(admin.ModelAdmin):
list_display = ['name', 'type', 'weight', 'source']
#
# Export templates
#
class ExportTemplateForm(forms.ModelForm):
class Meta:
model = ExportTemplate
exclude = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Format ContentType choices
order_content_types(self.fields['content_type'])
self.fields['content_type'].choices.insert(0, ('', '---------'))
@admin.register(ExportTemplate, site=admin_site)
class ExportTemplateAdmin(admin.ModelAdmin):
list_display = ['name', 'content_type', 'description', 'mime_type', 'file_extension']
form = ExportTemplateForm
#
# Topology maps
#
@admin.register(TopologyMap, site=admin_site)
class TopologyMapAdmin(admin.ModelAdmin):
list_display = ['name', 'slug', 'site']
prepopulated_fields = {
'slug': ['name'],
}
 | 25.083333 | 116 | 0.645029 |
from django import forms
from django.contrib import admin
from netbox.admin import admin_site
from utilities.forms import LaxURLField
from .models import CustomField, CustomFieldChoice, CustomLink, Graph, ExportTemplate, TopologyMap, Webhook
def order_content_types(field):
queryset = field.queryset.order_by('app_label', 'model')
field.choices = [(ct.pk, '{} > {}'.format(ct.app_label, ct.name)) for ct in queryset]
class WebhookForm(forms.ModelForm):
payload_url = LaxURLField(
label='URL'
)
class Meta:
model = Webhook
exclude = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if 'obj_type' in self.fields:
order_content_types(self.fields['obj_type'])
@admin.register(Webhook, site=admin_site)
class WebhookAdmin(admin.ModelAdmin):
list_display = [
'name', 'models', 'payload_url', 'http_content_type', 'enabled', 'type_create', 'type_update',
'type_delete', 'ssl_verification',
]
form = WebhookForm
def models(self, obj):
return ', '.join([ct.name for ct in obj.obj_type.all()])
class CustomFieldForm(forms.ModelForm):
class Meta:
model = CustomField
exclude = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
order_content_types(self.fields['obj_type'])
class CustomFieldChoiceAdmin(admin.TabularInline):
model = CustomFieldChoice
extra = 5
@admin.register(CustomField, site=admin_site)
class CustomFieldAdmin(admin.ModelAdmin):
inlines = [CustomFieldChoiceAdmin]
list_display = ['name', 'models', 'type', 'required', 'filter_logic', 'default', 'weight', 'description']
form = CustomFieldForm
def models(self, obj):
return ', '.join([ct.name for ct in obj.obj_type.all()])
class CustomLinkForm(forms.ModelForm):
class Meta:
model = CustomLink
exclude = []
widgets = {
'text': forms.Textarea,
'url': forms.Textarea,
}
help_texts = {
'text': 'Jinja2 template code for the link text. Reference the object as <code>{{ obj }}</code>. Links '
'which render as empty text will not be displayed.',
'url': 'Jinja2 template code for the link URL. Reference the object as <code>{{ obj }}</code>.',
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
order_content_types(self.fields['content_type'])
self.fields['content_type'].choices.insert(0, ('', '---------'))
@admin.register(CustomLink, site=admin_site)
class CustomLinkAdmin(admin.ModelAdmin):
list_display = ['name', 'content_type', 'group_name', 'weight']
form = CustomLinkForm
@admin.register(Graph, site=admin_site)
class GraphAdmin(admin.ModelAdmin):
list_display = ['name', 'type', 'weight', 'source']
class ExportTemplateForm(forms.ModelForm):
class Meta:
model = ExportTemplate
exclude = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
order_content_types(self.fields['content_type'])
self.fields['content_type'].choices.insert(0, ('', '---------'))
@admin.register(ExportTemplate, site=admin_site)
class ExportTemplateAdmin(admin.ModelAdmin):
list_display = ['name', 'content_type', 'description', 'mime_type', 'file_extension']
form = ExportTemplateForm
@admin.register(TopologyMap, site=admin_site)
class TopologyMapAdmin(admin.ModelAdmin):
list_display = ['name', 'slug', 'site']
prepopulated_fields = {
'slug': ['name'],
}
| true | true |
1c35befc12f70e13e97a2aa569fc76e1372a6279 | 5,252 | py | Python | BluePlug/QtWork.py | liufeng3486/BluePlug | c7c5c769ed35c71ebc542d34848d6bf309abd051 | [
"MIT"
] | 1 | 2019-01-27T04:08:05.000Z | 2019-01-27T04:08:05.000Z | BluePlug/QtWork.py | liufeng3486/BluePlug | c7c5c769ed35c71ebc542d34848d6bf309abd051 | [
"MIT"
] | 5 | 2021-03-18T21:35:20.000Z | 2022-01-13T00:58:18.000Z | BluePlug/QtWork.py | liufeng3486/BluePlug | c7c5c769ed35c71ebc542d34848d6bf309abd051 | [
"MIT"
] | null | null | null |
from PyQt5 import QtCore
from BluePlug.Base import *
import BluePlug.Init as Init
import BluePlug.Login as Login
from subprocess import Popen, PIPE, STDOUT
# import Answer,DailyQuest,PetFight,PlotCopy
# import SetInit
import BluePlug.MainQuest as MainQuest
# import CreateRole as CreateRole
import time,shutil
class Worker(QtCore.QThread):
    sinOut = QtCore.pyqtSignal(str)  # custom signal, emitted from the worker thread while run() executes
sinOut2 = QtCore.pyqtSignal(str)
# sinOut2 = "ddd"
def __init__(self,index=0, parent=None):
super(Worker, self).__init__(parent)
self.start = time.time()
self.index = index
self.cus_state = "Wait"
self.old_state = "Wait"
self.counter = -1
self.temp_counter = -1
self.working = True
self.function_1 = True
self.function_2 = True
self.function_3 = True
self.function_4 = True
self.function_5 = True
self.lv = 0
self.fpoint = 0
self.dail_end = 0
self.time_sleep = 1.5
self.function_list = [ self.function_1 ,self.function_2 ,self.function_3 ,self.function_4 ,self.function_5 ]
self.num = 0
self.list =[True,True,True,True,True] #skip,talk*3
def __del__(self):
self.working = False
self.wait()
def setValue(self,index,value):
# print("set",index,value)
# self.function_1 = 111
self.function_list[index] = value
# print("ddd:",self.function_1)
def getLvAndFpoint(self):
try:
lv = str(getLv(".//%s//screenshot.png" % str(self.index)))
fpoint = getFpoint(".//%s//screenshot.png" % str(self.index))
lv_int = int(lv)
fpoint_int = int(fpoint)
self.lv = lv
self.fpoint = fpoint
except:
pass
def get_image(self,name="screenshot.png"): # 获取图片
temp1 = '.\dnplayer2\dnconsole.exe adb --index %s --command "shell /system/bin/screencap -p /sdcard/screenshot.png"' % str(
self.index)
temp2 = '.\dnplayer2\dnconsole.exe adb --index %s --command "pull /sdcard/screenshot.png ./%s/%s"' % (
str(self.index), str(self.index), name)
command = temp1+"&&"+temp2
p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
output, errors = p.communicate()
if errors:
print(errors)
pngTranspose("./%s/%s" % (str(self.index), name))
print ("do")
if time.time() - self.start > 600:
print("get log")
self.start = time.time()
shutil.copyfile("./%s/%s" % (str(self.index), name), "./%s/%s"%(str(self.index),str(int(time.time()))+".png"))
# with open("d:/ChangZhi/%s/%s" % (str(self.index), name),"r")
def subFunCall(self,func):
if self.counter == -1:
self.cus_state = func(self.index, channel=self.sinOut2)
print ("subFunCall",self.cus_state)
else:
if self.temp_counter == -1 :
self.temp_counter = self.counter
self.temp_counter,self.cus_state = func(index = self.index,finish=self.temp_counter, channel=self.sinOut2)
print("subFunCall",self.temp_counter,self.cus_state)
def subJobInit(self):
pass
# self.mainquest = MainQuest.MainQuest(self.index)
# self.login = Login.Login(self.index)
# self.init = Init.Init(self.index)
def mainrun(self):
self.subJobInit()
sign = 0
while self.working == True:
print(self.cus_state, "%" * 20)
sign += 1
self.get_image()
if sign % 50 == 0:
self.getLvAndFpoint()
if self.old_state != self.cus_state:
self.old_state = self.cus_state
self.count = -1
self.sleep((self.time_sleep))
self.run()
    def run(self, index=0, user_message=[]):  # index: device number; cus_state: 0 = not started, 1 = app launched, 2 = logged in and initial state fetched
pass
# if self.cus_state == "Init_start":
# self.init.init_start()
# elif self.cus_state == "Init_check":
# self.init.start_check()
# elif self.cus_state == "Login":
# self.login.run()
# elif self.cus_state == "MainQuest":
# self.mainquest.run()
# elif self.cus_state == "Wait":
# self.sleep(5)
# elif self.cus_state:
# prt("error", channel=self.sinOut2)
# else:
# prt("dead",channel=self.sinOut2)
# print ("1")
class NewPlug(Worker):
def run(self):
if self.cus_state == "Init_start":
self.init.init_start()
elif self.cus_state == "Init_check":
self.init.start_check()
elif self.cus_state == "Login":
self.login.run()
elif self.cus_state == "MainQuest":
self.mainquest.run()
elif self.cus_state == "Wait":
self.sleep(5)
elif self.cus_state:
prt("error", channel=self.sinOut2)
else:
prt("dead",channel=self.sinOut2)
print ("1")
if __name__ == '__main__':
a = NewPlug()
a.cus_state = "MainQuest"
a.mainrun()
 | 34.781457 | 132 | 0.56588 |
from PyQt5 import QtCore
from BluePlug.Base import *
import BluePlug.Init as Init
import BluePlug.Login as Login
from subprocess import Popen, PIPE, STDOUT
import BluePlug.MainQuest as MainQuest
import time,shutil
class Worker(QtCore.QThread):
sinOut = QtCore.pyqtSignal(str)
sinOut2 = QtCore.pyqtSignal(str)
def __init__(self,index=0, parent=None):
super(Worker, self).__init__(parent)
self.start = time.time()
self.index = index
self.cus_state = "Wait"
self.old_state = "Wait"
self.counter = -1
self.temp_counter = -1
self.working = True
self.function_1 = True
self.function_2 = True
self.function_3 = True
self.function_4 = True
self.function_5 = True
self.lv = 0
self.fpoint = 0
self.dail_end = 0
self.time_sleep = 1.5
self.function_list = [ self.function_1 ,self.function_2 ,self.function_3 ,self.function_4 ,self.function_5 ]
self.num = 0
self.list =[True,True,True,True,True]
def __del__(self):
self.working = False
self.wait()
def setValue(self,index,value):
self.function_list[index] = value
def getLvAndFpoint(self):
try:
lv = str(getLv(".//%s//screenshot.png" % str(self.index)))
fpoint = getFpoint(".//%s//screenshot.png" % str(self.index))
lv_int = int(lv)
fpoint_int = int(fpoint)
self.lv = lv
self.fpoint = fpoint
except:
pass
def get_image(self,name="screenshot.png"):
temp1 = '.\dnplayer2\dnconsole.exe adb --index %s --command "shell /system/bin/screencap -p /sdcard/screenshot.png"' % str(
self.index)
temp2 = '.\dnplayer2\dnconsole.exe adb --index %s --command "pull /sdcard/screenshot.png ./%s/%s"' % (
str(self.index), str(self.index), name)
command = temp1+"&&"+temp2
p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
output, errors = p.communicate()
if errors:
print(errors)
pngTranspose("./%s/%s" % (str(self.index), name))
print ("do")
if time.time() - self.start > 600:
print("get log")
self.start = time.time()
shutil.copyfile("./%s/%s" % (str(self.index), name), "./%s/%s"%(str(self.index),str(int(time.time()))+".png"))
def subFunCall(self,func):
if self.counter == -1:
self.cus_state = func(self.index, channel=self.sinOut2)
print ("subFunCall",self.cus_state)
else:
if self.temp_counter == -1 :
self.temp_counter = self.counter
self.temp_counter,self.cus_state = func(index = self.index,finish=self.temp_counter, channel=self.sinOut2)
print("subFunCall",self.temp_counter,self.cus_state)
def subJobInit(self):
pass
def mainrun(self):
self.subJobInit()
sign = 0
while self.working == True:
print(self.cus_state, "%" * 20)
sign += 1
self.get_image()
if sign % 50 == 0:
self.getLvAndFpoint()
if self.old_state != self.cus_state:
self.old_state = self.cus_state
self.count = -1
self.sleep((self.time_sleep))
self.run()
def run(self,index=0, user_message=[]):
pass
class NewPlug(Worker):
def run(self):
if self.cus_state == "Init_start":
self.init.init_start()
elif self.cus_state == "Init_check":
self.init.start_check()
elif self.cus_state == "Login":
self.login.run()
elif self.cus_state == "MainQuest":
self.mainquest.run()
elif self.cus_state == "Wait":
self.sleep(5)
elif self.cus_state:
prt("error", channel=self.sinOut2)
else:
prt("dead",channel=self.sinOut2)
print ("1")
if __name__ == '__main__':
a = NewPlug()
a.cus_state = "MainQuest"
a.mainrun()
| true | true |
1c35bf9c42a54b0ce049fb9e066a1d6d5b21b754 | 1,724 | py | Python | Setup/SendMail.py | djtorch26/DSP_FinalProject | 202d51778f79aaaf18573504c51dcc4c85021ac3 | [
"MIT"
] | null | null | null | Setup/SendMail.py | djtorch26/DSP_FinalProject | 202d51778f79aaaf18573504c51dcc4c85021ac3 | [
"MIT"
] | null | null | null | Setup/SendMail.py | djtorch26/DSP_FinalProject | 202d51778f79aaaf18573504c51dcc4c85021ac3 | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 09:29:36 2020
This works with python Version 3 only
@author: Dawson
"""
import os
import smtplib
from email import encoders
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
gmail_user = ""
gmail_pwd = ""
to = "djtorch123@gmail.com"
def emailFile(file):
attach = file
if "test" in file:
subject = "Wave Sound File"
text = "This is the recorded voice from the Microphone.\n To use this file you Must add the extension .wav to the no name file."
if "voice" in file:
subject = "Wave Graph File"
text = "This is a PNG file of the recorded voice.\n Add the .png once downloaded to view"
if "FFT" in file:
subject = "FFT File"
text = "This is a PNG file showing the FFT or frequency response of the Recorded Voice.\n Add the .png once downloaded to view"
msg = MIMEMultipart()
msg['From'] = gmail_user
msg['To'] = to
msg['Subject'] = subject
msg.attach(MIMEText(text))
part = MIMEBase('application', 'octet-stream')
part.set_payload(open(attach, 'rb').read())
encoders.encode_base64(part)
    part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(attach))
msg.attach(part)
mailServer = smtplib.SMTP("smtp.gmail.com",587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(gmail_user, gmail_pwd)
mailServer.sendmail(gmail_user, to, msg.as_string())
mailServer.close()
print("Email Sent!")
#Function Tests
#emailFile('test.wav')
#emailFile('voiceWave.png')
#emailFile('FFTWave.png')
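# Illustrative note (added): gmail_user / gmail_pwd above are intentionally
# blank and must be filled in before emailFile() can authenticate; with Gmail
# this typically requires an app password. Hypothetical example values:
#   gmail_user = "sender@gmail.com"
#   gmail_pwd = "app-password-here"
#   emailFile('test.wav')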
| 28.262295 | 137 | 0.667053 |
import os
import smtplib
from email import encoders
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
gmail_user = ""
gmail_pwd = ""
to = "djtorch123@gmail.com"
def emailFile(file):
attach = file
if "test" in file:
subject = "Wave Sound File"
text = "This is the recorded voice from the Microphone.\n To use this file you Must add the extension .wav to the no name file."
if "voice" in file:
subject = "Wave Graph File"
text = "This is a PNG file of the recorded voice.\n Add the .png once downloaded to view"
if "FFT" in file:
subject = "FFT File"
text = "This is a PNG file showing the FFT or frequency response of the Recorded Voice.\n Add the .png once downloaded to view"
msg = MIMEMultipart()
msg['From'] = gmail_user
msg['To'] = to
msg['Subject'] = subject
msg.attach(MIMEText(text))
part = MIMEBase('application', 'octet-stream')
    with open(attach, 'rb') as payload_file:
        part.set_payload(payload_file.read())
encoders.encode_base64(part)
    part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(attach))
msg.attach(part)
mailServer = smtplib.SMTP("smtp.gmail.com",587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(gmail_user, gmail_pwd)
mailServer.sendmail(gmail_user, to, msg.as_string())
mailServer.close()
print("Email Sent!")
#Function Tests
#emailFile('test.wav')
#emailFile('voiceWave.png')
#emailFile('FFTWave.png')
| true | true |
1c35bff77d70074ae8e9d66cad3a8a97caf271d0 | 451 | py | Python | libweasyl/libweasyl/alembic/versions/cbe0ea91af79_remove_non_original_audio_upload_report_.py | greysteil/wzl-test | 0f863b9e7c58e5861437618bd590126ca323140c | [
"Apache-2.0"
] | 1 | 2019-02-15T04:21:48.000Z | 2019-02-15T04:21:48.000Z | libweasyl/libweasyl/alembic/versions/cbe0ea91af79_remove_non_original_audio_upload_report_.py | kfkitsune/wzl-test | 27297ccb42e24d652a29aa82f5a667c7d9a6d8de | [
"Apache-2.0"
] | 254 | 2017-12-23T19:36:43.000Z | 2020-04-14T21:46:13.000Z | libweasyl/libweasyl/alembic/versions/cbe0ea91af79_remove_non_original_audio_upload_report_.py | greysteil/wzl-test | 0f863b9e7c58e5861437618bd590126ca323140c | [
"Apache-2.0"
] | 1 | 2017-12-23T18:42:16.000Z | 2017-12-23T18:42:16.000Z | # encoding: utf-8
"""Remove “Non-original audio upload” report type
Revision ID: cbe0ea91af79
Revises: c8c088918278
Create Date: 2016-08-11 01:21:10.906138
"""
# revision identifiers, used by Alembic.
revision = 'cbe0ea91af79'
down_revision = 'c8c088918278'
from alembic import op
def upgrade():
op.execute('UPDATE reportcomment SET violation = 2020 WHERE violation = 2100')
def downgrade():
raise Exception('Irreversible migration')
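# The migration is one-way by design: the upgrade collapses violation code 2100
# into 2020, so a downgrade could not tell the merged rows apart.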
| 18.791667 | 82 | 0.745011 |
revision = 'cbe0ea91af79'
down_revision = 'c8c088918278'
from alembic import op
def upgrade():
op.execute('UPDATE reportcomment SET violation = 2020 WHERE violation = 2100')
def downgrade():
raise Exception('Irreversible migration')
| true | true |
1c35bffd725f29b683628d85125e5290faeee3bc | 283 | py | Python | helloworld/demo/management/commands/what_time_is_it.py | mingregister/helloworld | fd3bf75e8567b5be8fc6b89cfb3c874fc1c58276 | [
"Apache-2.0"
] | 13 | 2018-08-25T22:02:24.000Z | 2021-11-13T22:09:44.000Z | helloworld/demo/management/commands/what_time_is_it.py | mingregister/helloworld | fd3bf75e8567b5be8fc6b89cfb3c874fc1c58276 | [
"Apache-2.0"
] | null | null | null | helloworld/demo/management/commands/what_time_is_it.py | mingregister/helloworld | fd3bf75e8567b5be8fc6b89cfb3c874fc1c58276 | [
"Apache-2.0"
] | 7 | 2018-08-27T20:17:02.000Z | 2022-02-28T01:11:38.000Z | from django.core.management.base import BaseCommand
from django.utils import timezone
class Command(BaseCommand):
help = 'Displays current time'
def handle(self, *args, **kwargs):
time = timezone.now().strftime('%X')
self.stdout.write("It's now %s" % time)
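# Usage sketch (assumes the app is listed in INSTALLED_APPS):
#   python manage.py what_time_is_it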
| 28.3 | 51 | 0.681979 | from django.core.management.base import BaseCommand
from django.utils import timezone
class Command(BaseCommand):
help = 'Displays current time'
def handle(self, *args, **kwargs):
time = timezone.now().strftime('%X')
self.stdout.write("It's now %s" % time)
| true | true |
1c35c00d8c1d4ca62b074e558a9ce9247f3099f5 | 3,604 | py | Python | src/site-packages/pyicloud/services/reminders.py | nficano/alexa-find-my-iphone | d4621fd9d891cd820167c0cfdee2dc69cecac3ce | [
"MIT"
] | 9 | 2018-06-10T20:32:10.000Z | 2021-11-21T03:54:41.000Z | pyicloud/services/reminders.py | memkeytm/pyicloud | 46e1253ca5f608035ce862627c69190fc61c5bb2 | [
"MIT"
] | 479 | 2019-07-30T11:47:46.000Z | 2021-08-03T10:43:11.000Z | pyicloud/services/reminders.py | memkeytm/pyicloud | 46e1253ca5f608035ce862627c69190fc61c5bb2 | [
"MIT"
] | 5 | 2018-09-14T18:00:18.000Z | 2020-11-04T07:26:35.000Z | from __future__ import absolute_import
from datetime import datetime, timedelta
import time
import uuid
import json
from tzlocal import get_localzone
class RemindersService(object):
def __init__(self, service_root, session, params):
self.session = session
self.params = params
self._service_root = service_root
self.lists = {}
self.collections = {}
self.refresh()
def refresh(self):
params_reminders = dict(self.params)
params_reminders.update({
'clientVersion': '4.0',
'lang': 'en-us',
'usertz': get_localzone().zone
})
# Open reminders
req = self.session.get(
self._service_root + '/rd/startup',
params=params_reminders
)
startup = req.json()
self.lists = {}
self.collections = {}
for collection in startup['Collections']:
temp = []
self.collections[collection['title']] = {
'guid': collection['guid'],
'ctag': collection['ctag']
}
for reminder in startup['Reminders']:
if reminder['pGuid'] != collection['guid']:
continue
if 'dueDate' in reminder:
if reminder['dueDate']:
due = datetime(
reminder['dueDate'][1],
reminder['dueDate'][2], reminder['dueDate'][3],
reminder['dueDate'][4], reminder['dueDate'][5]
)
else:
due = None
else:
due = None
if reminder['description']:
desc = reminder['description']
else:
desc = ""
temp.append({
"title": reminder['title'],
"desc": desc,
"due": due
})
self.lists[collection['title']] = temp
def post(self, title, description="", collection=None):
pguid = 'tasks'
if collection:
if collection in self.collections:
pguid = self.collections[collection]['guid']
params_reminders = dict(self.params)
params_reminders.update({
'clientVersion': '4.0',
'lang': 'en-us',
'usertz': get_localzone().zone
})
req = self.session.post(
self._service_root + '/rd/reminders/tasks',
data=json.dumps({
"Reminders": {
'title': title,
"description": description,
"pGuid": pguid,
"etag": None,
"order": None,
"priority": 0,
"recurrence": None,
"alarms": [],
"startDate": None,
"startDateTz": None,
"startDateIsAllDay": False,
"completedDate": None,
"dueDate": None,
"dueDateIsAllDay": False,
"lastModifiedDate": None,
"createdDate": None,
"isFamily": None,
"createdDateExtended": int(time.time()*1000),
"guid": str(uuid.uuid4())
},
"ClientState": {"Collections": list(self.collections.values())}
}),
params=params_reminders)
return req.ok
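# Hypothetical usage sketch via pyicloud (names illustrative, not from this module):
#   api = PyiCloudService("user@example.com", "password")
#   api.reminders.post("Buy milk", description="2%", collection="Groceries")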
| 32.468468 | 79 | 0.450333 | from __future__ import absolute_import
from datetime import datetime, timedelta
import time
import uuid
import json
from tzlocal import get_localzone
class RemindersService(object):
def __init__(self, service_root, session, params):
self.session = session
self.params = params
self._service_root = service_root
self.lists = {}
self.collections = {}
self.refresh()
def refresh(self):
params_reminders = dict(self.params)
params_reminders.update({
'clientVersion': '4.0',
'lang': 'en-us',
'usertz': get_localzone().zone
})
req = self.session.get(
self._service_root + '/rd/startup',
params=params_reminders
)
startup = req.json()
self.lists = {}
self.collections = {}
for collection in startup['Collections']:
temp = []
self.collections[collection['title']] = {
'guid': collection['guid'],
'ctag': collection['ctag']
}
for reminder in startup['Reminders']:
if reminder['pGuid'] != collection['guid']:
continue
if 'dueDate' in reminder:
if reminder['dueDate']:
due = datetime(
reminder['dueDate'][1],
reminder['dueDate'][2], reminder['dueDate'][3],
reminder['dueDate'][4], reminder['dueDate'][5]
)
else:
due = None
else:
due = None
if reminder['description']:
desc = reminder['description']
else:
desc = ""
temp.append({
"title": reminder['title'],
"desc": desc,
"due": due
})
self.lists[collection['title']] = temp
def post(self, title, description="", collection=None):
pguid = 'tasks'
if collection:
if collection in self.collections:
pguid = self.collections[collection]['guid']
params_reminders = dict(self.params)
params_reminders.update({
'clientVersion': '4.0',
'lang': 'en-us',
'usertz': get_localzone().zone
})
req = self.session.post(
self._service_root + '/rd/reminders/tasks',
data=json.dumps({
"Reminders": {
'title': title,
"description": description,
"pGuid": pguid,
"etag": None,
"order": None,
"priority": 0,
"recurrence": None,
"alarms": [],
"startDate": None,
"startDateTz": None,
"startDateIsAllDay": False,
"completedDate": None,
"dueDate": None,
"dueDateIsAllDay": False,
"lastModifiedDate": None,
"createdDate": None,
"isFamily": None,
"createdDateExtended": int(time.time()*1000),
"guid": str(uuid.uuid4())
},
"ClientState": {"Collections": list(self.collections.values())}
}),
params=params_reminders)
return req.ok
| true | true |
1c35c01a4e5d4244e1a3f8834e76867cd11b8334 | 771 | py | Python | classification/rebalancing.py | GT-RIPL/UNO-IC | 6a95f2c6bc52ad80bfb1da53fd046a3d4db310d0 | [
"MIT"
] | 24 | 2020-11-11T03:49:50.000Z | 2022-03-21T04:23:32.000Z | classification/rebalancing.py | GT-RIPL/UNO-IC | 6a95f2c6bc52ad80bfb1da53fd046a3d4db310d0 | [
"MIT"
] | 1 | 2021-07-15T02:46:34.000Z | 2021-07-15T02:46:34.000Z | classification/rebalancing.py | GT-RIPL/UNO-IC | 6a95f2c6bc52ad80bfb1da53fd046a3d4db310d0 | [
"MIT"
] | 2 | 2021-02-04T01:28:19.000Z | 2021-02-25T09:20:27.000Z | import torch
def prior_recbalancing(logit,beta,s_prior,t_prior=None):
# logit (b,c,h,w): pre-softmax network output
# beta (1,): user controlled hyperparameter
# s_prior (1,c): source (training) data prior
# t_prior (1,c): target (test) data prior (most likely uniform)
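    # Usage sketch (assumption: s_prior is passed as a 1-D tensor of per-class
    # training frequencies, which unsqueeze(0) lifts to shape (1, c)):
    #   out = prior_recbalancing(logit, beta=0.5, s_prior=train_class_freqs)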
prob = torch.nn.Softmax(dim=1)(logit)
inv_prior = 1/s_prior
inv_prior[inv_prior == float("inf")] = 0
inv_prior = inv_prior.unsqueeze(0).float()
if t_prior is None:
prob_r = prob*inv_prior
else:
prob_r = prob*inv_prior*t_prior
    prob_r = prob_r/prob_r.sum(1).unsqueeze(1) # normalize to make valid prob
    outputs = prob**(1-beta) * prob_r**beta
    outputs = outputs/outputs.sum(1).unsqueeze(1) # normalize to make valid prob
return outputs | 32.125 | 79 | 0.675746 | import torch
def prior_recbalancing(logit,beta,s_prior,t_prior=None):
prob = torch.nn.Softmax(dim=1)(logit)
inv_prior = 1/s_prior
inv_prior[inv_prior == float("inf")] = 0
inv_prior = inv_prior.unsqueeze(0).float()
if t_prior is None:
prob_r = prob*inv_prior
else:
prob_r = prob*inv_prior*t_prior
prob_r = prob_r/prob_r.sum(1).unsqueeze(1)
outputs = prob**(1-beta) * prob_r**beta
outputs = outputs/outputs.sum(1).unsqueeze(1)
return outputs | true | true |
1c35c09ad7a1f0eedac5ed226e72d2ede6b782d1 | 392 | py | Python | blog/migrations/0004_auto_20220105_0959.py | ns377792/Blog-in-django | 7bebbf7ce364f76a609fbe7c2815eacf2c47978e | [
"MIT"
] | null | null | null | blog/migrations/0004_auto_20220105_0959.py | ns377792/Blog-in-django | 7bebbf7ce364f76a609fbe7c2815eacf2c47978e | [
"MIT"
] | null | null | null | blog/migrations/0004_auto_20220105_0959.py | ns377792/Blog-in-django | 7bebbf7ce364f76a609fbe7c2815eacf2c47978e | [
"MIT"
] | 1 | 2022-01-16T09:14:55.000Z | 2022-01-16T09:14:55.000Z | # Generated by Django 2.2.12 on 2022-01-05 04:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_auto_20220105_0307'),
]
operations = [
migrations.AlterField(
model_name='blogpost',
name='description',
field=models.CharField(max_length=303),
),
]
| 20.631579 | 51 | 0.604592 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_auto_20220105_0307'),
]
operations = [
migrations.AlterField(
model_name='blogpost',
name='description',
field=models.CharField(max_length=303),
),
]
| true | true |
1c35c09ca8a8d96ab8bd36dfc9356abab5e3e5ce | 148 | py | Python | Python/Math/Power - Mod Power/solution.py | oleg-cherednik/hackerrank | a76580e300ad7af248ad7c7d6839777e554cc379 | [
"Apache-2.0"
] | 7 | 2020-04-02T16:18:46.000Z | 2021-02-12T14:06:44.000Z | Python/Math/Power - Mod Power/solution.py | oleg-cherednik/HackerRank | a76580e300ad7af248ad7c7d6839777e554cc379 | [
"Apache-2.0"
] | null | null | null | Python/Math/Power - Mod Power/solution.py | oleg-cherednik/HackerRank | a76580e300ad7af248ad7c7d6839777e554cc379 | [
"Apache-2.0"
] | 11 | 2020-05-06T08:28:43.000Z | 2021-12-08T17:25:45.000Z | #!/bin/python3
if __name__ == '__main__':
a = int(input())
b = int(input())
m = int(input())
print(a ** b)
print(pow(a, b, m))
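# pow(a, b, m) uses fast modular exponentiation, so it stays cheap even when
# a ** b would be a huge number.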
| 16.444444 | 26 | 0.493243 |
if __name__ == '__main__':
a = int(input())
b = int(input())
m = int(input())
print(a ** b)
print(pow(a, b, m))
| true | true |
1c35c11409bf4f95b165e08bfd0e51c3cf1849fd | 885 | py | Python | moto/glacier/urls.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 5,460 | 2015-01-01T01:11:17.000Z | 2022-03-31T23:45:38.000Z | moto/glacier/urls.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 4,475 | 2015-01-05T19:37:30.000Z | 2022-03-31T13:55:12.000Z | moto/glacier/urls.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 1,831 | 2015-01-14T00:00:44.000Z | 2022-03-31T20:30:04.000Z | from .responses import GlacierResponse
url_bases = [r"https?://glacier\.(.+)\.amazonaws.com"]
response = GlacierResponse()
url_paths = {
"{0}/(?P<account_number>.+)/vaults$": response.all_vault_response,
"{0}/(?P<account_number>.+)/vaults/(?P<vault_name>[^/]+)$": response.vault_response,
"{0}/(?P<account_number>.+)/vaults/(?P<vault_name>.+)/archives$": response.vault_archive_response,
"{0}/(?P<account_number>.+)/vaults/(?P<vault_name>.+)/archives/(?P<archive_id>.+)$": response.vault_archive_individual_response,
"{0}/(?P<account_number>.+)/vaults/(?P<vault_name>.+)/jobs$": response.vault_jobs_response,
"{0}/(?P<account_number>.+)/vaults/(?P<vault_name>.+)/jobs/(?P<job_id>[^/.]+)$": response.vault_jobs_individual_response,
"{0}/(?P<account_number>.+)/vaults/(?P<vault_name>.+)/jobs/(?P<job_id>.+)/output$": response.vault_jobs_output_response,
}
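# moto fills the "{0}" placeholder in each key with a url_bases pattern when it
# registers these routes, mapping every Glacier REST path to its handler.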
| 55.3125 | 132 | 0.674576 | from .responses import GlacierResponse
url_bases = [r"https?://glacier\.(.+)\.amazonaws.com"]
response = GlacierResponse()
url_paths = {
"{0}/(?P<account_number>.+)/vaults$": response.all_vault_response,
"{0}/(?P<account_number>.+)/vaults/(?P<vault_name>[^/]+)$": response.vault_response,
"{0}/(?P<account_number>.+)/vaults/(?P<vault_name>.+)/archives$": response.vault_archive_response,
"{0}/(?P<account_number>.+)/vaults/(?P<vault_name>.+)/archives/(?P<archive_id>.+)$": response.vault_archive_individual_response,
"{0}/(?P<account_number>.+)/vaults/(?P<vault_name>.+)/jobs$": response.vault_jobs_response,
"{0}/(?P<account_number>.+)/vaults/(?P<vault_name>.+)/jobs/(?P<job_id>[^/.]+)$": response.vault_jobs_individual_response,
"{0}/(?P<account_number>.+)/vaults/(?P<vault_name>.+)/jobs/(?P<job_id>.+)/output$": response.vault_jobs_output_response,
}
| true | true |
1c35c203c21c962485e649012e8eb15906f2f4ce | 2,157 | py | Python | contrib/examples/sensors/sample_polling_sensor.py | UbuntuEvangelist/st2 | 36af04f2caa03b396fb8ab00fd6d700e827fda8d | [
"Apache-2.0"
] | 1 | 2020-11-21T10:11:25.000Z | 2020-11-21T10:11:25.000Z | contrib/examples/sensors/sample_polling_sensor.py | UbuntuEvangelist/st2 | 36af04f2caa03b396fb8ab00fd6d700e827fda8d | [
"Apache-2.0"
] | 1 | 2015-06-08T15:27:11.000Z | 2015-06-08T15:27:11.000Z | contrib/examples/sensors/sample_polling_sensor.py | UbuntuEvangelist/st2 | 36af04f2caa03b396fb8ab00fd6d700e827fda8d | [
"Apache-2.0"
] | 13 | 2017-01-12T11:07:20.000Z | 2019-04-19T09:55:49.000Z | from st2reactor.sensor.base import PollingSensor
class SimplePollingSensor(PollingSensor):
"""
* self._sensor_service
- provides utilities like
get_logger() for writing to logs.
dispatch() for dispatching triggers into the system.
* self._config
- contains configuration that was specified as
config.yaml in the pack.
* self._poll_interval
- indicates the interval between two successive poll() calls.
"""
def setup(self):
# Setup stuff goes here. For example, you might establish connections
# to external system once and reuse it. This is called only once by the system.
pass
def poll(self):
# This is where the crux of the sensor work goes.
# This is called every self._poll_interval.
# For example, let's assume you want to query ec2 and get
# health information about your instances:
# some_data = aws_client.get('')
# payload = self._to_payload(some_data)
# # _to_triggers is something you'd write to convert the data format you have
# # into a standard python dictionary. This should follow the payload schema
# # registered for the trigger.
# self._sensor_service.dispatch(trigger, payload)
# # You can refer to the trigger as dict
# # { "name": ${trigger_name}, "pack": ${trigger_pack} }
# # or just simply by reference as string.
# # i.e. dispatch(${trigger_pack}.${trigger_name}, payload)
# # E.g.: dispatch('examples.foo_sensor', {'k1': 'stuff', 'k2': 'foo'})
pass
def cleanup(self):
# This is called when the st2 system goes down. You can perform cleanup operations like
# closing the connections to external system here.
pass
def add_trigger(self, trigger):
# This method is called when trigger is created
pass
def update_trigger(self, trigger):
# This method is called when trigger is updated
pass
def remove_trigger(self, trigger):
# This method is called when trigger is deleted
pass
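# A minimal poll() body might look like this (hypothetical trigger and payload,
# not part of this example pack):
#     payload = {"k1": "stuff", "k2": "foo"}
#     self._sensor_service.dispatch(trigger="examples.foo_sensor", payload=payload)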
| 38.517857 | 95 | 0.635605 | from st2reactor.sensor.base import PollingSensor
class SimplePollingSensor(PollingSensor):
def setup(self):
pass
def poll(self):
# health information about your instances:
# some_data = aws_client.get('')
# payload = self._to_payload(some_data)
# # _to_triggers is something you'd write to convert the data format you have
| true | true |
1c35c2188225f1996dd0aacc01f3551ffbf9e18b | 20,878 | py | Python | tests/cli/test_init_sqlite.py | lfpll/great_expectations | f61fa7c2e6e813cd5ff84ab7403e05271cada257 | [
"Apache-2.0"
] | 1 | 2020-04-10T18:07:58.000Z | 2020-04-10T18:07:58.000Z | tests/cli/test_init_sqlite.py | lfpll/great_expectations | f61fa7c2e6e813cd5ff84ab7403e05271cada257 | [
"Apache-2.0"
] | null | null | null | tests/cli/test_init_sqlite.py | lfpll/great_expectations | f61fa7c2e6e813cd5ff84ab7403e05271cada257 | [
"Apache-2.0"
] | null | null | null | import os
import re
import shutil
import pytest
from click.testing import CliRunner
from sqlalchemy import create_engine
from great_expectations import DataContext
from great_expectations.cli import cli
from great_expectations.data_context.util import file_relative_path
from great_expectations.util import gen_directory_tree_str
from tests.cli.test_cli import yaml
from tests.cli.test_datasource_sqlite import _add_datasource_and_credentials_to_context
from tests.cli.test_init_pandas import _delete_and_recreate_dir
from tests.cli.utils import assert_no_logging_messages_or_tracebacks
try:
from unittest import mock
except ImportError:
import mock
@pytest.fixture
def titanic_sqlite_db_file(tmp_path_factory):
from sqlalchemy import create_engine
temp_dir = str(tmp_path_factory.mktemp("foo_path"))
fixture_db_path = file_relative_path(__file__, "../test_sets/titanic.db")
db_path = os.path.join(temp_dir, "titanic.db")
shutil.copy(fixture_db_path, db_path)
engine = create_engine("sqlite:///{}".format(db_path))
assert engine.execute("select count(*) from titanic").fetchall()[0] == (1313,)
return db_path
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_cli_init_on_new_project(
mock_webbrowser, caplog, tmp_path_factory, titanic_sqlite_db_file
):
project_dir = str(tmp_path_factory.mktemp("test_cli_init_diff"))
ge_dir = os.path.join(project_dir, "great_expectations")
database_path = os.path.join(project_dir, "titanic.db")
shutil.copy(titanic_sqlite_db_file, database_path)
engine = create_engine("sqlite:///{}".format(database_path))
runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["init", "-d", project_dir],
        input="Y\n2\n6\ntitanic\n{}\n1\nwarning\n\n".format(engine.url),
        catch_exceptions=False,
    )
stdout = result.output
assert len(stdout) < 3000, "CLI output is unreasonably long."
assert "Always know what to expect from your data" in stdout
assert "What data would you like Great Expectations to connect to" in stdout
assert "Which database backend are you using" in stdout
assert "Give your new data source a short name" in stdout
assert "What is the url/connection string for the sqlalchemy connection" in stdout
assert "Attempting to connect to your database." in stdout
assert "Great Expectations connected to your database" in stdout
assert "Which table would you like to use?" in stdout
assert "Name the new expectation suite [main.titanic.warning]" in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations about them"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "Building" in stdout
assert "Data Docs" in stdout
assert "A new Expectation suite 'warning' was added to your project" in stdout
assert "Great Expectations is now set up" in stdout
context = DataContext(ge_dir)
assert len(context.list_datasources()) == 1
assert context.list_datasources() == [
{
"class_name": "SqlAlchemyDatasource",
"name": "titanic",
"module_name": "great_expectations.datasource",
"credentials": {"url": str(engine.url)},
"data_asset_type": {
"class_name": "SqlAlchemyDataset",
"module_name": "great_expectations.dataset",
},
}
]
first_suite = context.list_expectation_suites()[0]
suite = context.get_expectation_suite(first_suite.expectation_suite_name)
assert len(suite.expectations) == 14
assert os.path.isdir(ge_dir)
config_path = os.path.join(project_dir, "great_expectations/great_expectations.yml")
assert os.path.isfile(config_path)
config = yaml.load(open(config_path, "r"))
data_source_class = config["datasources"]["titanic"]["data_asset_type"][
"class_name"
]
assert data_source_class == "SqlAlchemyDataset"
obs_tree = gen_directory_tree_str(ge_dir)
# Instead of monkey patching datetime, just regex out the time directories
date_safe_obs_tree = re.sub(r"\d*T\d*\.\d*Z", "9999.9999", obs_tree)
# Instead of monkey patching guids, just regex out the guids
guid_safe_obs_tree = re.sub(
r"[a-z0-9]{32}(?=\.(json|html))", "foobarbazguid", date_safe_obs_tree
)
assert (
guid_safe_obs_tree
== """\
great_expectations/
.gitignore
great_expectations.yml
checkpoints/
expectations/
warning.json
notebooks/
pandas/
validation_playground.ipynb
spark/
validation_playground.ipynb
sql/
validation_playground.ipynb
plugins/
custom_data_docs/
renderers/
styles/
data_docs_custom_styles.css
views/
uncommitted/
config_variables.yml
data_docs/
local_site/
index.html
expectations/
warning.html
static/
fonts/
HKGrotesk/
HKGrotesk-Bold.otf
HKGrotesk-BoldItalic.otf
HKGrotesk-Italic.otf
HKGrotesk-Light.otf
HKGrotesk-LightItalic.otf
HKGrotesk-Medium.otf
HKGrotesk-MediumItalic.otf
HKGrotesk-Regular.otf
HKGrotesk-SemiBold.otf
HKGrotesk-SemiBoldItalic.otf
images/
favicon.ico
glossary_scroller.gif
iterative-dev-loop.png
logo-long-vector.svg
logo-long.png
short-logo-vector.svg
short-logo.png
validation_failed_unexpected_values.gif
styles/
data_docs_custom_styles_template.css
data_docs_default_styles.css
validations/
warning/
9999.9999/
foobarbazguid.html
validations/
warning/
9999.9999/
foobarbazguid.json
"""
)
assert_no_logging_messages_or_tracebacks(caplog, result)
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/warning/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_cli_init_on_new_project_extra_whitespace_in_url(
mock_webbrowser, caplog, tmp_path_factory, titanic_sqlite_db_file
):
project_dir = str(tmp_path_factory.mktemp("test_cli_init_diff"))
ge_dir = os.path.join(project_dir, "great_expectations")
database_path = os.path.join(project_dir, "titanic.db")
shutil.copy(titanic_sqlite_db_file, database_path)
engine = create_engine("sqlite:///{}".format(database_path))
engine_url_with_added_whitespace = " " + str(engine.url) + " "
runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["init", "-d", project_dir],
        input="Y\n2\n6\ntitanic\n{}\n1\nwarning\n\n".format(engine_url_with_added_whitespace),
        catch_exceptions=False,
    )
stdout = result.output
assert len(stdout) < 3000, "CLI output is unreasonably long."
assert "Always know what to expect from your data" in stdout
assert "What data would you like Great Expectations to connect to" in stdout
assert "Which database backend are you using" in stdout
assert "Give your new data source a short name" in stdout
assert "What is the url/connection string for the sqlalchemy connection" in stdout
assert "Attempting to connect to your database." in stdout
assert "Great Expectations connected to your database" in stdout
assert "Which table would you like to use?" in stdout
assert "Name the new expectation suite [main.titanic.warning]" in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations about them"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "Building" in stdout
assert "Data Docs" in stdout
assert "A new Expectation suite 'warning' was added to your project" in stdout
assert "Great Expectations is now set up" in stdout
context = DataContext(ge_dir)
assert len(context.list_datasources()) == 1
assert context.list_datasources() == [
{
"class_name": "SqlAlchemyDatasource",
"name": "titanic",
"module_name": "great_expectations.datasource",
"credentials": {"url": str(engine.url)},
"data_asset_type": {
"class_name": "SqlAlchemyDataset",
"module_name": "great_expectations.dataset",
},
}
]
first_suite = context.list_expectation_suites()[0]
suite = context.get_expectation_suite(first_suite.expectation_suite_name)
assert len(suite.expectations) == 14
assert os.path.isdir(ge_dir)
config_path = os.path.join(project_dir, "great_expectations/great_expectations.yml")
assert os.path.isfile(config_path)
config = yaml.load(open(config_path, "r"))
data_source_class = config["datasources"]["titanic"]["data_asset_type"][
"class_name"
]
assert data_source_class == "SqlAlchemyDataset"
assert_no_logging_messages_or_tracebacks(caplog, result)
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/warning/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_no_datasources_should_continue_init_flow_and_add_one(
mock_webbrowser, caplog, initialized_sqlite_project, titanic_sqlite_db_file,
):
project_dir = initialized_sqlite_project
ge_dir = os.path.join(project_dir, DataContext.GE_DIR)
_remove_all_datasources(ge_dir)
os.remove(os.path.join(ge_dir, "expectations", "warning.json"))
context = DataContext(ge_dir)
assert not context.list_expectation_suites()
runner = CliRunner(mix_stderr=False)
url = "sqlite:///{}".format(titanic_sqlite_db_file)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli,
["init", "-d", project_dir],
input="2\n6\nsqlite\nsqlite:///{}\n1\nmy_suite\n\n".format(
titanic_sqlite_db_file
),
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/my_suite/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
assert "Error: invalid input" not in stdout
assert "Always know what to expect from your data" in stdout
assert "What data would you like Great Expectations to connect to" in stdout
assert (
"Next, we will configure database credentials and store them in the `sqlite` section"
in stdout
)
assert "What is the url/connection string for the sqlalchemy connection?" in stdout
assert "Which table would you like to use?" in stdout
assert "Great Expectations connected to your database" in stdout
assert "A new Expectation suite 'my_suite' was added to your project" in stdout
assert "This looks like an existing project that" not in stdout
config = _load_config_file(os.path.join(ge_dir, DataContext.GE_YML))
assert "sqlite" in config["datasources"].keys()
context = DataContext(ge_dir)
assert context.list_datasources() == [
{
"class_name": "SqlAlchemyDatasource",
"name": "sqlite",
"module_name": "great_expectations.datasource",
"credentials": {"url": url},
"data_asset_type": {
"class_name": "SqlAlchemyDataset",
"module_name": "great_expectations.dataset",
},
}
]
assert context.list_expectation_suites()[0].expectation_suite_name == "my_suite"
assert len(context.list_expectation_suites()) == 1
assert_no_logging_messages_or_tracebacks(caplog, result)
def _remove_all_datasources(ge_dir):
config_path = os.path.join(ge_dir, DataContext.GE_YML)
config = _load_config_file(config_path)
config["datasources"] = {}
with open(config_path, "w") as f:
yaml.dump(config, f)
context = DataContext(ge_dir)
assert context.list_datasources() == []
def _load_config_file(config_path):
assert os.path.isfile(config_path), "Config file is missing. Check path"
with open(config_path, "r") as f:
read = f.read()
config = yaml.load(read)
assert isinstance(config, dict)
return config
@pytest.fixture
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def initialized_sqlite_project(
mock_webbrowser, caplog, tmp_path_factory, titanic_sqlite_db_file
):
"""This is an initialized project through the CLI."""
project_dir = str(tmp_path_factory.mktemp("my_rad_project"))
engine = create_engine("sqlite:///{}".format(titanic_sqlite_db_file))
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["init", "-d", project_dir],
input="Y\n2\n5\ntitanic\n{}\n1\nwarning\n\n".format(engine.url),
catch_exceptions=False,
)
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/warning/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
assert_no_logging_messages_or_tracebacks(caplog, result)
context = DataContext(os.path.join(project_dir, DataContext.GE_DIR))
assert isinstance(context, DataContext)
assert len(context.list_datasources()) == 1
assert context.list_datasources() == [
{
"class_name": "SqlAlchemyDatasource",
"name": "titanic",
"module_name": "great_expectations.datasource",
"credentials": {"url": str(engine.url)},
"data_asset_type": {
"class_name": "SqlAlchemyDataset",
"module_name": "great_expectations.dataset",
},
}
]
return project_dir
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_multiple_datasources_exist_do_nothing(
mock_webbrowser,
caplog,
initialized_sqlite_project,
titanic_sqlite_db,
empty_sqlite_db,
):
project_dir = initialized_sqlite_project
ge_dir = os.path.join(project_dir, DataContext.GE_DIR)
context = DataContext(ge_dir)
datasource_name = "wow_a_datasource"
context = _add_datasource_and_credentials_to_context(
context, datasource_name, empty_sqlite_db
)
assert len(context.list_datasources()) == 2
runner = CliRunner(mix_stderr=False)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli, ["init", "-d", project_dir], input="n\n", catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 0
assert "Error: invalid input" not in stdout
assert "Always know what to expect from your data" in stdout
assert "This looks like an existing project that" in stdout
assert "appears complete" in stdout
assert "Would you like to build & view this project's Data Docs" in stdout
assert_no_logging_messages_or_tracebacks(caplog, result)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_datasource_with_existing_suite_offer_to_build_docs_answer_no(
mock_webbrowser, caplog, initialized_sqlite_project,
):
project_dir = initialized_sqlite_project
runner = CliRunner(mix_stderr=False)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli, ["init", "-d", project_dir], input="n\n", catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 0
assert "Error: invalid input" not in stdout
assert "Always know what to expect from your data" in stdout
assert "This looks like an existing project that" in stdout
assert "appears complete" in stdout
assert "Would you like to build & view this project's Data Docs" in stdout
assert_no_logging_messages_or_tracebacks(caplog, result)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_datasource_with_existing_suite_offer_to_build_docs_answer_yes(
mock_webbrowser, caplog, initialized_sqlite_project,
):
project_dir = initialized_sqlite_project
runner = CliRunner(mix_stderr=False)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli, ["init", "-d", project_dir], input="Y\n", catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/index.html".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
assert "Error: invalid input" not in stdout
assert "Always know what to expect from your data" in stdout
assert "This looks like an existing project that" in stdout
assert "appears complete" in stdout
assert "Would you like to build & view this project's Data Docs" in stdout
assert_no_logging_messages_or_tracebacks(caplog, result)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_datasource_with_no_suite_create_one(
mock_webbrowser, caplog, initialized_sqlite_project,
):
project_dir = initialized_sqlite_project
ge_dir = os.path.join(project_dir, DataContext.GE_DIR)
uncommitted_dir = os.path.join(ge_dir, "uncommitted")
# mangle the setup to remove all traces of any suite
expectations_dir = os.path.join(ge_dir, "expectations")
data_docs_dir = os.path.join(uncommitted_dir, "data_docs")
validations_dir = os.path.join(uncommitted_dir, "validations")
_delete_and_recreate_dir(expectations_dir)
_delete_and_recreate_dir(data_docs_dir)
_delete_and_recreate_dir(validations_dir)
context = DataContext(ge_dir)
assert context.list_expectation_suites() == []
runner = CliRunner(mix_stderr=False)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
        result = runner.invoke(
            cli,
            ["init", "-d", project_dir],
            input="1\nsink_me\n\n\n",
            catch_exceptions=False,
        )
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/sink_me/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
assert "Always know what to expect from your data" in stdout
assert "Which table would you like to use?" in stdout
assert "Generating example Expectation Suite..." in stdout
assert "The following Data Docs sites were built" in stdout
assert "Great Expectations is now set up" in stdout
assert "A new Expectation suite 'sink_me' was added to your project" in stdout
assert "Error: invalid input" not in stdout
assert "This looks like an existing project that" not in stdout
assert_no_logging_messages_or_tracebacks(caplog, result)
context = DataContext(ge_dir)
assert len(context.list_expectation_suites()) == 1
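# Note: every runner.invoke(..., input=...) string above answers the CLI's
# interactive prompts in order, one "\n"-terminated answer per prompt.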
| 36.183709 | 101 | 0.670562 | import os
import re
import shutil
import pytest
from click.testing import CliRunner
from sqlalchemy import create_engine
from great_expectations import DataContext
from great_expectations.cli import cli
from great_expectations.data_context.util import file_relative_path
from great_expectations.util import gen_directory_tree_str
from tests.cli.test_cli import yaml
from tests.cli.test_datasource_sqlite import _add_datasource_and_credentials_to_context
from tests.cli.test_init_pandas import _delete_and_recreate_dir
from tests.cli.utils import assert_no_logging_messages_or_tracebacks
try:
from unittest import mock
except ImportError:
import mock
@pytest.fixture
def titanic_sqlite_db_file(tmp_path_factory):
from sqlalchemy import create_engine
temp_dir = str(tmp_path_factory.mktemp("foo_path"))
fixture_db_path = file_relative_path(__file__, "../test_sets/titanic.db")
db_path = os.path.join(temp_dir, "titanic.db")
shutil.copy(fixture_db_path, db_path)
engine = create_engine("sqlite:///{}".format(db_path))
assert engine.execute("select count(*) from titanic").fetchall()[0] == (1313,)
return db_path
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_cli_init_on_new_project(
mock_webbrowser, caplog, tmp_path_factory, titanic_sqlite_db_file
):
project_dir = str(tmp_path_factory.mktemp("test_cli_init_diff"))
ge_dir = os.path.join(project_dir, "great_expectations")
database_path = os.path.join(project_dir, "titanic.db")
shutil.copy(titanic_sqlite_db_file, database_path)
engine = create_engine("sqlite:///{}".format(database_path))
runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["init", "-d", project_dir],
        input="Y\n2\n6\ntitanic\n{}\n1\nwarning\n\n".format(engine.url),
        catch_exceptions=False,
    )
stdout = result.output
assert len(stdout) < 3000, "CLI output is unreasonably long."
assert "Always know what to expect from your data" in stdout
assert "What data would you like Great Expectations to connect to" in stdout
assert "Which database backend are you using" in stdout
assert "Give your new data source a short name" in stdout
assert "What is the url/connection string for the sqlalchemy connection" in stdout
assert "Attempting to connect to your database." in stdout
assert "Great Expectations connected to your database" in stdout
assert "Which table would you like to use?" in stdout
assert "Name the new expectation suite [main.titanic.warning]" in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations about them"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "Building" in stdout
assert "Data Docs" in stdout
assert "A new Expectation suite 'warning' was added to your project" in stdout
assert "Great Expectations is now set up" in stdout
context = DataContext(ge_dir)
assert len(context.list_datasources()) == 1
assert context.list_datasources() == [
{
"class_name": "SqlAlchemyDatasource",
"name": "titanic",
"module_name": "great_expectations.datasource",
"credentials": {"url": str(engine.url)},
"data_asset_type": {
"class_name": "SqlAlchemyDataset",
"module_name": "great_expectations.dataset",
},
}
]
first_suite = context.list_expectation_suites()[0]
suite = context.get_expectation_suite(first_suite.expectation_suite_name)
assert len(suite.expectations) == 14
assert os.path.isdir(ge_dir)
config_path = os.path.join(project_dir, "great_expectations/great_expectations.yml")
assert os.path.isfile(config_path)
config = yaml.load(open(config_path, "r"))
data_source_class = config["datasources"]["titanic"]["data_asset_type"][
"class_name"
]
assert data_source_class == "SqlAlchemyDataset"
obs_tree = gen_directory_tree_str(ge_dir)
date_safe_obs_tree = re.sub(r"\d*T\d*\.\d*Z", "9999.9999", obs_tree)
guid_safe_obs_tree = re.sub(
r"[a-z0-9]{32}(?=\.(json|html))", "foobarbazguid", date_safe_obs_tree
)
assert (
guid_safe_obs_tree
== """\
great_expectations/
.gitignore
great_expectations.yml
checkpoints/
expectations/
warning.json
notebooks/
pandas/
validation_playground.ipynb
spark/
validation_playground.ipynb
sql/
validation_playground.ipynb
plugins/
custom_data_docs/
renderers/
styles/
data_docs_custom_styles.css
views/
uncommitted/
config_variables.yml
data_docs/
local_site/
index.html
expectations/
warning.html
static/
fonts/
HKGrotesk/
HKGrotesk-Bold.otf
HKGrotesk-BoldItalic.otf
HKGrotesk-Italic.otf
HKGrotesk-Light.otf
HKGrotesk-LightItalic.otf
HKGrotesk-Medium.otf
HKGrotesk-MediumItalic.otf
HKGrotesk-Regular.otf
HKGrotesk-SemiBold.otf
HKGrotesk-SemiBoldItalic.otf
images/
favicon.ico
glossary_scroller.gif
iterative-dev-loop.png
logo-long-vector.svg
logo-long.png
short-logo-vector.svg
short-logo.png
validation_failed_unexpected_values.gif
styles/
data_docs_custom_styles_template.css
data_docs_default_styles.css
validations/
warning/
9999.9999/
foobarbazguid.html
validations/
warning/
9999.9999/
foobarbazguid.json
"""
)
assert_no_logging_messages_or_tracebacks(caplog, result)
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/warning/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_cli_init_on_new_project_extra_whitespace_in_url(
mock_webbrowser, caplog, tmp_path_factory, titanic_sqlite_db_file
):
project_dir = str(tmp_path_factory.mktemp("test_cli_init_diff"))
ge_dir = os.path.join(project_dir, "great_expectations")
database_path = os.path.join(project_dir, "titanic.db")
shutil.copy(titanic_sqlite_db_file, database_path)
engine = create_engine("sqlite:///{}".format(database_path))
engine_url_with_added_whitespace = " " + str(engine.url) + " "
runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["init", "-d", project_dir],
        input="Y\n2\n6\ntitanic\n{}\n1\nwarning\n\n".format(engine_url_with_added_whitespace),
        catch_exceptions=False,
    )
stdout = result.output
assert len(stdout) < 3000, "CLI output is unreasonably long."
assert "Always know what to expect from your data" in stdout
assert "What data would you like Great Expectations to connect to" in stdout
assert "Which database backend are you using" in stdout
assert "Give your new data source a short name" in stdout
assert "What is the url/connection string for the sqlalchemy connection" in stdout
assert "Attempting to connect to your database." in stdout
assert "Great Expectations connected to your database" in stdout
assert "Which table would you like to use?" in stdout
assert "Name the new expectation suite [main.titanic.warning]" in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations about them"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "Building" in stdout
assert "Data Docs" in stdout
assert "A new Expectation suite 'warning' was added to your project" in stdout
assert "Great Expectations is now set up" in stdout
context = DataContext(ge_dir)
assert len(context.list_datasources()) == 1
assert context.list_datasources() == [
{
"class_name": "SqlAlchemyDatasource",
"name": "titanic",
"module_name": "great_expectations.datasource",
"credentials": {"url": str(engine.url)},
"data_asset_type": {
"class_name": "SqlAlchemyDataset",
"module_name": "great_expectations.dataset",
},
}
]
first_suite = context.list_expectation_suites()[0]
suite = context.get_expectation_suite(first_suite.expectation_suite_name)
assert len(suite.expectations) == 14
assert os.path.isdir(ge_dir)
config_path = os.path.join(project_dir, "great_expectations/great_expectations.yml")
assert os.path.isfile(config_path)
config = yaml.load(open(config_path, "r"))
data_source_class = config["datasources"]["titanic"]["data_asset_type"][
"class_name"
]
assert data_source_class == "SqlAlchemyDataset"
assert_no_logging_messages_or_tracebacks(caplog, result)
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/warning/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_no_datasources_should_continue_init_flow_and_add_one(
mock_webbrowser, caplog, initialized_sqlite_project, titanic_sqlite_db_file,
):
project_dir = initialized_sqlite_project
ge_dir = os.path.join(project_dir, DataContext.GE_DIR)
_remove_all_datasources(ge_dir)
os.remove(os.path.join(ge_dir, "expectations", "warning.json"))
context = DataContext(ge_dir)
assert not context.list_expectation_suites()
runner = CliRunner(mix_stderr=False)
url = "sqlite:///{}".format(titanic_sqlite_db_file)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli,
["init", "-d", project_dir],
input="2\n6\nsqlite\nsqlite:///{}\n1\nmy_suite\n\n".format(
titanic_sqlite_db_file
),
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/my_suite/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
assert "Error: invalid input" not in stdout
assert "Always know what to expect from your data" in stdout
assert "What data would you like Great Expectations to connect to" in stdout
assert (
"Next, we will configure database credentials and store them in the `sqlite` section"
in stdout
)
assert "What is the url/connection string for the sqlalchemy connection?" in stdout
assert "Which table would you like to use?" in stdout
assert "Great Expectations connected to your database" in stdout
assert "A new Expectation suite 'my_suite' was added to your project" in stdout
assert "This looks like an existing project that" not in stdout
config = _load_config_file(os.path.join(ge_dir, DataContext.GE_YML))
assert "sqlite" in config["datasources"].keys()
context = DataContext(ge_dir)
assert context.list_datasources() == [
{
"class_name": "SqlAlchemyDatasource",
"name": "sqlite",
"module_name": "great_expectations.datasource",
"credentials": {"url": url},
"data_asset_type": {
"class_name": "SqlAlchemyDataset",
"module_name": "great_expectations.dataset",
},
}
]
assert context.list_expectation_suites()[0].expectation_suite_name == "my_suite"
assert len(context.list_expectation_suites()) == 1
assert_no_logging_messages_or_tracebacks(caplog, result)
def _remove_all_datasources(ge_dir):
config_path = os.path.join(ge_dir, DataContext.GE_YML)
config = _load_config_file(config_path)
config["datasources"] = {}
with open(config_path, "w") as f:
yaml.dump(config, f)
context = DataContext(ge_dir)
assert context.list_datasources() == []
def _load_config_file(config_path):
assert os.path.isfile(config_path), "Config file is missing. Check path"
with open(config_path, "r") as f:
read = f.read()
config = yaml.load(read)
assert isinstance(config, dict)
return config
@pytest.fixture
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def initialized_sqlite_project(
mock_webbrowser, caplog, tmp_path_factory, titanic_sqlite_db_file
):
project_dir = str(tmp_path_factory.mktemp("my_rad_project"))
engine = create_engine("sqlite:///{}".format(titanic_sqlite_db_file))
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["init", "-d", project_dir],
input="Y\n2\n5\ntitanic\n{}\n1\nwarning\n\n".format(engine.url),
catch_exceptions=False,
)
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/warning/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
assert_no_logging_messages_or_tracebacks(caplog, result)
context = DataContext(os.path.join(project_dir, DataContext.GE_DIR))
assert isinstance(context, DataContext)
assert len(context.list_datasources()) == 1
assert context.list_datasources() == [
{
"class_name": "SqlAlchemyDatasource",
"name": "titanic",
"module_name": "great_expectations.datasource",
"credentials": {"url": str(engine.url)},
"data_asset_type": {
"class_name": "SqlAlchemyDataset",
"module_name": "great_expectations.dataset",
},
}
]
return project_dir
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_multiple_datasources_exist_do_nothing(
mock_webbrowser,
caplog,
initialized_sqlite_project,
titanic_sqlite_db,
empty_sqlite_db,
):
project_dir = initialized_sqlite_project
ge_dir = os.path.join(project_dir, DataContext.GE_DIR)
context = DataContext(ge_dir)
datasource_name = "wow_a_datasource"
context = _add_datasource_and_credentials_to_context(
context, datasource_name, empty_sqlite_db
)
assert len(context.list_datasources()) == 2
runner = CliRunner(mix_stderr=False)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli, ["init", "-d", project_dir], input="n\n", catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 0
assert "Error: invalid input" not in stdout
assert "Always know what to expect from your data" in stdout
assert "This looks like an existing project that" in stdout
assert "appears complete" in stdout
assert "Would you like to build & view this project's Data Docs" in stdout
assert_no_logging_messages_or_tracebacks(caplog, result)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_datasource_with_existing_suite_offer_to_build_docs_answer_no(
mock_webbrowser, caplog, initialized_sqlite_project,
):
project_dir = initialized_sqlite_project
runner = CliRunner(mix_stderr=False)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli, ["init", "-d", project_dir], input="n\n", catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 0
assert "Error: invalid input" not in stdout
assert "Always know what to expect from your data" in stdout
assert "This looks like an existing project that" in stdout
assert "appears complete" in stdout
assert "Would you like to build & view this project's Data Docs" in stdout
assert_no_logging_messages_or_tracebacks(caplog, result)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_datasource_with_existing_suite_offer_to_build_docs_answer_yes(
mock_webbrowser, caplog, initialized_sqlite_project,
):
project_dir = initialized_sqlite_project
runner = CliRunner(mix_stderr=False)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli, ["init", "-d", project_dir], input="Y\n", catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/index.html".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
assert "Error: invalid input" not in stdout
assert "Always know what to expect from your data" in stdout
assert "This looks like an existing project that" in stdout
assert "appears complete" in stdout
assert "Would you like to build & view this project's Data Docs" in stdout
assert_no_logging_messages_or_tracebacks(caplog, result)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_datasource_with_no_suite_create_one(
mock_webbrowser, caplog, initialized_sqlite_project,
):
project_dir = initialized_sqlite_project
ge_dir = os.path.join(project_dir, DataContext.GE_DIR)
uncommitted_dir = os.path.join(ge_dir, "uncommitted")
# mangle the setup to remove all traces of any suite
expectations_dir = os.path.join(ge_dir, "expectations")
data_docs_dir = os.path.join(uncommitted_dir, "data_docs")
validations_dir = os.path.join(uncommitted_dir, "validations")
_delete_and_recreate_dir(expectations_dir)
_delete_and_recreate_dir(data_docs_dir)
_delete_and_recreate_dir(validations_dir)
context = DataContext(ge_dir)
assert context.list_expectation_suites() == []
runner = CliRunner(mix_stderr=False)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
        result = runner.invoke(
            cli,
            ["init", "-d", project_dir],
            input="1\nsink_me\n\n\n",
            catch_exceptions=False,
        )
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/sink_me/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
assert "Always know what to expect from your data" in stdout
assert "Which table would you like to use?" in stdout
assert "Generating example Expectation Suite..." in stdout
assert "The following Data Docs sites were built" in stdout
assert "Great Expectations is now set up" in stdout
assert "A new Expectation suite 'sink_me' was added to your project" in stdout
assert "Error: invalid input" not in stdout
assert "This looks like an existing project that" not in stdout
assert_no_logging_messages_or_tracebacks(caplog, result)
context = DataContext(ge_dir)
assert len(context.list_expectation_suites()) == 1
| true | true |
1c35c23d2f75b4199e2713d59f34db2e7f69c57a | 3,951 | py | Python | desktop/core/ext-py/dnspython-1.15.0/dns/rdtypes/ANY/CERT.py | kokosing/hue | 2307f5379a35aae9be871e836432e6f45138b3d9 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/dnspython-1.15.0/dns/rdtypes/ANY/CERT.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/dnspython-1.15.0/dns/rdtypes/ANY/CERT.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import base64
import dns.exception
import dns.dnssec
import dns.rdata
import dns.tokenizer
_ctype_by_value = {
1: 'PKIX',
2: 'SPKI',
3: 'PGP',
253: 'URI',
254: 'OID',
}
_ctype_by_name = {
'PKIX': 1,
'SPKI': 2,
'PGP': 3,
'URI': 253,
'OID': 254,
}
def _ctype_from_text(what):
v = _ctype_by_name.get(what)
if v is not None:
return v
return int(what)
def _ctype_to_text(what):
v = _ctype_by_value.get(what)
if v is not None:
return v
return str(what)
class CERT(dns.rdata.Rdata):
"""CERT record
@ivar certificate_type: certificate type
@type certificate_type: int
@ivar key_tag: key tag
@type key_tag: int
@ivar algorithm: algorithm
@type algorithm: int
@ivar certificate: the certificate or CRL
@type certificate: string
@see: RFC 2538"""
__slots__ = ['certificate_type', 'key_tag', 'algorithm', 'certificate']
def __init__(self, rdclass, rdtype, certificate_type, key_tag, algorithm,
certificate):
super(CERT, self).__init__(rdclass, rdtype)
self.certificate_type = certificate_type
self.key_tag = key_tag
self.algorithm = algorithm
self.certificate = certificate
def to_text(self, origin=None, relativize=True, **kw):
certificate_type = _ctype_to_text(self.certificate_type)
return "%s %d %s %s" % (certificate_type, self.key_tag,
dns.dnssec.algorithm_to_text(self.algorithm),
dns.rdata._base64ify(self.certificate))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
certificate_type = _ctype_from_text(tok.get_string())
key_tag = tok.get_uint16()
algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
if algorithm < 0 or algorithm > 255:
raise dns.exception.SyntaxError("bad algorithm type")
chunks = []
while 1:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
chunks.append(t.value.encode())
b64 = b''.join(chunks)
certificate = base64.b64decode(b64)
return cls(rdclass, rdtype, certificate_type, key_tag,
algorithm, certificate)
def to_wire(self, file, compress=None, origin=None):
prefix = struct.pack("!HHB", self.certificate_type, self.key_tag,
self.algorithm)
file.write(prefix)
file.write(self.certificate)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
prefix = wire[current: current + 5].unwrap()
current += 5
rdlen -= 5
if rdlen < 0:
raise dns.exception.FormError
(certificate_type, key_tag, algorithm) = struct.unpack("!HHB", prefix)
certificate = wire[current: current + rdlen].unwrap()
return cls(rdclass, rdtype, certificate_type, key_tag, algorithm,
certificate)
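# Usage sketch (added; assumes dnspython 1.15 is installed): build a CERT
# rdata from its zone-file text form and round-trip it back to text. The
# record values below are made up for illustration.
# import dns.rdata, dns.rdataclass, dns.rdatatype
# rdata = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CERT,
#                             'PKIX 12345 RSASHA256 KR1L0GbocaIB')
# print(rdata.to_text())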
# File: tests/test_pytest_mypy.py | Repo: bochecha/pytest-mypy | License: MIT
def test_mypy_success(testdir):
testdir.makepyfile('''
def myfunc(x: int) -> int:
return x * 2
def test_myfunc():
assert myfunc(12)
''')
result = testdir.runpytest('--mypy', '-v')
assert result.ret == 0
def test_mypy_error(testdir):
testdir.makepyfile('''
def myfunc(x: int) -> str:
return x * 2
def test_myfunc():
assert myfunc(12)
''')
result = testdir.runpytest('--mypy', '-v')
result.stdout.fnmatch_lines([
'test_mypy_error.py:2: error: Incompatible return value*',
])
assert result.ret != 0
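# Note (added): the `testdir` fixture used above comes from pytest's bundled
# `pytester` plugin, which must be enabled explicitly, e.g. in conftest.py:
# pytest_plugins = "pytester"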
# File: clients/bbg/blpapi-python/examples/unittests/market-data-notifier/src/token_generator.py | Repo: vegabook/dstreams | License: MIT
"""Sample token generator for testing."""
import blpapi
TOKEN_SUCCESS = blpapi.Name("TokenGenerationSuccess")
TOKEN_FAILURE = blpapi.Name("TokenGenerationFailure")
TOKEN = blpapi.Name("token")
# pylint: disable=too-few-public-methods
class TokenGenerator():
"""Generates a token for later authorization."""
def __init__(self, session):
self._session = session
def generate(self, event_queue=None):
"""Generate a token."""
token = None
if event_queue is None:
event_queue = blpapi.EventQueue()
self._session.generateToken(blpapi.CorrelationId(), event_queue)
event = event_queue.nextEvent()
if event.eventType() == blpapi.Event.REQUEST_STATUS or \
event.eventType() == blpapi.Event.TOKEN_STATUS:
for msg in event:
if msg.messageType() == TOKEN_SUCCESS:
token = msg.getElementAsString(TOKEN)
return token
if msg.messageType() == TOKEN_FAILURE:
return None
return None
__copyright__ = """
Copyright 2020. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
# File: tools/svg_to_pgn.py | Repo: Leviathan321/ChessDiagramRecognition | License: MIT
################################################################################
# Convert an SVG image to PNG format
################################################################################
import cairosvg
################################################################################
################################################################################
def main():
print("Insert input file path:")
input_url: str = input()
print("Insert output file path:")
output_url: str = input()
cairosvg.svg2png(url=input_url, write_to=output_url)
################################################################################
################################################################################
main()
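# Non-interactive variant (added; file names are placeholders):
# cairosvg.svg2png(url="board.svg", write_to="board.png")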
# File: 1. FUNDAMENTOS/3. PROGRAMACION ESTADISTICA CON PYTHON/3. my project/Part 1/heart.py | Repo: alvarochiqui/edem | License: Apache-2.0
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 16 18:17:34 2021
@author: alvar
"""
#We import all the libraries needed for the project
import os #operating system
import pandas as pd #to manage dataframes
import numpy as np #numeric python, vectors
import matplotlib.pyplot as plt #statistical plots
#We point to the folder holding our csv and load it under the name "heart"
os.chdir(r'C:\Users\alvar\Desktop\EDEM\2. GITHUB\edem\Estadistica Python\my project')
heart = pd.read_csv ('heart.csv', sep=',')
os.getcwd()
#Next we check that it loads correctly
print(heart)
#We pull summary statistics such as the mean, standard deviation and quartiles
print(heart.head(4))
#We run describe on the identified nominal variables
print(heart.Sex.describe())
print(heart.ChestPainType.describe())
print(heart.RestingECG.describe())
print(heart.ExerciseAngina.describe())
print(heart.ST_Slope.describe())
#We run describe on the identified quantitative variables
Age = heart.Age.describe()
print(heart.Age.describe())
m_age=Age[1]
sd_age=Age[2]
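#Note (added): describe() returns a labeled Series, so Age['mean'] and
#Age['std'] are equivalent to the positional Age[1] and Age[2] used here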
print(m_age)
print(heart.RestingBP.describe())
Cholesterol = heart.Cholesterol.describe()
print(heart.Cholesterol.describe())
m_cho=Cholesterol[1]
sd_cho=Cholesterol[2]
print(heart.FastingBS.describe())
print(heart.MaxHR.describe())
print(heart.Oldpeak.describe())
print(heart.HeartDisease.describe())
#TABLES
#We create tables for the nominal variables
#Nominal variable: Sex
mytablesex = heart.groupby(['Sex']).size()
print(mytablesex)
n=mytablesex.sum()
#We compute the table as percentages
mytablesex2 = (mytablesex/n)*100
print(mytablesex2)
#We round the percentages
mytablesex3 = round(mytablesex2,1)
print(mytablesex3)
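#Shortcut (added): value_counts(normalize=True) collapses the
#groupby/size -> percentage -> round pipeline above into one call
#(the row order may differ, since it sorts by frequency)
print(round(heart['Sex'].value_counts(normalize=True) * 100, 1))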
#Nominal variable: ChestPainType
mytablechest = heart.groupby(['ChestPainType']).size()
print(mytablechest)
n=mytablechest.sum()
#We compute the table as percentages
mytablechest2 = (mytablechest/n)*100
print(mytablechest2)
#We round the percentages
mytablechest3 = round(mytablechest2,1)
print(mytablechest3)
#We identify and choose the Sex variable as nominal
#We create a table with the Sex variable
mytable = heart.groupby(['Sex']).size()
print(mytable)
n=mytable.sum()
#We compute the table as percentages
mytable2 = (mytable/n)*100
print(mytable2)
#We round the percentages
mytable3 = round(mytable2,1)
print(mytable3)
#Once the table is built, we draw its plot
n=mytable.sum()
bar_list = ['ASY', 'ATA', 'NAP', 'TA']
plt.bar(bar_list, mytablechest3, edgecolor='black')
plt.ylabel('Percentage')
plt.xlabel('Chest Pain Type')
plt.title('Figure 4. Percentage of Chest Pain Type')
props = dict(boxstyle='round', facecolor='white', lw=0.5)
textstr = '$\mathrm{n}=%.0f$'%(n)
plt.text(1.6, 50,'n:918')
plt.savefig('Figure 4.svg')
#To keep the two plots from being drawn together, we call show() here:
plt.show()
#Once the table is built, we draw its plot
n=mytable.sum()
bar_list = ['Female', 'Male']
plt.bar(bar_list, mytablesex3, edgecolor='black')
plt.ylabel('Percentage')
plt.xlabel('Sex')
plt.title('Figure 3. Percentage of Female and Male')
plt.text(1.5, 50,'n:918')
#Looking at the plot we can see that
#women are 21% and men 79% of the sample of 918 patients
plt.savefig('Figure 3.svg')
#To keep the two plots from being drawn together, we call show() here:
plt.show()
#Now we pick a quantitative variable (Age) to build a histogram:
#Age on x, and how often it occurs (y = frequency) in each bin (step=5)
#Knowing the MIN is 28 and the MAX is 77 (from Age.describe above)...
#I decided to use np.arange(25,85)
x=heart['Age']
plt.hist(x,edgecolor='black',bins=20)
plt.xticks(np.arange(25,85, step=5))
plt.title("Figura 1. Edades")
plt.ylabel('Frequency')
plt.xlabel('Age')
plt.axvline(x=m_age, linewidth=1, linestyle= 'solid', color="red", label='Mean')
plt.axvline(x=m_age-sd_age, linewidth=1, linestyle= 'dashed', color="green", label='- 1 S.D.')
plt.axvline(x=m_age + sd_age, linewidth=1, linestyle= 'dashed', color="green", label='+ 1 S.D.')
plt.savefig('Figure 1.svg')
#To keep the two plots from being drawn together, we call show() here:
plt.show()
#Now we do the same with another quantitative variable (Cholesterol):
#Cholesterol on x and its frequency on y, with bins at step=50
#Using the MIN and MAX from Cholesterol.describe above...
#I decided to use np.arange(0,610)
x=heart['Cholesterol']
plt.hist(x,edgecolor='black',bins=20)
plt.xticks(np.arange(0,610, step=50))
plt.title("Figura 2. Colesterol")
plt.ylabel('Frequency')
plt.xlabel('Cholesterol level')
plt.axvline(x=m_cho, linewidth=1, linestyle= 'solid', color="red", label='Mean')
plt.axvline(x=m_cho-sd_cho, linewidth=1, linestyle= 'dashed', color="green", label='- 1 S.D.')
plt.axvline(x=m_cho + sd_cho, linewidth=1, linestyle= 'dashed', color="green", label='+ 1 S.D.')
plt.savefig('Figure 2.svg')
1c35c41f5fdc2320979c9ed9aff80941d45c4c7b | 2,986 | py | Python | credentials_test.py | chiriket/Password-Locker | da40d20b886fcef01cc053a0c46a8caf91111877 | [
"MIT"
] | null | null | null | credentials_test.py | chiriket/Password-Locker | da40d20b886fcef01cc053a0c46a8caf91111877 | [
"MIT"
] | null | null | null | credentials_test.py | chiriket/Password-Locker | da40d20b886fcef01cc053a0c46a8caf91111877 | [
"MIT"
] | null | null | null | import unittest # Importing the unittest module
from credentials import Credentials # Importing the credentials class
class TestCredentials(unittest.TestCase):
'''
Test class that defines test cases for the credential class behaviours.
Args:
unittest.TestCase: TestCase class that helps in creating test cases
'''
def tearDown(self):
'''
tearDown method that does clean up after each test case has run.
'''
        Credentials.credentials_list = []
def setUp(self):
'''
Set up method to run before each test cases.
'''
self.new_credentials = Credentials("Twitter","Chiri","pass123") # create credential object
def test_init(self):
'''
test_init test case to test if the object is initialized properly
'''
self.assertEqual(self.new_credentials.account_platform,"Twitter")
self.assertEqual(self.new_credentials.account_name,"Chiri")
self.assertEqual(self.new_credentials.account_password,"pass123")
def test_save_credentials(self):
'''
test_save_credentials test case to test if the credentials object is saved into
the credentials list
'''
        self.new_credentials.save_credentials() # saving the new credentials
        self.assertEqual(len(Credentials.credentials_list),1)
def test_create_credentials(self):
'''
test_create_credentials test case to test if the
credentials object is added into
the credentials list
'''
self.new_credentials.create_credentials() # create new credentials
self.assertEqual(len(Credentials.credentials_list),1)
def test_save_multiple_credentials(self):
'''
test_save_multiple_credentials to check if we can save multiple credentials
objects to our credentials_list
'''
self.new_credentials.save_credentials()
test_credentials = Credentials("Twitter","Chiri","pass123") # new credentials
test_credentials.save_credentials()
        self.assertEqual(len(Credentials.credentials_list),2)
def test_delete_credentials(self):
'''
test_delete_credentials to test if we can remove a credentials from our credentials list
'''
self.new_credentials.save_credentials()
test_credentials = Credentials("Twitter","Chiri","pass123") # new credentials
test_credentials.save_credentials()
        self.new_credentials.delete_credentials() # Deleting a credentials object
        self.assertEqual(len(Credentials.credentials_list),1)
def test_display_all_credentials(self):
'''
method that returns a list of all credentials saved
'''
self.assertEqual(Credentials.display_credentials(),Credentials.credentials_list)
if __name__ == '__main__':
unittest.main()
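# Minimal sketch (added) of the `credentials` module these tests import --
# a reconstruction from the calls above, not the project's actual class:
# class Credentials:
#     credentials_list = []
#     def __init__(self, account_platform, account_name, account_password):
#         self.account_platform = account_platform
#         self.account_name = account_name
#         self.account_password = account_password
#     def save_credentials(self):
#         Credentials.credentials_list.append(self)
#     def delete_credentials(self):
#         Credentials.credentials_list.remove(self)
#     @classmethod
#     def display_credentials(cls):
#         return cls.credentials_list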
# File: src/webservice/frame.py | Repo: AzemaBaptiste/SoundLandscape
# -*- coding: utf-8 -*-
import base64
import cv2
from flask import Blueprint
from src.data.webcam_images import VideoCamera
CAMERA_APP = Blueprint('camera_app', __name__)
@CAMERA_APP.route("/api/frame/get_camera_face", methods=["POST", "GET"])
def get_camera_face():
"""Get img from face.
:return: (str) face image
"""
CAMERA_FACE = VideoCamera(0)
frame = CAMERA_FACE.get_frame()
_, img_encoded = cv2.imencode('.jpg', frame)
CAMERA_FACE.__del__()
jpg_as_text = base64.b64encode(img_encoded)
return jpg_as_text
@CAMERA_APP.route("/api/frame/get_camera_front", methods=["POST", "GET"])
def get_camera_front():
"""Get img from front.
:return: (str) front image
"""
CAMERA_FRONT = VideoCamera(1)
frame = CAMERA_FRONT.get_frame()
_, img_encoded = cv2.imencode('.jpg', frame)
CAMERA_FRONT.__del__()
jpg_as_text = base64.b64encode(img_encoded)
return jpg_as_text
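# Client-side sketch (added; names are placeholders): decode a payload
# returned by the endpoints above back into an OpenCV image.
# import numpy as np
# raw = base64.b64decode(jpg_as_text)
# img = cv2.imdecode(np.frombuffer(raw, np.uint8), cv2.IMREAD_COLOR)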
# File: setup.py | Repo: nuhamozaini/deepvec | License: MIT
from distutils.core import setup
from io import open
with open("README.rst", "r") as fh:
long_description = fh.read()
setup(
name='deepvec',
packages=['deepvec'],
version='0.2',
license='MIT',
description='Tensorflow wrapper for classification',
long_description=long_description,
author='Nuha Almozaini',
author_email='nuha.mozaini@gmail.com',
url='https://github.com/nuhamozaini/deepvec',
download_url='https://github.com/nuhamozaini/deepvec/archive/v_02.tar.gz',
keywords=['classification', 'deep learning', 'tensorflow', 'keras', 'pandas'],
install_requires=[
'tensorflow',
'pandas',
],
classifiers=[
'Development Status :: 3 - Alpha',
        # Choose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
)
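# Packaging sketch (added; standard distutils workflow, commands assumed):
#   python setup.py sdist              # builds dist/deepvec-0.2.tar.gz
#   pip install dist/deepvec-0.2.tar.gz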
# File: pl_examples/basic_examples/conv_sequential_example.py | Repo: skmatz/pytorch-lightning | License: Apache-2.0
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example script of running the experimental DDP Sequential Plugin.
This script splits a convolutional model onto multiple GPUs, while using the
internal built-in balancer to balance across your GPUs.
To run:
python conv_sequential_example.py --accelerator ddp --gpus 4 --max_epochs 1 --batch_size 256 --use_rpc_sequential
"""
import math
from argparse import ArgumentParser
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import pytorch_lightning as pl
from pl_examples import cli_lightning_logo
from pytorch_lightning import Trainer
from pytorch_lightning.metrics.functional import accuracy
from pytorch_lightning.plugins import RPCSequentialPlugin
from pytorch_lightning.utilities import _BOLTS_AVAILABLE, _FAIRSCALE_PIPE_AVAILABLE
if _BOLTS_AVAILABLE:
import pl_bolts
from pl_bolts.transforms.dataset_normalizations import cifar10_normalization
#####################
# Modules #
#####################
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
###############################
# LightningModule #
###############################
class LitResnet(pl.LightningModule):
"""
>>> LitResnet() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
LitResnet(
(sequential_module): Sequential(...)
)
"""
def __init__(self, lr=0.05, batch_size=32, manual_optimization=False):
super().__init__()
self.save_hyperparameters()
self.sequential_module = nn.Sequential(
# Conv Layer block 1
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(inplace=False),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
nn.ReLU(inplace=False),
nn.MaxPool2d(kernel_size=2, stride=2),
# Conv Layer block 2
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=False),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
nn.ReLU(inplace=False),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(p=0.05),
# Conv Layer block 3
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=False),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
nn.ReLU(inplace=False),
nn.MaxPool2d(kernel_size=2, stride=2),
Flatten(),
nn.Dropout(p=0.1),
nn.Linear(4096, 1024),
nn.ReLU(inplace=False),
nn.Linear(1024, 512),
nn.ReLU(inplace=False),
nn.Dropout(p=0.1),
nn.Linear(512, 10)
)
self._example_input_array = torch.randn((1, 3, 32, 32))
if manual_optimization:
self.automatic_optimization = False
self.training_step = self.training_step_manual
def forward(self, x):
out = self.sequential_module(x)
return F.log_softmax(out, dim=-1)
def training_step_manual(self, batch, batch_idx):
opt = self.optimizers()
def closure():
x, y = batch
logits = self.forward(x)
loss = F.nll_loss(logits, y)
self.manual_backward(loss, opt)
self.log('train_loss', loss, prog_bar=True)
opt.step(closure=closure)
def training_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
loss = F.nll_loss(logits, y)
self.log('Training Loss', loss)
return loss
def _evaluate(self, batch, batch_idx, stage=None):
x, y = batch
out = self.forward(x)
logits = F.log_softmax(out, dim=-1)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=-1)
acc = accuracy(preds, y)
if stage:
self.log(f'{stage}_loss', loss, prog_bar=True)
self.log(f'{stage}_acc', acc, prog_bar=True)
return loss, acc
def validation_step(self, batch, batch_idx):
return self._evaluate(batch, batch_idx, 'val')[0]
def test_step(self, batch, batch_idx):
loss, acc = self._evaluate(batch, batch_idx, 'test')
self.log_dict({'test_loss': loss, 'test_acc': acc})
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.parameters(), lr=self.hparams.lr, momentum=0.9, weight_decay=5e-4)
return {
'optimizer': optimizer,
'lr_scheduler': {
'scheduler': torch.optim.lr_scheduler.OneCycleLR(
optimizer,
0.1,
epochs=self.trainer.max_epochs,
steps_per_epoch=math.ceil(45000 / self.hparams.batch_size)
),
'interval': 'step',
}
}
#################################
# Instantiate Data Module #
#################################
def instantiate_datamodule(args):
train_transforms = torchvision.transforms.Compose([
torchvision.transforms.RandomCrop(32, padding=4),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
cifar10_normalization(),
])
test_transforms = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
cifar10_normalization(),
])
cifar10_dm = pl_bolts.datamodules.CIFAR10DataModule(
data_dir=args.data_dir,
batch_size=args.batch_size,
train_transforms=train_transforms,
test_transforms=test_transforms,
val_transforms=test_transforms,
)
return cifar10_dm
if __name__ == "__main__":
cli_lightning_logo()
assert _BOLTS_AVAILABLE, "Bolts is required for this example, install it via pip install pytorch-lightning-bolts"
assert _FAIRSCALE_PIPE_AVAILABLE, "FairScale and PyTorch 1.6 is required for this example."
parser = ArgumentParser(description="Pipe Example")
parser.add_argument("--use_rpc_sequential", action="store_true")
parser.add_argument("--manual_optimization", action="store_true")
parser = Trainer.add_argparse_args(parser)
parser = pl_bolts.datamodules.CIFAR10DataModule.add_argparse_args(parser)
args = parser.parse_args()
cifar10_dm = instantiate_datamodule(args)
plugins = None
if args.use_rpc_sequential:
plugins = RPCSequentialPlugin()
model = LitResnet(batch_size=args.batch_size, manual_optimization=args.manual_optimization)
trainer = pl.Trainer.from_argparse_args(args, plugins=[plugins] if plugins else None)
trainer.fit(model, cifar10_dm)
trainer.test(model, datamodule=cifar10_dm)
if trainer.accelerator.rpc_enabled:
# Called at the end of trainer to ensure all processes are killed
trainer.training_type_plugin.exit_rpc_process()
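# Side note (added): on recent PyTorch versions the custom Flatten module
# above can be replaced with the built-in equivalent:
# nn.Flatten()  # flattens every dim except batch, same as x.view(x.size(0), -1)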
# File: Code/all-starter-code/linkedlist.py | Repo: Prones94/CS-1.3-Core-Data-Structures | License: MIT
#!python
class Node(object):
def __init__(self, data):
"""Initialize this node with the given data."""
self.data = data
self.next = None
def __repr__(self):
"""Return a string representation of this node."""
return 'Node({!r})'.format(self.data)
class LinkedList(object):
def __init__(self, iterable=None):
"""Initialize this linked list and append the given items, if any."""
self.head = None # First node
self.tail = None # Last node
self.size = 0 # Number of nodes
# Append the given items
if iterable is not None:
for item in iterable:
self.append(item)
def __str__(self):
"""Return a formatted string representation of this linked list."""
items = ['({!r})'.format(item) for item in self.items()]
return '[{}]'.format(' -> '.join(items))
def __repr__(self):
"""Return a string representation of this linked list."""
return 'LinkedList({!r})'.format(self.items())
def items(self):
"""Return a list of all items in this linked list.
Best and worst case running time: Theta(n) for n items in the list
because we always need to loop through all n nodes."""
# Create an empty list of results
result = [] # Constant time to create a new list
# Start at the head node
node = self.head # Constant time to assign a variable reference
# Loop until the node is None, which is one node too far past the tail
while node is not None: # Always n iterations because no early exit
# Append this node's data to the results list
result.append(node.data) # Constant time to append to a list
# Skip to the next node
node = node.next # Constant time to reassign a variable
# Now result contains the data from all nodes
return result # Constant time to return a list
def is_empty(self):
"""Return True if this linked list is empty, or False."""
return self.head is None
def length(self):
"""Return the length of this linked list by traversing its nodes.
Best and worst case running time: ??? under what conditions? [TODO]"""
# Node counter initialized to zero
node_count = 0
# Start at the head node
node = self.head
# Loop until the node is None, which is one node too far past the tail
while node is not None:
# Count one for this node
node_count += 1
# Skip to the next node
node = node.next
# Now node_count contains the number of nodes
return node_count
def get_at_index(self, index):
"""Return the item at the given index in this linked list, or
raise ValueError if the given index is out of range of the list size.
Best case running time: ??? under what conditions? [TODO]
Worst case running time: ??? under what conditions? [TODO]"""
# Check if the given index is out of range and if so raise an error
if not (0 <= index < self.size):
raise ValueError('List index out of range: {}'.format(index))
node = self.head
for i in range(0, self.size):
if i == index:
return node.data
else:
node = node.next
    def insert_at_index(self, index, item):
        """Insert the given item at the given index in this linked list, or
        raise ValueError if the given index is out of range of the list size.
        Best case running time: O(1) if inserting at the head or the tail.
        Worst case running time: O(n) if inserting near the tail, since we
        traverse up to n nodes to find the insertion point."""
        # Check if the given index is out of range and if so raise an error
        if not (0 <= index <= self.size):
            raise ValueError('List index out of range: {}'.format(index))
        if index == 0:
            self.prepend(item)
        elif index == self.length():
            self.append(item)
        else:
            # Walk to the node just before the insertion point so the new
            # node ends up at position `index` (the original comparison
            # linked it in one position too late)
            node = self.head
            current_node = 0
            new_node = Node(item)
            while node is not None:
                if current_node == index - 1:
                    new_node.next = node.next
                    node.next = new_node
                    self.size += 1
                    return
                current_node += 1
                node = node.next
def append(self, item):
"""Insert the given item at the tail of this linked list.
Best and worst case running time: ??? under what conditions? [TODO]"""
# Create a new node to hold the given item
new_node = Node(item)
# Check if this linked list is empty
if self.is_empty():
# Assign head to new node
self.head = new_node
else:
# Otherwise insert new node after tail
self.tail.next = new_node
# Update tail to new node regardless
self.size += 1
self.tail = new_node
def prepend(self, item):
"""Insert the given item at the head of this linked list.
Best and worst case running time: ??? under what conditions? [TODO]"""
# Create a new node to hold the given item
new_node = Node(item)
# Check if this linked list is empty
if self.is_empty():
# Assign tail to new node
self.tail = new_node
else:
# Otherwise insert new node before head
new_node.next = self.head
# Update head to new node regardless
self.size += 1
self.head = new_node
def find(self, quality):
"""Return an item from this linked list satisfying the given quality.
Best case running time: Omega(1) if item is near the head of the list.
Worst case running time: O(n) if item is near the tail of the list or
not present and we need to loop through all n nodes in the list."""
# Start at the head node
node = self.head # Constant time to assign a variable reference
# Loop until the node is None, which is one node too far past the tail
while node is not None: # Up to n iterations if we don't exit early
# Check if this node's data satisfies the given quality function
if quality(node.data): # Constant time to call quality function
# We found data satisfying the quality function, so exit early
return node.data # Constant time to return data
# Skip to the next node
node = node.next # Constant time to reassign a variable
# We never found data satisfying quality, but have to return something
return None # Constant time to return None
    def replace(self, old_item, new_item):
        """Replace the given old_item in this linked list with given new_item
        using the same node, or raise ValueError if old_item is not found.
        Best case running time: O(1) if old_item is at the head of the list.
        Worst case running time: O(n) if old_item is near the tail or absent."""
        # Find the node containing the given old_item and replace its
        # data with new_item, without creating a new node object
        node = self.head
        while node is not None:
            if node.data == old_item:
                node.data = new_item
                return
            node = node.next
        raise ValueError('{} not in list'.format(old_item))
def delete(self, item):
"""Delete the given item from this linked list, or raise ValueError.
        Best case running time: O(1) if the item is at the head of the list.
        Worst case running time: O(n) if the item is near the tail or not
        present, since we traverse up to n nodes."""
# Start at the head node
node = self.head
# Keep track of the node before the one containing the given item
previous = None
# Create a flag to track if we have found the given item
found = False
# Loop until we have found the given item or the node is None
while not found and node is not None:
# Check if the node's data matches the given item
if node.data == item:
# We found data matching the given item, so update found flag
found = True
else:
# Skip to the next node
previous = node
node = node.next
# Check if we found the given item or we never did and reached the tail
if found:
# Check if we found a node in the middle of this linked list
self.size -= 1
if node is not self.head and node is not self.tail:
# Update the previous node to skip around the found node
previous.next = node.next
# Unlink the found node from its next node
node.next = None
# Check if we found a node at the head
if node is self.head:
# Update head to the next node
self.head = node.next
# Unlink the found node from the next node
node.next = None
# Check if we found a node at the tail
if node is self.tail:
# Check if there is a node before the found node
if previous is not None:
# Unlink the previous node from the found node
previous.next = None
# Update tail to the previous node regardless
self.tail = previous
else:
# Otherwise raise an error to tell the user that delete has failed
raise ValueError('Item not found: {}'.format(item))
def test_linked_list():
ll = LinkedList()
print(ll)
print('Appending items:')
ll.append('A')
print(ll)
ll.append('B')
print(ll)
ll.append('C')
print(ll)
print('head: {}'.format(ll.head))
print('tail: {}'.format(ll.tail))
print('size: {}'.format(ll.size))
print('length: {}'.format(ll.length()))
print('Getting items by index:')
for index in range(ll.size):
item = ll.get_at_index(index)
print('get_at_index({}): {!r}'.format(index, item))
print('Deleting items:')
ll.delete('B')
print(ll)
ll.delete('C')
print(ll)
ll.delete('A')
print(ll)
print('head: {}'.format(ll.head))
print('tail: {}'.format(ll.tail))
print('size: {}'.format(ll.size))
print('length: {}'.format(ll.length()))
if __name__ == '__main__':
test_linked_list()
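# Quick check of insert_at_index semantics after the off-by-one fix (added):
# ll = LinkedList(['A', 'C'])
# ll.insert_at_index(1, 'B')
# print(ll)  # [('A') -> ('B') -> ('C')]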
# File: atcoder/corp/ddcc2016_qa.py | Repo: knuu/competitive-programming | License: MIT
A, B, C = map(int, input().split())
print('{:.20f}'.format(C * B / A))
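# Worked example (added): A=2, B=3, C=4 -> C * B / A = 6, printed as
# 6.00000000000000000000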
# File: starterbot/lib/python2.7/site-packages/slackclient/_server.py | Repo: dshaps10/StarterBot | License: MIT
from slackclient._slackrequest import SlackRequest
from slackclient._channel import Channel
from slackclient._user import User
from slackclient._util import SearchList
from ssl import SSLError
from websocket import create_connection
import json
class Server(object):
'''
The Server object owns the websocket connection and all attached channel information.
'''
def __init__(self, token, connect=True):
self.token = token
self.username = None
self.domain = None
self.login_data = None
self.websocket = None
self.users = SearchList()
self.channels = SearchList()
self.connected = False
self.pingcounter = 0
self.ws_url = None
self.api_requester = SlackRequest()
if connect:
self.rtm_connect()
def __eq__(self, compare_str):
if compare_str == self.domain or compare_str == self.token:
return True
else:
return False
def __hash__(self):
return hash(self.token)
def __str__(self):
'''
Example Output::
username : None
domain : None
websocket : None
users : []
login_data : None
api_requester : <slackclient._slackrequest.SlackRequest
pingcounter : 0
channels : []
token : xoxb-asdlfkyadsofii7asdf734lkasdjfllakjba7zbu
connected : False
ws_url : None
'''
data = ""
for key in list(self.__dict__.keys()):
data += "{} : {}\n".format(key, str(self.__dict__[key])[:40])
return data
def __repr__(self):
return self.__str__()
def rtm_connect(self, reconnect=False):
reply = self.api_requester.do(self.token, "rtm.start")
if reply.status_code != 200:
raise SlackConnectionError
else:
login_data = reply.json()
if login_data["ok"]:
self.ws_url = login_data['url']
if not reconnect:
self.parse_slack_login_data(login_data)
self.connect_slack_websocket(self.ws_url)
else:
raise SlackLoginError
def parse_slack_login_data(self, login_data):
self.login_data = login_data
self.domain = self.login_data["team"]["domain"]
self.username = self.login_data["self"]["name"]
self.parse_channel_data(login_data["channels"])
self.parse_channel_data(login_data["groups"])
self.parse_channel_data(login_data["ims"])
self.parse_user_data(login_data["users"])
def connect_slack_websocket(self, ws_url):
try:
self.websocket = create_connection(ws_url)
self.websocket.sock.setblocking(0)
except:
raise SlackConnectionError
def parse_channel_data(self, channel_data):
for channel in channel_data:
if "name" not in channel:
channel["name"] = channel["id"]
if "members" not in channel:
channel["members"] = []
self.attach_channel(channel["name"],
channel["id"],
channel["members"])
def parse_user_data(self, user_data):
for user in user_data:
if "tz" not in user:
user["tz"] = "unknown"
if "real_name" not in user:
user["real_name"] = user["name"]
self.attach_user(user["name"], user["id"], user["real_name"], user["tz"])
def send_to_websocket(self, data):
"""
Send a JSON message directly to the websocket. See
        `RTM documentation <https://api.slack.com/rtm>`_ for allowed types.
:Args:
data (dict) the key/values to send the websocket.
"""
try:
data = json.dumps(data)
self.websocket.send(data)
except:
self.rtm_connect(reconnect=True)
def ping(self):
return self.send_to_websocket({"type": "ping"})
def websocket_safe_read(self):
""" Returns data if available, otherwise ''. Newlines indicate multiple
messages
"""
data = ""
while True:
try:
data += "{0}\n".format(self.websocket.recv())
except SSLError as e:
if e.errno == 2:
# errno 2 occurs when trying to read or write data, but more
# data needs to be received on the underlying TCP transport
# before the request can be fulfilled.
#
# Python 2.7.9+ and Python 3.3+ give this its own exception,
# SSLWantReadError
return ''
raise
            return data.rstrip()
def attach_user(self, name, channel_id, real_name, tz):
if self.users.find(channel_id) is None:
self.users.append(User(self, name, channel_id, real_name, tz))
def attach_channel(self, name, channel_id, members=None):
if members is None:
members = []
if self.channels.find(channel_id) is None:
self.channels.append(Channel(self, name, channel_id, members))
def join_channel(self, name):
'''
Join a channel by name.
Note: this action is not allowed by bots, they must be invited to channels.
'''
return self.api_requester.do(
self.token,
"channels.join?name={}".format(name)
).text
def api_call(self, method, **kwargs):
'''
Call the Slack Web API as documented here: https://api.slack.com/web
:Args:
method (str): The API Method to call. See here for a list: https://api.slack.com/methods
:Kwargs:
            (optional) kwargs: any arguments passed here will be bundled and sent
                to the api requester as post_data and passed along to the API.
Example::
sc.server.api_call(
"channels.setPurpose",
channel="CABC12345",
purpose="Writing some code!"
)
Returns:
str -- returns the text of the HTTP response.
Examples::
u'{"ok":true,"purpose":"Testing bots"}'
or
u'{"ok":false,"error":"channel_not_found"}'
See here for more information on responses: https://api.slack.com/web
'''
return self.api_requester.do(self.token, method, kwargs).text
class SlackConnectionError(Exception):
pass
class SlackLoginError(Exception):
pass
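# Usage sketch (illustrative, not part of the library): assumes a valid bot
# token and network access; the token string below is only a placeholder.
if __name__ == "__main__":
    server = Server(token="xoxb-your-token-here", connect=True)
    server.ping()
    print(server.websocket_safe_read())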
| 31.384977 | 100 | 0.566492 | from slackclient._slackrequest import SlackRequest
from slackclient._channel import Channel
from slackclient._user import User
from slackclient._util import SearchList
from ssl import SSLError
from websocket import create_connection
import json
class Server(object):
def __init__(self, token, connect=True):
self.token = token
self.username = None
self.domain = None
self.login_data = None
self.websocket = None
self.users = SearchList()
self.channels = SearchList()
self.connected = False
self.pingcounter = 0
self.ws_url = None
self.api_requester = SlackRequest()
if connect:
self.rtm_connect()
def __eq__(self, compare_str):
if compare_str == self.domain or compare_str == self.token:
return True
else:
return False
def __hash__(self):
return hash(self.token)
def __str__(self):
data = ""
for key in list(self.__dict__.keys()):
data += "{} : {}\n".format(key, str(self.__dict__[key])[:40])
return data
def __repr__(self):
return self.__str__()
def rtm_connect(self, reconnect=False):
reply = self.api_requester.do(self.token, "rtm.start")
if reply.status_code != 200:
raise SlackConnectionError
else:
login_data = reply.json()
if login_data["ok"]:
self.ws_url = login_data['url']
if not reconnect:
self.parse_slack_login_data(login_data)
self.connect_slack_websocket(self.ws_url)
else:
raise SlackLoginError
def parse_slack_login_data(self, login_data):
self.login_data = login_data
self.domain = self.login_data["team"]["domain"]
self.username = self.login_data["self"]["name"]
self.parse_channel_data(login_data["channels"])
self.parse_channel_data(login_data["groups"])
self.parse_channel_data(login_data["ims"])
self.parse_user_data(login_data["users"])
def connect_slack_websocket(self, ws_url):
try:
self.websocket = create_connection(ws_url)
self.websocket.sock.setblocking(0)
except:
raise SlackConnectionError
def parse_channel_data(self, channel_data):
for channel in channel_data:
if "name" not in channel:
channel["name"] = channel["id"]
if "members" not in channel:
channel["members"] = []
self.attach_channel(channel["name"],
channel["id"],
channel["members"])
def parse_user_data(self, user_data):
for user in user_data:
if "tz" not in user:
user["tz"] = "unknown"
if "real_name" not in user:
user["real_name"] = user["name"]
self.attach_user(user["name"], user["id"], user["real_name"], user["tz"])
def send_to_websocket(self, data):
try:
data = json.dumps(data)
self.websocket.send(data)
except:
self.rtm_connect(reconnect=True)
def ping(self):
return self.send_to_websocket({"type": "ping"})
def websocket_safe_read(self):
data = ""
while True:
try:
data += "{0}\n".format(self.websocket.recv())
except SSLError as e:
if e.errno == 2:
return ''
raise
return data.rstrip()
def attach_user(self, name, channel_id, real_name, tz):
if self.users.find(channel_id) is None:
self.users.append(User(self, name, channel_id, real_name, tz))
def attach_channel(self, name, channel_id, members=None):
if members is None:
members = []
if self.channels.find(channel_id) is None:
self.channels.append(Channel(self, name, channel_id, members))
def join_channel(self, name):
return self.api_requester.do(
self.token,
"channels.join?name={}".format(name)
).text
def api_call(self, method, **kwargs):
return self.api_requester.do(self.token, method, kwargs).text
class SlackConnectionError(Exception):
pass
class SlackLoginError(Exception):
pass
| true | true |
1c35ca73c09d506e9d55236a5bc09733d95fafa5 | 2,793 | py | Python | QUANTAXIS/QAFetch/base.py | kingore/QUANTAXIS | ead08c4ccd4db6467d3a9a2533cef2fb6b6c95ad | [
"MIT"
] | 1 | 2018-02-21T05:00:57.000Z | 2018-02-21T05:00:57.000Z | QUANTAXIS/QAFetch/base.py | ariesii/QUANTAXIS | a09d8784619e39ae74e13689011b08cdcc8431c4 | [
"MIT"
] | null | null | null | QUANTAXIS/QAFetch/base.py | ariesii/QUANTAXIS | a09d8784619e39ae74e13689011b08cdcc8431c4 | [
"MIT"
] | 1 | 2018-03-24T16:05:04.000Z | 2018-03-24T16:05:04.000Z | # coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'If-Modified-Since': 'Thu, 11 Jan 2018 07:05:01 GMT',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'}
def _select_market_code(code):
"""
1- sh
0 -sz
"""
code = str(code)
if code[0] in ['5', '6', '9'] or code[:3] in ["009", "126", "110", "201", "202", "203", "204"]:
return 1
return 0
def _select_type(frequence):
if frequence in ['day', 'd', 'D', 'DAY', 'Day']:
frequence = 9
elif frequence in ['w', 'W', 'Week', 'week']:
frequence = 5
elif frequence in ['month', 'M', 'm', 'Month']:
frequence = 6
elif frequence in ['Q', 'Quarter', 'q']:
frequence = 10
elif frequence in ['y', 'Y', 'year', 'Year']:
frequence = 11
elif str(frequence) in ['5', '5m', '5min', 'five']:
frequence, type_ = 0, '5min'
elif str(frequence) in ['1', '1m', '1min', 'one']:
frequence, type_ = 8, '1min'
elif str(frequence) in ['15', '15m', '15min', 'fifteen']:
frequence, type_ = 1, '15min'
elif str(frequence) in ['30', '30m', '30min', 'half']:
frequence, type_ = 2, '30min'
elif str(frequence) in ['60', '60m', '60min', '1h']:
frequence, type_ = 3, '60min'
return frequence | 40.478261 | 138 | 0.634085 |
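# Usage sketch (illustrative): the ticker symbols below are examples only.
if __name__ == "__main__":
    assert _select_market_code("600000") == 1   # Shanghai-listed code
    assert _select_market_code("000001") == 0   # Shenzhen-listed code
    assert _select_type("day") == 9             # daily bars map to type 9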
headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'If-Modified-Since': 'Thu, 11 Jan 2018 07:05:01 GMT',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'}
def _select_market_code(code):
code = str(code)
if code[0] in ['5', '6', '9'] or code[:3] in ["009", "126", "110", "201", "202", "203", "204"]:
return 1
return 0
def _select_type(frequence):
if frequence in ['day', 'd', 'D', 'DAY', 'Day']:
frequence = 9
elif frequence in ['w', 'W', 'Week', 'week']:
frequence = 5
elif frequence in ['month', 'M', 'm', 'Month']:
frequence = 6
elif frequence in ['Q', 'Quarter', 'q']:
frequence = 10
elif frequence in ['y', 'Y', 'year', 'Year']:
frequence = 11
elif str(frequence) in ['5', '5m', '5min', 'five']:
frequence, type_ = 0, '5min'
elif str(frequence) in ['1', '1m', '1min', 'one']:
frequence, type_ = 8, '1min'
elif str(frequence) in ['15', '15m', '15min', 'fifteen']:
frequence, type_ = 1, '15min'
elif str(frequence) in ['30', '30m', '30min', 'half']:
frequence, type_ = 2, '30min'
elif str(frequence) in ['60', '60m', '60min', '1h']:
frequence, type_ = 3, '60min'
return frequence | true | true |
1c35cd9adb0c85b88590392345e600e9bf237706 | 9,577 | py | Python | implementations/srgan/srgan_pl.py | jsyoo61/PyTorch-GAN | 2d528c5f9818b0d1110c33808947643f81a75bbb | [
"MIT"
] | null | null | null | implementations/srgan/srgan_pl.py | jsyoo61/PyTorch-GAN | 2d528c5f9818b0d1110c33808947643f81a75bbb | [
"MIT"
] | null | null | null | implementations/srgan/srgan_pl.py | jsyoo61/PyTorch-GAN | 2d528c5f9818b0d1110c33808947643f81a75bbb | [
"MIT"
] | null | null | null | """
Super-resolution of CelebA using Generative Adversarial Networks.
The dataset can be downloaded from: https://www.dropbox.com/sh/8oqt9vytwxb3s4r/AADIKlz8PR9zr6Y20qbkunrba/Img/img_align_celeba.zip?dl=0
(if not available there see if options are listed at http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html)
Instructions for running the script:
1. Download the dataset from the provided link
2. Save the folder 'img_align_celeba' to '../../data/'
3. Run the script using the command 'python3 srgan_pl.py'
"""
# %%
import argparse
import os
import numpy as np
import math
import itertools
import sys
import torchvision.transforms as transforms
from torchvision.utils import save_image, make_grid
from torch.utils.data import DataLoader
from torch.autograd import Variable
from models import *
from datasets import *
import torch.nn as nn
import torch.nn.functional as F
import torch
from tools.tools import tdict, Timer, append, AverageMeter
from utils import *
from aggregation import aggregate_grad, distribute_all
import pdb
# %%
os.makedirs("images", exist_ok=True)
os.makedirs("saved_models", exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument("--epoch", type=int, default=0, help="epoch to start training from")
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--dataset_name", type=str, default="img_align_celeba", help="name of the dataset")
parser.add_argument("--batch_size", type=int, default=4, help="size of the batches")
parser.add_argument("--batch_m", type=int, default=1, help="batch multiplier. iterate over n batches and then apply gradients")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--decay_epoch", type=int, default=100, help="epoch from which to start lr decay")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--hr_height", type=int, default=256, help="high res. image height")
parser.add_argument("--hr_width", type=int, default=256, help="high res. image width")
parser.add_argument("--channels", type=int, default=3, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=100, help="interval between saving image samples")
parser.add_argument("--checkpoint_interval", type=int, default=-1, help="interval between model checkpoints")
parser.add_argument("--checkpoint_name", type=str, default='default', help="name of checkpoint")
opt = parser.parse_args()
print(opt)
# %%
cuda = torch.cuda.is_available()
n_cuda = torch.cuda.device_count()
hr_shape = (opt.hr_height, opt.hr_width)
print('n_cuda: %s'%n_cuda)
# Initialize generator and discriminator
generator_list = []
discriminator_list = []
feature_extractor_list = []
optimizer_G_list = []
optimizer_D_list = []
for i in range(n_cuda):
generator = GeneratorResNet().cuda(i)
discriminator = Discriminator(input_shape=(opt.channels, *hr_shape)).cuda(i)
feature_extractor = FeatureExtractor().cuda(i)
# Set feature extractor to inference mode
feature_extractor.eval()
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
generator_list.append(generator)
discriminator_list.append(discriminator)
feature_extractor_list.append(feature_extractor)
optimizer_G_list.append(optimizer_G)
optimizer_D_list.append(optimizer_D)
optimizer_G = optimizer_G_list[0]
optimizer_D = optimizer_D_list[0]
print('number of parameters (generator): %s'%sum(p.numel() for p in generator_list[0].parameters()))
print('number of parameters (discriminator): %s'%sum(p.numel() for p in discriminator_list[0].parameters()))
for generator, discriminator, feature_extractor in zip(generator_list, discriminator_list, feature_extractor_list):
generator_device = next(generator.parameters()).device
discriminator_device = next(discriminator.parameters()).device
feature_extractor_device = next(feature_extractor.parameters()).device
print('models on device: generator(%s), discriminator(%s), feature_extractor(%s)'%(generator_device, discriminator_device, feature_extractor_device))
# Losses
criterion_GAN = torch.nn.MSELoss()
criterion_content = torch.nn.L1Loss()
if cuda:
criterion_GAN = criterion_GAN.cuda()
criterion_content = criterion_content.cuda()
Tensor = torch.cuda.FloatTensor if cuda else torch.Tensor
dataloader = DataLoader(
ImageDataset("../../data/%s" % opt.dataset_name, hr_shape=hr_shape),
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.n_cpu,
pin_memory=True
)
global_timer = Timer()
epoch_timer = Timer()
iter_timer = Timer()
iter_time_meter = AverageMeter()
# ----------
# Training
# ----------
global_timer.start()
for epoch in range(opt.epoch, opt.n_epochs):
epoch_timer.start()
imgs_list = []
for i, imgs in enumerate(dataloader):
if i % n_cuda == 0:
iter_timer.start()
imgs_list.append(imgs)
if len(imgs_list) < n_cuda:
continue
print('zero_grad_G')
optimizer_G.zero_grad()
print('zero_grad_D')
optimizer_D.zero_grad()
for imgs, generator, discriminator, feature_extractor in zip(imgs_list, generator_list, discriminator_list, feature_extractor_list):
device = next(generator.parameters()).device
with torch.cuda.device(device):
# ------------------
# Train Generators
# ------------------
# Configure model input
imgs_lr = imgs["lr"].cuda()
imgs_hr = imgs["hr"].cuda()
# Adversarial ground truths
valid = torch.ones((imgs_lr.size(0), *discriminator.output_shape), device=device)
fake = torch.zeros((imgs_lr.size(0), *discriminator.output_shape), device=device)
# Generate a high resolution image from low resolution input
gen_hr = generator(imgs_lr)
# Adversarial loss
loss_GAN = criterion_GAN(discriminator(gen_hr), valid)
# Content loss
gen_features = feature_extractor(gen_hr)
real_features = feature_extractor(imgs_hr)
loss_content = criterion_content(gen_features, real_features.detach())
# Total loss
loss_G = loss_content + 1e-3 * loss_GAN
loss_G = loss_G
loss_G.backward()
# ---------------------
# Train Discriminator
# ---------------------
# Loss of real and fake images
loss_real = criterion_GAN(discriminator(imgs_hr), valid)
loss_fake = criterion_GAN(discriminator(gen_hr.detach()), fake)
# Total loss
loss_D = (loss_real + loss_fake) / 2
loss_D = loss_D
loss_D.backward()
aggregate_grad(generator_list[1:], generator_list[0])
print('step_G')
optimizer_G.step()
distribute_all(generator_list[0], generator_list[1:])
aggregate_grad(discriminator_list[1:], discriminator_list[0])
print('step_D')
optimizer_D.step()
distribute_all(discriminator_list[0], discriminator_list[1:])
imgs_list = []
# --------------
# Log Progress
# --------------
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
% (epoch, opt.n_epochs, i, len(dataloader), loss_D.item()*n_cuda, loss_G.item()*n_cuda)
)
iter_time_meter.update(iter_timer.stop())
print('time for iteration: %s (%s)'%(iter_time_meter.val, iter_time_meter.avg))
batches_done = epoch * len(dataloader) + i+1
if batches_done % opt.sample_interval == 0:
# Save image grid with upsampled inputs and SRGAN outputs
imgs_lr = nn.functional.interpolate(imgs_lr, scale_factor=4)
imgs_hr_raw = imgs['hr_raw'].to(device)
print('[psnr] (imgs_lr):%s, (gen_hr):%s'%(psnr(minmaxscaler(imgs_lr), imgs_hr_raw, max_val=1).mean(), psnr(minmaxscaler(gen_hr), imgs_hr_raw, max_val=1).mean()))
gen_hr = make_grid(gen_hr, nrow=1, normalize=True)
imgs_lr = make_grid(imgs_lr, nrow=1, normalize=True)
img_grid = torch.cat((imgs_lr, gen_hr), -1)
save_image(img_grid, "images/%d.png" % batches_done, normalize=False)
elapsed_time = epoch_timer.stop()
print('Elapsed_time: %s'%elapsed_time)
if opt.checkpoint_interval != -1 and epoch % opt.checkpoint_interval == 0:
# Save model checkpoints
torch.save(generator.state_dict(), "saved_models/generator_%d.pth" % epoch)
torch.save(discriminator.state_dict(), "saved_models/discriminator_%d.pth" % epoch)
elapsed_time = global_timer.stop()
print(str(elapsed_time))
append(str(elapsed_time), 'elapsed_time.txt')
torch.save(generator.state_dict(), "saved_models/generator_%s.pth" % opt.checkpoint_name)
torch.save(discriminator.state_dict(), "saved_models/discriminator_%s.pth" % opt.checkpoint_name)
2 **((1/4)*np.log2(6))
| 39.904167 | 173 | 0.677874 |
import argparse
import os
import numpy as np
import math
import itertools
import sys
import torchvision.transforms as transforms
from torchvision.utils import save_image, make_grid
from torch.utils.data import DataLoader
from torch.autograd import Variable
from models import *
from datasets import *
import torch.nn as nn
import torch.nn.functional as F
import torch
from tools.tools import tdict, Timer, append, AverageMeter
from utils import *
from aggregation import aggregate_grad, distribute_all
import pdb
os.makedirs("images", exist_ok=True)
os.makedirs("saved_models", exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument("--epoch", type=int, default=0, help="epoch to start training from")
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--dataset_name", type=str, default="img_align_celeba", help="name of the dataset")
parser.add_argument("--batch_size", type=int, default=4, help="size of the batches")
parser.add_argument("--batch_m", type=int, default=1, help="batch multiplier. iterate over n batches and then apply gradients")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--decay_epoch", type=int, default=100, help="epoch from which to start lr decay")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--hr_height", type=int, default=256, help="high res. image height")
parser.add_argument("--hr_width", type=int, default=256, help="high res. image width")
parser.add_argument("--channels", type=int, default=3, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=100, help="interval between saving image samples")
parser.add_argument("--checkpoint_interval", type=int, default=-1, help="interval between model checkpoints")
parser.add_argument("--checkpoint_name", type=str, default='default', help="name of checkpoint")
opt = parser.parse_args()
print(opt)
cuda = torch.cuda.is_available()
n_cuda = torch.cuda.device_count()
hr_shape = (opt.hr_height, opt.hr_width)
print('n_cuda: %s'%n_cuda)
generator_list = []
discriminator_list = []
feature_extractor_list = []
optimizer_G_list = []
optimizer_D_list = []
for i in range(n_cuda):
generator = GeneratorResNet().cuda(i)
discriminator = Discriminator(input_shape=(opt.channels, *hr_shape)).cuda(i)
feature_extractor = FeatureExtractor().cuda(i)
feature_extractor.eval()
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
generator_list.append(generator)
discriminator_list.append(discriminator)
feature_extractor_list.append(feature_extractor)
optimizer_G_list.append(optimizer_G)
optimizer_D_list.append(optimizer_D)
optimizer_G = optimizer_G_list[0]
optimizer_D = optimizer_D_list[0]
print('number of parameters (generator): %s'%sum(p.numel() for p in generator_list[0].parameters()))
print('number of parameters (discriminator): %s'%sum(p.numel() for p in discriminator_list[0].parameters()))
for generator, discriminator, feature_extractor in zip(generator_list, discriminator_list, feature_extractor_list):
generator_device = next(generator.parameters()).device
discriminator_device = next(discriminator.parameters()).device
feature_extractor_device = next(feature_extractor.parameters()).device
print('models on device: generator(%s), discriminator(%s), feature_extractor(%s)'%(generator_device, discriminator_device, feature_extractor_device))
criterion_GAN = torch.nn.MSELoss()
criterion_content = torch.nn.L1Loss()
if cuda:
criterion_GAN = criterion_GAN.cuda()
criterion_content = criterion_content.cuda()
Tensor = torch.cuda.FloatTensor if cuda else torch.Tensor
dataloader = DataLoader(
ImageDataset("../../data/%s" % opt.dataset_name, hr_shape=hr_shape),
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.n_cpu,
pin_memory=True
)
global_timer = Timer()
epoch_timer = Timer()
iter_timer = Timer()
iter_time_meter = AverageMeter()
global_timer.start()
for epoch in range(opt.epoch, opt.n_epochs):
epoch_timer.start()
imgs_list = []
for i, imgs in enumerate(dataloader):
if i % n_cuda == 0:
iter_timer.start()
imgs_list.append(imgs)
if len(imgs_list) < n_cuda:
continue
print('zero_grad_G')
optimizer_G.zero_grad()
print('zero_grad_D')
optimizer_D.zero_grad()
for imgs, generator, discriminator, feature_extractor in zip(imgs_list, generator_list, discriminator_list, feature_extractor_list):
device = next(generator.parameters()).device
with torch.cuda.device(device):
imgs_lr = imgs["lr"].cuda()
imgs_hr = imgs["hr"].cuda()
valid = torch.ones((imgs_lr.size(0), *discriminator.output_shape), device=device)
fake = torch.zeros((imgs_lr.size(0), *discriminator.output_shape), device=device)
gen_hr = generator(imgs_lr)
loss_GAN = criterion_GAN(discriminator(gen_hr), valid)
gen_features = feature_extractor(gen_hr)
real_features = feature_extractor(imgs_hr)
loss_content = criterion_content(gen_features, real_features.detach())
loss_G = loss_content + 1e-3 * loss_GAN
loss_G = loss_G
loss_G.backward()
loss_real = criterion_GAN(discriminator(imgs_hr), valid)
loss_fake = criterion_GAN(discriminator(gen_hr.detach()), fake)
loss_D = (loss_real + loss_fake) / 2
loss_D = loss_D
loss_D.backward()
aggregate_grad(generator_list[1:], generator_list[0])
print('step_G')
optimizer_G.step()
distribute_all(generator_list[0], generator_list[1:])
aggregate_grad(discriminator_list[1:], discriminator_list[0])
print('step_D')
optimizer_D.step()
distribute_all(discriminator_list[0], discriminator_list[1:])
imgs_list = []
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
% (epoch, opt.n_epochs, i, len(dataloader), loss_D.item()*n_cuda, loss_G.item()*n_cuda)
)
iter_time_meter.update(iter_timer.stop())
print('time for iteration: %s (%s)'%(iter_time_meter.val, iter_time_meter.avg))
batches_done = epoch * len(dataloader) + i+1
if batches_done % opt.sample_interval == 0:
imgs_lr = nn.functional.interpolate(imgs_lr, scale_factor=4)
imgs_hr_raw = imgs['hr_raw'].to(device)
print('[psnr] (imgs_lr):%s, (gen_hr):%s'%(psnr(minmaxscaler(imgs_lr), imgs_hr_raw, max_val=1).mean(), psnr(minmaxscaler(gen_hr), imgs_hr_raw, max_val=1).mean()))
gen_hr = make_grid(gen_hr, nrow=1, normalize=True)
imgs_lr = make_grid(imgs_lr, nrow=1, normalize=True)
img_grid = torch.cat((imgs_lr, gen_hr), -1)
save_image(img_grid, "images/%d.png" % batches_done, normalize=False)
elapsed_time = epoch_timer.stop()
print('Elapsed_time: %s'%elapsed_time)
if opt.checkpoint_interval != -1 and epoch % opt.checkpoint_interval == 0:
torch.save(generator.state_dict(), "saved_models/generator_%d.pth" % epoch)
torch.save(discriminator.state_dict(), "saved_models/discriminator_%d.pth" % epoch)
elapsed_time = global_timer.stop()
print(str(elapsed_time))
append(str(elapsed_time), 'elapsed_time.txt')
torch.save(generator.state_dict(), "saved_models/generator_%s.pth" % opt.checkpoint_name)
torch.save(discriminator.state_dict(), "saved_models/discriminator_%s.pth" % opt.checkpoint_name)
2 **((1/4)*np.log2(6))
| true | true |
1c35cf2bc06adac2edb14614591c9dbe864c2054 | 1,447 | py | Python | luwu/utils/file_util.py | AaronJny/luwu | 05ee0bc605926661e42cada6cff5e281f4506291 | [
"MIT"
] | 19 | 2021-01-30T03:04:31.000Z | 2022-01-09T10:33:12.000Z | luwu/utils/file_util.py | AaronJny/luwu | 05ee0bc605926661e42cada6cff5e281f4506291 | [
"MIT"
] | 4 | 2021-04-15T02:10:53.000Z | 2021-06-24T12:17:29.000Z | luwu/utils/file_util.py | AaronJny/luwu | 05ee0bc605926661e42cada6cff5e281f4506291 | [
"MIT"
] | 5 | 2021-03-02T07:29:12.000Z | 2022-01-09T10:32:49.000Z | # -*- coding: utf-8 -*-
# @Author : AaronJny
# @LastEditTime : 2021-03-15
# @FilePath : /LuWu/luwu/utils/file_util.py
# @Desc :
import os
import time
from uuid import uuid1
from glob import glob
from luwu.utils import cmd_util
from loguru import logger
def abspath(filepath):
if filepath:
return os.path.abspath(os.path.expanduser(filepath))
else:
return ""
LUWU_TMP_DIR_ROOT = abspath("~/.luwu/tmp")
def mkdirs(dirpath):
os.makedirs(dirpath, exist_ok=True)
def get_tmp_dir(dir_name=""):
"""在~/.luwu下创建一个临时文件夹,并返回创建的文件夹的绝对路径
Args:
        dir_name (str, optional): Subdirectory name. If not given, a random string is generated automatically. Defaults to ''.
"""
timestamp = str(int(time.time()))
if not dir_name:
dir_name = str(uuid1())
dirpath = abspath(os.path.join(LUWU_TMP_DIR_ROOT, timestamp, dir_name))
mkdirs(dirpath)
logger.info(f"已创建临时文件夹 {dirpath} .")
return dirpath
def clean_tmp_dir(days=3):
"""清理陆吾的临时文件夹,默认清理三天前的
Args:
        days (int, optional): Remove temporary directories older than this many days. Defaults to 3.
"""
timestamp = int(time.time())
days_timestamp = 86400 * days
cnt = 0
for dir_path in glob(os.path.join(LUWU_TMP_DIR_ROOT, "*")):
dir_timestamp = int(dir_path.split("/")[-1])
if timestamp - dir_timestamp > days_timestamp:
cmd = f"rm -rf {abspath(dir_path)}"
cmd_util.run_cmd(cmd)
cnt += 1
logger.info(f"已清理掉 {cnt} 个临时文件夹.")
| 24.525424 | 75 | 0.630961 |
import os
import time
from uuid import uuid1
from glob import glob
from luwu.utils import cmd_util
from loguru import logger
def abspath(filepath):
if filepath:
return os.path.abspath(os.path.expanduser(filepath))
else:
return ""
LUWU_TMP_DIR_ROOT = abspath("~/.luwu/tmp")
def mkdirs(dirpath):
os.makedirs(dirpath, exist_ok=True)
def get_tmp_dir(dir_name=""):
timestamp = str(int(time.time()))
if not dir_name:
dir_name = str(uuid1())
dirpath = abspath(os.path.join(LUWU_TMP_DIR_ROOT, timestamp, dir_name))
mkdirs(dirpath)
logger.info(f"已创建临时文件夹 {dirpath} .")
return dirpath
def clean_tmp_dir(days=3):
timestamp = int(time.time())
days_timestamp = 86400 * days
cnt = 0
for dir_path in glob(os.path.join(LUWU_TMP_DIR_ROOT, "*")):
dir_timestamp = int(dir_path.split("/")[-1])
if timestamp - dir_timestamp > days_timestamp:
cmd = f"rm -rf {abspath(dir_path)}"
cmd_util.run_cmd(cmd)
cnt += 1
logger.info(f"已清理掉 {cnt} 个临时文件夹.")
| true | true |
1c35d02c76912387c8c0d48ec77217aeb67083f6 | 1,469 | py | Python | server/routes/routes_home.py | prsolucoes/firedash | 31e4364088200a63bed5754c527061554c139a27 | [
"MIT"
] | 2 | 2019-10-04T21:52:40.000Z | 2019-11-05T20:11:04.000Z | server/routes/routes_home.py | prsolucoes/firedash | 31e4364088200a63bed5754c527061554c139a27 | [
"MIT"
] | 2 | 2021-05-08T00:40:24.000Z | 2021-05-08T00:40:42.000Z | server/routes/routes_home.py | paulo-coutinho/firedash | 31e4364088200a63bed5754c527061554c139a27 | [
"MIT"
] | 2 | 2019-09-16T15:45:25.000Z | 2019-10-04T21:52:44.000Z | import os
from flask import render_template, Blueprint, send_from_directory, current_app
from config.data import config_data
routes_home = Blueprint("home", __name__)
@routes_home.route("/", defaults={"path": ""})
@routes_home.route("/<path:path>")
def action_catch_all(path):
if config_data["web_cli_enabled"]:
return render_template("index.html")
else:
return """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Firedash</title>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/bulma/0.7.5/css/bulma.min.css">
<script defer src="https://use.fontawesome.com/releases/v5.3.1/js/all.js"></script>
</head>
<body>
<section class="section">
<div class="container has-text-centered">
<p>
<img src="https://github.com/prsolucoes/firedash/blob/master/extras/images/logo.png?raw=true" title="Firedash" style="width: 100px">
</p>
<h1 class="title">
Firedash
</h1>
<p class="subtitle">
Dashboards for general purposes with batteries included
</p>
</div>
</section>
</body>
</html>
"""
@routes_home.route("/favicon.ico")
def action_favicon():
return send_from_directory(
os.path.join(current_app.root_path, "..", "..", "web-cli", "dist", "static"),
"favicon.ico",
mimetype="image/vnd.microsoft.icon",
)
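# Usage sketch (illustrative): register this blueprint on a Flask app.
if __name__ == "__main__":
    from flask import Flask
    _app = Flask(__name__)
    _app.register_blueprint(routes_home)
    _app.run(debug=True)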
| 28.25 | 140 | 0.641253 | import os
from flask import render_template, Blueprint, send_from_directory, current_app
from config.data import config_data
routes_home = Blueprint("home", __name__)
@routes_home.route("/", defaults={"path": ""})
@routes_home.route("/<path:path>")
def action_catch_all(path):
if config_data["web_cli_enabled"]:
return render_template("index.html")
else:
return """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Firedash</title>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/bulma/0.7.5/css/bulma.min.css">
<script defer src="https://use.fontawesome.com/releases/v5.3.1/js/all.js"></script>
</head>
<body>
<section class="section">
<div class="container has-text-centered">
<p>
<img src="https://github.com/prsolucoes/firedash/blob/master/extras/images/logo.png?raw=true" title="Firedash" style="width: 100px">
</p>
<h1 class="title">
Firedash
</h1>
<p class="subtitle">
Dashboards for general purposes with batteries included
</p>
</div>
</section>
</body>
</html>
"""
@routes_home.route("/favicon.ico")
def action_favicon():
return send_from_directory(
os.path.join(current_app.root_path, "..", "..", "web-cli", "dist", "static"),
"favicon.ico",
mimetype="image/vnd.microsoft.icon",
)
| true | true |
1c35d050815b9cdf658203a170875c7e4d0749ff | 3,308 | py | Python | Athos/tests/tf/unittests/test_non_linear.py | mpc-msri-dev/EzPC | a489c49d5c92f51df0277a7e5751e1b8baeb0bc1 | [
"MIT"
] | null | null | null | Athos/tests/tf/unittests/test_non_linear.py | mpc-msri-dev/EzPC | a489c49d5c92f51df0277a7e5751e1b8baeb0bc1 | [
"MIT"
] | null | null | null | Athos/tests/tf/unittests/test_non_linear.py | mpc-msri-dev/EzPC | a489c49d5c92f51df0277a7e5751e1b8baeb0bc1 | [
"MIT"
] | null | null | null | '''
Authors: Pratik Bhatu.
Copyright:
Copyright (c) 2021 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import tensorflow as tf
import numpy as np
import pytest
import sys
import os
# Athos DIR
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
from tests.utils import Config, Compiler, assert_almost_equal
@pytest.mark.skip(reason="[non-linear] Haven't made non-linear functionalities public")
@pytest.mark.parametrize("a_shape", [[4, 4], [1], []])
@pytest.mark.parametrize("dtype", [np.single])
@pytest.mark.parametrize(
"tfOp",
[
tf.math.sqrt,
tf.math.rsqrt,
tf.math.sigmoid,
tf.math.tanh,
tf.nn.relu,
],
)
def test_non_linear(test_dir, backend, tfOp, a_shape, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tfOp(a, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
assert expected_output is not None
config = Config(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
return
@pytest.mark.skip(reason="[softmax] Haven't made non-linear functionalities public")
@pytest.mark.parametrize("a_shape, axis", [([2, 3], 1), ([1], 0)])
@pytest.mark.parametrize("dtype", [np.single])
def test_softmax(test_dir, backend, a_shape, axis, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.nn.softmax(a, axis=axis, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
assert expected_output is not None
config = Config(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
return
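# Running these tests (illustrative): `test_dir` and `backend` are pytest
# fixtures supplied by this repo's conftest, so invoke them through the
# project's own test harness, e.g.:
#   pytest Athos/tests/tf/unittests/test_non_linear.py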
| 40.341463 | 87 | 0.726723 | import tensorflow as tf
import numpy as np
import pytest
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
from tests.utils import Config, Compiler, assert_almost_equal
@pytest.mark.skip(reason="[non-linear] Haven't made non-linear functionalities public")
@pytest.mark.parametrize("a_shape", [[4, 4], [1], []])
@pytest.mark.parametrize("dtype", [np.single])
@pytest.mark.parametrize(
"tfOp",
[
tf.math.sqrt,
tf.math.rsqrt,
tf.math.sigmoid,
tf.math.tanh,
tf.nn.relu,
],
)
def test_non_linear(test_dir, backend, tfOp, a_shape, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tfOp(a, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
assert expected_output is not None
config = Config(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
return
@pytest.mark.skip(reason="[softmax] Haven't made non-linear functionalities public")
@pytest.mark.parametrize("a_shape, axis", [([2, 3], 1), ([1], 0)])
@pytest.mark.parametrize("dtype", [np.single])
def test_softmax(test_dir, backend, a_shape, axis, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.nn.softmax(a, axis=axis, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
assert expected_output is not None
config = Config(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
return
| true | true |
1c35d0a3ab1bdafc146123180f084381439c529a | 1,784 | py | Python | rcc8_table.py | CaFaSa/ternary-projective-relations | 66e6a9b3792e950cf53d848c5a86170bc810fef4 | [
"MIT"
] | null | null | null | rcc8_table.py | CaFaSa/ternary-projective-relations | 66e6a9b3792e950cf53d848c5a86170bc810fef4 | [
"MIT"
] | 9 | 2018-09-18T11:04:05.000Z | 2019-01-23T15:19:19.000Z | rcc8_table.py | CaFaSa/ternary-projective-relations | 66e6a9b3792e950cf53d848c5a86170bc810fef4 | [
"MIT"
] | null | null | null | from collections import defaultdict
#T composition table
T=defaultdict(dict)
U={'DC','EC','EQ','TPP','NTPP','TPPi','NTPPi','PO'}
O={'EQ','TPP','NTPP','TPPi','NTPPi','PO'}
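# Shorthand used in some cells below: 'PPi' stands for the pair {TPPi, NTPPi}.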
T['DC']={'DC':U, 'EC':{'DC','EC','PO','TPP','NTPP'},'PO':{'DC','EC','PO','TPP','NTPP'},'TPP':{'DC','EC','PO','TPP','NTPP'},'NTPP':{'DC','EC','PO','TPP','NTPP'},'TPPi':{'DC'},'NTPPi':{'DC'},'EQ':{'DC'}}
T['EC']={'DC':{'DC','EC','PO','PPi'},'EC':{'DC','EC','PO','TPP','TPPi','EQ'},'PO':{'DC','EC','PO','TPP','NTPP'},'TPP':{'EC','PO','TPP','NTPP'},'NTPP':{'PO','TPP','NTPP'},'TPPi':{'DC','EC'},'NTPPi':{'DC'},'EQ':{'EC'}}
T['PO']={'DC':{'DC','EC','PO','PPi'},'EC':{'DC','EC','PO','PPi'},'PO':U,'TPP':{'PO','TPP','NTPP'},'NTPP':{'PO','TPP','NTPP'},'TPPi':{'DC','EC','PO','PPi'},'NTPPi':{'DC','EC','PO','PPi'},'EQ':{'PO'}}
T['TPP']={'DC':{'DC'},'EC':{'DC','EC'},'PO':{'DC','EC','PO','TPP','NTPP'},'TPP':{'TPP','NTPP'},'NTPP':{'NTPP'},'TPPi':{'DC','EC','PO','TPP','TPPi','EQ'},'NTPPi':{'DC','EC','PO','PPi'},'EQ':{'TPP'}}
T['NTPP']={'DC':{'DC'},'EC':{'DC'},'PO':{'DC','EC','PO','TPP','NTPP'},'TPP':{'NTPP'},'NTPP':{'NTPP'},'TPPi':{'DC','EC','PO','TPP','NTPP'},'NTPPi':U,'EQ':{'NTPP'}}
T['TPPi']={'DC':{'DC','EC','PO','PPi'},'EC':{'EC','PO','PPi'},'PO':{'PO','PPi'},'TPP':{'PO','TPP','TPPi','EQ'},'NTPP':{'PO','TPP','NTPP'},'TPPi':{'PPi'},'NTPPi':{'NTPPi'},'EQ':{'TPPi'}}
T['NTPPi']={'DC':{'DC','EC','PO','PPi'},'EC':{'PO','PPi'},'PO':{'PO','PPi'},'TPP':{'PO','PPi'},'NTPP':O,'TPPi':{'NTPPi'},'NTPPi':{'NTPPi'},'EQ':{'NTPPi'}}
T['EQ']={'DC':{'DC'},'EC':{'EC'},'PO':{'PO'},'TPP':{'TPP'},'NTPP':{'NTPP'},'TPPi':{'TPPi'},'NTPPi':{'NTPPi'},'EQ':{'EQ'}}
OperatoriDiretti=['EQ','TPP','NTPP','PO','EC','DC']
OperatoriInversi=['EQ','TPPI','NTPPI','PO','EC','DC']
#OperatoriDiretti=['DC','EC','EQ','TPP','NTPP','PO']
#OperatoriInversi=['EQ','TPPI','NTPPI','PO','EC','DC']
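# Usage sketch (illustrative): compose two RCC8 relations via the table.
if __name__ == "__main__":
    print(T['DC']['EC'])    # possible relations between X and Z after X DC Y, Y EC Z
    print(T['EQ']['TPP'])   # composing with EQ is the identity: {'TPP'}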
| 74.333333 | 201 | 0.471973 | from collections import defaultdict
T=defaultdict(dict)
U={'DC','EC','EQ','TPP','NTPP','TPPi','NTPPi','PO'}
O={'EQ','TPP','NTPP','TPPi','NTPPi','PO'}
T['DC']={'DC':U, 'EC':{'DC','EC','PO','TPP','NTPP'},'PO':{'DC','EC','PO','TPP','NTPP'},'TPP':{'DC','EC','PO','TPP','NTPP'},'NTPP':{'DC','EC','PO','TPP','NTPP'},'TPPi':{'DC'},'NTPPi':{'DC'},'EQ':{'DC'}}
T['EC']={'DC':{'DC','EC','PO','PPi'},'EC':{'DC','EC','PO','TPP','TPPi','EQ'},'PO':{'DC','EC','PO','TPP','NTPP'},'TPP':{'EC','PO','TPP','NTPP'},'NTPP':{'PO','TPP','NTPP'},'TPPi':{'DC','EC'},'NTPPi':{'DC'},'EQ':{'EC'}}
T['PO']={'DC':{'DC','EC','PO','PPi'},'EC':{'DC','EC','PO','PPi'},'PO':U,'TPP':{'PO','TPP','NTPP'},'NTPP':{'PO','TPP','NTPP'},'TPPi':{'DC','EC','PO','PPi'},'NTPPi':{'DC','EC','PO','PPi'},'EQ':{'PO'}}
T['TPP']={'DC':{'DC'},'EC':{'DC','EC'},'PO':{'DC','EC','PO','TPP','NTPP'},'TPP':{'TPP','NTPP'},'NTPP':{'NTPP'},'TPPi':{'DC','EC','PO','TPP','TPPi','EQ'},'NTPPi':{'DC','EC','PO','PPi'},'EQ':{'TPP'}}
T['NTPP']={'DC':{'DC'},'EC':{'DC'},'PO':{'DC','EC','PO','TPP','NTPP'},'TPP':{'NTPP'},'NTPP':{'NTPP'},'TPPi':{'DC','EC','PO','TPP','NTPP'},'NTPPi':U,'EQ':{'NTPP'}}
T['TPPi']={'DC':{'DC','EC','PO','PPi'},'EC':{'EC','PO','PPi'},'PO':{'PO','PPi'},'TPP':{'PO','TPP','TPPi','EQ'},'NTPP':{'PO','TPP','NTPP'},'TPPi':{'PPi'},'NTPPi':{'NTPPi'},'EQ':{'TPPi'}}
T['NTPPi']={'DC':{'DC','EC','PO','PPi'},'EC':{'PO','PPi'},'PO':{'PO','PPi'},'TPP':{'PO','PPi'},'NTPP':O,'TPPi':{'NTPPi'},'NTPPi':{'NTPPi'},'EQ':{'NTPPi'}}
T['EQ']={'DC':{'DC'},'EC':{'EC'},'PO':{'PO'},'TPP':{'TPP'},'NTPP':{'NTPP'},'TPPi':{'TPPi'},'NTPPi':{'NTPPi'},'EQ':{'EQ'}}
OperatoriDiretti=['EQ','TPP','NTPP','PO','EC','DC']
OperatoriInversi=['EQ','TPPI','NTPPI','PO','EC','DC']
| true | true |
1c35d0dd4fb4025d909ea5c166f05978e111964a | 265 | py | Python | tests/artificial/transf_None/trend_LinearTrend/cycle_12/ar_/test_artificial_32_None_LinearTrend_12__0.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/artificial/transf_None/trend_LinearTrend/cycle_12/ar_/test_artificial_32_None_LinearTrend_12__0.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/artificial/transf_None/trend_LinearTrend/cycle_12/ar_/test_artificial_32_None_LinearTrend_12__0.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 12, transform = "None", sigma = 0.0, exog_count = 0, ar_order = 0); | 37.857143 | 160 | 0.728302 | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 12, transform = "None", sigma = 0.0, exog_count = 0, ar_order = 0); | true | true |
1c35d117446b110efee6159784d102f5fd4ad147 | 343 | py | Python | aula4_pt1/views.py | ulisses9si/curso-flask | 2bde146f39d4eea78b98a8189ce76afd622ea54a | [
"Unlicense"
] | null | null | null | aula4_pt1/views.py | ulisses9si/curso-flask | 2bde146f39d4eea78b98a8189ce76afd622ea54a | [
"Unlicense"
] | null | null | null | aula4_pt1/views.py | ulisses9si/curso-flask | 2bde146f39d4eea78b98a8189ce76afd622ea54a | [
"Unlicense"
] | null | null | null | """Extensão Flask"""
from flask import Flask, request
def init_app(app: Flask):
"""Inicialização de extensões"""
@app.route("/")
def index():
print(request.args)
return "Esta rodando aguarde"
@app.route("/contato")
def contato():
return "<form><input type='text'></input></form>"
| 21.4375 | 58 | 0.571429 | from flask import Flask, request
def init_app(app: Flask):
@app.route("/")
def index():
print(request.args)
return "Esta rodando aguarde"
@app.route("/contato")
def contato():
return "<form><input type='text'></input></form>"
| true | true |
1c35d1223d4b40e592b04a994e6978a0965730e4 | 2,794 | py | Python | guardianbot/interactions.py | shiftinv/GuardianBot | 2c5faef7ba4bf35e9f7fc814dd88f432f0af89da | [
"Apache-2.0"
] | 2 | 2021-11-21T12:30:44.000Z | 2021-11-22T13:39:27.000Z | guardianbot/interactions.py | shiftinv/GuardianBot | 2c5faef7ba4bf35e9f7fc814dd88f432f0af89da | [
"Apache-2.0"
] | null | null | null | guardianbot/interactions.py | shiftinv/GuardianBot | 2c5faef7ba4bf35e9f7fc814dd88f432f0af89da | [
"Apache-2.0"
] | null | null | null | from disnake.ext import commands
from typing import Callable, Dict, List, Optional, TypeVar, Union
from . import multicmd, types, utils
from .config import Config
class CustomSyncBot(commands.Bot):
async def _sync_application_command_permissions(self) -> None:
for command in self.application_commands:
# make sure `default_permission` is `False` if custom permissions are set
all_perms: List[bool] = []
for u in command.permissions.values():
for p in (u.permissions, u.role_ids, u.user_ids, {None: u.owner} if u.owner is not None else None):
if not p:
continue
all_perms.extend(p.values())
if all_perms and all(p is True for p in all_perms):
assert command.body.default_permission is False, \
f'custom command permissions require `default_permission = False` (command: \'{command.qualified_name}\')'
# call original func
return await super()._sync_application_command_permissions()
async def _prepare_application_commands(self) -> None:
async with utils.catch_and_exit(self):
return await super()._prepare_application_commands()
async def _delayed_command_sync(self) -> None:
async with utils.catch_and_exit(self):
return await super()._delayed_command_sync()
_TCmd = TypeVar(
'_TCmd',
commands.InvokableApplicationCommand,
types.HandlerType,
# permissions can only be set on top level, not per subcommand/subgroup
multicmd._MultiCommand,
multicmd._MultiGroup
)
def allow(
*,
roles: Optional[Dict[int, bool]] = None,
users: Optional[Dict[int, bool]] = None,
owner: Optional[bool] = None
) -> Callable[[_TCmd], _TCmd]:
def wrap(cmd: _TCmd) -> _TCmd:
dec = commands.guild_permissions(
Config.guild_id,
roles=types.unwrap_opt(roles),
users=types.unwrap_opt(users),
owner=types.unwrap_opt(owner),
)
dec_input: Union[commands.InvokableApplicationCommand, types.HandlerType]
if isinstance(cmd, (multicmd._MultiCommand, multicmd._MultiGroup)):
dec_input = cmd._slash_command
elif isinstance(cmd, multicmd._MultiBase) or not callable(cmd):
raise TypeError(f'permissions cannot be set on `{type(cmd).__name__}` objects')
else:
dec_input = cmd
# apply decorator to handler func/object
r = dec(dec_input)
# sanity check to protect against internal changes, since we're not returning the decorator's result
assert r is dec_input
return cmd
return wrap
allow_mod = allow(owner=True, roles=dict.fromkeys(Config.mod_role_ids, True))
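# Usage sketch (illustrative; `purge` is a hypothetical command name):
#
#     @allow_mod
#     @commands.slash_command()
#     async def purge(inter):
#         ...
#
# `allow_mod` grants access to the bot owner and the configured mod roles.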
| 36.763158 | 126 | 0.653901 | from disnake.ext import commands
from typing import Callable, Dict, List, Optional, TypeVar, Union
from . import multicmd, types, utils
from .config import Config
class CustomSyncBot(commands.Bot):
async def _sync_application_command_permissions(self) -> None:
for command in self.application_commands:
all_perms: List[bool] = []
for u in command.permissions.values():
for p in (u.permissions, u.role_ids, u.user_ids, {None: u.owner} if u.owner is not None else None):
if not p:
continue
all_perms.extend(p.values())
if all_perms and all(p is True for p in all_perms):
assert command.body.default_permission is False, \
f'custom command permissions require `default_permission = False` (command: \'{command.qualified_name}\')'
return await super()._sync_application_command_permissions()
async def _prepare_application_commands(self) -> None:
async with utils.catch_and_exit(self):
return await super()._prepare_application_commands()
async def _delayed_command_sync(self) -> None:
async with utils.catch_and_exit(self):
return await super()._delayed_command_sync()
_TCmd = TypeVar(
'_TCmd',
commands.InvokableApplicationCommand,
types.HandlerType,
multicmd._MultiCommand,
multicmd._MultiGroup
)
def allow(
*,
roles: Optional[Dict[int, bool]] = None,
users: Optional[Dict[int, bool]] = None,
owner: Optional[bool] = None
) -> Callable[[_TCmd], _TCmd]:
def wrap(cmd: _TCmd) -> _TCmd:
dec = commands.guild_permissions(
Config.guild_id,
roles=types.unwrap_opt(roles),
users=types.unwrap_opt(users),
owner=types.unwrap_opt(owner),
)
dec_input: Union[commands.InvokableApplicationCommand, types.HandlerType]
if isinstance(cmd, (multicmd._MultiCommand, multicmd._MultiGroup)):
dec_input = cmd._slash_command
elif isinstance(cmd, multicmd._MultiBase) or not callable(cmd):
raise TypeError(f'permissions cannot be set on `{type(cmd).__name__}` objects')
else:
dec_input = cmd
r = dec(dec_input)
assert r is dec_input
return cmd
return wrap
allow_mod = allow(owner=True, roles=dict.fromkeys(Config.mod_role_ids, True))
| true | true |
1c35d173246b53fefd76fea0ee73619eb3487e35 | 1,515 | py | Python | game/core/tools/roomSuport.py | Galtvam/projeto-de-redes | 351f84074ea8739de52f280e5f52f7d1da6af728 | [
"MIT"
] | 2 | 2019-05-30T23:14:52.000Z | 2021-03-31T04:43:55.000Z | game/core/tools/roomSuport.py | Galtvam/projeto-de-redes | 351f84074ea8739de52f280e5f52f7d1da6af728 | [
"MIT"
] | 1 | 2019-07-01T18:08:11.000Z | 2019-07-01T18:08:11.000Z | game/core/tools/roomSuport.py | Galtvam/projeto-de-redes | 351f84074ea8739de52f280e5f52f7d1da6af728 | [
"MIT"
] | null | null | null | #coding: utf-8
def extractListOfRooms(peersList):
rooms = {}
for peer in peersList:
if peer[2] and (peer[3] != None):
rooms[peer[3]] = peer
return rooms
def extractPlayersInRoom(roomID, peersList):
players = []
for peer in peersList:
if peer[2] and (peer[3] == roomID):
nickname = peer[1]
ip = peer[0]
players.append([nickname, ip, None])
return players
def offlineDetection(peersList, playersList):
for player in playersList[1:]:
mark = False
for peer in peersList:
if peer[0] == player[1]:
mark = True
break
if not(mark):
playersList.remove(player)
def candidatesExtractor(playersList, lastMaster):
cand = []
if len(playersList) > 2:
for player in playersList:
if player[0] != lastMaster:
cand.append(player[0])
else:
for player in playersList:
if player[0] != lastMaster:
cand.append(player[0])
return cand
def wordPackageExtractor(message):
word = ''
answer = ''
aux = ''
for l in message:
try:
int(chr(l))
aux += chr(l)
        except ValueError:
break
numberLen = len(aux)
count = int(aux)
    end = numberLen + count  # avoid shadowing the built-in max()
    for l in message[numberLen:end]:
        word += chr(l)
    ans = message[end:]
answer = ''
for k in ans:
answer += chr(k)
return word, answer
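# Usage sketch (illustrative): decode a "<length><word><answer>" byte package.
if __name__ == "__main__":
    word, answer = wordPackageExtractor(b"5applered")
    print(word, answer)  # -> apple red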
| 24.047619 | 49 | 0.532673 |
def extractListOfRooms(peersList):
rooms = {}
for peer in peersList:
if peer[2] and (peer[3] != None):
rooms[peer[3]] = peer
return rooms
def extractPlayersInRoom(roomID, peersList):
players = []
for peer in peersList:
if peer[2] and (peer[3] == roomID):
nickname = peer[1]
ip = peer[0]
players.append([nickname, ip, None])
return players
def offlineDetection(peersList, playersList):
for player in playersList[1:]:
mark = False
for peer in peersList:
if peer[0] == player[1]:
mark = True
break
if not(mark):
playersList.remove(player)
def candidatesExtractor(playersList, lastMaster):
cand = []
if len(playersList) > 2:
for player in playersList:
if player[0] != lastMaster:
cand.append(player[0])
else:
for player in playersList:
if player[0] != lastMaster:
cand.append(player[0])
return cand
def wordPackageExtractor(message):
word = ''
answer = ''
aux = ''
for l in message:
try:
int(chr(l))
aux += chr(l)
        except ValueError:
break
numberLen = len(aux)
count = int(aux)
    end = numberLen + count
    for l in message[numberLen:end]:
        word += chr(l)
    ans = message[end:]
answer = ''
for k in ans:
answer += chr(k)
return word, answer
| true | true |
1c35d1b4130224fc95c6a593379fa7b96eb1a7ee | 8,013 | py | Python | pypegasus/base/ttypes.py | XiaoMi/pegasus-python-client | 877ed3bdc193d44d10dbe9b89b4f1acf3f681587 | [
"Apache-2.0"
] | 8 | 2018-07-19T09:33:44.000Z | 2022-03-27T15:59:53.000Z | pypegasus/base/ttypes.py | XiaoMi/pegasus-python-client | 877ed3bdc193d44d10dbe9b89b4f1acf3f681587 | [
"Apache-2.0"
] | 8 | 2018-03-02T08:11:10.000Z | 2022-02-11T03:38:33.000Z | pypegasus/base/ttypes.py | XiaoMi/pegasus-python-client | 877ed3bdc193d44d10dbe9b89b4f1acf3f681587 | [
"Apache-2.0"
] | 8 | 2018-02-27T07:38:28.000Z | 2021-03-25T02:53:19.000Z | #
# Autogenerated by Thrift Compiler (0.9.3)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from aenum import Enum
import socket
import struct
from thrift.Thrift import TType
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class blob:
thrift_spec = (
)
def read(self, iprot):
self.data = iprot.readString()
def write(self, oprot):
oprot.writeString(self.data)
def validate(self):
return
def __init__(self, data=None):
self.data = data
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __len__(self):
return len(self.data)
class rocksdb_error_types(Enum):
kOk = 0
kNotFound = 1
kCorruption = 2
kNotSupported = 3
kInvalidArgument = 4
kIOError = 5
kMergeInProgress = 6
kIncomplete = 7
kShutdownInProgress = 8
kTimedOut = 9
kAborted = 10
kBusy = 11
kExpired = 12
kTryAgain = 13
kNoNeedOperate = 101
class error_types(Enum):
ERR_OK = 0
ERR_UNKNOWN = 1
ERR_SERVICE_NOT_FOUND = 2
ERR_SERVICE_ALREADY_RUNNING = 3
ERR_IO_PENDING = 4
ERR_TIMEOUT = 5
ERR_SERVICE_NOT_ACTIVE = 6
ERR_BUSY = 7
ERR_NETWORK_INIT_FAILED = 8
ERR_FORWARD_TO_OTHERS = 9
ERR_OBJECT_NOT_FOUND = 10
ERR_HANDLER_NOT_FOUND = 11
ERR_LEARN_FILE_FAILED = 12
ERR_GET_LEARN_STATE_FAILED = 13
ERR_INVALID_VERSION = 14
ERR_INVALID_PARAMETERS = 15
ERR_CAPACITY_EXCEEDED = 16
ERR_INVALID_STATE = 17
ERR_INACTIVE_STATE = 18
ERR_NOT_ENOUGH_MEMBER = 19
ERR_FILE_OPERATION_FAILED = 20
ERR_HANDLE_EOF = 21
ERR_WRONG_CHECKSUM = 22
ERR_INVALID_DATA = 23
ERR_INVALID_HANDLE = 24
ERR_INCOMPLETE_DATA = 25
ERR_VERSION_OUTDATED = 26
ERR_PATH_NOT_FOUND = 27
ERR_PATH_ALREADY_EXIST = 28
ERR_ADDRESS_ALREADY_USED = 29
ERR_STATE_FREEZED = 30
ERR_LOCAL_APP_FAILURE = 31
ERR_BIND_IOCP_FAILED = 32
ERR_NETWORK_START_FAILED = 33
ERR_NOT_IMPLEMENTED = 34
ERR_CHECKPOINT_FAILED = 35
ERR_WRONG_TIMING = 36
ERR_NO_NEED_OPERATE = 37
ERR_CORRUPTION = 38
ERR_TRY_AGAIN = 39
ERR_CLUSTER_NOT_FOUND = 40
ERR_CLUSTER_ALREADY_EXIST = 41
ERR_SERVICE_ALREADY_EXIST = 42
ERR_INJECTED = 43
ERR_REPLICATION_FAILURE = 44
ERR_APP_EXIST = 45
ERR_APP_NOT_EXIST = 46
ERR_BUSY_CREATING = 47
ERR_BUSY_DROPPING = 48
ERR_NETWORK_FAILURE = 49
ERR_UNDER_RECOVERY = 50
ERR_LEARNER_NOT_FOUND = 51
ERR_OPERATION_DISABLED = 52
ERR_EXPIRED = 53
ERR_LOCK_ALREADY_EXIST = 54
ERR_HOLD_BY_OTHERS = 55
ERR_RECURSIVE_LOCK = 56
ERR_NO_OWNER = 57
ERR_NODE_ALREADY_EXIST = 58
ERR_INCONSISTENT_STATE = 59
ERR_ARRAY_INDEX_OUT_OF_RANGE = 60
ERR_DIR_NOT_EMPTY = 61
ERR_FS_INTERNAL = 62
ERR_IGNORE_BAD_DATA = 63
ERR_APP_DROPPED = 64
ERR_MOCK_INTERNAL = 65
ERR_ZOOKEEPER_OPERATION = 66
ERR_CHILD_REGISTERED = 67
ERR_INGESTION_FAILED = 68
ERR_UNAUTHENTICATED = 69
ERR_KRB5_INTERNAL = 70
ERR_SASL_INTERNAL = 71
ERR_SASL_INCOMPLETE = 72
ERR_ACL_DENY = 73
ERR_SPLITTING = 74
ERR_PARENT_PARTITION_MISUSED = 75
ERR_CHILD_NOT_READY = 76
ERR_DISK_INSUFFICIENT = 77
# ERROR_CODE defined by client
ERR_SESSION_RESET = 78
ERR_THREAD_INTERRUPTED = 79
class error_code:
thrift_spec = (
)
def __init__(self, ):
self.errno = error_types.ERR_UNKNOWN
@staticmethod
def value_of(error_name):
return error_types[error_name]
def read(self, iprot):
self.errno = iprot.readString()
def write(self, oprot):
        oprot.writeString(self.errno)
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class task_code:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('task_code')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class rpc_address:
thrift_spec = (
(1, TType.I64, 'address', None, None, ), # 1
)
def __init__(self):
self.address = 0
def is_valid(self):
return self.address == 0
def from_string(self, host_port):
host, port = host_port.split(':')
self.address = socket.ntohl(struct.unpack("I", socket.inet_aton(host))[0])
self.address = (self.address << 32) + (int(port) << 16) + 1 # TODO why + 1?
return True
def to_host_port(self):
s = []
address = self.address
port = (address >> 16) & 0xFFFF
address = address >> 32
for i in range(4):
s.append(str(address & 0xFF))
address = address >> 8
host = '.'.join(s[::-1])
return host, port
def read(self, iprot):
self.address = iprot.readI64() & 0xFFFFFFFFFFFFFFFF
def write(self, oprot):
oprot.writeI64(self.address)
def validate(self):
return
def __hash__(self):
return self.address ^ (self.address >> 32)
def __repr__(self):
L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return other.__class__.__name__ == "rpc_address" and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class gpid:
thrift_spec = (
(1, TType.I64, 'value', None, None, ), # 1
)
def __init__(self, app_id=0, pidx=0):
self.value = (pidx << 32) + app_id
def read(self, iprot):
self.value = iprot.readI64()
def write(self, oprot):
oprot.writeI64(self.value)
def validate(self):
return
def __hash__(self):
return self.value >> 32 ^ self.value & 0x00000000ffffffff
def __repr__(self):
L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def get_app_id(self):
return self.value & 0x00000000ffffffff
def get_pidx(self):
return self.value >> 32
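# --- Hedged usage sketch (not part of the generated Thrift types): shows how
# gpid packs (app_id, partition_index) into one int64 and how rpc_address
# round-trips a "host:port" string. The sample address is a placeholder.
if __name__ == '__main__':
    pid = gpid(app_id=2, pidx=7)
    assert pid.get_app_id() == 2 and pid.get_pidx() == 7
    addr = rpc_address()
    addr.from_string('127.0.0.1:34601')
    print(addr.to_host_port())  # -> ('127.0.0.1', 34601)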
| 24.135542 | 188 | 0.669911 |
1c35d2067aef56bee8c2fb53ec03f7259ed5bb43 | 22,085 | py | Python | flightrl/stable-baselines3/stable_baselines3/common/logger.py | arsimone/flightmare | c546d9d54970c7ad803f3ada4c2ea64c51ab7287 | [
"MIT"
] | null | null | null | flightrl/stable-baselines3/stable_baselines3/common/logger.py | arsimone/flightmare | c546d9d54970c7ad803f3ada4c2ea64c51ab7287 | [
"MIT"
] | null | null | null | flightrl/stable-baselines3/stable_baselines3/common/logger.py | arsimone/flightmare | c546d9d54970c7ad803f3ada4c2ea64c51ab7287 | [
"MIT"
] | null | null | null | import datetime
import json
import os
import sys
import tempfile
import warnings
from collections import defaultdict
from typing import Any, Dict, List, Optional, Sequence, TextIO, Tuple, Union
import numpy as np
import pandas
import torch as th
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
SummaryWriter = None
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class Video(object):
"""
    Video data class storing the video frames and the frames per second
"""
def __init__(self, frames: th.Tensor, fps: Union[float, int]):
self.frames = frames
self.fps = fps
class Graph(object):
"""
Graph class logging graph to tensorboard
"""
    def __init__(self, model: th.nn.Module, model_input: th.Tensor):
self.model = model
self.model_input = model_input
class FormatUnsupportedError(NotImplementedError):
def __init__(self, unsupported_formats: Sequence[str], value_description: str):
if len(unsupported_formats) > 1:
format_str = f"formats {', '.join(unsupported_formats)} are"
else:
format_str = f"format {unsupported_formats[0]} is"
super(FormatUnsupportedError, self).__init__(
f"The {format_str} not supported for the {value_description} value logged.\n"
f"You can exclude formats via the `exclude` parameter of the logger's `record` function."
)
class KVWriter(object):
"""
Key Value writer
"""
def write(self, key_values: Dict[str, Any], key_excluded: Dict[str, Union[str, Tuple[str, ...]]], step: int = 0) -> None:
"""
Write a dictionary to file
:param key_values:
:param key_excluded:
:param step:
"""
raise NotImplementedError
def close(self) -> None:
"""
Close owned resources
"""
raise NotImplementedError
class SeqWriter(object):
"""
sequence writer
"""
def write_sequence(self, sequence: List) -> None:
"""
write_sequence an array to file
:param sequence:
"""
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file: Union[str, TextIO]):
"""
log to a file, in a human readable format
:param filename_or_file: the file to write the log to
"""
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, "wt")
self.own_file = True
else:
assert hasattr(filename_or_file, "write"), f"Expected file or str, got {filename_or_file}"
self.file = filename_or_file
self.own_file = False
def write(self, key_values: Dict, key_excluded: Dict, step: int = 0) -> None:
# Create strings for printing
key2str = {}
tag = None
for (key, value), (_, excluded) in zip(sorted(key_values.items()), sorted(key_excluded.items())):
if excluded is not None and ("stdout" in excluded or "log" in excluded):
continue
if isinstance(value, Video):
raise FormatUnsupportedError(["stdout", "log"], "video")
if isinstance(value, float):
# Align left
value_str = f"{value:<8.3g}"
else:
value_str = str(value)
if key.find("/") > 0: # Find tag and add it to the dict
tag = key[: key.find("/") + 1]
key2str[self._truncate(tag)] = ""
# Remove tag from key
if tag is not None and tag in key:
key = str(" " + key[len(tag) :])
key2str[self._truncate(key)] = self._truncate(value_str)
# Find max widths
if len(key2str) == 0:
warnings.warn("Tried to write empty key-value dict")
return
else:
key_width = max(map(len, key2str.keys()))
val_width = max(map(len, key2str.values()))
# Write out the data
dashes = "-" * (key_width + val_width + 7)
lines = [dashes]
for key, value in key2str.items():
key_space = " " * (key_width - len(key))
val_space = " " * (val_width - len(value))
lines.append(f"| {key}{key_space} | {value}{val_space} |")
lines.append(dashes)
self.file.write("\n".join(lines) + "\n")
# Flush the output to the file
self.file.flush()
@classmethod
def _truncate(cls, string: str, max_length: int = 23) -> str:
return string[: max_length - 3] + "..." if len(string) > max_length else string
def write_sequence(self, sequence: List) -> None:
sequence = list(sequence)
for i, elem in enumerate(sequence):
self.file.write(elem)
if i < len(sequence) - 1: # add space unless this is the last one
self.file.write(" ")
self.file.write("\n")
self.file.flush()
def close(self) -> None:
"""
closes the file
"""
if self.own_file:
self.file.close()
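# A minimal sketch of the table this writer emits (run interactively; the
# keys below are made up). Keys are sorted, floats are formatted "%-8.3g",
# and a shared "train/" prefix becomes its own tag row:
#
#     HumanOutputFormat(sys.stdout).write(
#         {"train/loss": 0.1234, "train/steps": 10},
#         {"train/loss": None, "train/steps": None})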
def filter_excluded_keys(
key_values: Dict[str, Any], key_excluded: Dict[str, Union[str, Tuple[str, ...]]], _format: str
) -> Dict[str, Any]:
"""
Filters the keys specified by ``key_exclude`` for the specified format
:param key_values: log dictionary to be filtered
:param key_excluded: keys to be excluded per format
:param _format: format for which this filter is run
:return: dict without the excluded keys
"""
def is_excluded(key: str) -> bool:
return key in key_excluded and key_excluded[key] is not None and _format in key_excluded[key]
return {key: value for key, value in key_values.items() if not is_excluded(key)}
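# Worked example (values are illustrative): with
#     key_values   = {"a": 1, "b": 2}
#     key_excluded = {"a": ("csv",), "b": None}
# filter_excluded_keys(key_values, key_excluded, "csv") returns {"b": 2},
# since "a" lists "csv" among its excluded formats.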
class JSONOutputFormat(KVWriter):
def __init__(self, filename: str):
"""
log to a file, in the JSON format
:param filename: the file to write the log to
"""
self.file = open(filename, "wt")
def write(self, key_values: Dict[str, Any], key_excluded: Dict[str, Union[str, Tuple[str, ...]]], step: int = 0) -> None:
def cast_to_json_serializable(value: Any):
if isinstance(value, Video):
raise FormatUnsupportedError(["json"], "video")
if hasattr(value, "dtype"):
if value.shape == () or len(value) == 1:
# if value is a dimensionless numpy array or of length 1, serialize as a float
return float(value)
else:
# otherwise, a value is a numpy array, serialize as a list or nested lists
return value.tolist()
return value
key_values = {
key: cast_to_json_serializable(value)
for key, value in filter_excluded_keys(key_values, key_excluded, "json").items()
}
self.file.write(json.dumps(key_values) + "\n")
self.file.flush()
def close(self) -> None:
"""
closes the file
"""
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename: str):
"""
log to a file, in a CSV format
:param filename: the file to write the log to
"""
self.file = open(filename, "w+t")
self.keys = []
self.separator = ","
def write(self, key_values: Dict[str, Any], key_excluded: Dict[str, Union[str, Tuple[str, ...]]], step: int = 0) -> None:
# Add our current row to the history
key_values = filter_excluded_keys(key_values, key_excluded, "csv")
extra_keys = key_values.keys() - self.keys
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, key) in enumerate(self.keys):
if i > 0:
self.file.write(",")
self.file.write(key)
self.file.write("\n")
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.separator * len(extra_keys))
self.file.write("\n")
for i, key in enumerate(self.keys):
if i > 0:
self.file.write(",")
value = key_values.get(key)
if isinstance(value, Video):
raise FormatUnsupportedError(["csv"], "video")
if value is not None:
self.file.write(str(value))
self.file.write("\n")
self.file.flush()
def close(self) -> None:
"""
closes the file
"""
self.file.close()
class TensorBoardOutputFormat(KVWriter):
def __init__(self, folder: str):
"""
Dumps key/value pairs into TensorBoard's numeric format.
:param folder: the folder to write the log to
"""
assert SummaryWriter is not None, "tensorboard is not installed, you can use " "pip install tensorboard to do so"
self.writer = SummaryWriter(log_dir=folder)
def write(self, key_values: Dict[str, Any], key_excluded: Dict[str, Union[str, Tuple[str, ...]]], step: int = 0) -> None:
for (key, value), (_, excluded) in zip(sorted(key_values.items()), sorted(key_excluded.items())):
if excluded is not None and "tensorboard" in excluded:
continue
if isinstance(value, np.ScalarType):
self.writer.add_scalar(key, value, step)
if isinstance(value, th.Tensor):
self.writer.add_histogram(key, value, step)
if isinstance(value, Graph):
self.writer.add_graph(value.model, value.model_input)
if isinstance(value, Video):
self.writer.add_video(key, value.frames, step, value.fps)
# Flush the output to the file
self.writer.flush()
def close(self) -> None:
"""
closes the file
"""
if self.writer:
self.writer.close()
self.writer = None
def make_output_format(_format: str, log_dir: str, log_suffix: str = "") -> KVWriter:
"""
return a logger for the requested format
:param _format: the requested format to log to ('stdout', 'log', 'json' or 'csv' or 'tensorboard')
:param log_dir: the logging directory
:param log_suffix: the suffix for the log file
:return: the logger
"""
os.makedirs(log_dir, exist_ok=True)
if _format == "stdout":
return HumanOutputFormat(sys.stdout)
elif _format == "log":
return HumanOutputFormat(os.path.join(log_dir, f"log{log_suffix}.txt"))
elif _format == "json":
return JSONOutputFormat(os.path.join(log_dir, f"progress{log_suffix}.json"))
elif _format == "csv":
return CSVOutputFormat(os.path.join(log_dir, f"progress{log_suffix}.csv"))
elif _format == "tensorboard":
return TensorBoardOutputFormat(log_dir)
else:
raise ValueError(f"Unknown format specified: {_format}")
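# For example (paths are placeholders): make_output_format("csv", "/tmp/sb3")
# returns a CSVOutputFormat appending to /tmp/sb3/progress.csv, while
# make_output_format("log", "/tmp/sb3", "_run1") writes /tmp/sb3/log_run1.txt.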
# ================================================================
# API
# ================================================================
def record(key: str, value: Any, exclude: Optional[Union[str, Tuple[str, ...]]] = None) -> None:
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
:param key: save to log this key
:param value: save to log this value
:param exclude: outputs to be excluded
"""
Logger.CURRENT.record(key, value, exclude)
def record_mean(key: str, value: Union[int, float], exclude: Optional[Union[str, Tuple[str, ...]]] = None) -> None:
"""
The same as record(), but if called many times, values averaged.
:param key: save to log this key
:param value: save to log this value
:param exclude: outputs to be excluded
"""
Logger.CURRENT.record_mean(key, value, exclude)
def record_dict(key_values: Dict[str, Any]) -> None:
"""
Log a dictionary of key-value pairs.
:param key_values: the list of keys and values to save to log
"""
for key, value in key_values.items():
record(key, value)
def dump(step: int = 0) -> None:
"""
Write all of the diagnostics from the current iteration
"""
Logger.CURRENT.dump(step)
def get_log_dict() -> Dict:
"""
get the key values logs
:return: the logged values
"""
return Logger.CURRENT.name_to_value
def log(*args, level: int = INFO) -> None:
"""
Write the sequence of args, with no separators,
to the console and output files (if you've configured an output file).
level: int. (see logger.py docs) If the global logger level is higher than
the level argument here, don't print to stdout.
:param args: log the arguments
:param level: the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50)
"""
Logger.CURRENT.log(*args, level=level)
def debug(*args) -> None:
"""
Write the sequence of args, with no separators,
to the console and output files (if you've configured an output file).
Using the DEBUG level.
:param args: log the arguments
"""
log(*args, level=DEBUG)
def info(*args) -> None:
"""
Write the sequence of args, with no separators,
to the console and output files (if you've configured an output file).
Using the INFO level.
:param args: log the arguments
"""
log(*args, level=INFO)
def warn(*args) -> None:
"""
Write the sequence of args, with no separators,
to the console and output files (if you've configured an output file).
Using the WARN level.
:param args: log the arguments
"""
log(*args, level=WARN)
def error(*args) -> None:
"""
Write the sequence of args, with no separators,
to the console and output files (if you've configured an output file).
Using the ERROR level.
:param args: log the arguments
"""
log(*args, level=ERROR)
def set_level(level: int) -> None:
"""
Set logging threshold on current logger.
:param level: the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50)
"""
Logger.CURRENT.set_level(level)
def get_level() -> int:
"""
Get logging threshold on current logger.
:return: the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50)
"""
return Logger.CURRENT.level
def get_dir() -> str:
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
:return: the logging directory
"""
return Logger.CURRENT.get_dir()
record_tabular = record
dump_tabular = dump
# ================================================================
# Backend
# ================================================================
class Logger(object):
# A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
DEFAULT = None
CURRENT = None # Current logger being used by the free functions above
def __init__(self, folder: Optional[str], output_formats: List[KVWriter]):
"""
the logger class
:param folder: the logging location
:param output_formats: the list of output format
"""
self.name_to_value = defaultdict(float) # values this iteration
self.name_to_count = defaultdict(int)
self.name_to_excluded = defaultdict(str)
self.level = INFO
self.dir = folder
self.output_formats = output_formats
# Logging API, forwarded
# ----------------------------------------
def record(self, key: str, value: Any, exclude: Optional[Union[str, Tuple[str, ...]]] = None) -> None:
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
:param key: save to log this key
:param value: save to log this value
:param exclude: outputs to be excluded
"""
self.name_to_value[key] = value
self.name_to_excluded[key] = exclude
def record_mean(self, key: str, value: Any, exclude: Optional[Union[str, Tuple[str, ...]]] = None) -> None:
"""
The same as record(), but if called many times, values averaged.
:param key: save to log this key
:param value: save to log this value
:param exclude: outputs to be excluded
"""
if value is None:
self.name_to_value[key] = None
return
old_val, count = self.name_to_value[key], self.name_to_count[key]
self.name_to_value[key] = old_val * count / (count + 1) + value / (count + 1)
self.name_to_count[key] = count + 1
self.name_to_excluded[key] = exclude
def dump(self, step: int = 0) -> None:
"""
Write all of the diagnostics from the current iteration
"""
if self.level == DISABLED:
return
for _format in self.output_formats:
if isinstance(_format, KVWriter):
_format.write(self.name_to_value, self.name_to_excluded, step)
self.name_to_value.clear()
self.name_to_count.clear()
self.name_to_excluded.clear()
def log(self, *args, level: int = INFO) -> None:
"""
Write the sequence of args, with no separators,
to the console and output files (if you've configured an output file).
level: int. (see logger.py docs) If the global logger level is higher than
the level argument here, don't print to stdout.
:param args: log the arguments
:param level: the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50)
"""
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level: int) -> None:
"""
Set logging threshold on current logger.
:param level: the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50)
"""
self.level = level
def get_dir(self) -> str:
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
:return: the logging directory
"""
return self.dir
def close(self) -> None:
"""
closes the file
"""
for _format in self.output_formats:
_format.close()
# Misc
# ----------------------------------------
def _do_log(self, args) -> None:
"""
log to the requested format outputs
:param args: the arguments to log
"""
for _format in self.output_formats:
if isinstance(_format, SeqWriter):
_format.write_sequence(map(str, args))
# Initialize logger
Logger.DEFAULT = Logger.CURRENT = Logger(folder=None, output_formats=[HumanOutputFormat(sys.stdout)])
def configure(folder: Optional[str] = None, format_strings: Optional[List[str]] = None) -> None:
"""
configure the current logger
:param folder: the save location
(if None, $SB3_LOGDIR, if still None, tempdir/baselines-[date & time])
:param format_strings: the output logging format
(if None, $SB3_LOG_FORMAT, if still None, ['stdout', 'log', 'csv'])
"""
if folder is None:
folder = os.getenv("SB3_LOGDIR")
if folder is None:
folder = os.path.join(tempfile.gettempdir(), datetime.datetime.now().strftime("SB3-%Y-%m-%d-%H-%M-%S-%f"))
assert isinstance(folder, str)
os.makedirs(folder, exist_ok=True)
log_suffix = ""
if format_strings is None:
format_strings = os.getenv("SB3_LOG_FORMAT", "stdout,log,csv").split(",")
format_strings = filter(None, format_strings)
output_formats = [make_output_format(f, folder, log_suffix) for f in format_strings]
Logger.CURRENT = Logger(folder=folder, output_formats=output_formats)
log(f"Logging to {folder}")
def reset() -> None:
"""
reset the current logger
"""
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log("Reset logger")
class ScopedConfigure(object):
def __init__(self, folder: Optional[str] = None, format_strings: Optional[List[str]] = None):
"""
Class for using context manager while logging
usage:
with ScopedConfigure(folder=None, format_strings=None):
{code}
:param folder: the logging folder
:param format_strings: the list of output logging format
"""
self.dir = folder
self.format_strings = format_strings
self.prev_logger = None
def __enter__(self) -> None:
self.prev_logger = Logger.CURRENT
configure(folder=self.dir, format_strings=self.format_strings)
def __exit__(self, *args) -> None:
Logger.CURRENT.close()
Logger.CURRENT = self.prev_logger
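# Usage sketch (the folder is a placeholder): the context manager swaps
# Logger.CURRENT for the duration of the block and restores it afterwards.
#
#     with ScopedConfigure(folder="/tmp/sb3_run", format_strings=["csv"]):
#         record("train/loss", 0.5)
#         dump(step=1)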
# ================================================================
# Readers
# ================================================================
def read_json(filename: str) -> pandas.DataFrame:
"""
read a json file using pandas
:param filename: the file path to read
:return: the data in the json
"""
data = []
with open(filename, "rt") as file_handler:
for line in file_handler:
data.append(json.loads(line))
return pandas.DataFrame(data)
def read_csv(filename: str) -> pandas.DataFrame:
"""
read a csv file using pandas
:param filename: the file path to read
:return: the data in the csv
"""
return pandas.read_csv(filename, index_col=None, comment="#")
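if __name__ == "__main__":
    # Minimal end-to-end sketch (assumes a writable temp dir): configure a
    # CSV logger, record two diagnostics, dump them, then read the file back.
    configure(format_strings=["stdout", "csv"])
    record("demo/value", 1.0)
    record("demo/step", 0)
    dump(step=0)
    print(read_csv(os.path.join(get_dir(), "progress.csv")))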
| 31.237624 | 125 | 0.591578 |
1c35d30f441730bd6bc9b240eade1dd952b106d9 | 594 | py | Python | examples/rockblock_send_text.py | OperatorFoundation/Adafruit_CircuitPython_RockBlock | d98b530faba55e71a1872ddaaab0ae507e86362c | [
"MIT"
] | null | null | null | examples/rockblock_send_text.py | OperatorFoundation/Adafruit_CircuitPython_RockBlock | d98b530faba55e71a1872ddaaab0ae507e86362c | [
"MIT"
] | null | null | null | examples/rockblock_send_text.py | OperatorFoundation/Adafruit_CircuitPython_RockBlock | d98b530faba55e71a1872ddaaab0ae507e86362c | [
"MIT"
] | null | null | null | # pylint: disable=wrong-import-position
import time
# CircuitPython / Blinka
import board
uart = board.UART()
uart.baudrate = 19200
# via USB cable
# import serial
# uart = serial.Serial("/dev/ttyUSB0", 19200)
from adafruit_rockblock import RockBlock
rb = RockBlock(uart)
# set the text
rb.text_out = "hello world"
# try a satellite Short Burst Data transfer
print("Talking to satellite...")
status = rb.satellite_transfer()
# loop as needed
retry = 0
while status[0] > 8:
time.sleep(10)
status = rb.satellite_transfer()
print(retry, status)
retry += 1
print("\nDONE.")
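# a hedged refactor sketch (not from the original example): the same retry
# loop, bounded so a hardware or signal problem cannot spin forever;
# max_retries and delay_s are made-up parameter names
def transfer_with_retries(rockblock, max_retries=20, delay_s=10):
    status = rockblock.satellite_transfer()
    for attempt in range(max_retries):
        if status[0] <= 8:  # same success test as the loop above
            break
        time.sleep(delay_s)
        status = rockblock.satellite_transfer()
        print(attempt, status)
    return status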
| 18 | 45 | 0.710438 |
1c35d361c6c540bfb4a02dbd495449ea98ed33fb | 1,041 | py | Python | audio.py | Anti-Counter021/Discord-Audio-bot | 1e10b8f2ffb12304269e9ca2dd40da5ea282adf6 | [
"MIT"
] | null | null | null | audio.py | Anti-Counter021/Discord-Audio-bot | 1e10b8f2ffb12304269e9ca2dd40da5ea282adf6 | [
"MIT"
] | null | null | null | audio.py | Anti-Counter021/Discord-Audio-bot | 1e10b8f2ffb12304269e9ca2dd40da5ea282adf6 | [
"MIT"
] | null | null | null | import asyncio
import discord
import youtube_dl.utils
youtube_dl.utils.bug_reports_message = lambda: ''
ytdl_format_options = {
'format': 'bestaudio/best',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0',
}
ffmpeg_options = {
'options': '-vn',
}
ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
class YTDLSource(discord.PCMVolumeTransformer):
@classmethod
    async def from_url(cls, url, *, loop=None, stream=False):
        # resolve the loop lazily; a default argument is evaluated once at
        # class-definition time and may not be the loop actually running
        loop = loop or asyncio.get_event_loop()
try:
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
if 'entries' in data:
data = data['entries'][0]
filename = data['title'] if stream else ytdl.prepare_filename(data)
return filename, data['title']
        except Exception:  # a bare except would also swallow KeyboardInterrupt
raise ValueError('Video not found')
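if __name__ == '__main__':
    # Hedged smoke test (the URL is a placeholder): resolve a stream title
    # without downloading. Assumes discord and youtube_dl are installed and
    # that network access is available.
    async def _demo():
        print(await YTDLSource.from_url('https://www.youtube.com/watch?v=dQw4w9WgXcQ', stream=True))
    asyncio.run(_demo())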
| 26.025 | 104 | 0.642651 |
1c35d45a02f04139580da7a76ff13868f9a6fc6e | 9,795 | py | Python | captcha.py | Zhas1ke/Captcha_Generator | 72be27f298b8475643f037082b06b453a2dc9b78 | [
"MIT"
] | null | null | null | captcha.py | Zhas1ke/Captcha_Generator | 72be27f298b8475643f037082b06b453a2dc9b78 | [
"MIT"
] | null | null | null | captcha.py | Zhas1ke/Captcha_Generator | 72be27f298b8475643f037082b06b453a2dc9b78 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import string
import math
import os
import uuid
import random
##############################################
grad_img = cv2.imread('grad.png')
def sp_noise(image,prob):
'''
Add salt and pepper noise to image
prob: Probability of the noise
'''
output = np.zeros(image.shape,np.uint8)
thres = 1 - prob
for i in range(image.shape[0]):
for j in range(image.shape[1]):
rdn = random.random()
if rdn < prob:
output[i][j] = (np.random.randint(0, 256), np.random.randint(0, 256), np.random.randint(0, 256))
else:
output[i][j] = image[i][j]
return output
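# Illustrative call (grad_img is the gradient background loaded above): with
# prob=0.05 roughly 5% of the pixels are replaced by random BGR values.
#     noisy = sp_noise(grad_img, 0.05)
#     cv2.imwrite('grad_noisy.png', noisy)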
##############################################
wd, _ = os.path.split(os.path.abspath(__file__))
CAPTCHA_LENGTH = 6
WIDTH = 300 # 120
HEIGHT = 100 # 36
# RGB
# font_colors = {
# 'dark-green':(0, 150, 0),
# (241, 145, 241)
# 'red':(230, 70, 50),
# 'violet':(135, 80, 250),
# 'light-green':(65, 235, 100)
# }
# BGR
font_colors = {
'dark-green':(0, 150, 0),
'red':(50, 70, 230),
'violet':(250, 80, 135),
'light-green':(100, 235, 65)
}
class Captcha:
def __init__(self, width, high, ls=None, lc=CAPTCHA_LENGTH, fs=None,
# folder=os.path.join(wd, 'samples'),
folder='samples',
debug=False):
"""
:param ls: letter set, all
:param fs: font set
:param lc: letter count in one pic
:param folder: the folder to save img
:param debug: debug mode
"""
if fs is None:
fs = ['FONT_HERSHEY_SIMPLEX', 'FONT_ITALIC']
self.fs = fs
if ls is None:
ls = string.ascii_uppercase + string.digits
if isinstance(ls, str):
self.letter = [i for i in ls]
elif isinstance(ls, list):
self.letter = ls
self.lc = lc
self.width, self.high = width, high
self.debug = debug
self.folder = folder
if not self.debug and folder:
if not os.path.exists(self.folder):
os.makedirs(self.folder)
def _tilt_img(self, img):
tmp_img = img.copy()
tmp_img.fill(255)
tile_angle = np.random.randint(
100*-math.pi/6, 0
) / 100
high, width, _ = img.shape
for y in range(width):
for x in range(high):
new_y = int(y + (x-high/2)*math.tanh(tile_angle))
try:
tmp_img[x, new_y, :] = img[x, y, :]
except IndexError:
pass
img[:, :, :] = tmp_img[:, :, :]
def _shake_img(self, img, outer_top_left, outer_bottom_right,
inner_top_left, inner_bottom_right):
(x1, y1), (x2, y2) = outer_top_left, outer_bottom_right
(i1, j1), (i2, j2) = inner_top_left, inner_bottom_right
delta_x = np.random.randint(x1-i1, x2-i2)
delta_y = np.random.randint(y1-j1, y2-j2)
area = img[y1:y2, x1:x2, :]
area_high, area_width, _ = area.shape
tmp_area = area.copy()
tmp_area.fill(255)
for index_y in range(area_high):
for index_x in range(area_width):
new_x, new_y = index_x + delta_x, index_y + delta_y
if new_x < area_width and new_y < area_high:
tmp_area[new_y, new_x, :] = area[index_y, index_x, :]
area[:, :, :] = tmp_area[:, :, :]
def _distort_img(self, img):
high, width, _ = img.shape
tmp_img = img.copy()
tmp_img.fill(255)
coef_vertical = np.random.randint(1, 5)
coef_horizontal = np.random.choice([2, 3, 4]) * math.pi / width
scale_biase = np.random.randint(0, 360) * math.pi / 180
def new_coordinate(x, y):
return int(x+coef_vertical*math.sin(coef_horizontal*y+scale_biase))
for y in range(width):
for x in range(high):
new_x = new_coordinate(x, y)
try:
tmp_img[x, y, :] = img[new_x, y, :]
except IndexError:
pass
img[:, :, :] = tmp_img[:, :, :]
def _draw_basic(self, img, text):
font_scale = 1.6 # 36 px
max_width = max_high = 0
for i in text:
            for _font_face in [getattr(cv2, name) for name in self.fs]:  # avoid reusing the letter variable i
for _font_thickness in [5, 6]:
(width, high), _ = cv2.getTextSize(
i, _font_face, font_scale, _font_thickness)
max_width, max_high = max(max_width, width), max(max_high, high)
total_width = max_width * self.lc
width_delta = np.random.randint(0, self.width - total_width)
vertical_range = self.high - max_high
images = list()
font_color = np.random.choice(a=['dark-green', 'red', 'violet', 'light-green'], p=[0.91, 0.03, 0.03, 0.03])
font_color = font_colors[font_color]
delta_high = np.random.randint(
int(2*vertical_range/5), int(3*vertical_range/5)
)
for index, letter in enumerate(text):
font_face = getattr(cv2, np.random.choice(self.fs))
font_thickness = np.random.choice([5, 6])
tmp_img = img.copy()
bottom_left_coordinate = (
index*max_width + width_delta,
self.high - delta_high
)
cv2.putText(tmp_img, letter, bottom_left_coordinate, font_face,
font_scale, font_color, font_thickness)
self._tilt_img(tmp_img)
# cv2.imshow(text, tmp_img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
images.append(tmp_img)
high, width, _ = img.shape
for y in range(width):
for x in range(high):
r, g, b = 0, 0, 0
for tmp_img in images:
r += tmp_img[x, y, 0] + 1
g += tmp_img[x, y, 1] + 1
b += tmp_img[x, y, 2] + 1
r, g, b = r % 256, g % 256, b % 256
img[x, y, :] = (r, g, b)
for y in range(width):
for x in range(high):
if (img[x,y,0] + img[x,y,1] + img[x,y,2]) % 256 == 0:
img[x,y,0] = img[x,y,1] = img[x,y,2] = 255
def _draw_line(self, img):
left_x = np.random.randint(0, self.width//4)
left_y = np.random.randint(self.high)
right_x = np.random.randint(self.width*3//4, self.width)
right_y = np.random.randint(self.high)
start, end = (left_x, left_y), (right_x, right_y)
line_color = tuple(int(np.random.choice(range(0, 156)))
for _ in range(3))
line_thickness = np.random.randint(1, 3)
cv2.line(img, start, end, line_color, line_thickness)
def _put_noise(self, img):
for i in range(600):
x = np.random.randint(self.width)
y = np.random.randint(self.high)
dot_color = tuple(int(np.random.choice(range(0, 156)))
for _ in range(3))
img[y, x, :] = dot_color
def save_img(self, text):
img = np.zeros((self.high, self.width, 3), np.uint8)
img.fill(255)
# img = cv2.imread('grad.png')
# cv2.imshow(text, img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
self._draw_basic(img, text)
# self._put_noise(img)
# self._distort_img(img)
# self._draw_line(img)
noise_grad_img = sp_noise(grad_img,0.15)
img = cv2.addWeighted(img, 0.5,noise_grad_img,0.5,0)
# cv2.imshow(text, dst)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
if self.debug:
cv2.imshow(text, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
else:
fn = text + ('_'+str(uuid.uuid1())[4: 8])
            cv2.imwrite(os.path.join(self.folder, fn + '.jpg'), img)  # portable path separator
def batch_create_img(self, number=5):
exits = set()
while(len(exits)) < number:
word = ''.join(np.random.choice(self.letter, self.lc))
if word not in exits:
exits.add(word)
self.save_img(word)
if not self.debug:
if len(exits) % 10 == 0:
print('{} generated.'.format(len(exits)))
if not self.debug:
print('{} captchas saved into {}.'.format(len(exits), self.folder))
if __name__ == '__main__':
letters = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
c = Captcha(WIDTH, HEIGHT, letters, fs=['FONT_HERSHEY_SIMPLEX', 'FONT_ITALIC'], debug=False)
c.batch_create_img(19995)
'''
font_scale = 1.5
font = cv2.FONT_HERSHEY_PLAIN
# set the rectangle background to white
rectangle_bgr = (255, 255, 255)
# make a black image
img = np.zeros((500, 500))
# set some text
text = "Some text in a box!"
# get the width and height of the text box
(text_width, text_height) = cv2.getTextSize(text, font, fontScale=font_scale, thickness=1)[0]
# set the text start position
text_offset_x = 10
text_offset_y = img.shape[0] - 25
# make the coords of the box with a small padding of two pixels
box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y - text_height - 2))
cv2.rectangle(img, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)
cv2.putText(img, text, (text_offset_x, text_offset_y), font, fontScale=font_scale, color=(0, 0, 0), thickness=1)
cv2.imshow("A box!", img)
cv2.waitKey(0)
''' | 33.775862 | 119 | 0.528229 |
1c35d4ca457d208328b483d9b22e632210ef3f26 | 3,896 | py | Python | aws_lambda/lambda_function.py | ia-flash/matchvec | e418c55c55a273f6a73fc048b3259967960c7e4f | [
"Apache-2.0"
] | 11 | 2019-10-30T08:14:49.000Z | 2021-09-28T07:46:58.000Z | aws_lambda/lambda_function.py | ia-flash/matchvec | e418c55c55a273f6a73fc048b3259967960c7e4f | [
"Apache-2.0"
] | 15 | 2019-09-09T07:31:41.000Z | 2022-03-11T23:54:18.000Z | aws_lambda/lambda_function.py | ia-flash/matchvec | e418c55c55a273f6a73fc048b3259967960c7e4f | [
"Apache-2.0"
] | 2 | 2019-10-31T21:10:27.000Z | 2022-02-14T19:39:57.000Z | import io
import re
from os import listdir, getenv
import json
import base64
import numpy as np
import cv2
from PIL import Image
from matchvec import predict_class, predict_objects, predict_anonym
from urllib.request import urlopen
from requests_toolbelt.multipart import decoder
pattern = re.compile('(?<=form-data; name=").*?(?=")')
def lambda_handler_classification(event, context):
print("ENV", getenv('BACKEND'))
print("ENV", getenv('DETECTION_THRESHOLD'))
print("LISTDIR", listdir('/tmp'))
res = list()
if event.get('httpMethod') == 'OPTIONS':
return {
'headers': {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Headers": "Content-Type",
"Access-Control-Allow-Methods": "OPTIONS"
},
'statusCode': 200
}
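# API Gateway delivers binary payloads base64-encoded; decode the body before parsing it.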
assert event.get('httpMethod') == 'POST'
try:
event['body'] = base64.b64decode(event['body'])
except Exception:
return {
'headers': {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Headers": "Content-Type",
"Access-Control-Allow-Methods": "POST"
},
'statusCode': 400,
'body': json.dumps(res)
}
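# Route on the request path to select the matchvec inference function.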
if event['path'] == '/predict':
infer_func = predict_class
elif event['path'] == '/object_detection':
infer_func = predict_objects
elif event['path'] == '/anonym':
infer_func = predict_anonym
else:
return {
'headers': {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Headers": "Content-Type",
"Access-Control-Allow-Methods": "POST"
},
'statusCode': 404,
'body': json.dumps(res)
}
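# Only multipart/form-data is handled: each part carries either an uploaded image file or an image URL.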
content_type = event.get('headers', {}).get('content-type', '')
if 'multipart/form-data' in content_type:
# convert the body to bytes if needed (MultipartDecoder expects bytes)
if type(event['body']) is str:
event['body'] = bytes(event['body'], 'utf-8')
multipart_data = decoder.MultipartDecoder(event['body'], content_type)
for part in multipart_data.parts:
content_disposition = part.headers.get(b'Content-Disposition', b'').decode('utf-8')
search_field = pattern.search(content_disposition)
if search_field:
if search_field.group(0) == 'image':
try:
img_io = io.BytesIO(part.content)
img_io.seek(0)
img = Image.open(img_io)
img = cv2.cvtColor(np.array(img), cv2.COLOR_BGR2RGB)
res.append(infer_func(img))
except Exception as e:
print(e)
res.append([])
elif search_field.group(0) == 'url':
try:
resp = urlopen(part.content.decode('utf-8'))
img = np.asarray(bytearray(resp.read()), dtype="uint8")
img = cv2.imdecode(img, cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
res.append(infer_func(img))
except Exception as e:
print(e)
res.append([])
else:
print('Bad field name in form-data')
return {
'headers': {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Headers": "Content-Type",
"Access-Control-Allow-Methods": "OPTIONS,POST"
},
'statusCode': 200,
'body': json.dumps(res)
}
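# --- Local smoke test (a sketch, not part of the deployed handler) ---
# Builds a multipart body with requests_toolbelt and feeds it to the handler
# the way API Gateway would (base64-encoded POST). The URL below is a
# hypothetical placeholder and fetching it needs network access.
if __name__ == '__main__':
    from requests_toolbelt.multipart.encoder import MultipartEncoder
    form = MultipartEncoder(fields={'url': 'http://example.com/car.jpg'})
    fake_event = {
        'httpMethod': 'POST',
        'path': '/object_detection',
        'headers': {'content-type': form.content_type},
        'body': base64.b64encode(form.to_string()),
    }
    print(lambda_handler_classification(fake_event, None))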
| 36.754717 | 95 | 0.497177 |
| true | true |