hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
356bf69789fcdff483b30f2dd86757b1ad448ff4 | 34,806 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2015_06_15/aio/operations/_express_route_circuits_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2015_06_15/aio/operations/_express_route_circuits_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2015_06_15/aio/operations/_express_route_circuits_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Signature of the optional per-call 'cls' callback: it receives the raw
# PipelineResponse, the deserialized body, and the response headers dict,
# and its return value replaces the operation's normal return value.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitsOperations:
"""ExpressRouteCircuitsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2015_06_15.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        circuit_name: str,
        **kwargs
    ) -> None:
        """Send the initial DELETE request of the long-running delete operation.

        Accepts 200/202/204; completion tracking is handled by the poller that
        ``begin_delete`` builds around this call.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Default status-code -> exception mapping; callers may extend or
        # override it via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"

        # Construct URL from the route template stored on this method.
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        # Remaining kwargs (e.g. pipeline policies' per-call options) flow
        # straight into the pipeline run.
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            # No body to deserialize for a delete; pass None as the payload.
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        circuit_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Deletes the specified express route circuit.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: send the initial DELETE.  cls=lambda returns the
            # raw PipelineResponse so the poller can read the LRO headers.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                circuit_name=circuit_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These per-request options must not be re-sent on polling requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete returns no body; only the user-supplied 'cls' (if any)
            # produces a result.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting over.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        circuit_name: str,
        **kwargs
    ) -> "_models.ExpressRouteCircuit":
        """Gets information about the specified express route circuit.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of express route circuit.
        :type circuit_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRouteCircuit, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2015_06_15.models.ExpressRouteCircuit
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuit"]
        # Default status-code -> exception mapping, extendable via 'error_map'.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"
        accept = "application/json, text/json"

        # Construct URL from the route template stored on this method.
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
parameters: "_models.ExpressRouteCircuit",
**kwargs
) -> "_models.ExpressRouteCircuit":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuit"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-06-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ExpressRouteCircuit')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        circuit_name: str,
        parameters: "_models.ExpressRouteCircuit",
        **kwargs
    ) -> AsyncLROPoller["_models.ExpressRouteCircuit"]:
        """Creates or updates an express route circuit.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the circuit.
        :type circuit_name: str
        :param parameters: Parameters supplied to the create or update express route circuit operation.
        :type parameters: ~azure.mgmt.network.v2015_06_15.models.ExpressRouteCircuit
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ExpressRouteCircuit or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2015_06_15.models.ExpressRouteCircuit]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuit"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: send the initial PUT.  cls=lambda returns the
            # raw PipelineResponse so the poller can read the LRO headers.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                circuit_name=circuit_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These per-request options must not be re-sent on polling requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final polled response into the circuit model.
            deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting over.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
    def list_arp_table(
        self,
        resource_group_name: str,
        circuit_name: str,
        **kwargs
    ) -> AsyncIterable["_models.ExpressRouteCircuitsArpTableListResult"]:
        """The ListArpTable from ExpressRouteCircuit operation retrieves the currently advertised arp
        table associated with the ExpressRouteCircuits in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the circuit.
        :type circuit_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRouteCircuitsArpTableListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2015_06_15.models.ExpressRouteCircuitsArpTableListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitsArpTableListResult"]
        # Default status-code -> exception mapping, extendable via 'error_map'.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Build the request for the first page (templated URL + query
            # params) or for a continuation page (next_link is a full URL).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_arp_table.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Turn one page response into (continuation token, async item list).
            deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page and fail fast on any non-200 status.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_arp_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/arpTable'}  # type: ignore
    def list_routes_table(
        self,
        resource_group_name: str,
        circuit_name: str,
        **kwargs
    ) -> AsyncIterable["_models.ExpressRouteCircuitsRoutesTableListResult"]:
        """The ListRoutesTable from ExpressRouteCircuit operation retrieves the currently advertised
        routes table associated with the ExpressRouteCircuits in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the circuit.
        :type circuit_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRouteCircuitsRoutesTableListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2015_06_15.models.ExpressRouteCircuitsRoutesTableListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitsRoutesTableListResult"]
        # Default status-code -> exception mapping, extendable via 'error_map'.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Build the request for the first page (templated URL + query
            # params) or for a continuation page (next_link is a full URL).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_routes_table.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Turn one page response into (continuation token, async item list).
            deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page and fail fast on any non-200 status.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_routes_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/routesTable'}  # type: ignore
    def list_stats(
        self,
        resource_group_name: str,
        circuit_name: str,
        **kwargs
    ) -> AsyncIterable["_models.ExpressRouteCircuitsStatsListResult"]:
        """The ListStats ExpressRouteCircuit operation retrieves all the stats from a ExpressRouteCircuits
        in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the circuit.
        :type circuit_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRouteCircuitsStatsListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2015_06_15.models.ExpressRouteCircuitsStatsListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitsStatsListResult"]
        # Default status-code -> exception mapping, extendable via 'error_map'.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Build the request for the first page (templated URL + query
            # params) or for a continuation page (next_link is a full URL).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_stats.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Turn one page response into (continuation token, async item list).
            deserialized = self._deserialize('ExpressRouteCircuitsStatsListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page and fail fast on any non-200 status.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/stats'}  # type: ignore
    def list(
        self,
        resource_group_name: str,
        **kwargs
    ) -> AsyncIterable["_models.ExpressRouteCircuitListResult"]:
        """Gets all the express route circuits in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRouteCircuitListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2015_06_15.models.ExpressRouteCircuitListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitListResult"]
        # Default status-code -> exception mapping, extendable via 'error_map'.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Build the request for the first page (templated URL + query
            # params) or for a continuation page (next_link is a full URL).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Turn one page response into (continuation token, async item list).
            deserialized = self._deserialize('ExpressRouteCircuitListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page and fail fast on any non-200 status.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits'}  # type: ignore
    def list_all(
        self,
        **kwargs
    ) -> AsyncIterable["_models.ExpressRouteCircuitListResult"]:
        """Gets all the express route circuits in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRouteCircuitListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2015_06_15.models.ExpressRouteCircuitListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitListResult"]
        # Map HTTP error codes to exception types; callers may extend via 'error_map'.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Builds either the first-page request (templated URL) or a
            # follow-up request for the server-supplied next_link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link is already a fully-formed URL; no query params needed.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('ExpressRouteCircuitListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; raise on any non-200 status.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        # Lazily-paged async iterator over all circuits in the subscription.
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits'}  # type: ignore
| 49.86533 | 201 | 0.66296 |
e65e0ec76d5756193408096e053858a4ac95ffe1 | 38,934 | py | Python | diofant/utilities/enumerative.py | diofant/omg | 72fd45f832240d1ded6f0a411e97bb9f7aa9f1d2 | [
"BSD-3-Clause"
] | null | null | null | diofant/utilities/enumerative.py | diofant/omg | 72fd45f832240d1ded6f0a411e97bb9f7aa9f1d2 | [
"BSD-3-Clause"
] | null | null | null | diofant/utilities/enumerative.py | diofant/omg | 72fd45f832240d1ded6f0a411e97bb9f7aa9f1d2 | [
"BSD-3-Clause"
] | null | null | null | """
Algorithms and classes to support enumerative combinatorics.
Currently just multiset partitions, but more could be added.
Terminology (following Knuth, algorithm 7.1.2.5M TAOCP)
*multiset* aaabbcccc has a *partition* aaabc | bccc
The submultisets, aaabc and bccc of the partition are called
*parts*, or sometimes *vectors*. (Knuth notes that multiset
partitions can be thought of as partitions of vectors of integers,
where the ith element of the vector gives the multiplicity of
element i.)
The values a, b and c are *components* of the multiset. These
correspond to elements of a set, but in a multiset can be present
with a multiplicity greater than 1.
The algorithm deserves some explanation.
Think of the part aaabc from the multiset above. If we impose an
ordering on the components of the multiset, we can represent a part
with a vector, in which the value of the first element of the vector
corresponds to the multiplicity of the first component in that
part. Thus, aaabc can be represented by the vector [3, 1, 1]. We
can also define an ordering on parts, based on the lexicographic
ordering of the vector (leftmost vector element, i.e., the element
with the smallest component number, is the most significant), so
that [3, 1, 1] > [3, 1, 0] and [3, 1, 1] > [2, 1, 4]. The ordering
on parts can be extended to an ordering on partitions: First, sort
the parts in each partition, left-to-right in decreasing order. Then
partition A is greater than partition B if A's leftmost/greatest
part is greater than B's leftmost part. If the leftmost parts are
equal, compare the second parts, and so on.
In this ordering, the greatest partition of a given multiset has only
one part. The least partition is the one in which the components
are spread out, one per part.
The enumeration algorithms in this file yield the partitions of the
argument multiset in decreasing order. The main data structure is a
stack of parts, corresponding to the current partition. An
important invariant is that the parts on the stack are themselves in
decreasing order. This data structure is decremented to find the
next smaller partition. Most often, decrementing the partition will
only involve adjustments to the smallest parts at the top of the
stack, much as adjacent integers *usually* differ only in their last
few digits.
Knuth's algorithm uses two main operations on parts:
Decrement - change the part so that it is smaller in the
(vector) lexicographic order, but reduced by the smallest amount possible.
For example, if the multiset has vector [5,
3, 1], and the bottom/greatest part is [4, 2, 1], this part would
decrement to [4, 2, 0], while [4, 0, 0] would decrement to [3, 3,
1]. A singleton part is never decremented -- [1, 0, 0] is not
decremented to [0, 3, 1]. Instead, the decrement operator needs
to fail for this case. In Knuth's psuedocode, the decrement
operator is step m5.
Spread unallocated multiplicity - Once a part has been decremented,
it cannot be the rightmost part in the partition. There is some
multiplicity that has not been allocated, and new parts must be
created above it in the stack to use up this multiplicity. To
maintain the invariant that the parts on the stack are in
decreasing order, these new parts must be less than or equal to
the decremented part.
For example, if the multiset is [5, 3, 1], and its most
significant part has just been decremented to [5, 3, 0], the
spread operation will add a new part so that the stack becomes
[[5, 3, 0], [0, 0, 1]]. If the most significant part (for the
same multiset) has been decremented to [2, 0, 0] the stack becomes
[[2, 0, 0], [2, 0, 0], [1, 3, 1]]. In the pseudocode, the spread
operation for one part is step m2. The complete spread operation
is a loop of steps m2 and m3.
In order to facilitate the spread operation, Knuth stores, for each
component of each part, not just the multiplicity of that component
in the part, but also the total multiplicity available for this
component in this part or any lesser part above it on the stack.
One added twist is that Knuth does not represent the part vectors as
arrays. Instead, he uses a sparse representation, in which a
component of a part is represented as a component number (c), plus
the multiplicity of the component in that part (v) as well as the
total multiplicity available for that component (u). This saves
time that would be spent skipping over zeros.
"""
class PartComponent:
    """Internal class used in support of the multiset partitions
    enumerators and the associated visitor functions.

    Represents one component of one part of the current partition.

    A stack of these, plus an auxiliary frame array, f, represents a
    partition of the multiset.

    Knuth's pseudocode makes c, u, and v separate arrays.
    """

    # Many instances are allocated per enumeration (n*m + 1 of them),
    # so avoid the per-instance __dict__.
    __slots__ = ('c', 'u', 'v')

    def __init__(self):
        # Component number.
        self.c = 0
        # The as-yet-unpartitioned amount in component c *before* it is
        # allocated by this triple.
        self.u = 0
        # Amount of component c in the current part (v <= u).  An
        # invariant of the representation is that the next higher
        # triple for this component (if there is one) will have a value
        # of u - v in its u attribute.
        self.v = 0

    def __repr__(self):
        """Compact display, handy when debugging the enumerators."""
        return f'c:{self.c} u:{self.u} v:{self.v}'

    def __eq__(self, other):
        """Define value oriented equality, which is useful for testers."""
        return (isinstance(other, self.__class__) and
                self.c == other.c and
                self.u == other.u and
                self.v == other.v)
# This function tries to be a faithful implementation of algorithm
# 7.1.2.5M in Volume 4A, Combinatoral Algorithms, Part 1, of The Art
# of Computer Programming, by Donald Knuth. This includes using
# (mostly) the same variable names, etc. This makes for rather
# low-level Python.
# Changes from Knuth's pseudocode include
# - use PartComponent struct/object instead of 3 arrays
# - make the function a generator
# - map (with some difficulty) the GOTOs to Python control structures.
# - Knuth uses 1-based numbering for components, this code is 0-based
# - renamed variable l to lpart.
# - flag variable x takes on values True/False instead of 1/0
#
def multiset_partitions_taocp(multiplicities):
    """Enumerates partitions of a multiset.

    Parameters
    ==========

    multiplicities
         list of integer multiplicities of the components of the multiset.

    Yields
    ======

    state
        Internal data structure which encodes a particular partition.
        This output is then usually processed by a visitor function
        which combines the information from this data structure with
        the components themselves to produce an actual partition.

        Unless they wish to create their own visitor function, users will
        have little need to look inside this data structure.  But, for
        reference, it is a 3-element list with components:

        f
            is a frame array, which is used to divide pstack into parts.

        lpart
            points to the base of the topmost part.

        pstack
            is an array of PartComponent objects.

        The ``state`` output offers a peek into the internal data
        structures of the enumeration function.  The client should
        treat this as read-only; any modification of the data
        structure will cause unpredictable (and almost certainly
        incorrect) results.  Also, the components of ``state`` are
        modified in place at each iteration.  Hence, the visitor must
        be called at each loop iteration.  Accumulating the ``state``
        instances and processing them later will not work.

    Examples
    ========

    >>> # variables components and multiplicities represent the multiset 'abb'
    >>> components = 'ab'
    >>> multiplicities = [1, 2]
    >>> states = multiset_partitions_taocp(multiplicities)
    >>> [list_visitor(state, components) for state in states]
    [[['a', 'b', 'b']],
    [['a', 'b'], ['b']],
    [['a'], ['b', 'b']],
    [['a'], ['b'], ['b']]]

    """
    # Important variables.
    # m is the number of components, i.e., number of distinct elements
    m = len(multiplicities)
    # n is the cardinality, total number of elements whether or not distinct
    n = sum(multiplicities)

    # The main data structure, f segments pstack into parts.  See
    # list_visitor() for example code indicating how this internal
    # state corresponds to a partition.

    # Note: allocation of space for stack is conservative.  Knuth's
    # exercise 7.2.1.5.68 gives some indication of how to tighten this
    # bound, but this is not implemented.
    pstack = [PartComponent() for i in range(n * m + 1)]
    f = [0] * (n + 1)

    # Step M1 in Knuth (Initialize)
    # Initial state - entire multiset in one part.
    for j in range(m):
        ps = pstack[j]
        ps.c = j
        ps.u = multiplicities[j]
        ps.v = multiplicities[j]

    # Other variables
    f[0] = 0
    a = 0
    lpart = 0
    f[1] = m
    b = m  # in general, current stack frame is from a to b - 1

    while True:
        while True:
            # Step M2 (Subtract v from u)
            # Copy the unallocated multiplicity of the current top part
            # into a tentative new part [b..k), maintaining the
            # decreasing-part ordering invariant.
            j = a
            k = b
            x = False
            while j < b:
                pstack[k].u = pstack[j].u - pstack[j].v
                if pstack[k].u == 0:
                    x = True
                elif not x:
                    pstack[k].c = pstack[j].c
                    pstack[k].v = min(pstack[j].v, pstack[k].u)
                    x = pstack[k].u < pstack[j].v
                    k = k + 1
                else:  # x is True
                    pstack[k].c = pstack[j].c
                    pstack[k].v = pstack[k].u
                    k = k + 1
                j = j + 1
                # Note: x is True iff v has changed

            # Step M3 (Push if nonzero.)
            if k > b:
                a = b
                b = k
                lpart = lpart + 1
                f[lpart + 1] = b
                # Return to M2
            else:
                break  # Continue to M4

        # M4  Visit a partition
        state = [f, lpart, pstack]
        yield state

        # M5 (Decrease v)
        while True:
            # Find the rightmost component of the top part with v > 0.
            j = b - 1
            while pstack[j].v == 0:
                j = j - 1
            if j == a and pstack[j].v == 1:
                # M6 (Backtrack) - top part is a singleton that cannot
                # be decremented; pop it and retry one level down.
                if lpart == 0:
                    return
                lpart = lpart - 1
                b = a
                a = f[lpart]
                # Return to M5
            else:
                pstack[j].v = pstack[j].v - 1
                # Reset trailing components to their maximum.
                for k in range(j + 1, b):
                    pstack[k].v = pstack[k].u
                break  # GOTO M2
# --------------- Visitor functions for multiset partitions ---------------
# A visitor takes the partition state generated by
# multiset_partitions_taocp or other enumerator, and produces useful
# output (such as the actual partition).
def factoring_visitor(state, primes):
    """Use with multiset_partitions_taocp to enumerate the ways a
    number can be expressed as a product of factors.

    For this usage, the exponents of the prime factors of a number are
    the arguments to the partition enumerator, while the corresponding
    prime factors are passed in here.

    Examples
    ========

    To enumerate the factorings of a number we can think of the elements of the
    partition as being the prime factors and the multiplicities as being their
    exponents.

    >>> primes, multiplicities = zip(*factorint(24).items())
    >>> primes
    (2, 3)
    >>> multiplicities
    (3, 1)
    >>> states = multiset_partitions_taocp(multiplicities)
    >>> [factoring_visitor(state, primes) for state in states]
    [[24], [8, 3], [12, 2], [4, 6], [4, 2, 3], [6, 2, 2], [2, 2, 2, 3]]

    """
    f, lpart, pstack = state
    factors = []
    # Consecutive entries of the frame array delimit one part each;
    # multiply out prime**exponent over the triples of that part.
    for lo, hi in zip(f[:lpart + 1], f[1:lpart + 2]):
        factor = 1
        for triple in pstack[lo:hi]:
            if triple.v:
                factor *= primes[triple.c] ** triple.v
        factors.append(factor)
    return factors
def list_visitor(state, components):
    """Return a list of lists to represent the partition.

    Examples
    ========

    >>> states = multiset_partitions_taocp([1, 2, 1])
    >>> s = next(states)
    >>> list_visitor(s, 'abc')  # for multiset 'a b b c'
    [['a', 'b', 'b', 'c']]
    >>> s = next(states)
    >>> list_visitor(s, [1, 2, 3])  # for multiset '1 2 2 3
    [[1, 2, 2], [3]]

    """
    f, lpart, pstack = state
    # One inner list per part: each PartComponent triple contributes
    # its component repeated v times (v == 0 contributes nothing).
    return [
        [components[ps.c]
         for ps in pstack[f[i]:f[i + 1]]
         for _ in range(ps.v)]
        for i in range(lpart + 1)
    ]
class MultisetPartitionTraverser:
    """
    Has methods to ``enumerate`` and ``count`` the partitions of a multiset.

    This implements a refactored and extended version of Knuth's algorithm
    7.1.2.5M.

    The enumeration methods of this class are generators and return
    data structures which can be interpreted by the same visitor
    functions used for the output of ``multiset_partitions_taocp``.

    See Also
    ========

    multiset_partitions_taocp

    Examples
    ========

    >>> m = MultisetPartitionTraverser()
    >>> m.count_partitions([4, 4, 4, 2])
    127750
    >>> m.count_partitions([3, 3, 3])
    686

    References
    ==========

    * Algorithm 7.1.2.5M in Volume 4A, Combinatorial Algorithms,
      Part 1, of The Art of Computer Programming, by Donald Knuth.
    * On a Problem of Oppenheim concerning
      "Factorisatio Numerorum" E. R. Canfield, Paul Erdős, Carl
      Pomerance, JOURNAL OF NUMBER THEORY, Vol. 17, No. 1. August
      1983.  See section 7 for a description of an algorithm
      similar to Knuth's.
    * Generating Multiset Partitions, Brent Yorgey, The
      Monad.Reader, Issue 8, September 2007.

    """

    def __init__(self):
        # TRACING variables.  These are useful for gathering
        # statistics on the algorithm itself, but have no particular
        # benefit to a user of the code.
        self.k1 = 0
        self.k2 = 0
        self.p1 = 0
        # Enumeration state; allocated by _initialize_enumeration().
        self.pstack = None
        self.f = None
        self.lpart = None
        self.discarded = None
        self.pcount = None
        # Dynamic-programming cache used by count_partitions();
        # persists across calls.
        self.dp_stack = None
        self.dp_map = None

    #
    # Helper methods for enumeration
    #
    def _initialize_enumeration(self, multiplicities):
        """Allocates and initializes the partition stack.

        This is called from the enumeration/counting routines, so
        there is no need to call it separately.
        """
        num_components = len(multiplicities)
        # cardinality is the total number of elements, whether or not distinct
        cardinality = sum(multiplicities)
        # pstack is the partition stack, which is segmented by
        # f into parts.
        self.pstack = [PartComponent() for i in
                       range(num_components * cardinality + 1)]
        self.f = [0] * (cardinality + 1)

        # Initial state - entire multiset in one part.
        for j in range(num_components):
            ps = self.pstack[j]
            ps.c = j
            ps.u = multiplicities[j]
            ps.v = multiplicities[j]
        self.f[0] = 0
        self.f[1] = num_components
        self.lpart = 0

    # The decrement_part() method corresponds to step M5 in Knuth's
    # algorithm.  This is the base version for enum_all().  Modified
    # versions of this method are needed if we want to restrict
    # sizes of the partitions produced.
    def decrement_part(self, part):
        """Decrements part (a subrange of pstack), if possible, returning
        True iff the part was successfully decremented.

        If you think of the v values in the part as a multi-digit
        integer (least significant digit on the right) this is
        basically decrementing that integer, but with the extra
        constraint that the leftmost digit cannot be decremented to 0.

        Parameters
        ==========

        part
            The part, represented as a list of PartComponent objects,
            which is to be decremented.

        """
        plen = len(part)
        for j in range(plen - 1, -1, -1):
            if (j == 0 and part[j].v > 1) or (j > 0 and part[j].v > 0):
                # found val to decrement
                part[j].v -= 1
                # Reset trailing parts back to maximum
                for k in range(j + 1, plen):
                    part[k].v = part[k].u
                return True
        return False

    # Version to allow number of parts to be bounded from above.
    # Corresponds to (a modified) step M5.
    def decrement_part_small(self, part, ub):
        """Decrements part (a subrange of pstack), if possible, returning
        True iff the part was successfully decremented.

        Parameters
        ==========

        part
            part to be decremented (topmost part on the stack)

        ub
            the maximum number of parts allowed in a partition
            returned by the calling traversal.

        Notes
        =====

        The goal of this modification of the ordinary decrement method
        is to fail (meaning that the subtree rooted at this part is to
        be skipped) when it can be proved that this part can only have
        child partitions which are larger than allowed by ``ub``. If a
        decision is made to fail, it must be accurate, otherwise the
        enumeration will miss some partitions.  But, it is OK not to
        capture all the possible failures -- if a part is passed that
        shouldn't be, the resulting too-large partitions are filtered
        by the enumeration one level up.  However, as is usual in
        constrained enumerations, failing early is advantageous.

        The tests used by this method catch the most common cases,
        although this implementation is by no means the last word on
        this problem.  The tests include:

        1) ``lpart`` must be less than ``ub`` by at least 2.  This is because
           once a part has been decremented, the partition
           will gain at least one child in the spread step.

        2) If the leading component of the part is about to be
           decremented, check for how many parts will be added in
           order to use up the unallocated multiplicity in that
           leading component, and fail if this number is greater than
           allowed by ``ub``.  (See code for the exact expression.)  This
           test is given in the answer to Knuth's problem 7.2.1.5.69.

        3) If there is *exactly* enough room to expand the leading
           component by the above test, check the next component (if
           it exists) once decrementing has finished.  If this has
           ``v == 0``, this next component will push the expansion over the
           limit by 1, so fail.
        """
        if self.lpart >= ub - 1:
            self.p1 += 1  # increment to keep track of usefulness of tests
            return False
        plen = len(part)
        for j in range(plen - 1, -1, -1):  # pragma: no branch
            # Knuth's mod, (answer to problem 7.2.1.5.69)
            if (j == 0) and (part[0].v - 1)*(ub - self.lpart) < part[0].u:
                self.k1 += 1
                return False

            if (j == 0 and part[j].v > 1) or (j > 0 and part[j].v > 0):
                # found val to decrement
                part[j].v -= 1
                # Reset trailing parts back to maximum
                for k in range(j + 1, plen):
                    part[k].v = part[k].u

                # Have now decremented part, but are we doomed to
                # failure when it is expanded?  Check one oddball case
                # that turns out to be surprisingly common - exactly
                # enough room to expand the leading component, but no
                # room for the second component, which has v=0.
                if (plen > 1 and (part[1].v == 0) and
                        (part[0].u - part[0].v) ==
                        ((ub - self.lpart - 1) * part[0].v)):
                    self.k2 += 1
                    return False
                return True

    def decrement_part_large(self, part, lb):
        """Decrements part, while respecting size constraint.

        A part can have no children which are of sufficient size (as
        indicated by ``lb``) unless that part has sufficient
        unallocated multiplicity.  When enforcing the size constraint,
        this method will decrement the part (if necessary) by an
        amount needed to ensure sufficient unallocated multiplicity.

        Returns True iff the part was successfully decremented.

        Parameters
        ==========

        part
            part to be decremented (topmost part on the stack)

        lb
            The partitions produced by the calling enumeration must
            have more parts than this value.

        """
        # Next, perform any needed additional decrementing to respect
        # "sufficient unallocated multiplicity" (or fail if this is
        # not possible).
        min_unalloc = lb - self.lpart
        if min_unalloc <= 0:
            return True
        total_mult = sum(pc.u for pc in part)
        total_alloc = sum(pc.v for pc in part)
        if total_mult <= min_unalloc:
            return False

        deficit = min_unalloc - (total_mult - total_alloc)
        if deficit <= 0:
            return True

        # Work right-to-left, zeroing components until the deficit is
        # absorbed; the loop always returns before falling through.
        for i in range(len(part) - 1, -1, -1):  # pragma: no branch
            if i == 0:
                assert part[0].v > deficit
                part[0].v -= deficit
                return True
            else:
                if part[i].v >= deficit:
                    part[i].v -= deficit
                    return True
                else:
                    deficit -= part[i].v
                    part[i].v = 0

    def decrement_part_range(self, part, lb, ub):
        """Decrements part (a subrange of pstack), if possible, returning
        True iff the part was successfully decremented.

        Parameters
        ==========

        part
            part to be decremented (topmost part on the stack)

        ub
            the maximum number of parts allowed in a partition
            returned by the calling traversal.

        lb
            The partitions produced by the calling enumeration must
            have more parts than this value.

        Notes
        =====

        Combines the constraints of _small and _large decrement
        methods.  If returns success, part has been decremented at
        least once, but perhaps by quite a bit more if needed to meet
        the lb constraint.
        """
        # Constraint in the range case is just enforcing both the
        # constraints from _small and _large cases.  Note the 0 as the
        # second argument to the _large call -- this is the signal to
        # decrement only as needed to for constraint enforcement.  The
        # short circuiting and left-to-right order of the 'and'
        # operator is important for this to work correctly.
        return self.decrement_part_small(part, ub) and \
            self.decrement_part_large(part, lb)

    def spread_part_multiplicity(self):
        """Returns True if a new part has been created, and
        adjusts pstack, f and lpart as needed.

        Notes
        =====

        Spreads unallocated multiplicity from the current top part
        into a new part created above the current on the stack.  This
        new part is constrained to be less than or equal to the old in
        terms of the part ordering.

        This call does nothing (and returns False) if the current top
        part has no unallocated multiplicity.

        """
        j = self.f[self.lpart]  # base of current top part
        k = self.f[self.lpart + 1]  # ub of current; potential base of next
        base = k  # save for later comparison

        # Set to true when the new part (so far) is
        # strictly less than (as opposed to less than
        # or equal) to the old.
        changed = False
        for j in range(self.f[self.lpart], self.f[self.lpart + 1]):
            self.pstack[k].u = self.pstack[j].u - self.pstack[j].v
            if self.pstack[k].u == 0:
                changed = True
            else:
                self.pstack[k].c = self.pstack[j].c
                if changed:  # Put all available multiplicity in this part
                    self.pstack[k].v = self.pstack[k].u
                else:  # Still maintaining ordering constraint
                    if self.pstack[k].u < self.pstack[j].v:
                        self.pstack[k].v = self.pstack[k].u
                        changed = True
                    else:
                        self.pstack[k].v = self.pstack[j].v
                k = k + 1
        if k > base:
            # Adjust for the new part on stack
            self.lpart = self.lpart + 1
            self.f[self.lpart + 1] = k
            return True
        return False

    def top_part(self):
        """Return current top part on the stack, as a slice of pstack."""
        return self.pstack[self.f[self.lpart]:self.f[self.lpart + 1]]

    # Same interface and functionality as multiset_partitions_taocp(),
    # but some might find this refactored version easier to follow.
    def enum_all(self, multiplicities):
        """Enumerate the partitions of a multiset.

        Examples
        ========

        >>> m = MultisetPartitionTraverser()
        >>> states = m.enum_all([2, 2])
        >>> [list_visitor(state, 'ab') for state in states]
        [[['a', 'a', 'b', 'b']],
        [['a', 'a', 'b'], ['b']],
        [['a', 'a'], ['b', 'b']],
        [['a', 'a'], ['b'], ['b']],
        [['a', 'b', 'b'], ['a']],
        [['a', 'b'], ['a', 'b']],
        [['a', 'b'], ['a'], ['b']],
        [['a'], ['a'], ['b', 'b']],
        [['a'], ['a'], ['b'], ['b']]]

        See also
        ========

        multiset_partitions_taocp():
            which provides the same result as this method, but is
            about twice as fast.  Hence, enum_all is primarily useful
            for testing.  Also see the function for a discussion of
            states and visitors.

        """
        self._initialize_enumeration(multiplicities)
        while True:
            while self.spread_part_multiplicity():
                pass

            # M4  Visit a partition
            state = [self.f, self.lpart, self.pstack]
            yield state

            # M5 (Decrease v)
            while not self.decrement_part(self.top_part()):
                # M6 (Backtrack)
                if self.lpart == 0:
                    return
                self.lpart -= 1

    def enum_small(self, multiplicities, ub):
        """Enumerate multiset partitions with no more than ``ub`` parts.

        Equivalent to enum_range(multiplicities, 0, ub)

        See also
        ========

        enum_all, enum_large, enum_range

        Parameters
        ==========

        multiplicities
            list of multiplicities of the components of the multiset.

        ub
            Maximum number of parts

        Examples
        ========

        >>> m = MultisetPartitionTraverser()
        >>> states = m.enum_small([2, 2], 2)
        >>> [list_visitor(state, 'ab') for state in states]
        [[['a', 'a', 'b', 'b']],
        [['a', 'a', 'b'], ['b']],
        [['a', 'a'], ['b', 'b']],
        [['a', 'b', 'b'], ['a']],
        [['a', 'b'], ['a', 'b']]]

        The implementation is based, in part, on the answer given to
        exercise 69, in Knuth.

        References
        ==========

        * Algorithm 7.1.2.5M in Volume 4A, Combinatorial Algorithms,
          Part 1, of The Art of Computer Programming, by Donald Knuth.

        """
        # Keep track of iterations which do not yield a partition.
        # Clearly, we would like to keep this number small.
        self.discarded = 0
        if ub <= 0:
            return
        self._initialize_enumeration(multiplicities)
        while True:
            good_partition = True
            while self.spread_part_multiplicity():
                if self.lpart >= ub:
                    self.discarded += 1
                    good_partition = False
                    self.lpart = ub - 2
                    break

            # M4  Visit a partition
            if good_partition:
                state = [self.f, self.lpart, self.pstack]
                yield state

            # M5 (Decrease v)
            while not self.decrement_part_small(self.top_part(), ub):
                # M6 (Backtrack)
                if self.lpart == 0:
                    return
                self.lpart -= 1

    def enum_large(self, multiplicities, lb):
        """Enumerate the partitions of a multiset with lb < num(parts)

        See also
        ========

        enum_all, enum_small, enum_range

        Parameters
        ==========

        multiplicities
            list of multiplicities of the components of the multiset.

        lb
            Number of parts in the partition must be greater than
            this lower bound.

        Examples
        ========

        >>> m = MultisetPartitionTraverser()
        >>> states = m.enum_large([2, 2], 2)
        >>> [list_visitor(state, 'ab') for state in states]
        [[['a', 'a'], ['b'], ['b']],
        [['a', 'b'], ['a'], ['b']],
        [['a'], ['a'], ['b', 'b']],
        [['a'], ['a'], ['b'], ['b']]]

        """
        return self.enum_range(multiplicities, lb, sum(multiplicities))

    def enum_range(self, multiplicities, lb, ub):
        """Enumerate the partitions of a multiset with
        ``lb < num(parts) <= ub``.

        In particular, if partitions with exactly ``k`` parts are
        desired, call with ``(multiplicities, k - 1, k)``.  This
        method generalizes enum_all, enum_small, and enum_large.

        Examples
        ========

        >>> m = MultisetPartitionTraverser()
        >>> states = m.enum_range([2, 2], 1, 2)
        >>> [list_visitor(state, 'ab') for state in states]
        [[['a', 'a', 'b'], ['b']],
        [['a', 'a'], ['b', 'b']],
        [['a', 'b', 'b'], ['a']],
        [['a', 'b'], ['a', 'b']]]

        """
        # combine the constraints of the _large and _small
        # enumerations.
        self.discarded = 0
        if ub <= 0 or lb >= sum(multiplicities):
            return
        self._initialize_enumeration(multiplicities)
        self.decrement_part_large(self.top_part(), lb)
        while True:
            good_partition = True
            while self.spread_part_multiplicity():
                # BUGFIX: this call has a required side effect (it may
                # shrink the newly spread part to respect ``lb``), so it
                # must not live inside the ``assert`` itself -- asserts
                # are stripped when Python runs with -O.
                adjusted = self.decrement_part_large(self.top_part(), lb)
                assert adjusted
                if self.lpart >= ub:
                    self.discarded += 1
                    good_partition = False
                    self.lpart = ub - 2
                    break

            # M4  Visit a partition
            if good_partition:
                state = [self.f, self.lpart, self.pstack]
                yield state

            # M5 (Decrease v)
            while not self.decrement_part_range(self.top_part(), lb, ub):
                # M6 (Backtrack)
                if self.lpart == 0:
                    return
                self.lpart -= 1

    def count_partitions_slow(self, multiplicities):
        """Returns the number of partitions of a multiset whose elements
        have the multiplicities given in ``multiplicities``.

        Primarily for comparison purposes.  It follows the same path as
        enumerate, and counts, rather than generates, the partitions.

        See Also
        ========

        count_partitions
            Has the same calling interface, but is much faster.

        """
        # number of partitions so far in the enumeration
        self.pcount = 0
        self._initialize_enumeration(multiplicities)
        while True:
            while self.spread_part_multiplicity():
                pass

            # M4  Visit (count) a partition
            self.pcount += 1

            # M5 (Decrease v)
            while not self.decrement_part(self.top_part()):
                # M6 (Backtrack)
                if self.lpart == 0:
                    return self.pcount
                self.lpart -= 1

    def count_partitions(self, multiplicities):
        """Returns the number of partitions of a multiset whose components
        have the multiplicities given in ``multiplicities``.

        For larger counts, this method is much faster than calling one
        of the enumerators and counting the result.  Uses dynamic
        programming to cut down on the number of nodes actually
        explored.  The dictionary used in order to accelerate the
        counting process is stored in the ``MultisetPartitionTraverser``
        object and persists across calls.  If the user does not
        expect to call ``count_partitions`` for any additional
        multisets, the object should be cleared to save memory.  On
        the other hand, the cache built up from one count run can
        significantly speed up subsequent calls to ``count_partitions``,
        so it may be advantageous not to clear the object.

        Examples
        ========

        >>> m = MultisetPartitionTraverser()
        >>> m.count_partitions([9, 8, 2])
        288716
        >>> m.count_partitions([2, 2])
        9
        >>> del m

        Notes
        =====

        If one looks at the workings of Knuth's algorithm M, it
        can be viewed as a traversal of a binary tree of parts.  A
        part has (up to) two children, the left child resulting from
        the spread operation, and the right child from the decrement
        operation.  The ordinary enumeration of multiset partitions is
        an in-order traversal of this tree, and with the partitions
        corresponding to paths from the root to the leaves.  The
        mapping from paths to partitions is a little complicated,
        since the partition would contain only those parts which are
        leaves or the parents of a spread link, not those which are
        parents of a decrement link.

        For counting purposes, it is sufficient to count leaves, and
        this can be done with a recursive in-order traversal.  The
        number of leaves of a subtree rooted at a particular part is a
        function only of that part itself, so memoizing has the
        potential to speed up the counting dramatically.

        This method follows a computational approach which is similar
        to the hypothetical memoized recursive function, but with two
        differences:

        1) This method is iterative, borrowing its structure from the
           other enumerations and maintaining an explicit stack of
           parts which are in the process of being counted.  (There
           may be multisets which can be counted reasonably quickly by
           this implementation, but which would overflow the default
           Python recursion limit with a recursive implementation.)

        2) Instead of using the part data structure directly, a more
           compact key is constructed.  This saves space, but more
           importantly coalesces some parts which would remain
           separate with physical keys.

        Unlike the enumeration functions, there is currently no _range
        version of count_partitions.  If someone wants to stretch
        their brain, it should be possible to construct one by
        memoizing with a histogram of counts rather than a single
        count, and combining the histograms.

        References
        ==========

        * Algorithm 7.1.2.5M in Volume 4A, Combinatorial Algorithms,
          Part 1, of The Art of Computer Programming, by Donald Knuth.

        """
        # number of partitions so far in the enumeration
        self.pcount = 0
        # dp_stack is list of lists of (part_key, start_count) pairs
        self.dp_stack = []

        # dp_map is map part_key-> count, where count represents the
        # number of multiset which are descendants of a part with this
        # key, **or any of its decrements**

        # Thus, when we find a part in the map, we add its count
        # value to the running total, cut off the enumeration, and
        # backtrack
        if self.dp_map is None:
            self.dp_map = {}

        self._initialize_enumeration(multiplicities)
        pkey = part_key(self.top_part())
        self.dp_stack.append([(pkey, 0)])
        while True:
            while self.spread_part_multiplicity():
                pkey = part_key(self.top_part())
                if pkey in self.dp_map:
                    # Already have a cached value for the count of the
                    # subtree rooted at this part.  Add it to the
                    # running counter, and break out of the spread
                    # loop.  The -1 below is to compensate for the
                    # leaf that this code path would otherwise find,
                    # and which gets incremented for below.
                    self.pcount += (self.dp_map[pkey] - 1)
                    self.lpart -= 1
                    break
                self.dp_stack.append([(pkey, self.pcount)])

            # M4  count a leaf partition
            self.pcount += 1

            # M5 (Decrease v)
            while not self.decrement_part(self.top_part()):
                # M6 (Backtrack)
                for key, oldcount in self.dp_stack.pop():
                    self.dp_map[key] = self.pcount - oldcount
                if self.lpart == 0:
                    return self.pcount
                self.lpart -= 1

            # At this point have successfully decremented the part on
            # the stack and it does not appear in the cache.  It needs
            # to be added to the list at the top of dp_stack
            pkey = part_key(self.top_part())
            self.dp_stack[-1].append((pkey, self.pcount),)
def part_key(part):
    """Helper for MultisetPartitionTraverser.count_partitions that
    creates a key for ``part``, that only includes information which can
    affect the count for that part.  (Any irrelevant information just
    reduces the effectiveness of dynamic programming.)

    Notes
    =====

    This member function is a candidate for future exploration. There
    are likely symmetries that can be exploited to coalesce some
    ``part_key`` values, and thereby save space and improve
    performance.
    """
    # The component number is irrelevant for counting partitions, so
    # leave it out of the memo key; only the (u, v) pair of each part
    # state contributes.
    return tuple(field for ps in part for field in (ps.u, ps.v))
| 36.31903 | 79 | 0.59023 |
901a32aa0ac9cf16715fa58417c1df5155635527 | 431 | py | Python | venv/Scripts/pip3-script.py | kevindsouza2306/ImplementingLeNet | 46592c9a5c2f65551bcf9e0a2f5cde61a8f6f0f5 | [
"MIT"
] | null | null | null | venv/Scripts/pip3-script.py | kevindsouza2306/ImplementingLeNet | 46592c9a5c2f65551bcf9e0a2f5cde61a8f6f0f5 | [
"MIT"
] | null | null | null | venv/Scripts/pip3-script.py | kevindsouza2306/ImplementingLeNet | 46592c9a5c2f65551bcf9e0a2f5cde61a8f6f0f5 | [
"MIT"
] | null | null | null | #!C:\Users\Administrator\PycharmProjects\ImplementingLeNet\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| 33.153846 | 82 | 0.684455 |
154ed22214830071d93b2005ab0c38382239fc0f | 6,771 | py | Python | pandas/tests/indexes/multi/test_analytics.py | gsyqax/pandas | cb35d8a938c9222d903482d2f66c62fece5a7aae | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 4 | 2015-06-09T07:27:52.000Z | 2021-08-06T13:50:05.000Z | pandas/tests/indexes/multi/test_analytics.py | gsyqax/pandas | cb35d8a938c9222d903482d2f66c62fece5a7aae | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 7 | 2015-08-30T23:51:00.000Z | 2018-12-29T19:52:35.000Z | pandas/tests/indexes/multi/test_analytics.py | gsyqax/pandas | cb35d8a938c9222d903482d2f66c62fece5a7aae | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 5 | 2017-10-04T22:24:49.000Z | 2021-08-06T13:50:13.000Z | import numpy as np
import pytest
from pandas.compat.numpy import _np_version_under1p17
import pandas as pd
from pandas import Index, MultiIndex, date_range, period_range
import pandas._testing as tm
def test_shift(idx):
    # GH8083: MultiIndex inherits shift() from the Index base class,
    # which rejects it for this index type regardless of arguments.
    err = "Not supported for type MultiIndex"
    for args in [(1,), (1, 2)]:
        with pytest.raises(NotImplementedError, match=err):
            idx.shift(*args)
def test_groupby(idx):
    # grouping by an external key array splits the labels positionally
    keys = np.array([1, 1, 1, 2, 2, 2])
    result = idx.groupby(keys)
    labels = idx.tolist()
    expected = {1: labels[:3], 2: labels[3:]}
    tm.assert_dict_equal(result, expected)
    # GH5620: grouping an index by itself maps every key to itself
    result = idx.groupby(idx)
    expected = {key: [key] for key in idx}
    tm.assert_dict_equal(result, expected)
def test_truncate():
    # Build a 6-row MultiIndex over integer levels [0..3] x [0..1].
    major_axis = Index(list(range(4)))
    minor_axis = Index(list(range(2)))
    major_codes = np.array([0, 0, 1, 2, 3, 3])
    minor_codes = np.array([0, 1, 0, 1, 0, 1])
    index = MultiIndex(
        levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
    )
    # truncating before label 1 should drop level-0 entries below it
    result = index.truncate(before=1)
    # NOTE(review): levels[0] only ever holds ints 0..3, so "foo" can never
    # be a member -- this assertion looks vestigial from an older string
    # fixture; confirm before tightening it to something meaningful.
    assert "foo" not in result.levels[0]
    assert 1 in result.levels[0]
    result = index.truncate(after=1)
    assert 2 not in result.levels[0]
    assert 1 in result.levels[0]
    result = index.truncate(before=1, after=2)
    assert len(result.levels[0]) == 2
    # positional truncate requires before <= after
    msg = "after < before"
    with pytest.raises(ValueError, match=msg):
        index.truncate(3, 1)
# TODO: reshape
def test_reorder_levels(idx):
    # asking for a level index beyond nlevels blows up
    err = "^Too many levels"
    with pytest.raises(IndexError, match=err):
        idx.reorder_levels([2, 1, 0])
def test_numpy_repeat():
    # np.repeat on a MultiIndex should match repeating the second level
    # of the product directly.
    reps = 2
    numbers = [1, 2, 3]
    names = np.array(["foo", "bar"])
    mi = MultiIndex.from_product([numbers, names], names=names)
    expected = MultiIndex.from_product([numbers, names.repeat(reps)], names=names)
    tm.assert_index_equal(np.repeat(mi, reps), expected)
    # np.repeat delegates to Index.repeat, which rejects the axis kwarg
    err = "the 'axis' parameter is not supported"
    with pytest.raises(ValueError, match=err):
        np.repeat(mi, reps, axis=1)
def test_append_mixed_dtypes():
    # GH 13660
    # Levels cover int, float-with-NaN, str, datetime, tz-aware datetime
    # and period dtypes.
    dti = date_range("2011-01-01", freq="M", periods=3)
    dti_tz = date_range("2011-01-01", freq="M", periods=3, tz="US/Eastern")
    pi = period_range("2011-01", freq="M", periods=3)
    mi = MultiIndex.from_arrays(
        [[1, 2, 3], [1.1, np.nan, 3.3], ["a", "b", "c"], dti, dti_tz, pi]
    )
    assert mi.nlevels == 6
    # self-append: the expected index is simply each level repeated
    res = mi.append(mi)
    exp = MultiIndex.from_arrays(
        [
            [1, 2, 3, 1, 2, 3],
            [1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
            ["a", "b", "c", "a", "b", "c"],
            dti.append(dti),
            dti_tz.append(dti_tz),
            pi.append(pi),
        ]
    )
    tm.assert_index_equal(res, exp)
    # appending string labels onto every level: each expected level mixes
    # the original values with the string labels
    other = MultiIndex.from_arrays(
        [
            ["x", "y", "z"],
            ["x", "y", "z"],
            ["x", "y", "z"],
            ["x", "y", "z"],
            ["x", "y", "z"],
            ["x", "y", "z"],
        ]
    )
    res = mi.append(other)
    exp = MultiIndex.from_arrays(
        [
            [1, 2, 3, "x", "y", "z"],
            [1.1, np.nan, 3.3, "x", "y", "z"],
            ["a", "b", "c", "x", "y", "z"],
            dti.append(pd.Index(["x", "y", "z"])),
            dti_tz.append(pd.Index(["x", "y", "z"])),
            pi.append(pd.Index(["x", "y", "z"])),
        ]
    )
    tm.assert_index_equal(res, exp)
def test_iter(idx):
    # iterating a MultiIndex yields its label tuples in order
    expected = [
        ("foo", "one"),
        ("foo", "two"),
        ("bar", "one"),
        ("baz", "two"),
        ("qux", "one"),
        ("qux", "two"),
    ]
    assert list(idx) == expected
def test_sub(idx):
    # subtraction now raises (previously it meant set difference)
    sub_msg = "cannot perform __sub__ with this index type: MultiIndex"
    rsub_msg = "cannot perform __rsub__ with this index type: MultiIndex"
    with pytest.raises(TypeError, match=sub_msg):
        idx - idx[-3:]
    with pytest.raises(TypeError, match=sub_msg):
        idx[-3:] - idx
    with pytest.raises(TypeError, match=sub_msg):
        idx[-3:] - idx.tolist()
    # reflected subtraction from a plain list hits __rsub__
    with pytest.raises(TypeError, match=rsub_msg):
        idx.tolist() - idx[-3:]
def test_map(idx):
    # mapping the identity callable should round-trip the index
    if isinstance(idx, pd.UInt64Index):
        # we don't infer UInt64, so the mapped result comes back as int64
        expected = idx.astype("int64")
    else:
        expected = idx
    tm.assert_index_equal(idx.map(lambda x: x), expected)
@pytest.mark.parametrize(
    "mapper",
    [
        # dict-of-{key: value} and Series both exercise the dict-like path
        lambda values, idx: {i: e for e, i in zip(values, idx)},
        lambda values, idx: pd.Series(values, idx),
    ],
)
def test_map_dictlike(idx, mapper):
    if isinstance(idx, (pd.CategoricalIndex, pd.IntervalIndex)):
        pytest.skip(f"skipping tests for {type(idx)}")
    # build an identity mapping (each index entry maps to itself)
    identity = mapper(idx.values, idx)
    # we don't infer to UInt64 for a dict
    if isinstance(idx, pd.UInt64Index) and isinstance(identity, dict):
        expected = idx.astype("int64")
    else:
        expected = idx
    result = idx.map(identity)
    tm.assert_index_equal(result, expected)
    # empty mappable: a mapping whose values are all NaN yields an
    # all-NaN index of the same length
    expected = pd.Index([np.nan] * len(idx))
    result = idx.map(mapper(expected, idx))
    tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
    "func",
    [
        np.exp,
        np.exp2,
        np.expm1,
        np.log,
        np.log2,
        np.log10,
        np.log1p,
        np.sqrt,
        np.sin,
        np.cos,
        np.tan,
        np.arcsin,
        np.arccos,
        np.arctan,
        np.sinh,
        np.cosh,
        np.tanh,
        np.arcsinh,
        np.arccosh,
        np.arctanh,
        np.deg2rad,
        np.rad2deg,
    ],
    ids=lambda func: func.__name__,
)
def test_numpy_ufuncs(idx, func):
    # test ufuncs of numpy. see:
    # https://numpy.org/doc/stable/reference/ufuncs.html
    # MultiIndex elements are tuples, so elementwise math ufuncs cannot
    # apply; the exception type/message depends on the numpy version.
    if _np_version_under1p17:
        expected_exception = AttributeError
        msg = f"'tuple' object has no attribute '{func.__name__}'"
    else:
        expected_exception = TypeError
        msg = (
            "loop of ufunc does not support argument 0 of type tuple which "
            f"has no callable {func.__name__} method"
        )
    with pytest.raises(expected_exception, match=msg):
        func(idx)
@pytest.mark.parametrize(
    "func",
    [np.isfinite, np.isinf, np.isnan, np.signbit],
    ids=lambda func: func.__name__,
)
def test_numpy_type_funcs(idx, func):
    # type-checking ufuncs cannot safely coerce tuple entries, so they
    # raise rather than broadcasting over the MultiIndex.
    err = (
        f"ufunc '{func.__name__}' not supported for the input types, and the inputs "
        "could not be safely coerced to any supported types according to "
        "the casting rule ''safe''"
    )
    with pytest.raises(TypeError, match=err):
        func(idx)
| 25.745247 | 85 | 0.570669 |
d5e87d1df246245a376e681132a77064203e5d1e | 27,609 | py | Python | pandas/tseries/tools.py | augustoproiete-forks/pandas-dev--pandas | fcd73ad2e7482414b61d47056c6c9c220b11702c | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null | pandas/tseries/tools.py | augustoproiete-forks/pandas-dev--pandas | fcd73ad2e7482414b61d47056c6c9c220b11702c | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null | pandas/tseries/tools.py | augustoproiete-forks/pandas-dev--pandas | fcd73ad2e7482414b61d47056c6c9c220b11702c | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null | from datetime import datetime, timedelta, time
import numpy as np
from collections import MutableMapping
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.core.common as com
from pandas.core.common import ABCIndexClass, ABCSeries, ABCDataFrame
import pandas.compat as compat
from pandas.util.decorators import deprecate_kwarg
_DATEUTIL_LEXER_SPLIT = None
try:
# Since these are private methods from dateutil, it is safely imported
# here so in case this interface changes, pandas will just fallback
# to not using the functionality
from dateutil.parser import _timelex
if hasattr(_timelex, 'split'):
def _lexer_split_from_str(dt_str):
# The StringIO(str(_)) is for dateutil 2.2 compatibility
return _timelex.split(compat.StringIO(str(dt_str)))
_DATEUTIL_LEXER_SPLIT = _lexer_split_from_str
except (ImportError, AttributeError):
pass
def _infer_tzinfo(start, end):
def _infer(a, b):
tz = a.tzinfo
if b and b.tzinfo:
if not (tslib.get_timezone(tz) == tslib.get_timezone(b.tzinfo)):
raise AssertionError('Inputs must both have the same timezone,'
' {0} != {1}'.format(tz, b.tzinfo))
return tz
tz = None
if start is not None:
tz = _infer(start, end)
elif end is not None:
tz = _infer(end, start)
return tz
def _guess_datetime_format(dt_str, dayfirst=False,
                           dt_str_parse=compat.parse_date,
                           dt_str_split=_DATEUTIL_LEXER_SPLIT):
    """
    Guess the datetime format of a given datetime string.

    Parameters
    ----------
    dt_str : string, datetime string to guess the format of
    dayfirst : boolean, default False
        If True parses dates with the day first, eg 20/01/2005
        Warning: dayfirst=True is not strict, but will prefer to parse
        with day first (this is a known bug).
    dt_str_parse : function, defaults to `compat.parse_date` (dateutil)
        This function should take in a datetime string and return
        a `datetime.datetime` guess that the datetime string represents
    dt_str_split : function, defaults to `_DATEUTIL_LEXER_SPLIT` (dateutil)
        This function should take in a datetime string and return
        a list of strings, the guess of the various specific parts
        e.g. '2011/12/30' -> ['2011', '/', '12', '/', '30']

    Returns
    -------
    ret : datetime format string (for `strftime` or `strptime`),
        or None if no confident guess could be made
    """
    if dt_str_parse is None or dt_str_split is None:
        return None
    if not isinstance(dt_str, compat.string_types):
        return None
    day_attribute_and_format = (('day',), '%d', 2)
    # attr name, format, padding (if any)
    datetime_attrs_to_format = [
        (('year', 'month', 'day'), '%Y%m%d', 0),
        (('year',), '%Y', 0),
        (('month',), '%B', 0),
        (('month',), '%b', 0),
        (('month',), '%m', 2),
        day_attribute_and_format,
        (('hour',), '%H', 2),
        (('minute',), '%M', 2),
        (('second',), '%S', 2),
        (('microsecond',), '%f', 6),
        (('second', 'microsecond'), '%S.%f', 0),
    ]
    if dayfirst:
        # try to match the day token before anything else
        datetime_attrs_to_format.remove(day_attribute_and_format)
        datetime_attrs_to_format.insert(0, day_attribute_and_format)
    try:
        parsed_datetime = dt_str_parse(dt_str, dayfirst=dayfirst)
    except Exception:
        # BUGFIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit. In case the datetime can't be
        # parsed, its format cannot be guessed.
        return None
    if parsed_datetime is None:
        return None
    try:
        tokens = dt_str_split(dt_str)
    except Exception:
        # BUGFIX: was a bare ``except:``. In case the datetime string
        # can't be split, its format cannot be guessed.
        return None
    format_guess = [None] * len(tokens)
    found_attrs = set()
    for attrs, attr_format, padding in datetime_attrs_to_format:
        # If a given attribute has been placed in the format string, skip
        # over other formats for that same underlying attribute (IE, month
        # can be represented in multiple different ways)
        if set(attrs) & found_attrs:
            continue
        if all(getattr(parsed_datetime, attr) is not None for attr in attrs):
            for i, token_format in enumerate(format_guess):
                token_filled = tokens[i].zfill(padding)
                if (token_format is None and
                        token_filled == parsed_datetime.strftime(attr_format)):
                    format_guess[i] = attr_format
                    tokens[i] = token_filled
                    found_attrs.update(attrs)
                    break
    # Only consider it a valid guess if we have a year, month and day
    if len(set(['year', 'month', 'day']) & found_attrs) != 3:
        return None
    output_format = []
    for i, guess in enumerate(format_guess):
        if guess is not None:
            # Either fill in the format placeholder (like %Y)
            output_format.append(guess)
        else:
            # Or just the token separate (IE, the dashes in "01-01-2013")
            try:
                # If the token is numeric, then we likely didn't parse it
                # properly, so our guess is wrong
                float(tokens[i])
                return None
            except ValueError:
                pass
            output_format.append(tokens[i])
    guessed_format = ''.join(output_format)
    # rebuild string, capturing any inferred padding
    dt_str = ''.join(tokens)
    if parsed_datetime.strftime(guessed_format) == dt_str:
        return guessed_format
    # round-trip failed: the guess does not reproduce the input
    return None
def _guess_datetime_format_for_array(arr, **kwargs):
    # Guess the datetime format from the first non-null element, if any;
    # returns None implicitly for an all-null array.
    non_nan = com.notnull(arr).nonzero()[0]
    if len(non_nan):
        return _guess_datetime_format(arr[non_nan[0]], **kwargs)
@deprecate_kwarg(old_arg_name='coerce', new_arg_name='errors',
                 mapping={True: 'coerce', False: 'raise'})
def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
                utc=None, box=True, format=None, exact=True, coerce=None,
                unit=None, infer_datetime_format=False):
    """
    Convert argument to datetime.

    Parameters
    ----------
    arg : string, datetime, list, tuple, 1-d array, Series
        or DataFrame/dict-like

        .. versionadded: 0.18.1 (DataFrame/dict-like support)
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'

        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as NaT
        - If 'ignore', then invalid parsing will return the input
    dayfirst : boolean, default False
        Specify a date parse order if `arg` is str or its list-likes.
        If True, parses dates with the day first, eg 10/11/12 is parsed as
        2012-11-10.
        Warning: dayfirst=True is not strict, but will prefer to parse
        with day first (this is a known bug, based on dateutil behavior).
    yearfirst : boolean, default False
        Specify a date parse order if `arg` is str or its list-likes.

        - If True parses dates with the year first, eg 10/11/12 is parsed as
          2010-11-12.
        - If both dayfirst and yearfirst are True, yearfirst is preceded (same
          as dateutil).

        Warning: yearfirst=True is not strict, but will prefer to parse
        with year first (this is a known bug, based on dateutil beahavior).

        .. versionadded: 0.16.1
    utc : boolean, default None
        Return UTC DatetimeIndex if True (converting any tz-aware
        datetime.datetime objects as well).
    box : boolean, default True

        - If True returns a DatetimeIndex
        - If False returns ndarray of values.
    format : string, default None
        strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
        all the way up to nanoseconds.
    exact : boolean, True by default

        - If True, require an exact format match.
        - If False, allow the format to match anywhere in the target string.
    coerce : deprecated alias for ``errors`` (True -> 'coerce',
        False -> 'raise'); remapped by the ``deprecate_kwarg`` decorator.
    unit : string, default 'ns'
        unit of the arg (D,s,ms,us,ns) denote the unit in epoch
        (e.g. a unix timestamp), which is an integer/float number.
    infer_datetime_format : boolean, default False
        If True and no `format` is given, attempt to infer the format of the
        datetime strings, and if it can be inferred, switch to a faster
        method of parsing them. In some cases this can increase the parsing
        speed by ~5-10x.

    Returns
    -------
    ret : datetime if parsing succeeded.
        Return type depends on input:

        - list-like: DatetimeIndex
        - Series: Series of datetime64 dtype
        - scalar: Timestamp

        In case when it is not possible to return designated types (e.g. when
        any element of input is before Timestamp.min or after Timestamp.max)
        return will have datetime.datetime type (or correspoding array/Series).

    Examples
    --------
    Assembling a datetime from multiple columns of a DataFrame. The keys can be
    common abbreviations like ['year', 'month', 'day', 'minute', 'second',
    'ms', 'us', 'ns']) or plurals of the same

    >>> df = pd.DataFrame({'year': [2015, 2016],
                           'month': [2, 3],
                           'day': [4, 5]})
    >>> pd.to_datetime(df)
    0   2015-02-04
    1   2016-03-05
    dtype: datetime64[ns]

    If a date that does not meet timestamp limitations, passing errors='coerce'
    will force to NaT. Furthermore this will force non-dates to NaT as well.

    >>> pd.to_datetime('13000101', format='%Y%m%d')
    datetime.datetime(1300, 1, 1, 0, 0)
    >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
    NaT

    Passing infer_datetime_format=True can often-times speedup a parsing
    if its not an ISO8601 format exactly, but in a regular format.

    >>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000']*1000)
    >>> s.head()
    0    3/11/2000
    1    3/12/2000
    2    3/13/2000
    3    3/11/2000
    4    3/12/2000
    dtype: object

    >>> %timeit pd.to_datetime(s,infer_datetime_format=True)
    100 loops, best of 3: 10.4 ms per loop

    >>> %timeit pd.to_datetime(s,infer_datetime_format=False)
    1 loop, best of 3: 471 ms per loop
    """
    # Thin public wrapper: all the real dispatch/parsing work lives in
    # _to_datetime (which additionally accepts ``freq`` for internal
    # DatetimeIndex construction).
    return _to_datetime(arg, errors=errors, dayfirst=dayfirst,
                        yearfirst=yearfirst,
                        utc=utc, box=box, format=format, exact=exact,
                        unit=unit, infer_datetime_format=infer_datetime_format)
def _to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
                 utc=None, box=True, format=None, exact=True,
                 unit=None, freq=None, infer_datetime_format=False):
    """
    Same as to_datetime, but accept freq for
    DatetimeIndex internal construction.

    Dispatches on the type of ``arg`` (scalar, Series, DataFrame/dict,
    Index, list-like) and funnels everything through _convert_listlike.
    """
    from pandas.tseries.index import DatetimeIndex

    def _convert_listlike(arg, box, format, name=None):
        if isinstance(arg, (list, tuple)):
            arg = np.array(arg, dtype='O')
        # these are shortcutable
        if com.is_datetime64_ns_dtype(arg):
            if box and not isinstance(arg, DatetimeIndex):
                try:
                    return DatetimeIndex(arg, tz='utc' if utc else None,
                                         name=name)
                except ValueError:
                    pass
            return arg
        elif com.is_datetime64tz_dtype(arg):
            if not isinstance(arg, DatetimeIndex):
                return DatetimeIndex(arg, tz='utc' if utc else None)
            if utc:
                arg = arg.tz_convert(None).tz_localize('UTC')
            return arg
        elif unit is not None:
            # epoch-number path: interpret values as counts of ``unit``
            if format is not None:
                raise ValueError("cannot specify both format and unit")
            arg = getattr(arg, 'values', arg)
            result = tslib.array_with_unit_to_datetime(arg, unit,
                                                       errors=errors)
            if box:
                if errors == 'ignore':
                    from pandas import Index
                    return Index(result)
                return DatetimeIndex(result, tz='utc' if utc else None,
                                     name=name)
            return result
        elif getattr(arg, 'ndim', 1) > 1:
            raise TypeError('arg must be a string, datetime, list, tuple, '
                            '1-d array, or Series')
        arg = com._ensure_object(arg)
        require_iso8601 = False
        if infer_datetime_format and format is None:
            format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
        if format is not None:
            # There is a special fast-path for iso8601 formatted
            # datetime strings, so in those cases don't use the inferred
            # format because this path makes process slower in this
            # special case
            format_is_iso8601 = _format_is_iso(format)
            if format_is_iso8601:
                require_iso8601 = not infer_datetime_format
                format = None
        try:
            result = None
            if format is not None:
                # shortcut formatting here
                if format == '%Y%m%d':
                    try:
                        result = _attempt_YYYYMMDD(arg, errors=errors)
                    except Exception:
                        # BUGFIX: was a bare ``except:`` which also caught
                        # KeyboardInterrupt/SystemExit while re-wrapping
                        # the error as a ValueError.
                        raise ValueError("cannot convert the input to "
                                         "'%Y%m%d' date format")
                # fallback
                if result is None:
                    try:
                        result = tslib.array_strptime(
                            arg, format, exact=exact, errors=errors)
                    except tslib.OutOfBoundsDatetime:
                        if errors == 'raise':
                            raise
                        result = arg
                    except ValueError:
                        # if format was inferred, try falling back
                        # to array_to_datetime - terminate here
                        # for specified formats
                        if not infer_datetime_format:
                            if errors == 'raise':
                                raise
                            result = arg
            if result is None and (format is None or infer_datetime_format):
                result = tslib.array_to_datetime(
                    arg,
                    errors=errors,
                    utc=utc,
                    dayfirst=dayfirst,
                    yearfirst=yearfirst,
                    freq=freq,
                    require_iso8601=require_iso8601
                )
            if com.is_datetime64_dtype(result) and box:
                result = DatetimeIndex(result,
                                       tz='utc' if utc else None,
                                       name=name)
            return result
        except ValueError as e:
            # last resort: coerce python datetimes directly, preserving
            # the original error if that fails too
            try:
                values, tz = tslib.datetime_to_datetime64(arg)
                return DatetimeIndex._simple_new(values, name=name, tz=tz)
            except (ValueError, TypeError):
                raise e

    # dispatch on input type
    if arg is None:
        return arg
    elif isinstance(arg, tslib.Timestamp):
        return arg
    elif isinstance(arg, ABCSeries):
        from pandas import Series
        values = _convert_listlike(arg._values, False, format)
        return Series(values, index=arg.index, name=arg.name)
    elif isinstance(arg, (ABCDataFrame, MutableMapping)):
        return _assemble_from_unit_mappings(arg, errors=errors)
    elif isinstance(arg, ABCIndexClass):
        return _convert_listlike(arg, box, format, name=arg.name)
    elif com.is_list_like(arg):
        return _convert_listlike(arg, box, format)
    # scalar: wrap, convert, unwrap
    return _convert_listlike(np.array([arg]), box, format)[0]
# mappings for assembling units
_unit_map = {'year': 'year',
'years': 'year',
'month': 'month',
'months': 'month',
'day': 'day',
'days': 'day',
'hour': 'h',
'hours': 'h',
'minute': 'm',
'minutes': 'm',
'second': 's',
'seconds': 's',
'ms': 'ms',
'millisecond': 'ms',
'milliseconds': 'ms',
'us': 'us',
'microsecond': 'us',
'microseconds': 'us',
'ns': 'ns',
'nanosecond': 'ns',
'nanoseconds': 'ns'
}
def _assemble_from_unit_mappings(arg, errors):
"""
assemble the unit specifed fields from the arg (DataFrame)
Return a Series for actual parsing
Parameters
----------
arg : DataFrame
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
Returns
-------
Series
"""
from pandas import to_timedelta, to_numeric, DataFrame
arg = DataFrame(arg)
if not arg.columns.is_unique:
raise ValueError("cannot assemble with duplicate keys")
# replace passed unit with _unit_map
def f(value):
if value in _unit_map:
return _unit_map[value]
# m is case significant
if value.lower() in _unit_map:
return _unit_map[value.lower()]
return value
unit = {k: f(k) for k in arg.keys()}
unit_rev = {v: k for k, v in unit.items()}
# we require at least Ymd
required = ['year', 'month', 'day']
req = sorted(list(set(required) - set(unit_rev.keys())))
if len(req):
raise ValueError("to assemble mappings requires at "
"least that [year, month, day] be specified: "
"[{0}] is missing".format(','.join(req)))
# keys we don't recognize
excess = sorted(list(set(unit_rev.keys()) - set(_unit_map.values())))
if len(excess):
raise ValueError("extra keys have been passed "
"to the datetime assemblage: "
"[{0}]".format(','.join(excess)))
def coerce(values):
# we allow coercion to if errors allows
return to_numeric(values, errors=errors)
values = (coerce(arg[unit_rev['year']]) * 10000 +
coerce(arg[unit_rev['month']]) * 100 +
coerce(arg[unit_rev['day']]))
try:
values = to_datetime(values, format='%Y%m%d', errors=errors)
except (TypeError, ValueError) as e:
raise ValueError("cannot assemble the "
"datetimes: {0}".format(e))
for u in ['h', 'm', 's', 'ms', 'us', 'ns']:
value = unit_rev.get(u)
if value is not None and value in arg:
try:
values += to_timedelta(coerce(arg[value]),
unit=u,
errors=errors)
except (TypeError, ValueError) as e:
raise ValueError("cannot assemble the datetimes "
"[{0}]: {1}".format(value, e))
return values
def _attempt_YYYYMMDD(arg, errors):
    """ try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
    arg is a passed in as an object dtype, but could really be ints/strings
    with nan-like/or floats (e.g. with nan)

    Parameters
    ----------
    arg : passed value
    errors : 'raise','ignore','coerce'

    Returns
    -------
    ndarray of datetime64[ns] when one of the strategies succeeds,
    otherwise None so the caller can fall back to general parsing.
    """
    def calc(carg):
        # calculate the actual result: split YYYYMMDD ints into fields
        carg = carg.astype(object)
        parsed = lib.try_parse_year_month_day(carg / 10000,
                                              carg / 100 % 100,
                                              carg % 100)
        return tslib.array_to_datetime(parsed, errors=errors)

    def calc_with_mask(carg, mask):
        # convert only the valid entries; everything else becomes NaT
        result = np.empty(carg.shape, dtype='M8[ns]')
        iresult = result.view('i8')
        iresult[~mask] = tslib.iNaT
        result[mask] = calc(carg[mask].astype(np.float64).astype(np.int64)).\
            astype('M8[ns]')
        return result

    # BUGFIX: the three fallbacks below used bare ``except:`` clauses,
    # which also swallowed KeyboardInterrupt/SystemExit; narrowed to
    # ``except Exception`` while preserving the try-next-strategy flow.

    # try intlike / strings that are ints
    try:
        return calc(arg.astype(np.int64))
    except Exception:
        pass

    # a float with actual np.nan
    try:
        carg = arg.astype(np.float64)
        return calc_with_mask(carg, com.notnull(carg))
    except Exception:
        pass

    # string with NaN-like
    try:
        mask = ~lib.ismember(arg, tslib._nat_strings)
        return calc_with_mask(arg, mask)
    except Exception:
        pass

    return None
def _format_is_iso(f):
"""
Does format match the iso8601 set that can be handled by the C parser?
Generally of form YYYY-MM-DDTHH:MM:SS - date separator can be different
but must be consistent. Leading 0s in dates and times are optional.
"""
iso_template = '%Y{date_sep}%m{date_sep}%d{time_sep}%H:%M:%S.%f'.format
excluded_formats = ['%Y%m%d', '%Y%m', '%Y']
for date_sep in [' ', '/', '\\', '-', '.', '']:
for time_sep in [' ', 'T']:
if (iso_template(date_sep=date_sep,
time_sep=time_sep
).startswith(f) and f not in excluded_formats):
return True
return False
def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
    """
    Try hard to parse datetime string, leveraging dateutil plus some extra
    goodies like quarter recognition.

    Parameters
    ----------
    arg : compat.string_types
    freq : str or DateOffset, default None
        Helps with interpreting time string if supplied
    dayfirst : bool, default None
        If None uses default from print_config
    yearfirst : bool, default None
        If None uses default from print_config

    Returns
    -------
    datetime, datetime/dateutil.parser._result, str
    """
    from pandas.core.config import get_option
    # non-strings pass straight through untouched
    if not isinstance(arg, compat.string_types):
        return arg
    from pandas.tseries.offsets import DateOffset
    if isinstance(freq, DateOffset):
        # normalize an offset object down to its frequency string
        freq = freq.rule_code
    # unspecified flags fall back to the global display options
    if dayfirst is None:
        dayfirst = get_option("display.date_dayfirst")
    if yearfirst is None:
        yearfirst = get_option("display.date_yearfirst")
    return tslib.parse_datetime_string_with_reso(arg, freq=freq,
                                                 dayfirst=dayfirst,
                                                 yearfirst=yearfirst)
# Re-export the C-level helpers under their public module-level names.
DateParseError = tslib.DateParseError
normalize_date = tslib.normalize_date
# Fixed time formats for time parsing
_time_formats = ["%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
                 "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p"]
def _guess_time_format_for_array(arr):
    # Try to guess the time format based on the first non-NaN element;
    # returns None when nothing matches or the array is all-null.
    non_nan = com.notnull(arr).nonzero()[0]
    if len(non_nan):
        sample = arr[non_nan[0]]
        for fmt in _time_formats:
            try:
                datetime.strptime(sample, fmt)
            except ValueError:
                continue
            return fmt
    return None
def to_time(arg, format=None, infer_time_format=False, errors='raise'):
    """
    Parse time strings to time objects using fixed strptime formats ("%H:%M",
    "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
    "%I%M%S%p")
    Use infer_time_format if all the strings are in the same format to speed
    up conversion.

    Parameters
    ----------
    arg : string in time format, datetime.time, list, tuple, 1-d array,  Series
    format : str, default None
        Format used to convert arg into a time object. If None, fixed formats
        are used.
    infer_time_format: bool, default False
        Infer the time format based on the first non-NaN element.  If all
        strings are in the same format, this will speed up conversion.
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as None
        - If 'ignore', then invalid parsing will return the input

    Returns
    -------
    datetime.time
    """
    from pandas.core.series import Series
    def _convert_listlike(arg, format):
        if isinstance(arg, (list, tuple)):
            arg = np.array(arg, dtype='O')
        elif getattr(arg, 'ndim', 1) > 1:
            raise TypeError('arg must be a string, datetime, list, tuple, '
                            '1-d array, or Series')
        arg = com._ensure_object(arg)
        if infer_time_format and format is None:
            format = _guess_time_format_for_array(arg)
        times = []
        if format is not None:
            # a single known format: every element must match it
            for element in arg:
                try:
                    times.append(datetime.strptime(element, format).time())
                except (ValueError, TypeError):
                    if errors == 'raise':
                        raise ValueError("Cannot convert %s to a time with "
                                         "given format %s" % (element, format))
                    elif errors == 'ignore':
                        return arg
                    else:
                        times.append(None)
        else:
            # try each fixed format per element, remembering the first one
            # that matched so later elements try it first
            formats = _time_formats[:]
            format_found = False
            for element in arg:
                time_object = None
                for time_format in formats:
                    try:
                        time_object = datetime.strptime(element,
                                                        time_format).time()
                        if not format_found:
                            # Put the found format in front
                            fmt = formats.pop(formats.index(time_format))
                            formats.insert(0, fmt)
                            format_found = True
                        break
                    except (ValueError, TypeError):
                        continue
                if time_object is not None:
                    times.append(time_object)
                elif errors == 'raise':
                    raise ValueError("Cannot convert arg {arg} to "
                                     "a time".format(arg=arg))
                elif errors == 'ignore':
                    return arg
                else:
                    times.append(None)
        return times
    # dispatch on input type
    if arg is None:
        return arg
    elif isinstance(arg, time):
        return arg
    elif isinstance(arg, Series):
        values = _convert_listlike(arg._values, format)
        return Series(values, index=arg.index, name=arg.name)
    elif isinstance(arg, ABCIndexClass):
        return _convert_listlike(arg, format)
    elif com.is_list_like(arg):
        return _convert_listlike(arg, format)
    # scalar: wrap, convert, unwrap
    return _convert_listlike(np.array([arg]), format)[0]
def format(dt):
    """Returns date in YYYYMMDD format."""
    # NOTE: this deliberately shadows the built-in ``format`` inside this
    # module; the name is kept for backward compatibility with callers.
    return '{:%Y%m%d}'.format(dt)
# Epoch anchor for Excel/OLE automation date serial numbers.
OLE_TIME_ZERO = datetime(1899, 12, 30, 0, 0, 0)
def ole2datetime(oledt):
    """function for converting excel date to normal date format"""
    days = float(oledt)
    # Excel has a bug where it thinks the date 2/29/1900 exists
    # we just reject any date before 3/1/1900.
    if days < 61:
        raise ValueError("Value is outside of acceptable range: %s " % days)
    return OLE_TIME_ZERO + timedelta(days=days)
| 35.260536 | 79 | 0.567569 |
017f4f6c91f2b3297fc3e5bc09505dc9bd927f4e | 1,997 | py | Python | scripts/update_bugs_assets.py | theastropath/turbot | c623cd9af73876efdd315f3d7dd09448a06d3331 | [
"MIT"
] | 10 | 2020-04-11T23:43:42.000Z | 2021-06-18T17:31:09.000Z | scripts/update_bugs_assets.py | theastropath/turbot | c623cd9af73876efdd315f3d7dd09448a06d3331 | [
"MIT"
] | 116 | 2020-04-15T20:37:49.000Z | 2022-03-29T22:21:25.000Z | scripts/update_bugs_assets.py | theastropath/turbot | c623cd9af73876efdd315f3d7dd09448a06d3331 | [
"MIT"
] | 3 | 2020-04-11T23:56:34.000Z | 2020-06-18T17:44:34.000Z | #!/usr/bin/env python3
import csv
import re
from enum import Enum
from pathlib import Path
import requests
from bs4 import BeautifulSoup
# Fetch the New Horizons bug listing from the wiki and parse it once;
# the module-level ``tree`` is consumed by ingest() below.
page = requests.get("https://animalcrossing.fandom.com/wiki/Bugs_(New_Horizons)")
tree = BeautifulSoup(page.content, "lxml")
class Hemisphere(Enum):
    # The value doubles as the index of the hemisphere's table inside the
    # wiki page's ".tabber table" selection (see ingest()).
    NORTHERN = 0
    SOUTHERN = 1
def clean(item):
    """Normalize a scraped table cell: strip whitespace, turn check marks
    into "1" and a lone dash into "0"."""
    text = item.strip().replace("✓", "1")
    return re.sub("^-$", "0", text)
def ingest(writer, hemisphere):
    """Scrape one hemisphere's bug table from the parsed wiki page and
    write one cleaned CSV row per bug to ``writer``."""
    def cell_value(cell):
        # image cells carry their payload in the lazy-load attribute
        img = cell.find("img")
        return img["data-src"] if img else cell.text

    table = tree.select(".tabber table")[hemisphere.value]
    # there's some weird nesting on the northern bug table, keep an eye
    # on this; it may eventually be fixed on the wiki page and this code
    # will break
    if hemisphere == Hemisphere.NORTHERN:
        table = table.select("table")[0]
    rows = [
        [cell_value(cell) for cell in row.select("td")]
        for row in table.select("tr")
    ]
    # skip the header row at index 0
    for raw in rows[1:]:
        data = [clean(value) for value in raw]
        if data:
            writer.writerow([
                hemisphere.name.lower(),
                data[0].lower(),
                data[1],
                int(data[2].replace(",", "")),
                *[d.lower() for d in data[3:]],
            ])
# Rebuild the bundled bugs.csv asset from both hemispheres' wiki tables.
with open(Path("src") / "turbot" / "assets" / "bugs.csv", "w", newline="") as out:
    writer = csv.writer(out)
    # header row: fixed metadata columns followed by one column per month
    writer.writerow(
        [
            "hemisphere",
            "name",
            "image",
            "price",
            "location",
            "time",
            "jan",
            "feb",
            "mar",
            "apr",
            "may",
            "jun",
            "jul",
            "aug",
            "sep",
            "oct",
            "nov",
            "dec",
        ]
    )
    ingest(writer, Hemisphere.NORTHERN)
    ingest(writer, Hemisphere.SOUTHERN)
| 24.353659 | 82 | 0.51978 |
5cde6b38c9ddb19747bdf4e89a338c06cfd776bf | 2,864 | py | Python | dsp/digaree/cgen_srf.py | FelixVi/Bedrock | 82072341902048e5b37022512909d209efb243d6 | [
"RSA-MD"
] | 17 | 2019-09-29T14:52:18.000Z | 2022-03-28T21:16:25.000Z | dsp/digaree/cgen_srf.py | FelixVi/Bedrock | 82072341902048e5b37022512909d209efb243d6 | [
"RSA-MD"
] | null | null | null | dsp/digaree/cgen_srf.py | FelixVi/Bedrock | 82072341902048e5b37022512909d209efb243d6 | [
"RSA-MD"
] | 4 | 2019-12-04T17:30:38.000Z | 2021-11-01T01:52:13.000Z | #!/usr/bin/python
# SRF cavity analog state computer
# Takes in cavity field, forward, and reverse vector measurements
# and computes the cavity detune frequency, decay parameter, and
# power imbalance for the purposes of a tuning loop and quench detector.
# Keeps a history of the previous four cavity field measurements so it
# can get dV/dt.
# Output of this program should be both valid c99 and valid input
# for the scheduler/mapper.
# See the rest of the Digaree infrastructure for details.
# NOTE: the trailing integer arguments of the cpx_* / mul / sub calls are
# passed through to cgen_lib's fixed-point primitives; see cgen_lib for
# their exact scaling semantics.
from cgen_lib import cgen_init, given, mul, sub, cpx_sub, cpx_mul
from cgen_lib import cpx_scale, cpx_dot, cpx_inv_conj, cpx_mul_conj
from cgen_lib import cpx_mag, set_result, cpx_persist, cpx_copy, cpx_add
cgen_init("cgen_srf.py")
# History of measured cavity voltages, used to compute dV/dt
# Initial value in simulation should be settable from initgen?
# Cut-and-paste for now, until we at least get the right answer.
cpx_persist("v1")
cpx_persist("v2")
cpx_persist("v3")
cpx_persist("v4")
# These lines declare the input variables,
# first six streamed from the radio
given("k_r")  # forward
given("k_i")  # forward
given("r_r")  # reverse
given("r_i")  # reverse
given("v_r")  # cavity
given("v_i")  # cavity
# next eight host-settable
given("beta_r")
given("beta_i")
given("invT")
given("two")  # needed by 1/x macro
given("sclr")
given("sclf")
given("sclv")
given("powt")
# Get (still unscaled) derivative
# Implements [-2 -1 0 1 2] FIR
cpx_sub("dv1", "v", "v4", 3)  # note multiply-by-4
cpx_sub("dv2", "v1", "v3", 2)  # note multiply-by-2
cpx_add("dvx", "dv1", "dv2", 3)  # note multiply-by-4
# Result is the amount that V will change in 80*T.
# Including the second-order CIC used to generate input samples,
# this computation has a 3*T group delay.
# State-variable computation of the complex number a,
# yielding detune frequency and decay rate
cpx_inv_conj("x5", "v", 0, 3)
cpx_scale("dvdt", "dvx", "invT", 1)
cpx_mul("x3", "k", "beta", 1, 1)
cpx_sub("x4", "dvdt", "x3", 2)  # some evidence this shift should be 1
cpx_mul_conj("a", "x4", "x5", 2, 2)
set_result("ab", "a_r", "a_i")
# Power balance measure of cavity dissipation; uses magnitudes only
cpx_mag("magr", "r", 0)  # reverse
mul("powr", "sclr", "magr", 0)
cpx_mag("magf", "k", 0)  # forward
mul("powf", "sclf", "magf", 0)
sub("wgnet", "powf", "powr", 1)  # net power transferred by waveguide
cpx_dot("dv2", "v", "dvx", 2)  # 2 * V * dV/dt = d/dt(V^2)
mul("dudt", "dv2", "sclv", 3)  # dU/dt = power to stored energy
sub("diss", "wgnet", "dudt", 1)  # est. of dissipation in cold cavity
sub("perr", "diss", "powt", 1)  # allow for measurement error
set_result("cd", "diss", "perr")  # trigger quench fault if perr > 0
# Watch these like a hawk: order of execution matters,
# unlike everything else here
cpx_copy("v4", "v3")
cpx_copy("v3", "v2")
cpx_copy("v2", "v1")
cpx_copy("v1", "v")
f9ebfd4e89c2442074f19f2808eb0f453b301244 | 2,413 | py | Python | trust_stores_observatory/root_record.py | jurajsomorovsky/trust_stores_observatory | 6dfdd4985e54c46fc7383b888c1b5828ba329ee6 | [
"MIT"
] | null | null | null | trust_stores_observatory/root_record.py | jurajsomorovsky/trust_stores_observatory | 6dfdd4985e54c46fc7383b888c1b5828ba329ee6 | [
"MIT"
] | null | null | null | trust_stores_observatory/root_record.py | jurajsomorovsky/trust_stores_observatory | 6dfdd4985e54c46fc7383b888c1b5828ba329ee6 | [
"MIT"
] | null | null | null | from binascii import hexlify
from typing import TYPE_CHECKING
from cryptography.hazmat.primitives.hashes import SHA256
from cryptography.x509 import Certificate
from trust_stores_observatory.certificate_utils import CertificateUtils
if TYPE_CHECKING:
from trust_stores_observatory.store_fetcher.scraped_root_record import ScrapedRootCertificateRecord # noqa: F401
class RootCertificateRecord:
    """A validated/sanitized root certificate listed on a trust store page of one of the supported platforms.

    This is the object we export to the trust store YAML files.
    """

    def __init__(self, canonical_subject_name: str, sha256_fingerprint: bytes) -> None:
        # A SHA-256 digest is always exactly 32 bytes; reject anything else early.
        if len(sha256_fingerprint) != 32:
            raise ValueError(f'Supplied SHA 256 fingerprint is not 32 bytes long: "{sha256_fingerprint}"')
        self.subject_name = canonical_subject_name
        self.fingerprint = sha256_fingerprint

    def __eq__(self, other: object) -> bool:
        # Two records are equal when all their attributes match; anything that
        # is not a RootCertificateRecord compares unequal.
        return isinstance(other, RootCertificateRecord) and self.__dict__ == other.__dict__

    def __hash__(self) -> int:
        # Required so we can have sets of RootCertificateRecords
        return hash(self.subject_name + self.hex_fingerprint)

    @classmethod
    def from_certificate(cls, certificate: Certificate) -> "RootCertificateRecord":
        """Build a record from a parsed X.509 certificate."""
        canonical_name = CertificateUtils.get_canonical_subject_name(certificate)
        return cls(canonical_name, certificate.fingerprint(SHA256()))

    @classmethod
    def from_unknown_record(cls, record: "ScrapedRootCertificateRecord") -> "RootCertificateRecord":
        """For some platforms (such as Apple), we fetch the list of root certificates by scraping a web page that
        only contains basic information about each cert, but not the actual PEM data. This method should be used when
        the certificate corresponding to the scraped fingerprint was not found in the local certificate repository.
        """
        # I will have to manually find and add this certificate
        return cls(f" CERTIFICATE NOT IN REPO: {record.subject_name}", record.fingerprint)

    @property
    def hex_fingerprint(self) -> str:
        """The SHA 256 fingerprint of the certificate as a hex string."""
        return hexlify(self.fingerprint).decode("ascii")
| 43.089286 | 117 | 0.733527 |
d4b9bfd4bfee3b966cf1ec80603a0a489298c957 | 7,387 | py | Python | hoomd/mcm/test-py/move_by_type.py | brendonwaters/Mechanical-Contraction-Method | 3476c04c3af32d8a60d0d88c91c95bdcfb116dc3 | [
"BSD-3-Clause"
] | null | null | null | hoomd/mcm/test-py/move_by_type.py | brendonwaters/Mechanical-Contraction-Method | 3476c04c3af32d8a60d0d88c91c95bdcfb116dc3 | [
"BSD-3-Clause"
] | null | null | null | hoomd/mcm/test-py/move_by_type.py | brendonwaters/Mechanical-Contraction-Method | 3476c04c3af32d8a60d0d88c91c95bdcfb116dc3 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import division, print_function
from hoomd import *
from hoomd import mcm
import hoomd
import unittest
import os
import numpy
context.initialize()
def create_empty(**kwargs):
    """Build an empty snapshot from *kwargs* and initialize a system from it."""
    return init.read_snapshot(data.make_snapshot(**kwargs))
# This test ensures that the small box code path is enabled at the correct box sizes and works correctly
# It performs two tests
# 1) we set translations moves by type, freezing all, half then none of the system. We use acceptance probabilities to check that system is behaving as expected.
# 2) we set rotation moves by type, freezing all, half then none of the system. We use acceptance probabilities to check that system is behaving as expected.
#
# Success condition: Correctly reproduce the precalculated acceptance probabilities for these move sizes
#
# Failure mode: Failing to set the move size by type correctly will change the acceptance prob and break these tests
#
# Failure mode 2: Moving particles for which move size is zero
#
class pair_move_some(unittest.TestCase):
    """Translation moves are enabled only per type (d: A=0.1, B=0.0) and
    rotations are disabled entirely (a=0.0); frozen 'B' particles must stay
    exactly at their initial grid sites."""

    def setUp(self) :
        self.system = create_empty(N=1000, box=data.boxdim(Lx=11,Ly=5.5, Lz=5.5, dimensions=3), particle_types=['A','B'])
        # d is a per-type translation move size: type 'B' is frozen (0.0)
        self.mc = mcm.integrate.convex_polyhedron(seed=10,a=0.0,d={'A':0.1,'B':0.0});
        self.mc.set_params(deterministic=True)
        # elongated cuboid vertices (4 x 2 x 2 box scaled by 0.25)
        rverts= numpy.array( [(-2,-1,-1),
                              (-2,1,-1),
                              (-2,-1,1),
                              (-2,1,1),
                              (2,-1,-1),
                              (2,1,-1),
                              (2,-1,1),
                              (2,1,1)])*0.25
        self.mc.shape_param.set('A', vertices=rverts,ignore_statistics=False)
        self.mc.shape_param.set('B', vertices=rverts,ignore_statistics=False)
        context.current.sorter.set_params(grid=8)

    def test_move_some(self):
        #init particles on a grid
        xgs,ygs = numpy.meshgrid(numpy.linspace(-4.5,4.5,10),0.5*numpy.linspace(-4.5,4.5,10))
        xs=list()
        ys=list()
        zs=list()
        for z in 0.5*numpy.linspace(-4.5,4.5,10):
            xs.append(xgs)
            ys.append(ygs)
            zs.append(z*numpy.ones(xgs.shape))
        # flatten into per-particle coordinate arrays; 1.05 factor adds spacing
        xs = numpy.array(xs).ravel()*1.05
        ys = numpy.array(ys).ravel()*1.05
        zs = numpy.array(zs).ravel()*1.05
        for x,y,z,p in zip(xs,ys,zs,self.system.particles):
            p.position=(x,y,z)
            p.orientation=(1.0,0.0,0.0,0.0)
        #loop over ignored particle fractions, and expected accepted probs for the 'B' particles
        # (precomputed reference values; the CPU and GPU code paths differ slightly)
        gpu_accept_probs = [1.0, 0.57, 0.14]
        cpu_accept_probs = [1.0,0.597, 0.147]
        if hoomd.context.exec_conf.isCUDAEnabled():
            probs = gpu_accept_probs
        else:
            probs = cpu_accept_probs
        for N_a,prob in zip([0,500,1000],probs):
            # retype the first N_a particles as mobile 'A', the rest as frozen 'B'
            for t,p in zip(['A']*N_a+['B']*(1000-N_a),self.system.particles):
                p.type=t
            run(100)
            #check that the B particles haven't moved
            for x,y,z,p in zip(xs,ys,zs,self.system.particles):
                if(p.type=='B'):
                    r0 = p.position
                    self.assertAlmostEqual(r0[0],x,places=4)
                    self.assertAlmostEqual(r0[1],y,places=4)
                    self.assertAlmostEqual(r0[2],z,places=4)
            del p
            # verify that all moves are accepted and zero overlaps are registered
            number_of_overlaps = self.mc.count_overlaps();
            self.assertEqual(number_of_overlaps,0)
            #all rots are accepted (0 movesize)
            rotate_acceptance_prob = self.mc.get_rotate_acceptance()
            self.assertAlmostEqual(rotate_acceptance_prob,1.0,places=3)
            #some trans are accepted (check prob)
            translate_acceptance_prob = self.mc.get_translate_acceptance()
            self.assertGreater(translate_acceptance_prob,prob*0.9)
            self.assertLess(translate_acceptance_prob,prob*1.1)

    def tearDown(self):
        del self.mc
        del self.system
        context.initialize()
class pair_rot_some(unittest.TestCase):
    """Rotation-move counterpart of pair_move_some: translations are disabled
    (d=0.0) and rotation moves are enabled only for type 'A' (a: A=0.05,
    B=0.0). 'B' particles must keep their identity orientation."""

    def setUp(self):
        self.system = create_empty(N=1000, box=data.boxdim(Lx=11, Ly=5.5, Lz=5.5, dimensions=3),
                                   particle_types=['A', 'B'])
        # Translations disabled everywhere (d=0.0); rotations only for type 'A'.
        self.mc = mcm.integrate.convex_polyhedron(seed=10, d=0.0, a={'A': 0.05, 'B': 0.0})
        self.mc.set_params(deterministic=True)
        # Elongated cuboid vertices (4 x 2 x 2 box scaled by 0.25).
        rverts = numpy.array([(-2, -1, -1),
                              (-2, 1, -1),
                              (-2, -1, 1),
                              (-2, 1, 1),
                              (2, -1, -1),
                              (2, 1, -1),
                              (2, -1, 1),
                              (2, 1, 1)]) * 0.25
        self.mc.shape_param.set('A', vertices=rverts, ignore_statistics=False)
        self.mc.shape_param.set('B', vertices=rverts, ignore_statistics=False)
        context.current.sorter.set_params(grid=8)

    def test_rot_some(self):
        # Initialize particles on a grid with identity orientations.
        xgs, ygs = numpy.meshgrid(numpy.linspace(-4.5, 4.5, 10), 0.5 * numpy.linspace(-4.5, 4.5, 10))
        xs = list()
        ys = list()
        zs = list()
        for z in 0.5 * numpy.linspace(-4.5, 4.5, 10):
            xs.append(xgs)
            ys.append(ygs)
            zs.append(z * numpy.ones(xgs.shape))
        # Flatten into per-particle coordinate arrays; 1.05 factor adds spacing.
        xs = numpy.array(xs).ravel() * 1.05
        ys = numpy.array(ys).ravel() * 1.05
        zs = numpy.array(zs).ravel() * 1.05
        for x, y, z, p in zip(xs, ys, zs, self.system.particles):
            p.position = (x, y, z)
            p.orientation = (1.0, 0.0, 0.0, 0.0)
        # Loop over mobile particle fractions and the expected rotation
        # acceptance probs (precomputed references; CPU and GPU differ slightly).
        gpu_accept_probs = [1.0, 0.517, 0.031]
        cpu_accept_probs = [1.0, 0.517, 0.0299]
        if hoomd.context.exec_conf.isCUDAEnabled():
            probs = gpu_accept_probs
        else:
            probs = cpu_accept_probs
        for N_a, prob in zip([0, 500, 1000], probs):
            # Retype the first N_a particles as rotatable 'A', the rest as frozen 'B'.
            for t, p in zip(['A'] * N_a + ['B'] * (1000 - N_a), self.system.particles):
                p.type = t
            run(100)
            # Check that B orientations are unchanged (still the identity quaternion).
            for p in self.system.particles:
                if p.type == 'B':
                    q0 = p.orientation
                    self.assertAlmostEqual(q0[0], 1)
                    self.assertAlmostEqual(q0[1], 0)
                    self.assertAlmostEqual(q0[2], 0)
                    # BUGFIX: q0[2] was previously asserted twice and the last
                    # quaternion component q0[3] was never checked.
                    self.assertAlmostEqual(q0[3], 0)
            del p
            # Verify that zero overlaps are registered.
            number_of_overlaps = self.mc.count_overlaps()
            self.assertEqual(number_of_overlaps, 0)
            # Translation move size is zero, so all (null) translations accepted;
            # sometimes gridshift will cause a very small number of rejections.
            translate_acceptance_prob = self.mc.get_translate_acceptance()
            self.assertAlmostEqual(translate_acceptance_prob, 1.0, places=3)
            # Rotation acceptance must track the precomputed reference (within 30%).
            rotate_acceptance_prob = self.mc.get_rotate_acceptance()
            self.assertGreater(rotate_acceptance_prob, prob * 0.7)
            self.assertLess(rotate_acceptance_prob, prob * 1.3)

    def tearDown(self):
        del self.mc
        del self.system
        context.initialize()
if __name__ == '__main__':
    # argv is replaced so unittest does not parse the real command line;
    # '-v' enables verbose per-test output.
    unittest.main(argv = ['test.py', '-v'])
| 39.92973 | 161 | 0.580208 |
fae74e30af609dee826d20a05cbcbcad079fdffe | 793 | py | Python | api/app/services/images/generateQr.py | JexPY/filemanager-fastapi | da830fe6d9a3d515e0d04e6e690ff366225ec251 | [
"MIT"
] | 24 | 2020-09-15T11:59:55.000Z | 2022-03-13T19:58:02.000Z | api/app/services/images/generateQr.py | JexPY/filemanager-fastapi | da830fe6d9a3d515e0d04e6e690ff366225ec251 | [
"MIT"
] | null | null | null | api/app/services/images/generateQr.py | JexPY/filemanager-fastapi | da830fe6d9a3d515e0d04e6e690ff366225ec251 | [
"MIT"
] | 5 | 2020-10-11T08:41:29.000Z | 2022-03-10T07:23:55.000Z | import os
import qrcode
from PIL import Image
from pathlib import Path
from ..helpers.uniqueFileName import generate_unique_name
def qr_code_image(text: str, with_logo: bool = True):
    """Render *text* as a QR code image and save it under QR_IMAGE_LOCAL_PATH.

    Args:
        text: Payload string to encode in the QR code.
        with_logo: When truthy, paste the logo from QR_IMAGE_LOGO_PATH into
            the centre of the code. (The original signature used ``= bool``,
            i.e. the truthy ``bool`` type object, as the default — clearly a
            mistyped annotation — so ``True`` preserves the effective default.)

    Returns:
        dict with key 'qrImage' mapping to the generated unique file name.
    """
    # BUGFIX: the parameters previously *defaulted to the type objects*
    # (``text = str, with_logo = bool``) instead of being annotated; a stray
    # debug ``print(with_logo)`` has also been removed.
    qrImagePIL = qrcode.QRCode(
        # High error correction so the code stays scannable under a logo overlay.
        error_correction=qrcode.constants.ERROR_CORRECT_H,
        border=2,
    )
    qrImagePIL.add_data(text)
    qrImagePIL.make()
    qrImage = qrImagePIL.make_image().convert('RGB')

    if with_logo:
        logo = Image.open(os.environ.get('QR_IMAGE_LOGO_PATH'))
        # Centre the logo over the QR image.
        qrImage.paste(logo, ((qrImage.size[0] - logo.size[0]) // 2, (qrImage.size[1] - logo.size[1]) // 2))

    qrUniqueName = generate_unique_name('png')[0]
    qrImage.save(os.environ.get('QR_IMAGE_LOCAL_PATH') + qrUniqueName)

    return {
        'qrImage' : qrUniqueName
    }
| 26.433333 | 107 | 0.677175 |
4103081d0334965fb6fb9f3a0bccbe5ad64cbea2 | 2,390 | py | Python | js/setup.py | jaimergp/nglview | be6faafc68202854a818d2569d69675503184875 | [
"MIT"
] | 161 | 2020-07-28T14:05:57.000Z | 2022-03-31T08:38:06.000Z | js/setup.py | gph82/nglview | d0a186b6b69e9ecd76171d61249686a7af5d6589 | [
"MIT"
] | 123 | 2020-07-27T15:02:27.000Z | 2022-03-30T18:31:51.000Z | js/setup.py | gph82/nglview | d0a186b6b69e9ecd76171d61249686a7af5d6589 | [
"MIT"
] | 42 | 2020-07-28T09:50:06.000Z | 2022-03-11T18:50:22.000Z | """
nglview-js-widgets setup
"""
import json
from pathlib import Path
from jupyter_packaging import (
create_cmdclass,
install_npm,
ensure_targets,
combine_commands,
skip_if_exists
)
import setuptools
HERE = Path(__file__).parent.resolve()
# The name of the project
name = "nglview-js-widgets"
lab_path = (HERE / name / "labextension")
# Representative files that should exist after a successful build
jstargets = [
str(lab_path / "package.json"),
]
package_data_spec = {
name: ["*"],
}
labext_name = "nglview-js-widgets"
data_files_spec = [
("share/jupyter/labextensions/%s" % labext_name, str(lab_path), "**"),
("share/jupyter/labextensions/%s" % labext_name, str(HERE), "install.json"),
]
cmdclass = create_cmdclass("jsdeps",
package_data_spec=package_data_spec,
data_files_spec=data_files_spec
)
js_command = combine_commands(
install_npm(HERE, build_cmd="build:prod", npm=["jlpm"]),
ensure_targets(jstargets),
)
is_repo = (HERE / ".git").exists()
if is_repo:
cmdclass["jsdeps"] = js_command
else:
cmdclass["jsdeps"] = skip_if_exists(jstargets, js_command)
long_description = (HERE / "README.md").read_text()
# Get the package info from package.json
pkg_json = json.loads((HERE / "package.json").read_bytes())
setup_args = dict(
name=name,
version=pkg_json["version"],
url=pkg_json["homepage"],
author=pkg_json["author"]["name"],
author_email=pkg_json["author"]["email"],
description=pkg_json["description"],
license=pkg_json["license"],
long_description=long_description,
long_description_content_type="text/markdown",
cmdclass=cmdclass,
packages=setuptools.find_packages(),
install_requires=[
"jupyterlab~=3.0",
],
zip_safe=False,
include_package_data=True,
python_requires=">=3.6",
platforms="Linux, Mac OS X, Windows",
keywords=["Jupyter", "JupyterLab", "JupyterLab3"],
classifiers=[
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Framework :: Jupyter",
],
)
if __name__ == "__main__":
setuptools.setup(**setup_args)
| 25.157895 | 80 | 0.670293 |
2667ce4730976f1857f9ed82f1170a02e190c651 | 6,059 | py | Python | venv/Lib/site-packages/resampy/filters.py | Terrathaw/ba21_loma_2_py | eebf5104dd054cef1ab61f0b257933ff679e75ec | [
"MIT"
] | 179 | 2016-04-10T14:38:05.000Z | 2022-03-27T10:41:45.000Z | lo-fi-machine/processing/package/resampy/filters.py | rochelletham/lo-tify | 910c62d0ec14bc5a7aaa4f7dca80c0ca14c4c060 | [
"MIT"
] | 63 | 2016-04-09T15:12:22.000Z | 2021-07-05T08:01:54.000Z | lo-fi-machine/processing/package/resampy/filters.py | rochelletham/lo-tify | 910c62d0ec14bc5a7aaa4f7dca80c0ca14c4c060 | [
"MIT"
] | 38 | 2016-04-21T15:44:37.000Z | 2022-01-28T03:24:53.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''Filter construction and loading.
--------------------------------
`resampy` provides two pre-computed resampling filters which are tuned for either
high-quality or fast calculation:
- `kaiser_best` : 64 zero-crossings, a Kaiser window with beta=14.769656459379492,
and a roll-off frequency of Nyquist * 0.9475937167399596.
- `kaiser_fast` : 16 zero-crossings, a Kaiser window with beta=8.555504641634386,
and a roll-off frequency of Nyquist * 0.85.
These filters can be used by calling `resample` as follows:
>>> resampy.resample(x, sr_orig, sr_new, filter='kaiser_best') # High-quality
>>> resampy.resample(x, sr_orig, sr_new, filter='kaiser_fast') # Fast calculation
It is also possible to construct custom filters as follows:
>>> resampy.resample(x, sr_orig, sr_new, filter='sinc_window',
... **kwargs)
where ``**kwargs`` are additional parameters to `resampy.filters.sinc_window`_.
'''
import scipy.signal
import numpy as np
import os
import pkg_resources
import six
import sys
# Names of in-module functions that construct filters on the fly; get_filter
# dispatches to these before falling back to the pre-computed filter tables.
FILTER_FUNCTIONS = ['sinc_window']

__all__ = ['get_filter'] + FILTER_FUNCTIONS
def sinc_window(num_zeros=64, precision=9, window=None, rolloff=0.945):
    '''Construct a windowed sinc interpolation filter

    Parameters
    ----------
    num_zeros : int > 0
        The number of zero-crossings to retain in the sinc filter

    precision : int >= 0
        The number of bits of precision: ``2**precision`` filter
        coefficients are retained for each zero-crossing

    window : callable
        The window function.  By default, uses Blackman-Harris.

    rolloff : float > 0
        The roll-off frequency (as a fraction of nyquist)

    Returns
    -------
    interp_window: np.ndarray [shape=(num_zeros * num_table + 1)]
        The interpolation window (right-hand side)

    num_bits: int
        The number of bits of precision to use in the filter table

    rolloff : float > 0
        The roll-off frequency of the filter, as a fraction of Nyquist

    Raises
    ------
    TypeError
        if `window` is not callable or `None`
    ValueError
        if `num_zeros < 1`, `precision < 0`,
        or `rolloff` is outside the range `(0, 1]`.
        (The docstring previously claimed `precision < 1`, which
        contradicted the implemented check.)

    Examples
    --------
    >>> # A filter with 10 zero-crossings, 32 samples per crossing, and a
    ... # Hann window for tapering.
    >>> halfwin, prec, rolloff = resampy.filters.sinc_window(num_zeros=10, precision=5,
    ...                                                      window=scipy.signal.windows.hann)
    >>> halfwin
    array([  9.450e-01,   9.436e-01, ...,  -7.455e-07,  -0.000e+00])
    >>> prec
    32
    >>> rolloff
    0.945

    >>> # Or using sinc-window filter construction directly in resample
    >>> y = resampy.resample(x, sr_orig, sr_new, filter='sinc_window',
    ...                      num_zeros=10, precision=5,
    ...                      window=scipy.signal.windows.hann)
    '''
    if window is None:
        # Use the scipy.signal.windows submodule: the top-level
        # scipy.signal.blackmanharris alias was removed in recent SciPy.
        window = scipy.signal.windows.blackmanharris
    elif not callable(window):
        # builtin callable() replaces the Python-2 six.callable shim
        raise TypeError('window must be callable, not type(window)={}'.format(type(window)))

    if not 0 < rolloff <= 1:
        raise ValueError('Invalid roll-off: rolloff={}'.format(rolloff))

    if num_zeros < 1:
        raise ValueError('Invalid num_zeros: num_zeros={}'.format(num_zeros))

    if precision < 0:
        raise ValueError('Invalid precision: precision={}'.format(precision))

    # Generate the right-wing of the sinc
    num_bits = 2**precision
    n = num_bits * num_zeros
    sinc_win = rolloff * np.sinc(rolloff * np.linspace(0, num_zeros, num=n + 1,
                                                       endpoint=True))

    # Build the window function and cut off the left half
    taper = window(2 * n + 1)[n:]

    interp_win = (taper * sinc_win)
    return interp_win, num_bits, rolloff
def get_filter(name_or_function, **kwargs):
    '''Retrieve a window given its name or function handle.

    Parameters
    ----------
    name_or_function : str or callable
        If a function, returns `name_or_function(**kwargs)`.

        If a string, and it matches the name of one of the defined
        filter functions, the corresponding function is called with `**kwargs`.

        If a string, and it matches the name of a pre-computed filter,
        the corresponding filter is retrieved, and kwargs is ignored.

        Valid pre-computed filter names are:
            - 'kaiser_fast'
            - 'kaiser_best'

    Returns
    -------
    half_window : np.ndarray
        The right wing of the interpolation filter

    precision : int > 0
        The number of samples between zero-crossings of the filter

    rolloff : float > 0
        The roll-off frequency of the filter as a fraction of Nyquist

    Raises
    ------
    NotImplementedError
        If `name_or_function` cannot be found as a filter.
    '''
    if name_or_function in FILTER_FUNCTIONS:
        # Dispatch to a filter-construction function defined in this module
        return getattr(sys.modules[__name__], name_or_function)(**kwargs)
    # builtin callable() replaces the Python-2 six.callable shim
    if callable(name_or_function):
        return name_or_function(**kwargs)
    try:
        # Fall back to a pre-computed filter table shipped with the package
        return load_filter(name_or_function)
    except (IOError, ValueError):
        raise NotImplementedError('Cannot load filter definition for '
                                  '{}'.format(name_or_function))
def load_filter(filter_name):
    '''Retrieve a pre-computed filter.

    Parameters
    ----------
    filter_name : str
        The key of the filter, e.g., 'kaiser_fast'

    Returns
    -------
    half_window : np.ndarray
        The right wing of the interpolation filter

    precision : int > 0
        The number of samples between zero-crossings of the filter

    rolloff : float > 0
        The roll-off frequency of the filter, as a fraction of Nyquist
    '''
    # Pre-computed filters ship with the package as data/<name>.npz archives.
    fname = os.path.join('data',
                         os.path.extsep.join([filter_name, 'npz']))

    # NOTE(review): pkg_resources is deprecated in modern setuptools;
    # importlib.resources is the recommended replacement -- confirm the
    # minimum supported Python version before migrating.
    data = np.load(pkg_resources.resource_filename(__name__, fname))

    return data['half_window'], data['precision'], data['rolloff']
return data['half_window'], data['precision'], data['rolloff']
| 30.913265 | 92 | 0.627166 |
3b39bc8964c5b808922adc9f560f2969e58ed32f | 121,269 | py | Python | pymatgen/io/vasp/sets.py | vorwerkc/pymatgen | 81e96a4a5739e383f921b7272db7ebf8b1710cb9 | [
"MIT"
] | null | null | null | pymatgen/io/vasp/sets.py | vorwerkc/pymatgen | 81e96a4a5739e383f921b7272db7ebf8b1710cb9 | [
"MIT"
] | null | null | null | pymatgen/io/vasp/sets.py | vorwerkc/pymatgen | 81e96a4a5739e383f921b7272db7ebf8b1710cb9 | [
"MIT"
] | null | null | null | # Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines the VaspInputSet abstract base class and a concrete
implementation for the parameters developed and tested by the core team
of pymatgen, including the Materials Virtual Lab, Materials Project and the MIT
high throughput project. The basic concept behind an input set is to specify
a scheme to generate a consistent set of VASP inputs from a structure
without further user intervention. This ensures comparability across
runs.
Read the following carefully before implementing new input sets:
1. 99% of what needs to be done can be done by specifying user_incar_settings
to override some of the defaults of various input sets. Unless there is an
extremely good reason to add a new set, DO NOT add one. E.g., if you want
to turn the hubbard U off, just set "LDAU": False as a user_incar_setting.
2. All derivative input sets should inherit from one of the usual MPRelaxSet or
MITRelaxSet, and proper superclass delegation should be used where possible.
In particular, you are not supposed to implement your own as_dict or
from_dict for derivative sets unless you know what you are doing.
Improperly overriding the as_dict and from_dict protocols is the major
cause of implementation headaches. If you need an example, look at how the
MPStaticSet or MPNonSCFSets are constructed.
The above are recommendations. The following are UNBREAKABLE rules:
1. All input sets must take in a structure or list of structures as the first
argument.
2. user_incar_settings, user_kpoints_settings and user_<whatever>_settings are
ABSOLUTE. Any new sets you implement must obey this. If a user wants to
override your settings, you assume he knows what he is doing. Do not
magically override user supplied settings. You can issue a warning if you
think the user is wrong.
3. All input sets must save all supplied args and kwargs as instance variables.
E.g., self.my_arg = my_arg and self.kwargs = kwargs in the __init__. This
ensures the as_dict and from_dict work correctly.
"""
import abc
import glob
import itertools
import os
import re
import shutil
import warnings
from copy import deepcopy
from itertools import chain
from pathlib import Path
from typing import List, Optional, Tuple, Union
from zipfile import ZipFile
import numpy as np
from monty.dev import deprecated
from monty.io import zopen
from monty.json import MSONable
from monty.serialization import loadfn
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.periodic_table import Element, Species
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Incar, Kpoints, Poscar, Potcar, VaspInput
from pymatgen.io.vasp.outputs import Outcar, Vasprun
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.bandstructure import HighSymmKpath
MODULE_DIR = Path(__file__).resolve().parent
class VaspInputSet(MSONable, metaclass=abc.ABCMeta):
    """
    Base class representing a set of Vasp input parameters with a structure
    supplied as init parameters. Typically, you should not inherit from this
    class. Start from DictSet or MPRelaxSet or MITRelaxSet.
    """

    @property
    @abc.abstractmethod
    def incar(self):
        """Incar object"""
        pass

    @property
    @abc.abstractmethod
    def kpoints(self):
        """Kpoints object"""
        pass

    @property
    @abc.abstractmethod
    def poscar(self):
        """Poscar object"""
        pass

    @property
    def potcar_symbols(self):
        """
        List of POTCAR symbols.
        """
        # pylint: disable=E1101
        # self._config_dict is provided by concrete subclasses (hence the
        # pylint no-member disable).
        elements = self.poscar.site_symbols
        potcar_symbols = []
        settings = self._config_dict["POTCAR"]

        # The POTCAR config maps each element either to a dict holding the
        # symbol under the "symbol" key, or directly to a symbol string.
        if isinstance(settings[elements[-1]], dict):
            for el in elements:
                potcar_symbols.append(settings[el]["symbol"] if el in settings else el)
        else:
            for el in elements:
                potcar_symbols.append(settings.get(el, el))

        return potcar_symbols

    @property
    def potcar(self):
        """
        Potcar object.
        """
        # pylint: disable=E1101
        # self.potcar_functional is provided by concrete subclasses.
        potcar = Potcar(self.potcar_symbols, functional=self.potcar_functional)

        # warn if the selected POTCARs do not correspond to the chosen
        # potcar_functional
        for psingle in potcar:
            if self.potcar_functional not in psingle.identify_potcar()[0]:
                warnings.warn(
                    "POTCAR data with symbol {} is not known by pymatgen to\
                    correspond with the selected potcar_functional {}. This POTCAR\
                    is known to correspond with functionals {}. Please verify that\
                    you are using the right POTCARs!".format(
                        psingle.symbol,
                        self.potcar_functional,
                        psingle.identify_potcar(mode="data")[0],
                    ),
                    BadInputSetWarning,
                )

        return potcar

    @property # type: ignore
    @deprecated(message="Use the get_vasp_input() method instead.")
    def all_input(self):
        """
        Returns all input files as a dict of {filename: vasp object}

        Returns:
            dict of {filename: object}, e.g., {'INCAR': Incar object, ...}
        """
        return {
            "INCAR": self.incar,
            "KPOINTS": self.kpoints,
            "POSCAR": self.poscar,
            "POTCAR": self.potcar,
        }

    def get_vasp_input(self) -> VaspInput:
        """
        Returns:
            VaspInput
        """
        return VaspInput(
            incar=self.incar,
            kpoints=self.kpoints,
            poscar=self.poscar,
            potcar=self.potcar,
        )

    def write_input(
        self,
        output_dir,
        make_dir_if_not_present=True,
        include_cif=False,
        potcar_spec=False,
        zip_output=False,
    ):
        """
        Writes a set of VASP input to a directory.

        Args:
            output_dir (str): Directory to output the VASP input files
            make_dir_if_not_present (bool): Set to True if you want the
                directory (and the whole path) to be created if it is not
                present.
            include_cif (bool): Whether to write a CIF file in the output
                directory for easier opening by VESTA.
            potcar_spec (bool): Instead of writing the POTCAR, write a "POTCAR.spec".
                This is intended to help sharing an input set with people who might
                not have a license to specific Potcar files. Given a "POTCAR.spec",
                the specific POTCAR file can be re-generated using pymatgen with the
                "generate_potcar" function in the pymatgen CLI.
            zip_output (bool): If True, output will be zipped into a file with the
                same name as the InputSet (e.g., MPStaticSet.zip)
        """
        if potcar_spec:
            if make_dir_if_not_present and not os.path.exists(output_dir):
                os.makedirs(output_dir)

            # Write only the POTCAR symbols, not the licensed POTCAR data.
            with zopen(os.path.join(output_dir, "POTCAR.spec"), "wt") as f:
                f.write("\n".join(self.potcar_symbols))

            for k, v in {
                "INCAR": self.incar,
                "POSCAR": self.poscar,
                "KPOINTS": self.kpoints,
            }.items():
                if v is not None:
                    with zopen(os.path.join(output_dir, k), "wt") as f:
                        f.write(v.__str__())
        else:
            vinput = self.get_vasp_input()
            vinput.write_input(output_dir, make_dir_if_not_present=make_dir_if_not_present)

        cifname = ""
        if include_cif:
            # NOTE(review): `vinput` is only bound in the non-potcar_spec branch
            # above, so include_cif=True combined with potcar_spec=True would
            # raise NameError here -- confirm whether both flags are ever used
            # together.
            s = vinput["POSCAR"].structure
            cifname = Path(output_dir) / ("%s.cif" % re.sub(r"\s", "", s.formula))
            s.to(filename=cifname)

        if zip_output:
            filename = self.__class__.__name__ + ".zip"
            # NOTE(review): the name `zip` shadows the builtin within this block.
            with ZipFile(os.path.join(output_dir, filename), "w") as zip:
                for file in [
                    "INCAR",
                    "POSCAR",
                    "KPOINTS",
                    "POTCAR",
                    "POTCAR.spec",
                    cifname,
                ]:
                    # Move each written file into the archive; missing files
                    # (e.g. no POTCAR.spec when potcar_spec=False) are skipped.
                    try:
                        zip.write(os.path.join(output_dir, file), arcname=file)
                    except FileNotFoundError:
                        pass
                    try:
                        os.remove(os.path.join(output_dir, file))
                    except (FileNotFoundError, PermissionError, IsADirectoryError):
                        pass

    def as_dict(self, verbosity=2):
        """
        Args:
            verbosity: Verbosity for generated dict. If 1, structure is
                excluded.

        Returns:
            MSONable dict
        """
        d = MSONable.as_dict(self)
        if verbosity == 1:
            d.pop("structure", None)
        return d
def _load_yaml_config(fname):
    """Load <fname>.yaml from this module's directory, recursively merging in
    the config named by its optional "PARENT" key. Keys absent from the child
    are inherited; for dict-valued keys the parent entries are merged into the
    child's dict."""
    config = loadfn(str(MODULE_DIR / ("%s.yaml" % fname)))
    if "PARENT" not in config:
        return config
    for key, parent_val in _load_yaml_config(config["PARENT"]).items():
        if key not in config:
            config[key] = parent_val
        elif isinstance(parent_val, dict):
            merged = config.get(key, {})
            merged.update(parent_val)
            config[key] = merged
    return config
class DictSet(VaspInputSet):
    """
    Concrete implementation of VaspInputSet that is initialized from a dict
    settings. This allows arbitrary settings to be input. In general,
    this is rarely used directly unless there is a source of settings in yaml
    format (e.g., from a REST interface). It is typically used by other
    VaspInputSets for initialization.
    Special consideration should be paid to the way the MAGMOM initialization
    for the INCAR is done. The initialization differs depending on the type of
    structure and the configuration settings. The order in which the magmom is
    determined is as follows:
    1. If the site itself has a magmom setting (i.e. site.properties["magmom"] = float),
       that is used. This can be set with structure.add_site_property().
    2. If the species of the site has a spin setting, that is used. This can be set
       with structure.add_spin_by_element().
    3. If the species itself has a particular setting in the config file, that
       is used, e.g., Mn3+ may have a different magmom than Mn4+.
    4. Lastly, the element symbol itself is checked in the config file. If
       there are no settings, a default value of 0.6 is used.
    """
    def __init__(
        self,
        structure,
        config_dict,
        files_to_transfer=None,
        user_incar_settings=None,
        user_kpoints_settings=None,
        user_potcar_settings=None,
        constrain_total_magmom=False,
        sort_structure=True,
        potcar_functional=None,
        user_potcar_functional=None,
        force_gamma=False,
        reduce_structure=None,
        vdw=None,
        use_structure_charge=False,
        standardize=False,
        sym_prec=0.1,
        international_monoclinic=True,
        validate_magmom=True,
    ):
        """
        Args:
            structure (Structure): The Structure to create inputs for.
            config_dict (dict): The config dictionary to use.
            files_to_transfer (dict): A dictionary of {filename: filepath}. This
                allows the transfer of files from a previous calculation.
            user_incar_settings (dict): User INCAR settings. This allows a user
                to override INCAR settings, e.g., setting a different MAGMOM for
                various elements or species. Note that in the new scheme,
                ediff_per_atom and hubbard_u are no longer args. Instead, the
                config_dict supports EDIFF_PER_ATOM and EDIFF keys. The former
                scales with # of atoms, the latter does not. If both are
                present, EDIFF is preferred. To force such settings, just supply
                user_incar_settings={"EDIFF": 1e-5, "LDAU": False} for example.
                The keys 'LDAUU', 'LDAUJ', 'LDAUL' are special cases since
                pymatgen defines different values depending on what anions are
                present in the structure, so these keys can be defined in one
                of two ways, e.g. either {"LDAUU":{"O":{"Fe":5}}} to set LDAUU
                for Fe to 5 in an oxide, or {"LDAUU":{"Fe":5}} to set LDAUU to
                5 regardless of the input structure.
                If a None value is given, that key is unset. For example,
                {"ENCUT": None} will remove ENCUT from the incar settings.
            user_kpoints_settings (dict or Kpoints): Allow user to override kpoints
                setting by supplying a dict E.g., {"reciprocal_density": 1000}.
                User can also supply Kpoints object. Default is None.
            user_potcar_settings (dict): Allow user to override POTCARs. E.g.,
                {"Gd": "Gd_3"}. This is generally not recommended. Default is None.
            constrain_total_magmom (bool): Whether to constrain the total magmom
                (NUPDOWN in INCAR) to be the sum of the expected MAGMOM for all
                species. Defaults to False.
            sort_structure (bool): Whether to sort the structure (using the
                default sort order of electronegativity) before generating input
                files. Defaults to True, the behavior you would want most of the
                time. This ensures that similar atomic species are grouped
                together.
            potcar_functional (str): Deprecated alias for
                user_potcar_functional; supplying both raises ValueError.
            user_potcar_functional (str): Functional to use. Default (None) is to use
                the functional in the config dictionary. Valid values:
                "PBE", "PBE_52", "PBE_54", "LDA", "LDA_52", "LDA_54", "PW91",
                "LDA_US", "PW91_US".
            force_gamma (bool): Force gamma centered kpoint generation. Default
                (False) is to use the Automatic Density kpoint scheme, which
                will use the Gamma centered generation scheme for hexagonal
                cells, and Monkhorst-Pack otherwise.
            reduce_structure (None/str): Before generating the input files,
                generate the reduced structure. Default (None), does not
                alter the structure. Valid values: None, "niggli", "LLL".
            vdw: Adds default parameters for van-der-Waals functionals supported
                by VASP to INCAR. Supported functionals are: DFT-D2, undamped
                DFT-D3, DFT-D3 with Becke-Jonson damping, Tkatchenko-Scheffler,
                Tkatchenko-Scheffler with iterative Hirshfeld partitioning,
                MBD@rSC, dDsC, Dion's vdW-DF, DF2, optPBE, optB88, optB86b and
                rVV10.
            use_structure_charge (bool): If set to True, then the public
                variable used for setting the overall charge of the
                structure (structure.charge) is used to set the NELECT
                variable in the INCAR
                Default is False (structure's overall charge is not used)
            standardize (float): Whether to standardize to a primitive standard
                cell. Defaults to False.
            sym_prec (float): Tolerance for symmetry finding.
            international_monoclinic (bool): Whether to use international convention
                (vs Curtarolo) for monoclinic. Defaults True.
            validate_magmom (bool): Ensure that the missing magmom values are filled
                in with the VASP default value of 1.0
        """
        if reduce_structure:
            structure = structure.get_reduced_structure(reduce_structure)
        if sort_structure:
            structure = structure.get_sorted_structure()
        if validate_magmom:
            get_valid_magmom_struct(structure, spin_mode="auto", inplace=True)
        self._structure = structure
        # deepcopy so per-instance edits below (vdW tags, POTCAR overrides)
        # cannot mutate the shared, class-level config dict
        self._config_dict = deepcopy(config_dict)
        self.files_to_transfer = files_to_transfer or {}
        self.constrain_total_magmom = constrain_total_magmom
        self.sort_structure = sort_structure
        self.force_gamma = force_gamma
        self.reduce_structure = reduce_structure
        self.user_incar_settings = user_incar_settings or {}
        self.user_kpoints_settings = user_kpoints_settings or {}
        self.user_potcar_settings = user_potcar_settings
        # vdW functional names are matched case-insensitively against the yaml keys
        self.vdw = vdw.lower() if vdw is not None else None
        self.use_structure_charge = use_structure_charge
        self.standardize = standardize
        self.sym_prec = sym_prec
        self.international_monoclinic = international_monoclinic
        # An explicit KPOINTS file takes precedence over KSPACING in VASP,
        # so warn when the user supplies both.
        if self.user_incar_settings.get("KSPACING") and user_kpoints_settings is not None:
            warnings.warn(
                "You have specified KSPACING and also supplied kpoints "
                "settings. KSPACING only has effect when there is no "
                "KPOINTS file. Since both settings were given, pymatgen"
                "will generate a KPOINTS file and ignore KSPACING."
                "Remove the `user_kpoints_settings` argument to enable KSPACING.",
                BadInputSetWarning,
            )
        if self.vdw:
            # Layer the INCAR tags for the requested vdW functional on top
            # of the base config.
            vdw_par = loadfn(str(MODULE_DIR / "vdW_parameters.yaml"))
            try:
                self._config_dict["INCAR"].update(vdw_par[self.vdw])
            except KeyError:
                raise KeyError(
                    "Invalid or unsupported van-der-Waals "
                    "functional. Supported functionals are "
                    "%s." % vdw_par.keys()
                )
        # read the POTCAR_FUNCTIONAL from the .yaml
        self.potcar_functional = self._config_dict.get("POTCAR_FUNCTIONAL", "PBE")
        # Resolve the deprecated potcar_functional arg vs user_potcar_functional.
        if potcar_functional is not None and user_potcar_functional is not None:
            raise ValueError(
                "Received both 'potcar_functional' and "
                "'user_potcar_functional arguments. 'potcar_functional "
                "is deprecated."
            )
        if potcar_functional:
            warnings.warn(
                "'potcar_functional' argument is deprecated. Use 'user_potcar_functional' instead.",
                FutureWarning,
            )
            self.potcar_functional = potcar_functional
        elif user_potcar_functional:
            self.potcar_functional = user_potcar_functional
        # warn if a user is overriding POTCAR_FUNCTIONAL
        if self.potcar_functional != self._config_dict.get("POTCAR_FUNCTIONAL"):
            warnings.warn(
                "Overriding the POTCAR functional is generally not recommended "
                " as it significantly affect the results of calculations and "
                "compatibility with other calculations done with the same "
                "input set. Note that some POTCAR symbols specified in "
                "the configuration file may not be available in the selected "
                "functional.",
                BadInputSetWarning,
            )
        if self.user_potcar_settings:
            warnings.warn(
                "Overriding POTCARs is generally not recommended as it "
                "significantly affect the results of calculations and "
                "compatibility with other calculations done with the same "
                "input set. In many instances, it is better to write a "
                "subclass of a desired input set and override the POTCAR in "
                "the subclass to be explicit on the differences.",
                BadInputSetWarning,
            )
            for k, v in self.user_potcar_settings.items():
                self._config_dict["POTCAR"][k] = v
    @property
    def structure(self) -> Structure:
        """
        :return: Structure (standardized to a primitive standard cell when
            both ``standardize`` and ``sym_prec`` are set; otherwise the
            structure given at construction time).
        """
        if self.standardize and self.sym_prec:
            return standardize_structure(
                self._structure,
                sym_prec=self.sym_prec,
                international_monoclinic=self.international_monoclinic,
            )
        return self._structure
    @property
    def incar(self) -> Incar:
        """
        :return: Incar built from the config dict, user overrides, and the
            structure (MAGMOM / LDAU tags are derived per-site/per-species).
        """
        # Apply user overrides on top of the config defaults. A None value
        # requests removal of the key.
        settings = dict(self._config_dict["INCAR"])
        for k, v in self.user_incar_settings.items():
            if v is None:
                try:
                    del settings[k]
                except KeyError:
                    # NOTE(review): deleting an absent key stores None here,
                    # so a None entry can end up in the settings — confirm
                    # that Incar tolerates/ignores None values.
                    settings[k] = v
            elif k == "KSPACING" and self.user_kpoints_settings != {}:
                pass  # Ignore KSPACING if user_kpoints_settings are given
            else:
                settings[k] = v
        structure = self.structure
        incar = Incar()
        comp = structure.composition
        # Sort elements by electronegativity; the most electronegative one
        # selects the anion-specific LDAU sub-dict below.
        elements = sorted((el for el in comp.elements if comp[el] > 0), key=lambda e: e.X)
        most_electroneg = elements[-1].symbol
        poscar = Poscar(structure)
        hubbard_u = settings.get("LDAU", False)
        for k, v in settings.items():
            if k == "MAGMOM":
                # Per-site magmoms, in the precedence order documented on
                # the class: site.magmom > species spin > species config >
                # element config > 0.6 default.
                mag = []
                for site in structure:
                    if v and not isinstance(v, dict):
                        raise TypeError(
                            "MAGMOM must be supplied in a dictionary format, e.g. {'Fe': 5}. "
                            "If you want site-specific magnetic moments, set them in the site magmom properties "
                            "of the site objects in the structure."
                        )
                    if hasattr(site, "magmom"):
                        mag.append(site.magmom)
                    elif hasattr(site.specie, "spin"):
                        mag.append(site.specie.spin)
                    elif str(site.specie) in v:
                        if site.specie.symbol == "Co" and v[str(site.specie)] <= 1.0:
                            warnings.warn(
                                "Co without an oxidation state is initialized as low spin by default in Pymatgen. "
                                "If this default behavior is not desired, please set the spin on the magmom on the "
                                "site directly to ensure correct initialization."
                            )
                        mag.append(v.get(str(site.specie)))
                    else:
                        if site.specie.symbol == "Co":
                            warnings.warn(
                                "Co without an oxidation state is initialized as low spin by default in Pymatgen. "
                                "If this default behavior is not desired, please set the spin on the magmom on the "
                                "site directly to ensure correct initialization."
                            )
                        mag.append(v.get(site.specie.symbol, 0.6))
                incar[k] = mag
            elif k in ("LDAUU", "LDAUJ", "LDAUL"):
                # LDAU tags are per-POTCAR-symbol lists; only emitted when
                # LDAU is enabled in the settings.
                if hubbard_u:
                    if hasattr(structure[0], k.lower()):
                        m = {site.specie.symbol: getattr(site, k.lower()) for site in structure}
                        incar[k] = [m[sym] for sym in poscar.site_symbols]
                    # lookup specific LDAU if specified for most_electroneg atom
                    elif most_electroneg in v.keys() and isinstance(v[most_electroneg], dict):
                        incar[k] = [v[most_electroneg].get(sym, 0) for sym in poscar.site_symbols]
                    # else, use fallback LDAU value if it exists
                    else:
                        incar[k] = [
                            v.get(sym, 0) if isinstance(v.get(sym, 0), (float, int)) else 0
                            for sym in poscar.site_symbols
                        ]
            elif k.startswith("EDIFF") and k != "EDIFFG":
                # EDIFF_PER_ATOM scales with system size; an explicit EDIFF
                # in the settings always wins.
                if "EDIFF" not in settings and k == "EDIFF_PER_ATOM":
                    incar["EDIFF"] = float(v) * structure.num_sites
                else:
                    incar["EDIFF"] = float(settings["EDIFF"])
            else:
                incar[k] = v
        # Strip all LDAU tags when no site actually carries a +U correction.
        has_u = hubbard_u and sum(incar["LDAUU"]) > 0
        if not has_u:
            for key in list(incar.keys()):
                if key.startswith("LDAU"):
                    del incar[key]
        # Modify LMAXMIX if you have d or f electrons present.
        # Note that if the user explicitly sets LMAXMIX in settings it will
        # override this logic.
        # Previously, this was only set if Hubbard U was enabled as per the
        # VASP manual but following an investigation it was determined that
        # this would lead to a significant difference between SCF -> NonSCF
        # even without Hubbard U enabled. Thanks to Andrew Rosen for
        # investigating and reporting.
        if "LMAXMIX" not in settings.keys():
            # contains f-electrons
            if any(el.Z > 56 for el in structure.composition):
                incar["LMAXMIX"] = 6
            # contains d-electrons
            elif any(el.Z > 20 for el in structure.composition):
                incar["LMAXMIX"] = 4
        # Warn user about LASPH for +U, meta-GGAs, hybrids, and vdW-DF
        if not incar.get("LASPH", False) and (
            incar.get("METAGGA")
            or incar.get("LHFCALC", False)
            or incar.get("LDAU", False)
            or incar.get("LUSE_VDW", False)
        ):
            warnings.warn(
                "LASPH = True should be set for +U, meta-GGAs, hybrids, and vdW-DFT",
                BadInputSetWarning,
            )
        if self.constrain_total_magmom:
            # Magmoms at or below the 0.6 default are treated as zero spin
            # when summing up NUPDOWN.
            nupdown = sum(mag if abs(mag) > 0.6 else 0 for mag in incar["MAGMOM"])
            if abs(nupdown - round(nupdown)) > 1e-5:
                warnings.warn(
                    "constrain_total_magmom was set to True, but the sum of MAGMOM "
                    "values is not an integer. NUPDOWN is meant to set the spin "
                    "multiplet and should typically be an integer. You are likely "
                    "better off changing the values of MAGMOM or simply setting "
                    "NUPDOWN directly in your INCAR settings.",
                    UserWarning,
                )
            incar["NUPDOWN"] = nupdown
        if self.use_structure_charge:
            incar["NELECT"] = self.nelect
        # Check that ALGO is appropriate
        if incar.get("LHFCALC", False) is True and incar.get("ALGO", "Normal") not in ["Normal", "All", "Damped"]:
            warnings.warn(
                "Hybrid functionals only support Algo = All, Damped, or Normal.",
                BadInputSetWarning,
            )
        # Ensure adequate number of KPOINTS are present for the tetrahedron
        # method (ISMEAR=-5). If KSPACING is in the INCAR file the number
        # of kpoints is not known before calling VASP, but a warning is raised
        # when the KSPACING value is > 0.5 (2 reciprocal Angstrom).
        # An error handler in Custodian is available to
        # correct overly large KSPACING values (small number of kpoints)
        # if necessary.
        # if "KSPACING" not in self.user_incar_settings.keys():
        if self.kpoints is not None:
            # NOTE(review): np.product is deprecated in newer NumPy in
            # favor of np.prod — consider updating.
            if np.product(self.kpoints.kpts) < 4 and incar.get("ISMEAR", 0) == -5:
                incar["ISMEAR"] = 0
        if self.user_incar_settings.get("KSPACING", 0) > 0.5 and incar.get("ISMEAR", 0) == -5:
            warnings.warn(
                "Large KSPACING value detected with ISMEAR = -5. Ensure that VASP "
                "generates an adequate number of KPOINTS, lower KSPACING, or "
                "set ISMEAR = 0",
                BadInputSetWarning,
            )
        if all(k.is_metal for k in structure.composition.keys()):
            if incar.get("NSW", 0) > 0 and incar.get("ISMEAR", 1) < 1:
                warnings.warn(
                    "Relaxation of likely metal with ISMEAR < 1 "
                    "detected. Please see VASP recommendations on "
                    "ISMEAR for metals.",
                    BadInputSetWarning,
                )
        return incar
    @property
    def poscar(self) -> Poscar:
        """
        :return: Poscar
        """
        return Poscar(self.structure)
    @property
    def nelect(self) -> float:
        """
        Gets the default number of electrons for a given structure.
        """
        nelectrons_by_element = {p.element: p.nelectrons for p in self.potcar}
        nelect = sum(
            num_atoms * nelectrons_by_element[str(el)]
            for el, num_atoms in self.structure.composition.element_composition.items()
        )
        if self.use_structure_charge:
            # A positive structure charge means electrons are removed.
            return nelect - self.structure.charge
        return nelect
    @property
    def kpoints(self) -> Union[Kpoints, None]:
        """
        Returns a KPOINTS file using the fully automated grid method. Uses
        Gamma centered meshes for hexagonal cells and Monk grids otherwise.
        If KSPACING is set in user_incar_settings (or the INCAR file), no
        file is created because VASP will automatically generate the kpoints.
        Algorithm:
            Uses a simple approach scaling the number of divisions along each
            reciprocal lattice vector proportional to its length.
        """
        # Return None if KSPACING is present in the INCAR, because this will
        # cause VASP to generate the kpoints automatically
        if self.user_incar_settings.get("KSPACING") or self._config_dict["INCAR"].get("KSPACING"):
            if self.user_kpoints_settings == {}:
                return None
        settings = self.user_kpoints_settings or self._config_dict.get("KPOINTS")
        if isinstance(settings, Kpoints):
            return settings
        # NOTE(review): this second KSPACING check appears unreachable —
        # any case it matches already returned None in the guard above.
        # Return None if KSPACING is present in the INCAR, because this will
        # cause VASP to generate the kpoints automatically
        if self.user_incar_settings.get("KSPACING") and self.user_kpoints_settings == {}:
            return None
        # If grid_density is in the kpoints_settings use
        # Kpoints.automatic_density
        if settings.get("grid_density"):
            return Kpoints.automatic_density(self.structure, int(settings["grid_density"]), self.force_gamma)
        # If reciprocal_density is in the kpoints_settings use
        # Kpoints.automatic_density_by_vol
        if settings.get("reciprocal_density"):
            return Kpoints.automatic_density_by_vol(
                self.structure, int(settings["reciprocal_density"]), self.force_gamma
            )
        # If length is in the kpoints_settings use Kpoints.automatic
        if settings.get("length"):
            return Kpoints.automatic(settings["length"])
        # Raise error. Unsure of which kpoint generation to use
        raise ValueError(
            "Invalid KPoint Generation algo : Supported Keys are "
            "grid_density: for Kpoints.automatic_density generation, "
            "reciprocal_density: for KPoints.automatic_density_by_vol "
            "generation, and length : for Kpoints.automatic generation"
        )
    def estimate_nbands(self) -> int:
        """
        Estimate the number of bands that VASP will initialize a
        calculation with by default. Note that in practice this
        can depend on # of cores (if not set explicitly)
        """
        nions = len(self.structure)
        # from VASP's point of view, the number of magnetic atoms are
        # the number of atoms with non-zero magmoms, so use Incar as
        # source of truth
        nmag = len([m for m in self.incar["MAGMOM"] if not np.allclose(m, 0)])
        # by definition, if non-spin polarized ignore nmag
        if (not nmag) or (self.incar["ISPIN"] == 1):
            nbands = np.ceil(self.nelect / 2 + nions / 2)
        else:
            nbands = np.ceil(0.6 * self.nelect + nmag)
        return int(nbands)
    def __str__(self):
        # Input sets identify themselves by class name only.
        return self.__class__.__name__
    def __repr__(self):
        return self.__class__.__name__
    def write_input(
        self,
        output_dir: str,
        make_dir_if_not_present: bool = True,
        include_cif: bool = False,
        potcar_spec: bool = False,
        zip_output: bool = False,
    ):
        """
        Writes out all input to a directory.
        Args:
            output_dir (str): Directory to output the VASP input files
            make_dir_if_not_present (bool): Set to True if you want the
                directory (and the whole path) to be created if it is not
                present.
            include_cif (bool): Whether to write a CIF file in the output
                directory for easier opening by VESTA.
            potcar_spec (bool): Instead of writing the POTCAR, write a "POTCAR.spec".
                This is intended to help sharing an input set with people who might
                not have a license to specific Potcar files. Given a "POTCAR.spec",
                the specific POTCAR file can be re-generated using pymatgen with the
                "generate_potcar" function in the pymatgen CLI.
            zip_output (bool): Passed through to the parent implementation,
                which zips the written input files when True.
        """
        super().write_input(
            output_dir=output_dir,
            make_dir_if_not_present=make_dir_if_not_present,
            include_cif=include_cif,
            potcar_spec=potcar_spec,
            zip_output=zip_output,
        )
        # Copy over any extra files from a previous calculation.
        for k, v in self.files_to_transfer.items():
            with zopen(v, "rb") as fin, zopen(str(Path(output_dir) / k), "wb") as fout:
                shutil.copyfileobj(fin, fout)
    def calculate_ng(self, max_prime_factor: int = 7, must_inc_2: bool = True) -> Tuple:
        """
        Calculates the NGX, NGY, and NGZ values using the information available in the INCAR and POTCAR
        This is meant to help with making initial guess for the FFT grid so we can interact with the Charge density API
        Args:
            max_prime_factor (int): the valid prime factors of the grid size in each direction
                VASP has many different settings for this to handle many compiling options.
                For typical MPI options all prime factors up to 7 are allowed
            must_inc_2 (bool): Whether 2 must be a prime factor of each grid size.
        Returns:
            Two lists of grid divisions along a, b, c: the base grid and the
            finer grid (the latter scaled by 2 for PREC = Accurate/Normal).
        """
        # TODO throw error for Ultrasoft potentials
        _RYTOEV = 13.605826
        _AUTOA = 0.529177249
        _PI = 3.141592653589793238
        # TODO Only do this for VASP 6 for now. Older version require more advanced logic
        # get the ENCUT val
        if "ENCUT" in self.incar and self.incar["ENCUT"] > 0:
            encut = self.incar["ENCUT"]
        else:
            # Fall back to the largest ENMAX among the POTCARs.
            encut = max(i_species.enmax for i_species in self.all_input["POTCAR"])
        #
        # Plane-wave cutoff expressed in reciprocal-lattice units per axis.
        _CUTOF = [
            np.sqrt(encut / _RYTOEV) / (2 * _PI / (anorm / _AUTOA)) for anorm in self.poscar.structure.lattice.abc
        ]
        _PREC = "Normal"  # VASP default
        if "PREC" in self.incar:
            _PREC = self.incar["PREC"]
        if _PREC[0].lower() in {"l", "m", "h"}:
            # NOTE(review): "NORMA" in the message below looks like a typo
            # for "NORMAL"; left unchanged here as it is runtime text.
            raise NotImplementedError(
                "PREC = LOW/MEDIUM/HIGH from VASP 4.x and not supported, Please use NORMA/SINGLE/ACCURATE"
            )
        if _PREC[0].lower() in {"a", "s"}:  # TODO This only works in VASP 6.x
            _WFACT = 4
        else:
            _WFACT = 3
        def next_g_size(cur_g_size):
            # Round up to a grid size composed only of small prime factors.
            g_size = int(_WFACT * cur_g_size + 0.5)
            return next_num_with_prime_factors(g_size, max_prime_factor, must_inc_2)
        ng_vec = [*map(next_g_size, _CUTOF)]
        if _PREC[0].lower() in {"a", "n"}:  # TODO This works for VASP 5.x and 6.x
            finer_g_scale = 2
        else:
            finer_g_scale = 1
        return ng_vec, [ng_ * finer_g_scale for ng_ in ng_vec]
# Helper functions to determine valid FFT grids for VASP
def next_num_with_prime_factors(n: int, max_prime_factor: int, must_inc_2: bool = True) -> int:
    """
    Return the next number greater than or equal to n that only has the desired prime factors
    Args:
        n (int): Initial guess at the grid density
        max_prime_factor (int): the maximum prime factor
        must_inc_2 (bool): 2 must be a prime factor of the result
    Returns:
        int: first product of the prime_factors that is >= n
    Raises:
        ValueError: if max_prime_factor is less than 2.
    """
    # Bug fix: the old message said "greater than 2" although 2 itself is
    # a valid (and the smallest possible) maximum prime factor.
    if max_prime_factor < 2:
        raise ValueError("Must choose a maximum prime factor of at least 2")
    prime_factors = primes_less_than(max_prime_factor)
    for new_val in itertools.count(start=n):
        if must_inc_2 and new_val % 2 != 0:
            continue
        # Divide out every allowed prime factor; anything left over means
        # new_val contains a disallowed factor.
        cur_val_ = new_val
        for j in prime_factors:
            while cur_val_ % j == 0:
                cur_val_ //= j
        if cur_val_ == 1:
            return new_val
    # Unreachable (itertools.count is infinite); kept as a safeguard.
    raise ValueError("No factorable number found, not possible.")
def primes_less_than(max_val: int) -> List[int]:
    """
    Get the primes less than or equal to the max value
    Args:
        max_val (int): inclusive upper bound for the primes returned.
    Returns:
        list[int]: all primes p with 2 <= p <= max_val, in ascending order.
    """
    res = []
    for i in range(2, max_val + 1):
        # Trial division only needs to test divisors up to sqrt(i).
        if all(i % j != 0 for j in range(2, int(i ** 0.5) + 1)):
            res.append(i)
    return res
class MITRelaxSet(DictSet):
    """
    Standard implementation of VaspInputSet using the parameters of the MIT
    high-throughput project. The parameters were chosen specifically for
    high-throughput work, which generally means pseudopotentials with fewer
    electrons were selected.
    Please refer::
        A Jain, G. Hautier, C. Moore, S. P. Ong, C. Fischer, T. Mueller,
        K. A. Persson, G. Ceder. A high-throughput infrastructure for density
        functional theory calculations. Computational Materials Science,
        2011, 50(8), 2295-2310. doi:10.1016/j.commatsci.2011.02.023
    """
    # Loaded once at import time from MITRelaxSet.yaml and shared by all instances.
    CONFIG = _load_yaml_config("MITRelaxSet")
    def __init__(self, structure, **kwargs):
        """
        :param structure: Structure
        :param kwargs: Same as those supported by DictSet.
        """
        super().__init__(structure, MITRelaxSet.CONFIG, **kwargs)
        self.kwargs = kwargs
class MPRelaxSet(DictSet):
    """
    Implementation of VaspInputSet using parameters from the public Materials
    Project. Relative to the MIT parameters, the chosen pseudopotentials
    typically contain more electrons and the k-point grid is roughly 50%
    denser. The LDAUU parameters also differ because they were fitted for the
    different POTCARs used, which results in different fitted values.
    """
    # Loaded once at import time from MPRelaxSet.yaml and shared by all instances.
    CONFIG = _load_yaml_config("MPRelaxSet")
    def __init__(self, structure, **kwargs):
        """
        :param structure: Structure
        :param kwargs: Same as those supported by DictSet.
        """
        super().__init__(structure, MPRelaxSet.CONFIG, **kwargs)
        self.kwargs = kwargs
class MPScanRelaxSet(DictSet):
    """
    Class for writing a relaxation input set using the accurate and numerically
    efficient r2SCAN variant of the Strongly Constrained and Appropriately Normed
    (SCAN) metaGGA density functional.
    Notes:
        1. This functional is officially supported in VASP 6.0.0 and above. On older version,
        source code may be obtained by contacting the authors of the referenced manuscript.
        The original SCAN functional, available from VASP 5.4.3 onwards, maybe used instead
        by passing `user_incar_settings={"METAGGA": "SCAN"}` when instantiating this InputSet.
        r2SCAN and SCAN are expected to yield very similar results.
        2. Meta-GGA calculations require POTCAR files that include
        information on the kinetic energy density of the core-electrons,
        i.e. "PBE_52" or "PBE_54". Make sure the POTCARs include the
        following lines (see VASP wiki for more details):
            $ grep kinetic POTCAR
            kinetic energy-density
            mkinetic energy-density pseudized
            kinetic energy density (partial)
    References:
        James W. Furness, Aaron D. Kaplan, Jinliang Ning, John P. Perdew, and Jianwei Sun.
        Accurate and Numerically Efficient r2SCAN Meta-Generalized Gradient Approximation.
        The Journal of Physical Chemistry Letters 0, 11 DOI: 10.1021/acs.jpclett.0c02405
    """
    CONFIG = _load_yaml_config("MPSCANRelaxSet")
    def __init__(self, structure, bandgap=0, **kwargs):
        """
        Args:
            structure (Structure): Input structure.
            bandgap (int): Bandgap of the structure in eV. The bandgap is used to
                compute the appropriate k-point density and determine the
                smearing settings.
                Metallic systems (default, bandgap = 0) use a KSPACING value of 0.22
                and Methfessel-Paxton order 2 smearing (ISMEAR=2, SIGMA=0.2).
                Non-metallic systems (bandgap > 0) use the tetrahedron smearing
                method (ISMEAR=-5, SIGMA=0.05). The KSPACING value is
                calculated from the bandgap via Eqs. 25 and 29 of Wisesa, McGill,
                and Mueller [1] (see References). Note that if 'user_incar_settings'
                or 'user_kpoints_settings' override KSPACING, the calculation from
                bandgap is not performed.
            vdw (str): set "rVV10" to enable SCAN+rVV10, which is a versatile
                van der Waals density functional by combing the SCAN functional
                with the rVV10 non-local correlation functional. rvv10 is the only
                dispersion correction available for SCAN at this time.
            **kwargs: Same as those supported by DictSet.
        References:
            [1] P. Wisesa, K.A. McGill, T. Mueller, Efficient generation of
            generalized Monkhorst-Pack grids through the use of informatics,
            Phys. Rev. B. 93 (2016) 1–10. doi:10.1103/PhysRevB.93.155109.
        """
        super().__init__(structure, MPScanRelaxSet.CONFIG, **kwargs)
        self.bandgap = bandgap
        self.kwargs = kwargs
        # Meta-GGA needs POTCARs carrying kinetic energy density (see class notes).
        if self.potcar_functional not in ["PBE_52", "PBE_54"]:
            raise ValueError("SCAN calculations require PBE_52 or PBE_54!")
        # INCAR tags to layer on top of the base config, chosen from the bandgap.
        updates = {}
        # select the KSPACING and smearing parameters based on the bandgap
        if self.bandgap == 0:
            updates["KSPACING"] = 0.22
            updates["SIGMA"] = 0.2
            updates["ISMEAR"] = 2
        else:
            rmin = 25.22 - 2.87 * bandgap  # Eq. 25
            kspacing = 2 * np.pi * 1.0265 / (rmin - 1.0183)  # Eq. 29
            # cap the KSPACING at a max of 0.44, per internal benchmarking
            if 0.22 < kspacing < 0.44:
                updates["KSPACING"] = kspacing
            else:
                updates["KSPACING"] = 0.44
            updates["ISMEAR"] = -5
            updates["SIGMA"] = 0.05
        # Don't overwrite things the user has supplied
        # NOTE(review): these truthiness checks skip falsy user values
        # (e.g. ISMEAR=0), but user_incar_settings still takes precedence
        # in the inherited `incar` property, so the net result should be
        # correct — confirm.
        if self.user_incar_settings.get("KSPACING"):
            del updates["KSPACING"]
        if self.user_incar_settings.get("ISMEAR"):
            del updates["ISMEAR"]
        if self.user_incar_settings.get("SIGMA"):
            del updates["SIGMA"]
        if self.vdw:
            if self.vdw != "rvv10":
                warnings.warn(
                    "Use of van der waals functionals other than rVV10 with SCAN is not supported at this time. "
                )
            # delete any vdw parameters that may have been added to the INCAR
            # (DictSet.__init__ merged them in before this point)
            vdw_par = loadfn(str(MODULE_DIR / "vdW_parameters.yaml"))
            for k, v in vdw_par[self.vdw].items():
                try:
                    del self._config_dict["INCAR"][k]
                except KeyError:
                    pass
        self._config_dict["INCAR"].update(updates)
class MPMetalRelaxSet(MPRelaxSet):
    """
    Variant of the public Materials Project input set tuned for metals.
    The key differences are first-order Methfessel-Paxton smearing
    (ISMEAR=1, SIGMA=0.2) and a denser k-point grid.
    """
    # Loaded once at import time from MPRelaxSet.yaml and shared by all instances.
    CONFIG = _load_yaml_config("MPRelaxSet")
    def __init__(self, structure, **kwargs):
        """
        :param structure: Structure
        :param kwargs: Same as those supported by DictSet.
        """
        super().__init__(structure, **kwargs)
        # Override the stock MP parameters with metal-appropriate smearing
        # and a denser k-point mesh.
        metal_incar_overrides = {"ISMEAR": 1, "SIGMA": 0.2}
        self._config_dict["INCAR"].update(metal_incar_overrides)
        self._config_dict["KPOINTS"].update({"reciprocal_density": 200})
        self.kwargs = kwargs
class MPHSERelaxSet(DictSet):
    """
    Identical to the MPRelaxSet except that HSE parameters are used.
    """
    # Loaded once at import time from MPHSERelaxSet.yaml and shared by all instances.
    CONFIG = _load_yaml_config("MPHSERelaxSet")
    def __init__(self, structure, **kwargs):
        """
        :param structure: Structure
        :param kwargs: Same as those supported by DictSet.
        """
        super().__init__(structure, MPHSERelaxSet.CONFIG, **kwargs)
        self.kwargs = kwargs
class MPStaticSet(MPRelaxSet):
    """
    Creates input files for a static calculation.
    """
    def __init__(
        self,
        structure,
        prev_incar=None,
        prev_kpoints=None,
        lepsilon=False,
        lcalcpol=False,
        reciprocal_density=100,
        small_gap_multiply=None,
        **kwargs,
    ):
        """
        Args:
            structure (Structure): Structure from previous run.
            prev_incar (Incar): Incar file from previous run.
            prev_kpoints (Kpoints): Kpoints from previous run.
            lepsilon (bool): Whether to add static dielectric calculation
            lcalcpol (bool): Whether to turn on evaluation of the Berry phase approximations
                for electronic polarization
            reciprocal_density (int): For static calculations, we usually set the
                reciprocal density by volume. This is a convenience arg to change
                that, rather than using user_kpoints_settings. Defaults to 100,
                which is ~50% more than that of standard relaxation calculations.
            small_gap_multiply ([float, float]): If the gap is less than
                1st index, multiply the default reciprocal_density by the 2nd
                index.
            **kwargs: kwargs supported by MPRelaxSet.
        """
        super().__init__(structure, **kwargs)
        # Accept file paths as well as parsed objects for the previous run.
        if isinstance(prev_incar, str):
            prev_incar = Incar.from_file(prev_incar)
        if isinstance(prev_kpoints, str):
            prev_kpoints = Kpoints.from_file(prev_kpoints)
        self.prev_incar = prev_incar
        self.prev_kpoints = prev_kpoints
        self.reciprocal_density = reciprocal_density
        self.kwargs = kwargs
        self.lepsilon = lepsilon
        self.lcalcpol = lcalcpol
        self.small_gap_multiply = small_gap_multiply
    @property
    def incar(self):
        """
        :return: Incar — the previous run's INCAR (when given) overlaid with
            static-calculation tags; otherwise the parent set's INCAR with
            the same overlay.
        """
        parent_incar = super().incar
        incar = Incar(self.prev_incar) if self.prev_incar is not None else Incar(parent_incar)
        # Static-run essentials: no ionic steps, tetrahedron smearing,
        # charge/aux output on, wavefunctions off.
        incar.update(
            {
                "IBRION": -1,
                "ISMEAR": -5,
                "LAECHG": True,
                "LCHARG": True,
                "LORBIT": 11,
                "LVHAR": True,
                "LWAVE": False,
                "NSW": 0,
                "ALGO": "Normal",
            }
        )
        if self.lepsilon:
            incar["IBRION"] = 8
            incar["LEPSILON"] = True
            # LPEAD=T: numerical evaluation of overlap integral prevents
            # LRF_COMMUTATOR errors and can lead to better expt. agreement
            # but produces slightly different results
            incar["LPEAD"] = True
            # Note that DFPT calculations MUST unset NSW. NSW = 0 will fail
            # to output ionic.
            incar.pop("NSW", None)
            incar.pop("NPAR", None)
            # tighter ediff for DFPT
            incar["EDIFF"] = 1e-5
        if self.lcalcpol:
            incar["LCALCPOL"] = True
        for k in ["MAGMOM", "NUPDOWN"] + list(self.user_incar_settings.keys()):
            # For these parameters as well as user specified settings, override
            # the incar settings.
            if parent_incar.get(k, None) is not None:
                incar[k] = parent_incar[k]
            else:
                incar.pop(k, None)
        # use new LDAUU when possible b/c the Poscar might have changed
        # representation
        if incar.get("LDAU"):
            u = incar.get("LDAUU", [])
            j = incar.get("LDAUJ", [])
            # NOTE(review): `y` is unused here; `x` indexes both u and j —
            # zip(u, j) would express this more directly.
            if sum(u[x] - j[x] for x, y in enumerate(u)) > 0:
                for tag in ("LDAUU", "LDAUL", "LDAUJ"):
                    incar.update({tag: parent_incar[tag]})
            # ensure to have LMAXMIX for GGA+U static run
            if "LMAXMIX" not in incar:
                incar.update({"LMAXMIX": parent_incar["LMAXMIX"]})
        # Compare ediff between previous and staticinputset values,
        # choose the tighter ediff
        incar["EDIFF"] = min(incar.get("EDIFF", 1), parent_incar["EDIFF"])
        return incar
    @property
    def kpoints(self) -> Optional[Kpoints]:
        """
        :return: Kpoints
        """
        self._config_dict["KPOINTS"]["reciprocal_density"] = self.reciprocal_density
        kpoints = super().kpoints
        # Prefer to use k-point scheme from previous run
        # except for when lepsilon = True is specified
        if kpoints is not None:
            if self.prev_kpoints and self.prev_kpoints.style != kpoints.style:
                if (self.prev_kpoints.style == Kpoints.supported_modes.Monkhorst) and (not self.lepsilon):
                    # Round odd divisions up to even for a Monkhorst grid.
                    k_div = [kp + 1 if kp % 2 == 1 else kp for kp in kpoints.kpts[0]]  # type: ignore
                    kpoints = Kpoints.monkhorst_automatic(k_div)  # type: ignore
                else:
                    kpoints = Kpoints.gamma_automatic(kpoints.kpts[0])  # type: ignore
        return kpoints
    def override_from_prev_calc(self, prev_calc_dir="."):
        """
        Update the input set to include settings from a previous calculation.
        Args:
            prev_calc_dir (str): The path to the previous calculation directory.
        Returns:
            The input set with the settings (structure, k-points, incar, etc)
            updated using the previous VASP run.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        self.prev_incar = vasprun.incar
        self.prev_kpoints = vasprun.kpoints
        if self.standardize:
            warnings.warn(
                "Use of standardize=True with from_prev_run is not "
                "recommended as there is no guarantee the copied "
                "files will be appropriate for the standardized "
                "structure."
            )
        self._structure = get_structure_from_prev_run(vasprun, outcar)
        # multiply the reciprocal density if needed
        if self.small_gap_multiply:
            gap = vasprun.eigenvalue_band_properties[0]
            if gap <= self.small_gap_multiply[0]:
                self.reciprocal_density = self.reciprocal_density * self.small_gap_multiply[1]
        return self
    @classmethod
    def from_prev_calc(cls, prev_calc_dir, **kwargs):
        """
        Generate a set of Vasp input files for static calculations from a
        directory of previous Vasp run.
        Args:
            prev_calc_dir (str): Directory containing the outputs(
                vasprun.xml and OUTCAR) of previous vasp run.
            **kwargs: All kwargs supported by MPStaticSet, other than prev_incar
                and prev_structure and prev_kpoints which are determined from
                the prev_calc_dir.
        """
        # Construct with a placeholder structure; the real one is pulled in
        # by override_from_prev_calc.
        input_set = cls(_dummy_structure, **kwargs)
        return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
class MPScanStaticSet(MPScanRelaxSet):
    """
    Creates input files for a static calculation using the accurate and numerically
    efficient r2SCAN variant of the Strongly Constrained and Appropriately Normed
    (SCAN) metaGGA functional.
    """

    def __init__(self, structure, bandgap=0, prev_incar=None, lepsilon=False, lcalcpol=False, **kwargs):
        """
        Args:
            structure (Structure): Structure from previous run.
            bandgap (float): Bandgap of the structure in eV. The bandgap is used to
                compute the appropriate k-point density and determine the
                smearing settings.
            prev_incar (Incar): Incar file from previous run.
            lepsilon (bool): Whether to add static dielectric calculation
            lcalcpol (bool): Whether to turn on evaluation of the Berry phase approximations
                for electronic polarization.
            **kwargs: kwargs supported by MPScanRelaxSet.
        """
        super().__init__(structure, bandgap, **kwargs)
        # Accept either an Incar object or a path to an INCAR file.
        if isinstance(prev_incar, str):
            prev_incar = Incar.from_file(prev_incar)
        self.prev_incar = prev_incar
        self.kwargs = kwargs
        self.lepsilon = lepsilon
        self.lcalcpol = lcalcpol

    @property
    def incar(self):
        """
        :return: Incar
        """
        parent_incar = super().incar
        # Seed from the previous run's INCAR when available, otherwise from
        # the parent (MPScanRelaxSet) defaults.
        incar = Incar(self.prev_incar) if self.prev_incar is not None else Incar(parent_incar)

        # Static-run settings: no ionic steps (NSW=0), tetrahedron smearing
        # (ISMEAR=-5), keep orbital projections (LORBIT=11) and write the
        # local potential (LVHAR).
        incar.update({"LREAL": False, "NSW": 0, "LORBIT": 11, "LVHAR": True, "ISMEAR": -5})

        if self.lepsilon:
            # DFPT static dielectric calculation (IBRION=8 + LEPSILON).
            incar["IBRION"] = 8
            incar["LEPSILON"] = True

            # LPEAD=T: numerical evaluation of overlap integral prevents
            # LRF_COMMUTATOR errors and can lead to better expt. agreement
            # but produces slightly different results
            incar["LPEAD"] = True

            # Note that DFPT calculations MUST unset NSW. NSW = 0 will fail
            # to output ionic.
            incar.pop("NSW", None)
            incar.pop("NPAR", None)

        if self.lcalcpol:
            # Berry-phase evaluation of electronic polarization.
            incar["LCALCPOL"] = True

        # parent_incar already has user_incar_settings folded in, so copying
        # a user-specified key from parent_incar (or dropping it when absent
        # there) lets explicit user choices win over the previous-run INCAR.
        for k in list(self.user_incar_settings.keys()):
            # For user specified settings, override
            # the incar settings.
            if parent_incar.get(k, None) is not None:
                incar[k] = parent_incar[k]
            else:
                incar.pop(k, None)

        return incar

    def override_from_prev_calc(self, prev_calc_dir="."):
        """
        Update the input set to include settings from a previous calculation.

        Args:
            prev_calc_dir (str): The path to the previous calculation directory.

        Returns:
            The input set with the settings (structure, k-points, incar, etc)
            updated using the previous VASP run.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        self.prev_incar = vasprun.incar
        self._structure = get_structure_from_prev_run(vasprun, outcar)
        return self

    @classmethod
    def from_prev_calc(cls, prev_calc_dir, **kwargs):
        """
        Generate a set of Vasp input files for static calculations from a
        directory of previous Vasp run.

        Args:
            prev_calc_dir (str): Directory containing the outputs(
                vasprun.xml and OUTCAR) of previous vasp run.
            **kwargs: All kwargs supported by MPScanStaticSet, other than prev_incar
                which is determined from the prev_calc_dir.
        """
        input_set = cls(_dummy_structure, **kwargs)
        return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
class MPHSEBSSet(MPHSERelaxSet):
    """
    Implementation of a VaspInputSet for HSE band structure computations.

    Remember that HSE band structures must be self-consistent in VASP. A
    band structure along symmetry lines for instance needs BOTH a uniform
    grid with appropriate weights AND a path along the lines with weight 0.

    Thus, the "Uniform" mode is just like regular static SCF but allows
    adding custom kpoints (e.g., corresponding to known VBM/CBM) to the
    uniform grid that have zero weight (e.g., for better gap estimate).

    The "Gap" mode behaves just like the "Uniform" mode, however, if starting
    from a previous calculation, the VBM and CBM k-points will automatically
    be added to ``added_kpoints``.

    The "Line" mode is just like Uniform mode, but additionally adds
    k-points along symmetry lines with zero weight.
    """

    def __init__(
        self,
        structure,
        user_incar_settings=None,
        added_kpoints=None,
        mode="Gap",
        reciprocal_density=None,
        copy_chgcar=True,
        kpoints_line_density=20,
        **kwargs,
    ):
        """
        Args:
            structure (Structure): Structure to compute
            user_incar_settings (dict): A dict specifying additional incar
                settings
            added_kpoints (list): a list of kpoints (list of 3 number list)
                added to the run. The k-points are in fractional coordinates
            mode (str): "Line" - generate k-points along symmetry lines for
                bandstructure. "Uniform" - generate uniform k-points grid.
            reciprocal_density (int): k-point density to use for uniform mesh.
                If None, falls back to user_kpoints_settings, then to 50.
            copy_chgcar (bool): Whether to copy the CHGCAR of a previous run.
            kpoints_line_density (int): k-point density for high symmetry lines
            **kwargs (dict): Any other parameters to pass into DictSet.
        """
        super().__init__(structure, **kwargs)
        self.user_incar_settings = user_incar_settings or {}
        # HSE band structures are static runs with no symmetry reduction of
        # the explicit k-point list (ISYM=3 uses symmetry only for charge).
        self._config_dict["INCAR"].update(
            {
                "NSW": 0,
                "ISMEAR": 0,
                "SIGMA": 0.05,
                "ISYM": 3,
                "LCHARG": False,
                "NELMIN": 5,
            }
        )
        self.added_kpoints = added_kpoints if added_kpoints is not None else []
        self.mode = mode

        # Precedence: explicit argument > user_kpoints_settings > default 50.
        # (Previously an explicitly passed reciprocal_density was silently
        # replaced by 50 whenever user_kpoints_settings lacked the key.)
        if reciprocal_density:
            self.reciprocal_density = reciprocal_density
        elif "reciprocal_density" in self.user_kpoints_settings:
            self.reciprocal_density = self.user_kpoints_settings["reciprocal_density"]
        else:
            self.reciprocal_density = 50

        self.kpoints_line_density = kpoints_line_density
        self.copy_chgcar = copy_chgcar

    @property
    def kpoints(self) -> Kpoints:
        """
        :return: Kpoints with an explicit (Reciprocal-style) k-point list:
            the symmetry-reduced uniform mesh with proper weights, any
            user-added k-points with zero weight, and (Line mode only) the
            high-symmetry path with zero weight.
        """
        kpts = []  # type: List[Union[int, float, None]]
        weights = []  # type: List[Union[float, None]]
        all_labels = []  # type: List[Union[str, None]]
        structure = self.structure

        # for both modes, include the Uniform mesh w/standard weights
        grid = Kpoints.automatic_density_by_vol(structure, self.reciprocal_density).kpts
        ir_kpts = SpacegroupAnalyzer(structure, symprec=0.1).get_ir_reciprocal_mesh(grid[0])
        for k in ir_kpts:
            kpts.append(k[0])
            weights.append(int(k[1]))
            all_labels.append(None)

        # for both modes, include any user-added kpoints w/zero weight
        for k in self.added_kpoints:
            kpts.append(k)
            weights.append(0.0)
            all_labels.append("user-defined")

        # for line mode only, add the symmetry lines w/zero weight
        if self.mode.lower() == "line":
            kpath = HighSymmKpath(structure)
            frac_k_points, labels = kpath.get_kpoints(
                line_density=self.kpoints_line_density, coords_are_cartesian=False
            )
            for k, f in enumerate(frac_k_points):
                kpts.append(f)
                weights.append(0.0)
                all_labels.append(labels[k])

        comment = "HSE run along symmetry lines" if self.mode.lower() == "line" else "HSE run on uniform grid"

        return Kpoints(
            comment=comment,
            style=Kpoints.supported_modes.Reciprocal,
            num_kpts=len(kpts),
            kpts=kpts,  # type: ignore
            kpts_weights=weights,
            labels=all_labels,
        )

    def override_from_prev_calc(self, prev_calc_dir="."):
        """
        Update the input set to include settings from a previous calculation.

        Args:
            prev_calc_dir (str): The path to the previous calculation directory.

        Returns:
            The input set with the settings (structure, k-points, incar, etc)
            updated using the previous VASP run.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        self._structure = get_structure_from_prev_run(vasprun, outcar)

        # note: recommend not standardizing the cell because we want to retain
        # k-points
        if self.standardize:
            warnings.warn(
                "Use of standardize=True with from_prev_calc is not "
                "recommended as there is no guarantee the copied "
                "files will be appropriate for the standardized "
                "structure."
            )

        # In Gap mode, seed the zero-weight k-points with the previous run's
        # VBM and CBM for a better gap estimate.
        if self.mode.lower() == "gap":
            added_kpoints = []
            bs = vasprun.get_band_structure()
            vbm, cbm = bs.get_vbm()["kpoint"], bs.get_cbm()["kpoint"]
            if vbm:
                added_kpoints.append(vbm.frac_coords)
            if cbm:
                added_kpoints.append(cbm.frac_coords)
            self.added_kpoints.extend(added_kpoints)

        # Reuse the most recent charge density from the previous calculation.
        files_to_transfer = {}
        if self.copy_chgcar:
            chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / "CHGCAR*")))
            if chgcars:
                files_to_transfer["CHGCAR"] = str(chgcars[-1])
        self.files_to_transfer.update(files_to_transfer)

        return self

    @classmethod
    def from_prev_calc(cls, prev_calc_dir, **kwargs):
        """
        Generate a set of Vasp input files for HSE calculations from a
        directory of previous Vasp run.

        Args:
            prev_calc_dir (str): Directory containing the outputs
                (vasprun.xml and OUTCAR) of previous vasp run.
            **kwargs: All kwargs supported by MPHSEBSStaticSet, other than
                prev_structure which is determined from the previous calc dir.
        """
        input_set = cls(_dummy_structure, **kwargs)
        return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
class MPNonSCFSet(MPRelaxSet):
    """
    Init a MPNonSCFSet. Typically, you would use the classmethod
    from_prev_calc to initialize from a previous SCF run.
    """

    def __init__(
        self,
        structure,
        prev_incar=None,
        mode="line",
        nedos=2001,
        dedos=0.005,
        reciprocal_density=100,
        sym_prec=0.1,
        kpoints_line_density=20,
        optics=False,
        copy_chgcar=True,
        nbands_factor=1.2,
        small_gap_multiply=None,
        **kwargs,
    ):
        """
        Args:
            structure (Structure): Structure to compute
            prev_incar (Incar/string): Incar file from previous run.
            mode (str): Line, Uniform or Boltztrap mode supported.
            nedos (int): nedos parameter. Default to 2001.
            dedos (float): setting nedos=0 and uniform mode in from_prev_calc,
                an automatic nedos will be calculated using the total energy range
                divided by the energy step dedos
            reciprocal_density (int): density of k-mesh by reciprocal
                volume (defaults to 100)
            sym_prec (float): Symmetry precision (for Uniform mode).
            kpoints_line_density (int): Line density for Line mode.
            optics (bool): whether to add dielectric function
            copy_chgcar: Whether to copy the old CHGCAR when starting from a
                previous calculation.
            nbands_factor (float): Multiplicative factor for NBANDS when starting
                from a previous calculation. Choose a higher number if you are
                doing an LOPTICS calculation.
            small_gap_multiply ([float, float]): When starting from a previous
                calculation, if the gap is less than 1st index, multiply the default
                reciprocal_density by the 2nd index.
            **kwargs: kwargs supported by MPRelaxSet.

        Raises:
            ValueError: If ``mode`` is not one of "line", "uniform" or
                "boltztrap" (case-insensitive).
        """
        super().__init__(structure, **kwargs)
        # Accept either an Incar object or a path to an INCAR file.
        if isinstance(prev_incar, str):
            prev_incar = Incar.from_file(prev_incar)
        self.prev_incar = prev_incar
        self.kwargs = kwargs
        self.nedos = nedos
        self.dedos = dedos
        self.reciprocal_density = reciprocal_density
        self.sym_prec = sym_prec
        self.kpoints_line_density = kpoints_line_density
        self.optics = optics
        # mode is normalized to lowercase once; all later comparisons rely on it.
        self.mode = mode.lower()
        self.copy_chgcar = copy_chgcar
        self.nbands_factor = nbands_factor
        self.small_gap_multiply = small_gap_multiply

        if self.mode not in ["line", "uniform", "boltztrap"]:
            raise ValueError("Supported modes for NonSCF runs are 'Line', 'Uniform' and 'Boltztrap'!")

        if (self.mode != "uniform" or nedos < 2000) and optics:
            warnings.warn("It is recommended to use Uniform mode with a high NEDOS for optics calculations.")

    @property
    def incar(self) -> Incar:
        """
        :return: Incar
        """
        incar = super().incar
        if self.prev_incar is not None:
            incar.update(self.prev_incar.items())

        # Overwrite necessary INCAR parameters from previous runs:
        # non-SCF run reading a fixed charge density (ICHARG=11), no ionic
        # steps, no symmetry, no charge/wavefunction output.
        incar.update(
            {
                "IBRION": -1,
                "LCHARG": False,
                "LORBIT": 11,
                "LWAVE": False,
                "NSW": 0,
                "ISYM": 0,
                "ICHARG": 11,
            }
        )

        if self.mode == "uniform":
            # use tetrahedron method for DOS and optics calculations
            incar.update({"ISMEAR": -5, "ISYM": 2})
        else:
            # if line mode, can't use ISMEAR=-5; also use small sigma to avoid
            # partial occupancies for small band gap materials.
            # finally, explicit k-point generation (needed for bolztrap mode)
            # is incompatible with ISMEAR = -5.
            incar.update({"ISMEAR": 0, "SIGMA": 0.01})

        incar.update(self.user_incar_settings)

        # NOTE: this was previously the substring test ``self.mode in "uniform"``,
        # which only worked by accident for the three validated mode strings.
        if self.mode == "uniform":
            # Set smaller steps for DOS and optics output
            incar["NEDOS"] = self.nedos

        if self.optics:
            incar["LOPTICS"] = True

        # MAGMOM is meaningless here; spin state comes from the charge density.
        incar.pop("MAGMOM", None)

        return incar

    @property
    def kpoints(self) -> Optional[Kpoints]:
        """
        :return: Kpoints appropriate to the selected mode: a high-symmetry
            path (line), an explicit symmetry-reduced list (boltztrap), or a
            uniform mesh via the parent implementation (uniform).
        """
        # override pymatgen kpoints if provided
        user_kpoints = self.user_kpoints_settings
        if isinstance(user_kpoints, Kpoints):
            return user_kpoints

        if self.mode == "line":
            kpath = HighSymmKpath(self.structure)
            frac_k_points, k_points_labels = kpath.get_kpoints(
                line_density=self.kpoints_line_density, coords_are_cartesian=False
            )
            kpoints = Kpoints(
                comment="Non SCF run along symmetry lines",
                style=Kpoints.supported_modes.Reciprocal,
                num_kpts=len(frac_k_points),
                kpts=frac_k_points,
                labels=k_points_labels,
                kpts_weights=[1] * len(frac_k_points),
            )
        elif self.mode == "boltztrap":
            # BoltzTraP requires an explicit list of irreducible k-points
            # with integer weights.
            kpoints = Kpoints.automatic_density_by_vol(self.structure, self.reciprocal_density)
            mesh = kpoints.kpts[0]
            ir_kpts = SpacegroupAnalyzer(self.structure, symprec=self.sym_prec).get_ir_reciprocal_mesh(mesh)
            kpts = []
            weights = []
            for k in ir_kpts:
                kpts.append(k[0])
                weights.append(int(k[1]))
            kpoints = Kpoints(
                comment="Non SCF run on uniform grid",
                style=Kpoints.supported_modes.Reciprocal,
                num_kpts=len(ir_kpts),
                kpts=kpts,
                kpts_weights=weights,
            )
        else:
            self._config_dict["KPOINTS"]["reciprocal_density"] = self.reciprocal_density
            return super().kpoints

        return kpoints

    def override_from_prev_calc(self, prev_calc_dir="."):
        """
        Update the input set to include settings from a previous calculation.

        Args:
            prev_calc_dir (str): The path to the previous calculation directory.

        Returns:
            The input set with the settings (structure, k-points, incar, etc)
            updated using the previous VASP run.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        self.prev_incar = vasprun.incar

        # Get a Magmom-decorated structure
        self._structure = get_structure_from_prev_run(vasprun, outcar)
        if self.standardize:
            warnings.warn(
                "Use of standardize=True with from_prev_run is not "
                "recommended as there is no guarantee the copied "
                "files will be appropriate for the standardized"
                " structure. copy_chgcar is enforced to be false."
            )
            self.copy_chgcar = False

        # Turn off spin when magmom for every site is smaller than 0.02.
        if outcar and outcar.magnetization:
            site_magmom = np.array([i["tot"] for i in outcar.magnetization])
            ispin = 2 if np.any(site_magmom[np.abs(site_magmom) > 0.02]) else 1
        elif vasprun.is_spin:
            ispin = 2
        else:
            ispin = 1

        nbands = int(np.ceil(vasprun.parameters["NBANDS"] * self.nbands_factor))
        self.prev_incar.update({"ISPIN": ispin, "NBANDS": nbands})

        files_to_transfer = {}
        if self.copy_chgcar:
            chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / "CHGCAR*")))
            if chgcars:
                files_to_transfer["CHGCAR"] = str(chgcars[-1])
        self.files_to_transfer.update(files_to_transfer)

        # multiply the reciprocal density if needed:
        if self.small_gap_multiply:
            gap = vasprun.eigenvalue_band_properties[0]
            if gap <= self.small_gap_multiply[0]:
                self.reciprocal_density = self.reciprocal_density * self.small_gap_multiply[1]
                self.kpoints_line_density = self.kpoints_line_density * self.small_gap_multiply[1]

        # automatic setting of nedos using the energy range and the energy step dedos
        if self.nedos == 0:
            emax = max(eigs.max() for eigs in vasprun.eigenvalues.values())
            emin = min(eigs.min() for eigs in vasprun.eigenvalues.values())
            self.nedos = int((emax - emin) / self.dedos)

        return self

    @classmethod
    def from_prev_calc(cls, prev_calc_dir, **kwargs):
        """
        Generate a set of Vasp input files for NonSCF calculations from a
        directory of previous static Vasp run.

        Args:
            prev_calc_dir (str): The directory contains the outputs(
                vasprun.xml and OUTCAR) of previous vasp run.
            **kwargs: All kwargs supported by MPNonSCFSet, other than structure,
                prev_incar and prev_chgcar which are determined from the
                prev_calc_dir.
        """
        input_set = cls(_dummy_structure, **kwargs)
        return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
class MPSOCSet(MPStaticSet):
    """
    An input set for running spin-orbit coupling (SOC) calculations.
    """

    def __init__(
        self,
        structure,
        saxis=(0, 0, 1),
        copy_chgcar=True,
        nbands_factor=1.2,
        reciprocal_density=100,
        small_gap_multiply=None,
        magmom=None,
        **kwargs,
    ):
        """
        Args:
            structure (Structure): the structure must have the 'magmom' site
                property and each magnetic moment value must have 3
                components. eg: ``magmom = [[0,0,2], ...]``
            saxis (tuple): magnetic moment orientation
            copy_chgcar: Whether to copy the old CHGCAR. Defaults to True.
            nbands_factor (float): Multiplicative factor for NBANDS. Choose a
                higher number if you are doing an LOPTICS calculation.
            reciprocal_density (int): density of k-mesh by reciprocal volume.
            small_gap_multiply ([float, float]): If the gap is less than
                1st index, multiply the default reciprocal_density by the 2nd
                index.
            magmom (list[list[float]]): Override for the structure magmoms.
            **kwargs: kwargs supported by MPStaticSet.

        Raises:
            ValueError: If the structure lacks a 'magmom' site property or the
                magmoms are not 3-component lists.
        """
        # NOTE: this check previously used ``and``, which (a) crashed with an
        # AttributeError when 'magmom' was absent and (b) silently accepted
        # non-list (scalar) magmoms; ``or`` enforces the documented contract.
        if not hasattr(structure[0], "magmom") or not isinstance(structure[0].magmom, list):
            raise ValueError(
                "The structure must have the 'magmom' site "
                "property and each magnetic moment value must have 3 "
                "components. eg:- magmom = [0,0,2]"
            )
        super().__init__(structure, reciprocal_density=reciprocal_density, **kwargs)
        self.saxis = saxis
        self.copy_chgcar = copy_chgcar
        self.nbands_factor = nbands_factor
        self.small_gap_multiply = small_gap_multiply
        self.magmom = magmom

    @property
    def incar(self) -> Incar:
        """
        :return: Incar
        """
        incar = super().incar
        if self.prev_incar is not None:
            incar.update(self.prev_incar.items())

        # Overwrite necessary INCAR parameters from previous runs:
        # noncollinear run (LSORBIT) with a fixed charge density (ICHARG=11)
        # and no symmetry (ISYM=-1); SAXIS sets the quantization axis.
        incar.update({"ISYM": -1, "LSORBIT": "T", "ICHARG": 11, "SAXIS": list(self.saxis)})
        incar.update(self.user_incar_settings)

        return incar

    def override_from_prev_calc(self, prev_calc_dir="."):
        """
        Update the input set to include settings from a previous calculation.

        Args:
            prev_calc_dir (str): The path to the previous calculation directory.

        Returns:
            The input set with the settings (structure, k-points, incar, etc)
            updated using the previous VASP run.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        self.prev_incar = vasprun.incar

        # Remove magmoms from previous INCAR, since we will prefer
        # the final calculated magmoms
        # TODO: revisit in context of MPStaticSet incar logic
        # (was ``del self.prev_incar["magmom"]`` guarded by a check on
        # "MAGMOM" — the key-case mismatch raised KeyError whenever the
        # guard succeeded)
        self.prev_incar.pop("MAGMOM", None)

        # Get a magmom-decorated structure
        self._structure = get_structure_from_prev_run(vasprun, outcar)
        if self.standardize:
            warnings.warn(
                "Use of standardize=True with from_prev_run is not "
                "recommended as there is no guarantee the copied "
                "files will be appropriate for the standardized"
                " structure. copy_chgcar is enforced to be false."
            )
            self.copy_chgcar = False

        # override magmom if provided
        if self.magmom:
            self._structure = self._structure.copy(site_properties={"magmom": self.magmom})

        # magmom has to be 3D for SOC calculation.
        if hasattr(self._structure[0], "magmom"):
            if not isinstance(self._structure[0].magmom, list):
                # Promote collinear (scalar) magmoms to z-aligned 3D vectors.
                self._structure = self._structure.copy(
                    site_properties={"magmom": [[0, 0, site.magmom] for site in self._structure]}
                )
        else:
            raise ValueError("Neither the previous structure has magmom property nor magmom provided")

        nbands = int(np.ceil(vasprun.parameters["NBANDS"] * self.nbands_factor))
        self.prev_incar.update({"NBANDS": nbands})

        files_to_transfer = {}
        if self.copy_chgcar:
            chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / "CHGCAR*")))
            if chgcars:
                files_to_transfer["CHGCAR"] = str(chgcars[-1])
        self.files_to_transfer.update(files_to_transfer)

        # multiply the reciprocal density if needed:
        if self.small_gap_multiply:
            gap = vasprun.eigenvalue_band_properties[0]
            if gap <= self.small_gap_multiply[0]:
                self.reciprocal_density = self.reciprocal_density * self.small_gap_multiply[1]

        return self

    @classmethod
    def from_prev_calc(cls, prev_calc_dir, **kwargs):
        """
        Generate a set of Vasp input files for SOC calculations from a
        directory of previous static Vasp run. SOC calc requires all 3
        components for MAGMOM for each atom in the structure.

        Args:
            prev_calc_dir (str): The directory contains the outputs(
                vasprun.xml and OUTCAR) of previous vasp run.
            **kwargs: All kwargs supported by MPSOCSet, other than structure,
                prev_incar and prev_chgcar which are determined from the
                prev_calc_dir.
        """
        input_set = cls(_dummy_structure, **kwargs)
        return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
class MPNMRSet(MPStaticSet):
    """
    Init a MPNMRSet. Static input set for NMR chemical shift ("cs") or
    electric field gradient ("efg") calculations.
    """

    def __init__(self, structure, mode="cs", isotopes=None, prev_incar=None, reciprocal_density=100, **kwargs):
        """
        Args:
            structure (Structure): Structure to compute
            mode (str): The NMR calculation to run
                "cs": for Chemical Shift
                "efg" for Electric Field Gradient
            isotopes (list): list of Isotopes for quadrupole moments
            prev_incar (Incar): Incar file from previous run.
            reciprocal_density (int): density of k-mesh by reciprocal
                volume (defaults to 100)
            **kwargs: kwargs supported by MPStaticSet.
        """
        self.mode = mode
        # Normalize None to an empty list so iteration below is always safe.
        self.isotopes = isotopes if isotopes else []
        super().__init__(structure, prev_incar=prev_incar, reciprocal_density=reciprocal_density, **kwargs)

    @property
    def incar(self):
        """
        :return: Incar
        """
        incar = super().incar

        if self.mode.lower() == "cs":
            # Linear-response chemical shift settings (LCHIMAG) with tight
            # electronic convergence and symmetry disabled.
            incar.update(
                {
                    "LCHIMAG": True,
                    "EDIFF": -1.0e-10,
                    "ISYM": 0,
                    "LCHARG": False,
                    "LNMR_SYM_RED": True,
                    "NELMIN": 10,
                    "NSLPLINE": True,
                    "PREC": "ACCURATE",
                    "SIGMA": 0.01,
                }
            )
        elif self.mode.lower() == "efg":
            # Map element symbol -> requested isotope, e.g. {"O": "O-17"}.
            isotopes = {ist.split("-")[0]: ist for ist in self.isotopes}

            # Quadrupole moment per POSCAR species; None picks the default
            # isotope for that element.
            quad_efg = [Species(p).get_nmr_quadrupole_moment(isotopes.get(p, None)) for p in self.poscar.site_symbols]

            incar.update(
                {
                    "ALGO": "FAST",
                    "EDIFF": -1.0e-10,
                    "ISYM": 0,
                    "LCHARG": False,
                    "LEFG": True,
                    "QUAD_EFG": quad_efg,
                    "NELMIN": 10,
                    "PREC": "ACCURATE",
                    "SIGMA": 0.01,
                }
            )
        # User-specified settings always take final precedence.
        incar.update(self.user_incar_settings)

        return incar
class MVLElasticSet(MPRelaxSet):
    """
    MVL denotes VASP input sets that are implemented by the Materials Virtual
    Lab (http://www.materialsvirtuallab.org) for various research.

    This input set is used to calculate elastic constants in VASP. It is used
    in the following work::

        Z. Deng, Z. Wang, I.-H. Chu, J. Luo, S. P. Ong.
        “Elastic Properties of Alkali Superionic Conductor Electrolytes
        from First Principles Calculations”, J. Electrochem. Soc.
        2016, 163(2), A67-A74. doi: 10.1149/2.0061602jes

    To read the elastic constants, you may use the Outcar class which parses the
    elastic constants.
    """

    def __init__(self, structure, potim=0.015, **kwargs):
        """
        Args:
            structure (Structure): Input structure.
            potim (float): POTIM parameter. The default of 0.015 is usually fine,
                but some structures may require a smaller step.
            **kwargs: Parameters supported by MPRelaxSet, e.g.
                user_incar_settings for additional INCAR settings.
        """
        super().__init__(structure, **kwargs)
        # IBRION=6 + NFREE=2: finite-difference ionic displacements from
        # which VASP derives the elastic tensor; POTIM is the step size.
        self._config_dict["INCAR"].update({"IBRION": 6, "NFREE": 2, "POTIM": potim})
        # NPAR is dropped — presumably it conflicts with this IBRION mode in
        # VASP; TODO confirm against the VASP manual.
        self._config_dict["INCAR"].pop("NPAR", None)
class MVLGWSet(DictSet):
    """
    MVL denotes VASP input sets that are implemented by the Materials Virtual
    Lab (http://www.materialsvirtuallab.org) for various research. This is a
    flexible input set for GW calculations.

    Note that unlike all other input sets in this module, the PBE_54 series of
    functional is set as the default. These have much improved performance for
    GW calculations.

    A typical sequence is mode="STATIC" -> mode="DIAG" -> mode="GW" ->
    mode="BSE". For all steps other than the first one (static), the
    recommendation is to use from_prev_calculation on the preceding run in
    the series.
    """

    # Base INCAR/KPOINTS/POTCAR configuration loaded from MVLGWSet.yaml.
    CONFIG = _load_yaml_config("MVLGWSet")

    SUPPORTED_MODES = ("DIAG", "GW", "STATIC", "BSE")

    def __init__(
        self,
        structure,
        prev_incar=None,
        nbands=None,
        reciprocal_density=100,
        mode="STATIC",
        copy_wavecar=True,
        nbands_factor=5,
        ncores=16,
        **kwargs,
    ):
        r"""
        Args:
            structure (Structure): Input structure.
            prev_incar (Incar/string): Incar file from previous run.
            nbands (int): For subsequent calculations, it is generally
                recommended to perform NBANDS convergence starting from the
                NBANDS of the previous run for DIAG, and to use the exact same
                NBANDS for GW and BSE. This parameter is used by
                from_previous_calculation to set nband.
            reciprocal_density (int): Density of k-mesh by reciprocal volume
                (defaults to 100).
            mode (str): Supported modes are "STATIC" (default), "DIAG", "GW",
                and "BSE".
            copy_wavecar: Whether to copy the old WAVECAR, WAVEDER and associated
                files when starting from a previous calculation.
            nbands_factor (int): Multiplicative factor for NBANDS when starting
                from a previous calculation. Only applies if mode=="DIAG".
                Need to be tested for convergence.
            ncores (int): Numbers of cores used for the calculation. VASP will alter
                NBANDS if it was not dividable by ncores. Only applies if
                mode=="DIAG".
            **kwargs: All kwargs supported by DictSet. Typically,
                user_incar_settings is a commonly used option.

        Raises:
            ValueError: If ``mode`` is not one of SUPPORTED_MODES.
        """
        super().__init__(structure, MVLGWSet.CONFIG, **kwargs)
        self.prev_incar = prev_incar
        self.nbands = nbands
        self.reciprocal_density = reciprocal_density
        # Mode is normalized to uppercase and validated against the supported set.
        self.mode = mode.upper()
        if self.mode not in MVLGWSet.SUPPORTED_MODES:
            raise ValueError(f"{self.mode} not one of the support modes : {MVLGWSet.SUPPORTED_MODES}")
        self.kwargs = kwargs
        self.copy_wavecar = copy_wavecar
        self.nbands_factor = nbands_factor
        self.ncores = ncores

    @property
    def kpoints(self):
        """
        Generate gamma center k-points mesh grid for GW calc,
        which is requested by GW calculation.
        """
        return Kpoints.automatic_density_by_vol(self.structure, self.reciprocal_density, force_gamma=True)

    @property
    def incar(self):
        """
        :return: Incar
        """
        parent_incar = super().incar
        # Seed from the previous run's INCAR when available, otherwise from
        # the yaml-configured defaults.
        incar = Incar(self.prev_incar) if self.prev_incar is not None else Incar(parent_incar)

        if self.mode == "DIAG":
            # Default parameters for diagonalization calculation.
            incar.update({"ALGO": "Exact", "NELM": 1, "LOPTICS": True, "LPEAD": True})
        elif self.mode == "GW":
            # Default parameters for GW calculation.
            incar.update({"ALGO": "GW0", "NELM": 1, "NOMEGA": 80, "ENCUTGW": 250})
            # These DIAG-stage tags are not applicable to the GW step.
            incar.pop("EDIFF", None)
            incar.pop("LOPTICS", None)
            incar.pop("LPEAD", None)
        elif self.mode == "BSE":
            # Default parameters for BSE calculation.
            incar.update({"ALGO": "BSE", "ANTIRES": 0, "NBANDSO": 20, "NBANDSV": 20})

        if self.nbands:
            incar["NBANDS"] = self.nbands

        # Respect user set INCAR.
        incar.update(self.kwargs.get("user_incar_settings", {}))

        return incar

    def override_from_prev_calc(self, prev_calc_dir="."):
        """
        Update the input set to include settings from a previous calculation.

        Args:
            prev_calc_dir (str): The path to the previous calculation directory.

        Returns:
            The input set with the settings (structure, k-points, incar, etc)
            updated using the previous VASP run.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        self.prev_incar = vasprun.incar
        self._structure = vasprun.final_structure

        if self.standardize:
            warnings.warn(
                "Use of standardize=True with from_prev_run is not "
                "recommended as there is no guarantee the copied "
                "files will be appropriate for the standardized "
                "structure."
            )

        self.nbands = int(vasprun.parameters["NBANDS"])
        if self.mode.upper() == "DIAG":
            # Scale up NBANDS and round to a multiple of ncores so VASP does
            # not silently change it.
            self.nbands = int(np.ceil(self.nbands * self.nbands_factor / self.ncores) * self.ncores)

        # copy WAVECAR, WAVEDER (derivatives)
        files_to_transfer = {}
        if self.copy_wavecar:
            for fname in ("WAVECAR", "WAVEDER", "WFULL"):
                w = sorted(glob.glob(str(Path(prev_calc_dir) / (fname + "*"))))
                if w:
                    if fname == "WFULL":
                        # WFULL files are per-k-point; transfer each under its
                        # extension-stripped basename.
                        # NOTE(review): the inner loop rebinds ``fname``;
                        # harmless only because "WFULL" is the last tuple
                        # entry — fragile if the tuple is ever extended.
                        for f in w:
                            fname = Path(f).name
                            fname = fname.split(".")[0]
                            files_to_transfer[fname] = f
                    else:
                        files_to_transfer[fname] = str(w[-1])

        self.files_to_transfer.update(files_to_transfer)
        return self

    @classmethod
    def from_prev_calc(cls, prev_calc_dir, mode="DIAG", **kwargs):
        """
        Generate a set of Vasp input files for GW or BSE calculations from a
        directory of previous Exact Diag Vasp run.

        Args:
            prev_calc_dir (str): The directory contains the outputs(
                vasprun.xml of previous vasp run.
            mode (str): Supported modes are "STATIC", "DIAG" (default), "GW",
                and "BSE".
            **kwargs: All kwargs supported by MVLGWSet, other than structure,
                prev_incar and mode, which are determined from the
                prev_calc_dir.
        """
        input_set = cls(_dummy_structure, mode=mode, **kwargs)
        return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
class MVLSlabSet(MPRelaxSet):
    """
    Class for writing a set of slab vasp runs,
    including both slabs (along the c direction) and orient unit cells (bulk),
    to ensure the same KPOINTS, POTCAR and INCAR criterion.
    """

    def __init__(
        self, structure, k_product=50, bulk=False, auto_dipole=False, set_mix=True, sort_structure=True, **kwargs
    ):
        """
        :param structure: Structure
        :param k_product: default to 50, kpoint number * length for a & b
            directions, also for c direction in bulk calculations
        :param bulk: whether this is a bulk (oriented unit cell) run rather
            than a slab run
        :param auto_dipole: whether to add dipole corrections (IDIPOL/LDIPOL)
            centered on the structure's center of mass
        :param set_mix: whether to set conservative density-mixing parameters
            for slab runs
        :param sort_structure: whether to sort the structure (see note below)
        :param kwargs: Other kwargs supported by :class:`DictSet`.
        """
        super().__init__(structure, **kwargs)

        # NOTE(review): the sorted structure below is only used for the
        # auto_dipole center-of-mass; super() above already received the
        # original structure — presumably the parent sorts on its own.
        # TODO confirm against DictSet's sort_structure handling.
        if sort_structure:
            structure = structure.get_sorted_structure()

        self.k_product = k_product
        self.bulk = bulk
        self.auto_dipole = auto_dipole
        self.kwargs = kwargs
        self.set_mix = set_mix
        # Populated lazily by the kpoints property.
        self.kpt_calc = None

        slab_incar = {
            "EDIFF": 1e-4,
            "EDIFFG": -0.02,
            "ENCUT": 400,
            "ISMEAR": 0,
            "SIGMA": 0.05,
            "ISIF": 3,
        }
        if not self.bulk:
            # Slab run: relax ions only (ISIF=2) and write the local
            # potential (LVTOT) for work-function analysis.
            slab_incar["ISIF"] = 2
            slab_incar["LVTOT"] = True
            if self.set_mix:
                # Conservative mixing to help slab SCF convergence.
                slab_incar["AMIN"] = 0.01
                slab_incar["AMIX"] = 0.2
                slab_incar["BMIX"] = 0.001
            slab_incar["NELMIN"] = 8
            if self.auto_dipole:
                # Dipole correction along c, centered at the center of mass.
                weights = [s.species.weight for s in structure]
                center_of_mass = np.average(structure.frac_coords, weights=weights, axis=0)

                slab_incar["IDIPOL"] = 3
                slab_incar["LDIPOL"] = True
                slab_incar["DIPOL"] = center_of_mass

        self._config_dict["INCAR"].update(slab_incar)

    @property
    def kpoints(self):
        """
        k_product, default to 50, is kpoint number * length for a & b
        directions, also for c direction in bulk calculations
        Automatic mesh & Gamma is the default setting.
        """
        # To get input sets, the input structure has to has the same number
        # of required parameters as a Structure object (ie. 4). Slab
        # attributes aren't going to affect the VASP inputs anyways so
        # converting the slab into a structure should not matter

        kpt = super().kpoints
        kpt.comment = "Automatic mesh"
        kpt.style = "Gamma"

        # use k_product to calculate kpoints, k_product = kpts[0][0] * a
        lattice_abc = self.structure.lattice.abc
        kpt_calc = [
            int(self.k_product / lattice_abc[0] + 0.5),
            int(self.k_product / lattice_abc[1] + 0.5),
            1,
        ]

        self.kpt_calc = kpt_calc
        # calculate kpts (c direction) for bulk. (for slab, set to 1)
        if self.bulk:
            kpt_calc[2] = int(self.k_product / lattice_abc[2] + 0.5)

        kpt.kpts[0] = kpt_calc

        return kpt

    def as_dict(self, verbosity=2):
        """
        :param verbosity: Verbosity of dict. E.g., whether to include Structure.
        :return: MSONAble dict
        """
        d = MSONable.as_dict(self)
        if verbosity == 1:
            d.pop("structure", None)
        return d
class MVLGBSet(MPRelaxSet):
    """
    Class for writing a vasp input files for grain boundary calculations, slab
    or bulk.
    """

    def __init__(self, structure, k_product=40, slab_mode=False, is_metal=True, **kwargs):
        r"""
        Args:
            structure(Structure): provide the structure
            k_product: Kpoint number * length for a & b directions, also for c
                direction in bulk calculations. Default to 40.
            slab_mode (bool): Defaults to False. Use default (False) for a
                bulk supercell. Use True if you are performing calculations on a
                slab-like (i.e., surface) of the GB, for example, when you are
                calculating the work of separation.
            is_metal (bool): Defaults to True. This determines whether an ISMEAR of
                1 is used (for metals) or not (for insulators and semiconductors)
                by default. Note that it does *not* override user_incar_settings,
                which can be set by the user to be anything desired.
            **kwargs:
                Other kwargs supported by :class:`MPRelaxSet`.
        """
        super().__init__(structure, **kwargs)
        self.k_product = k_product
        self.slab_mode = slab_mode
        self.is_metal = is_metal

    @property
    def kpoints(self):
        """
        k_product, default to 40, is kpoint number * length for a & b
        directions, also for c direction in bulk calculations
        Automatic mesh & Gamma is the default setting.
        """
        # The parent set provides a Kpoints object whose mesh we overwrite
        # with a k_product-derived gamma-centered grid.
        kpoints = super().kpoints
        kpoints.comment = "Generated by pymatgen's MVLGBSet"
        kpoints.style = "Gamma"

        # One subdivision per (k_product / lattice-vector length), rounded
        # to the nearest integer; c is forced to 1 for slab-like runs.
        divisions = [int(self.k_product / length + 0.5) for length in self.structure.lattice.abc]
        if self.slab_mode:
            divisions[2] = 1

        kpoints.kpts[0] = divisions
        return kpoints

    @property
    def incar(self):
        """
        :return: Incar
        """
        incar = super().incar

        # The default incar setting is used for metallic system, for
        # insulator or semiconductor, ISMEAR need to be changed.
        incar["LCHARG"] = False
        incar["NELM"] = 60
        incar["PREC"] = "Normal"
        incar["EDIFFG"] = -0.02
        incar["ICHARG"] = 0
        incar["NSW"] = 200
        incar["EDIFF"] = 0.0001

        if self.is_metal:
            incar["ISMEAR"] = 1
            incar["LDAU"] = False

        if self.slab_mode:
            # for clean grain boundary and bulk relaxation, full optimization
            # relaxation (ISIF=3) is used. For slab relaxation (ISIF=2) is used.
            incar["ISIF"] = 2
            incar["NELMIN"] = 8

        incar.update(self.user_incar_settings)
        return incar
class MVLRelax52Set(DictSet):
    """
    Implementation of VaspInputSet utilizing the public Materials Project
    parameters for INCAR & KPOINTS and VASP's recommended PAW potentials for
    POTCAR.

    Keynotes from VASP manual:

        1. Recommended potentials for calculations using vasp.5.2+
        2. If dimers with short bonds are present in the compound (O2, CO,
           N2, F2, P2, S2, Cl2), it is recommended to use the h potentials.
           Specifically, C_h, O_h, N_h, F_h, P_h, S_h, Cl_h
        3. Released on Oct 28, 2018 by VASP. Please refer to VASP
           Manual 1.2, 1.3 & 10.2.1 for more details.
    """
    # Settings are loaded once from the packaged yaml config.
    CONFIG = _load_yaml_config("MVLRelax52Set")
    def __init__(self, structure, **kwargs):
        """
        Args:
            structure (Structure): input structure.
            potcar_functional (str): choose from "PBE_52" and "PBE_54".
            **kwargs: Other kwargs supported by :class:`DictSet`.
        """
        # Default to the PBE_52 potentials unless the caller chose a functional.
        if kwargs.get("potcar_functional") or kwargs.get("user_potcar_functional"):
            super().__init__(structure, MVLRelax52Set.CONFIG, **kwargs)
        else:
            super().__init__(structure, MVLRelax52Set.CONFIG, user_potcar_functional="PBE_52", **kwargs)
        # Only the VASP 5.2+ potential releases are supported by this set.
        if self.potcar_functional not in ["PBE_52", "PBE_54"]:
            raise ValueError("Please select from PBE_52 and PBE_54!")
        self.kwargs = kwargs
class MITNEBSet(MITRelaxSet):
    """
    Class for writing NEB inputs. Note that EDIFF is not on a per atom
    basis for this input set.
    """
    def __init__(self, structures, unset_encut=False, **kwargs):
        """
        Args:
            structures: List of Structure objects. The first and last entries
                are the endpoints; at least 3 structures are required.
            unset_encut (bool): Whether to unset ENCUT.
            **kwargs: Other kwargs supported by :class:`DictSet`.
        """
        if len(structures) < 3:
            raise ValueError("You need at least 3 structures for an NEB.")
        # Site order must stay consistent across images, so never sort.
        kwargs["sort_structure"] = False
        super().__init__(structures[0], **kwargs)
        self.structures = self._process_structures(structures)
        # NOTE(review): this is always False and ignores the unset_encut
        # argument, so the attribute does not record what was requested --
        # confirm whether it should be `self.unset_encut = unset_encut`.
        self.unset_encut = False
        if unset_encut:
            self._config_dict["INCAR"].pop("ENCUT", None)
        if "EDIFF" not in self._config_dict["INCAR"]:
            # Promote the per-atom tolerance to an absolute EDIFF.
            self._config_dict["INCAR"]["EDIFF"] = self._config_dict["INCAR"].pop("EDIFF_PER_ATOM")
        # NEB specific defaults
        defaults = {
            "IMAGES": len(structures) - 2,
            "IBRION": 1,
            "ISYM": 0,
            "LCHARG": False,
            "LDAU": False,
        }
        self._config_dict["INCAR"].update(defaults)
    @property
    def poscar(self):
        """
        :return: Poscar for structure of first end point.
        """
        return Poscar(self.structures[0])
    @property
    def poscars(self):
        """
        :return: List of Poscars, one per image.
        """
        return [Poscar(s) for s in self.structures]
    @staticmethod
    def _process_structures(structures):
        """
        Remove any atom jumps across the cell
        """
        input_structures = structures
        structures = [input_structures[0]]
        for s in input_structures[1:]:
            prev = structures[-1]
            for i, site in enumerate(s):
                # Translate sites that wrapped around a periodic boundary
                # relative to the previous image back to the nearest replica,
                # so the interpolated path is continuous.
                t = np.round(prev[i].frac_coords - site.frac_coords)
                if np.any(np.abs(t) > 0.5):
                    s.translate_sites([i], t, to_unit_cell=False)
            structures.append(s)
        return structures
    def write_input(
        self,
        output_dir,
        make_dir_if_not_present=True,
        write_cif=False,
        write_path_cif=False,
        write_endpoint_inputs=False,
    ):
        """
        NEB inputs has a special directory structure where inputs are in 00,
        01, 02, ....

        Args:
            output_dir (str): Directory to output the VASP input files
            make_dir_if_not_present (bool): Set to True if you want the
                directory (and the whole path) to be created if it is not
                present.
            write_cif (bool): If true, writes a cif along with each POSCAR.
            write_path_cif (bool): If true, writes a cif for each image.
            write_endpoint_inputs (bool): If true, writes input files for
                running endpoint calculations.
        """
        output_dir = Path(output_dir)
        if make_dir_if_not_present and not output_dir.exists():
            output_dir.mkdir(parents=True)
        self.incar.write_file(str(output_dir / "INCAR"))
        self.kpoints.write_file(str(output_dir / "KPOINTS"))
        self.potcar.write_file(str(output_dir / "POTCAR"))
        # Each image gets its own zero-padded subdirectory (00, 01, ...).
        for i, p in enumerate(self.poscars):
            d = output_dir / str(i).zfill(2)
            if not d.exists():
                d.mkdir(parents=True)
            p.write_file(str(d / "POSCAR"))
            if write_cif:
                p.structure.to(filename=str(d / f"{i}.cif"))
        if write_endpoint_inputs:
            # Endpoints (first and last image) get plain MITRelaxSet inputs
            # sharing the user's INCAR overrides.
            end_point_param = MITRelaxSet(self.structures[0], user_incar_settings=self.user_incar_settings)
            for image in ["00", str(len(self.structures) - 1).zfill(2)]:
                end_point_param.incar.write_file(str(output_dir / image / "INCAR"))
                end_point_param.kpoints.write_file(str(output_dir / image / "KPOINTS"))
                end_point_param.potcar.write_file(str(output_dir / image / "POTCAR"))
        if write_path_cif:
            # Superimpose every image's sites into one structure so the whole
            # path can be visualized in a single cif.
            sites = set()
            lat = self.structures[0].lattice
            for site in chain(*(s.sites for s in self.structures)):
                sites.add(PeriodicSite(site.species, site.frac_coords, lat))
            nebpath = Structure.from_sites(sorted(sites))
            nebpath.to(filename=str(output_dir / "path.cif"))
class MITMDSet(MITRelaxSet):
    """
    Input set for a VASP molecular-dynamics run. This DOES NOT do multiple
    stage runs.
    """
    def __init__(self, structure, start_temp, end_temp, nsteps, time_step=2, spin_polarized=False, **kwargs):
        r"""
        Args:
            structure (Structure): Input structure.
            start_temp (int): Starting temperature.
            end_temp (int): Final temperature.
            nsteps (int): Number of time steps for simulations. NSW parameter.
            time_step (int): The time step for the simulation. The POTIM
                parameter. Defaults to 2fs.
            spin_polarized (bool): Whether to do spin polarized calculations.
                The ISPIN parameter. Defaults to False.
            **kwargs: Other kwargs supported by :class:`DictSet`.
        """
        super().__init__(structure, **kwargs)
        self.start_temp = start_temp
        self.end_temp = end_temp
        self.nsteps = nsteps
        self.time_step = time_step
        self.spin_polarized = spin_polarized
        self.kwargs = kwargs
        ispin = 2 if spin_polarized else 1
        # MD-specific INCAR overrides: low precision, no charge/wavefunction
        # symmetrization, and a temperature ramp from TEBEG to TEEND.
        md_settings = {
            "TEBEG": start_temp,
            "TEEND": end_temp,
            "NSW": nsteps,
            "EDIFF_PER_ATOM": 1e-06,
            "LSCALU": False,
            "LCHARG": False,
            "LPLANE": False,
            "LWAVE": True,
            "ISMEAR": 0,
            "NELMIN": 4,
            "LREAL": True,
            "BMIX": 1,
            "MAXMIX": 20,
            "NELM": 500,
            "NSIM": 4,
            "ISYM": 0,
            "ISIF": 0,
            "IBRION": 0,
            "NBLOCK": 1,
            "KBLOCK": 100,
            "SMASS": 0,
            "POTIM": time_step,
            "PREC": "Low",
            "ISPIN": ispin,
            "LDAU": False,
        }
        # Fall back to the VASP default ENCUT for MD runs.
        self._config_dict["INCAR"].pop("ENCUT", None)
        if ispin == 1:
            # Non-spin-polarized runs have no use for MAGMOM.
            self._config_dict["INCAR"].pop("MAGMOM", None)
        self._config_dict["INCAR"].update(md_settings)
    @property
    def kpoints(self):
        """
        :return: Gamma-point-only Kpoints.
        """
        return Kpoints.gamma_automatic()
class MPMDSet(MPRelaxSet):
    """
    This a modified version of the old MITMDSet pre 2018/03/12.

    This set serves as the basis for the amorphous skyline paper.

    (1) Aykol, M.; Dwaraknath, S. S.; Sun, W.; Persson, K. A. Thermodynamic
    Limit for Synthesis of Metastable Inorganic Materials. Sci. Adv. 2018,
    4 (4).

    Class for writing a vasp md run. This DOES NOT do multiple stage runs.
    Precision remains normal, to increase accuracy of stress tensor.
    """
    def __init__(self, structure, start_temp, end_temp, nsteps, spin_polarized=False, **kwargs):
        r"""
        Args:
            structure (Structure): Input structure.
            start_temp (int): Starting temperature.
            end_temp (int): Final temperature.
            nsteps (int): Number of time steps for simulations. NSW parameter.
                Quadrupled automatically when the structure contains hydrogen.
            spin_polarized (bool): Whether to do spin polarized calculations.
                The ISPIN parameter. Defaults to False.
            **kwargs: Other kwargs supported by :class:`DictSet`.

        Note:
            Unlike MITMDSet there is no time_step argument; POTIM is fixed at
            2 fs (0.5 fs for hydrogen-containing structures).
        """
        # MD default settings
        defaults = {
            "TEBEG": start_temp,
            "TEEND": end_temp,
            "NSW": nsteps,
            "EDIFF_PER_ATOM": 0.00001,
            "LSCALU": False,
            "LCHARG": False,
            "LPLANE": False,
            "LWAVE": True,
            "ISMEAR": 0,
            "NELMIN": 4,
            "LREAL": True,
            "BMIX": 1,
            "MAXMIX": 20,
            "NELM": 500,
            "NSIM": 4,
            "ISYM": 0,
            "ISIF": 0,
            "IBRION": 0,
            "NBLOCK": 1,
            "KBLOCK": 100,
            "SMASS": 0,
            "POTIM": 2,
            "PREC": "Normal",
            "ISPIN": 2 if spin_polarized else 1,
            "LDAU": False,
            "ADDGRID": True,
        }
        # Hydrogen needs a shorter time step (0.5 fs); the number of steps is
        # scaled up by 4x, which keeps the total simulated time unchanged.
        if Element("H") in structure.species:
            defaults["POTIM"] = 0.5
            defaults["NSW"] = defaults["NSW"] * 4
        super().__init__(structure, **kwargs)
        self.start_temp = start_temp
        self.end_temp = end_temp
        self.nsteps = nsteps
        self.spin_polarized = spin_polarized
        self.kwargs = kwargs
        # use VASP default ENCUT
        self._config_dict["INCAR"].pop("ENCUT", None)
        # Non-spin-polarized runs have no use for MAGMOM.
        if defaults["ISPIN"] == 1:
            self._config_dict["INCAR"].pop("MAGMOM", None)
        self._config_dict["INCAR"].update(defaults)
    @property
    def kpoints(self):
        """
        :return: Gamma-point-only Kpoints.
        """
        return Kpoints.gamma_automatic()
class MVLNPTMDSet(MITMDSet):
    """
    Class for writing a vasp md run in NPT ensemble.

    Notes:
        To eliminate Pulay stress, the default ENCUT is set to a rather large
        value of ENCUT, which is 1.5 * ENMAX.
    """
    def __init__(self, structure, start_temp, end_temp, nsteps, time_step=2, spin_polarized=False, **kwargs):
        r"""
        Args:
            structure (Structure): input structure.
            start_temp (int): Starting temperature.
            end_temp (int): Final temperature.
            nsteps(int): Number of time steps for simulations. NSW parameter.
            time_step (int): The time step for the simulation. The POTIM
                parameter. Defaults to 2fs.
            spin_polarized (bool): Whether to do spin polarized calculations.
                The ISPIN parameter. Defaults to False.
            **kwargs: Other kwargs supported by :class:`DictSet`.
        """
        user_incar_settings = kwargs.get("user_incar_settings", {})
        # NPT-AIMD default settings
        # One LANGEVIN_GAMMA entry per atomic species in the structure.
        defaults = {
            "IALGO": 48,
            "ISIF": 3,
            "LANGEVIN_GAMMA": [10] * structure.ntypesp,
            "LANGEVIN_GAMMA_L": 1,
            "MDALGO": 3,
            "PMASS": 10,
            "PSTRESS": 0,
            "SMASS": 0,
        }
        # User-supplied INCAR settings take precedence over the NPT defaults.
        defaults.update(user_incar_settings)
        kwargs["user_incar_settings"] = defaults
        super().__init__(structure, start_temp, end_temp, nsteps, time_step, spin_polarized, **kwargs)
        # Set NPT-AIMD ENCUT = 1.5 * VASP_default, i.e. 1.5x the largest
        # ENMAX among the POTCARs for the species present.
        enmax = [self.potcar[i].keywords["ENMAX"] for i in range(structure.ntypesp)]
        encut = max(enmax) * 1.5
        self._config_dict["INCAR"]["ENCUT"] = encut
class MVLScanRelaxSet(MPRelaxSet):
    """
    Class for writing a relax input set using Strongly Constrained and
    Appropriately Normed (SCAN) semilocal density functional.

    Notes:
        1. This functional is only available from VASP.5.4.3 upwards.

        2. Meta-GGA calculations require POTCAR files that include
           information on the kinetic energy density of the core-electrons,
           i.e. "PBE_52" or "PBE_54". Make sure the POTCAR including the
           following lines (see VASP wiki for more details):

            $ grep kinetic POTCAR
            kinetic energy-density
            mkinetic energy-density pseudized
            kinetic energy density (partial)
    """
    def __init__(self, structure, **kwargs):
        r"""
        Args:
            structure (Structure): input structure.
            vdw (str): set "rVV10" to enable SCAN+rVV10, which is a versatile
                van der Waals density functional by combing the SCAN functional
                with the rVV10 non-local correlation functional.
            **kwargs: Other kwargs supported by :class:`DictSet`.
        """
        # choose PBE_52 unless the user specifies something else
        if kwargs.get("potcar_functional") or kwargs.get("user_potcar_functional"):
            super().__init__(structure, **kwargs)
        else:
            super().__init__(structure, user_potcar_functional="PBE_52", **kwargs)
        # Only the PBE_52/PBE_54 releases carry the kinetic-energy-density
        # information SCAN requires (see class notes).
        if self.potcar_functional not in ["PBE_52", "PBE_54"]:
            raise ValueError("SCAN calculations required PBE_52 or PBE_54!")
        # SCAN-specific INCAR overrides applied on top of MPRelaxSet.
        updates = {
            "ADDGRID": True,
            "EDIFF": 1e-05,
            "EDIFFG": -0.05,
            "LASPH": True,
            "LDAU": False,
            "METAGGA": "SCAN",
            "NELM": 200,
        }
        if kwargs.get("vdw", "").lower() == "rvv10":
            updates["BPARAM"] = 15.7  # This is the correct BPARAM for SCAN+rVV10
        self._config_dict["INCAR"].update(updates)
class LobsterSet(MPRelaxSet):
    """
    Input set to prepare VASP runs that can be digested by Lobster (See cohp.de)
    """
    CONFIG = _load_yaml_config("MPRelaxSet")
    def __init__(
        self,
        structure: Structure,
        isym: int = 0,
        ismear: int = -5,
        reciprocal_density: int = None,
        address_basis_file: str = None,
        user_supplied_basis: dict = None,
        user_potcar_settings: dict = None,
        **kwargs,
    ):
        """
        Args:
            structure (Structure): input structure.
            isym (int): ISYM entry for INCAR, only isym=-1 and isym=0 are allowed
            ismear (int): ISMEAR entry for INCAR, only ismear=-5 and ismear=0 are allowed
            reciprocal_density (int): density of k-mesh by reciprocal volume
            user_supplied_basis (dict): dict including basis functions for all elements in structure,
                e.g. {"Fe": "3d 3p 4s", "O": "2s 2p"}; if not supplied, a standard basis is used
            address_basis_file (str): address to a file similar to "BASIS_PBE_54_standaard.yaml"
                in pymatgen.io.lobster.lobster_basis
            user_potcar_settings (dict): POTCAR overrides; defaults to
                {"W": "W_sv"} when not given.
            **kwargs: Other kwargs supported by :class:`DictSet`.
        """
        from pymatgen.io.lobster import Lobsterin
        warnings.warn("Make sure that all parameters are okay! This is a brand new implementation.")
        # BUG FIX: the previous signature used a mutable dict as the default
        # value, which is shared across all calls; normalize from None here
        # instead (observable behavior is unchanged).
        if user_potcar_settings is None:
            user_potcar_settings = {"W": "W_sv"}
        # NOTE(review): user_potcar_settings is accepted but never forwarded
        # to the parent constructor -- confirm whether it should be passed to
        # super().__init__ as user_potcar_settings=... .
        if isym not in (-1, 0):
            raise ValueError("Lobster cannot digest WAVEFUNCTIONS with symmetry")
        if ismear not in (-5, 0):
            raise ValueError("Lobster usually works with ismear=-5 or ismear=0")
        # newest potcars are preferred
        # Choose PBE_54 unless the user specifies a different potcar_functional
        if kwargs.get("potcar_functional") or kwargs.get("user_potcar_functional"):
            super().__init__(structure, **kwargs)
        else:
            super().__init__(structure, user_potcar_functional="PBE_54", **kwargs)
        # Determine the k-point reciprocal density: prefer an explicit
        # argument when consistent with user_kpoints_settings, otherwise 310.
        if self.user_kpoints_settings is not None:
            if not reciprocal_density or "reciprocal_density" not in self.user_kpoints_settings:
                # test, if this is okay
                self.reciprocal_density = 310
            else:
                self.reciprocal_density = reciprocal_density or self.user_kpoints_settings["reciprocal_density"]
        else:
            if not reciprocal_density:
                # test, if this is okay
                self.reciprocal_density = 310
            else:
                self.reciprocal_density = reciprocal_density
        self.isym = isym
        self.ismear = ismear
        self.user_supplied_basis = user_supplied_basis
        self.address_basis_file = address_basis_file
        # predefined basis! Check if the basis is okay! (charge spilling and bandoverlaps!)
        if user_supplied_basis is None and address_basis_file is None:
            basis = Lobsterin.get_basis(structure=structure, potcar_symbols=self.potcar_symbols)
        elif address_basis_file is not None:
            basis = Lobsterin.get_basis(
                structure=structure,
                potcar_symbols=self.potcar_symbols,
                address_basis_file=address_basis_file,
            )
        else:
            # test if all elements from structure are in user_supplied_basis
            for atomtype in structure.symbol_set:
                if atomtype not in user_supplied_basis:
                    raise ValueError("There are no basis functions for the atom type " + str(atomtype))
            basis = [key + " " + value for key, value in user_supplied_basis.items()]
        lobsterin = Lobsterin(settingsdict={"basisfunctions": basis})
        nbands = lobsterin._get_nbands(structure=structure)
        # Static-run settings tailored for Lobster: keep the WAVECAR, fix
        # NBANDS from the basis, and write orbital projections (LORBIT=11).
        update_dict = {
            "EDIFF": 1e-6,
            "NSW": 0,
            "LWAVE": True,
            "ISYM": isym,
            "NBANDS": nbands,
            "IBRION": -1,
            "ISMEAR": ismear,
            "LORBIT": 11,
            "ICHARG": 0,
            "ALGO": "Normal",
        }
        self._config_dict["INCAR"].update(update_dict)
        self._config_dict["KPOINTS"].update({"reciprocal_density": self.reciprocal_density})
def get_vasprun_outcar(path, parse_dos=True, parse_eigen=True):
    """
    :param path: Path to get the vasprun.xml and OUTCAR.
    :param parse_dos: Whether to parse dos. Defaults to True.
    :param parse_eigen: Whether to parse eigenvalue. Defaults to True.
    :return: (Vasprun, Outcar) tuple parsed from the directory.
    """
    path = Path(path)
    vruns = glob.glob(str(path / "vasprun.xml*"))
    outcars = glob.glob(str(path / "OUTCAR*"))
    if not vruns or not outcars:
        raise ValueError("Unable to get vasprun.xml/OUTCAR from prev calculation in %s" % path)

    def _pick(candidates, preferred):
        # Prefer the plain, unsuffixed file; otherwise take the
        # lexicographically last match (e.g. a relaxation-numbered copy).
        return preferred if preferred in candidates else sorted(candidates)[-1]

    vsfile = _pick(vruns, str(path / "vasprun.xml"))
    outcarfile = _pick(outcars, str(path / "OUTCAR"))
    return (
        Vasprun(vsfile, parse_dos=parse_dos, parse_eigen=parse_eigen),
        Outcar(outcarfile),
    )
def get_structure_from_prev_run(vasprun, outcar=None):
    """
    Process structure from previous run.

    Args:
        vasprun (Vasprun): Vasprun that contains the final structure
            from previous run.
        outcar (Outcar): Outcar that contains the magnetization info from
            previous run.

    Returns:
        Returns the magmom-decorated structure that can be passed to get
        Vasp input files, e.g. get_kpoints.
    """
    structure = vasprun.final_structure
    site_properties = {}
    # magmom
    # Prefer the converged per-site magnetization from the OUTCAR; fall back
    # to the input MAGMOM parameters otherwise.
    if vasprun.is_spin:
        if outcar and outcar.magnetization:
            site_properties.update({"magmom": [i["tot"] for i in outcar.magnetization]})
        else:
            site_properties.update({"magmom": vasprun.parameters["MAGMOM"]})
    # ldau
    if vasprun.parameters.get("LDAU", False):
        for k in ("LDAUU", "LDAUJ", "LDAUL"):
            vals = vasprun.incar[k]
            # The INCAR stores one value per species (in order of first
            # appearance); expand these back into a per-site list using the
            # first-seen symbol -> value mapping built in `m`.
            m = {}
            l_val = []
            s = 0
            for site in structure:
                if site.specie.symbol not in m:
                    m[site.specie.symbol] = vals[s]
                    s += 1
                l_val.append(m[site.specie.symbol])
            if len(l_val) == len(structure):
                site_properties.update({k.lower(): l_val})
            else:
                raise ValueError(f"length of list {l_val} not the same as structure")
    return structure.copy(site_properties=site_properties)
def standardize_structure(structure, sym_prec=0.1, international_monoclinic=True):
    """
    Get the symmetrically standardized structure.

    Args:
        structure (Structure): The structure.
        sym_prec (float): Tolerance for symmetry finding for standardization.
        international_monoclinic (bool): Whether to use international
            convention (vs Curtarolo) for monoclinic. Defaults True.

    Returns:
        The symmetrized structure.

    Raises:
        ValueError: if the standardized cell fails the volume-per-atom or
            structure-matching sanity checks against the input.
    """
    sym_finder = SpacegroupAnalyzer(structure, symprec=sym_prec)
    new_structure = sym_finder.get_primitive_standard_structure(international_monoclinic=international_monoclinic)
    # the primitive structure finding has had several bugs in the past
    # defend through validation
    # Volume per atom must agree within 2% between the input and output.
    vpa_old = structure.volume / structure.num_sites
    vpa_new = new_structure.volume / new_structure.num_sites
    if abs(vpa_old - vpa_new) / vpa_old > 0.02:
        raise ValueError(f"Standardizing cell failed! VPA old: {vpa_old}, VPA new: {vpa_new}")
    # The standardized cell must still match the original structure.
    sm = StructureMatcher()
    if not sm.fit(structure, new_structure):
        raise ValueError("Standardizing cell failed! Old structure doesn't match new.")
    return new_structure
class BadInputSetWarning(UserWarning):
    """Warning issued for input settings that are legal but likely unwise."""
def batch_write_input(
    structures,
    vasp_input_set=MPRelaxSet,
    output_dir=".",
    make_dir_if_not_present=True,
    subfolder=None,
    sanitize=False,
    include_cif=False,
    potcar_spec=False,
    zip_output=False,
    **kwargs,
):
    """
    Batch write vasp input for a sequence of structures to
    output_dir, following the format output_dir/{group}/{formula}_{number}.

    Args:
        structures ([Structure]): Sequence of Structures.
        vasp_input_set (VaspInputSet): VaspInputSet class that creates
            vasp input files from structures. Note that a class should be
            supplied. Defaults to MPRelaxSet.
        output_dir (str): Directory to output files. Defaults to current
            directory ".".
        make_dir_if_not_present (bool): Create the directory if not present.
            Defaults to True.
        subfolder (callable): Function to create subdirectory name from
            structure. Defaults to simply "formula_count".
        sanitize (bool): Boolean indicating whether to sanitize the
            structure before writing the VASP input files. Sanitized output
            are generally easier for viewing and certain forms of analysis.
            Defaults to False.
        include_cif (bool): Whether to output a CIF as well. CIF files are
            generally better supported in visualization programs.
        potcar_spec (bool): Instead of writing the POTCAR, write a "POTCAR.spec".
            This is intended to help sharing an input set with people who might
            not have a license to specific Potcar files. Given a "POTCAR.spec",
            the specific POTCAR file can be re-generated using pymatgen with the
            "generate_potcar" function in the pymatgen CLI.
        zip_output (bool): If True, output will be zipped into a file with the
            same name as the InputSet (e.g., MPStaticSet.zip)
        **kwargs: Additional kwargs are passed to the vasp_input_set class
            in addition to structure.
    """
    output_dir = Path(output_dir)
    for i, s in enumerate(structures):
        # Strip whitespace from the formula to get a filesystem-safe name.
        formula = re.sub(r"\s+", "", s.formula)
        if subfolder is not None:
            subdir = subfolder(s)
            d = output_dir / subdir
        else:
            d = output_dir / f"{formula}_{i}"
        if sanitize:
            # Work on a sanitized copy; the caller's structure is untouched.
            s = s.copy(sanitize=True)
        v = vasp_input_set(s, **kwargs)
        v.write_input(
            str(d),
            make_dir_if_not_present=make_dir_if_not_present,
            include_cif=include_cif,
            potcar_spec=potcar_spec,
            zip_output=zip_output,
        )
# Minimal single-site cubic structure with a z-oriented (vector) magmom --
# presumably used as a placeholder where a concrete Structure is required;
# confirm at call sites.
_dummy_structure = Structure(
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    ["I"],
    [[0, 0, 0]],
    site_properties={"magmom": [[0, 0, 1]]},
)
def get_valid_magmom_struct(structure, inplace=True, spin_mode="auto"):
    """
    Make sure that the structure has valid magmoms based on the kind of calculation.

    Fill in missing Magmom values.

    Args:
        structure: The input structure
        inplace: True - edit the magmom of the input structure; False - return new structure
        spin_mode: "scalar"/"vector"/"none"/"auto" only first letter (s/v/n) is needed.
            dictates how the spin configuration will be determined.

            - auto: read the existing magmom values and decide
            - scalar: use a single scalar value (for spin up/down)
            - vector: use a vector value for spin-orbit systems
            - none: Remove all the magmom information

    Returns:
        New structure if inplace == False, otherwise None.

    Raises:
        TypeError: in "auto" mode, if existing magmoms mix scalar and vector
            values or contain an unrecognized value.
    """
    default_values = {"s": 1.0, "v": [1.0, 1.0, 1.0], "n": None}
    auto_detect = spin_mode[0].lower() == "a"
    if auto_detect:
        # Scan the existing magmoms to decide between scalar/vector/none.
        mode = "n"
        for isite in structure.sites:
            magmom = isite.properties.get("magmom", None)
            if magmom is None:
                continue
            if isinstance(magmom, (float, int)):
                if mode == "v":
                    raise TypeError("Magmom type conflict")
                mode = "s"
            elif len(magmom) == 3:
                if mode == "s":
                    raise TypeError("Magmom type conflict")
                mode = "v"
            else:
                raise TypeError("Unrecognized Magmom Value")
    else:
        mode = spin_mode[0].lower()
    new_struct = structure if inplace else structure.copy()
    for isite in new_struct.sites:
        if mode == "n":
            # Strip any magmom information entirely.
            isite.properties.pop("magmom", None)
        elif "magmom" not in isite.properties or isite.properties["magmom"] is None:
            isite.properties["magmom"] = default_values[mode]
        elif auto_detect and mode == "s" and isinstance(isite.properties["magmom"], int):
            # BUG FIX: the auto-detection loop used to coerce int magmoms to
            # float directly on the *input* structure, mutating it even when
            # inplace=False. The coercion now happens on the output structure,
            # so the returned copy is unchanged while the input stays intact.
            isite.properties["magmom"] = float(isite.properties["magmom"])
    if not inplace:
        return new_struct
    return None
| 38.80608 | 119 | 0.593326 |
359d4b1fbeb2e50a841eded74b7099a0c8f5577e | 3,580 | py | Python | intersight/models/fault_instance_ref.py | gumpcraca/intersight-python | 780e6703c739f329084beacbbf2ad7a6a2e59b2b | [
"Apache-2.0"
] | null | null | null | intersight/models/fault_instance_ref.py | gumpcraca/intersight-python | 780e6703c739f329084beacbbf2ad7a6a2e59b2b | [
"Apache-2.0"
] | null | null | null | intersight/models/fault_instance_ref.py | gumpcraca/intersight-python | 780e6703c739f329084beacbbf2ad7a6a2e59b2b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-255
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class FaultInstanceRef(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    swagger_types = {'moid': 'str', 'object_type': 'str'}
    attribute_map = {'moid': 'Moid', 'object_type': 'ObjectType'}

    def __init__(self, moid=None, object_type=None):
        """FaultInstanceRef - a model defined in Swagger."""
        self._moid = None
        self._object_type = None
        # Route non-None arguments through the property setters.
        if moid is not None:
            self.moid = moid
        if object_type is not None:
            self.object_type = object_type

    @property
    def moid(self):
        """Gets the moid of this FaultInstanceRef.

        :rtype: str
        """
        return self._moid

    @moid.setter
    def moid(self, moid):
        """Sets the moid of this FaultInstanceRef.

        :type: str
        """
        self._moid = moid

    @property
    def object_type(self):
        """Gets the object_type of this FaultInstanceRef.

        :rtype: str
        """
        return self._object_type

    @object_type.setter
    def object_type(self, object_type):
        """Sets the object_type of this FaultInstanceRef.

        :type: str
        """
        self._object_type = object_type

    def to_dict(self):
        """Returns the model properties as a dict, recursing into values that
        themselves expose ``to_dict``."""
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, FaultInstanceRef):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
c97b6b4b4fc2c4bc8ceeeaa886aacb819568d57e | 1,220 | py | Python | fudbyte/urls.py | niieq/fudbyte | b8f7339908492d64858ca7d8471b5b45f7b4e097 | [
"BSD-3-Clause"
] | null | null | null | fudbyte/urls.py | niieq/fudbyte | b8f7339908492d64858ca7d8471b5b45f7b4e097 | [
"BSD-3-Clause"
] | 5 | 2021-03-19T03:03:36.000Z | 2022-01-13T01:33:56.000Z | fudbyte/urls.py | niieq/fudbyte | b8f7339908492d64858ca7d8471b5b45f7b4e097 | [
"BSD-3-Clause"
] | null | null | null | """fudbyte URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
# Route the admin site first, then delegate everything else to the app-level
# URLconfs; user-uploaded media is served from MEDIA_ROOT.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^', include('core.urls', namespace='core')),
    url(r'^restaurant/', include('restaurant.urls', namespace='restaurant')),
    url(r'^accounts/', include('account.urls', namespace='account')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# NOTE(review): "Adminstration" is misspelled; fixing it would change a
# user-visible string, so it is only flagged here.
admin.site.site_header = 'Fudbyte Adminstration'
e042b30c089ec186b90de49edb4373949a874624 | 1,300 | py | Python | counterpartylib/lib/message_type.py | longhoangwkm/counterparty-lib | 74a457df36261e580b29161a26f0f561f95ce06e | [
"MIT"
] | 169 | 2015-05-14T04:21:51.000Z | 2022-03-19T21:06:46.000Z | counterpartylib/lib/message_type.py | longhoangwkm/counterparty-lib | 74a457df36261e580b29161a26f0f561f95ce06e | [
"MIT"
] | 321 | 2015-04-24T08:51:21.000Z | 2022-02-23T21:52:42.000Z | counterpartylib/lib/message_type.py | longhoangwkm/counterparty-lib | 74a457df36261e580b29161a26f0f561f95ce06e | [
"MIT"
] | 120 | 2015-05-10T16:04:23.000Z | 2022-02-23T01:08:41.000Z | import logging
logger = logging.getLogger(__name__)
import struct
from counterpartylib.lib import config
from counterpartylib.lib import util
def pack(message_type_id, block_index=None):
    """Serialize a message type ID, using the short 1-byte form when the
    protocol change is enabled and the ID fits in a nonzero unsigned byte."""
    use_short_form = util.enabled('short_tx_type_id', block_index) and 0 < message_type_id < 256
    if use_short_form:
        return struct.pack(config.SHORT_TXTYPE_FORMAT, message_type_id)
    # Fall back to the full 4-byte representation.
    return struct.pack(config.TXTYPE_FORMAT, message_type_id)
# Returns both the message type id and the remainder of the message data
def unpack(packed_data, block_index=None):
    """Deserialize a message type ID from the front of ``packed_data``.

    Returns a ``(message_type_id, message_remainder)`` tuple; elements that
    could not be decoded are left as None.
    """
    message_type_id = None
    message_remainder = None
    if len(packed_data) > 1:
        # try to read 1 byte first
        if util.enabled('short_tx_type_id', block_index):
            message_type_id = struct.unpack(config.SHORT_TXTYPE_FORMAT, packed_data[:1])[0]
            if message_type_id > 0:
                message_remainder = packed_data[1:]
                return (message_type_id, message_remainder)
        # First message byte was 0. We will read 4 bytes
        if len(packed_data) > 4:
            message_type_id = struct.unpack(config.TXTYPE_FORMAT, packed_data[:4])[0]
            message_remainder = packed_data[4:]
    return (message_type_id, message_remainder)
aface321b9b7aff48cee9712b5423cfd62ea5164 | 456 | py | Python | tests/test_web_scraper.py | meobilivang/newsreader-cli | de45540b614a29c5e5ef0054444cfee376d11b2b | [
"Apache-2.0"
] | 5 | 2021-11-01T05:13:02.000Z | 2021-11-03T06:48:26.000Z | tests/test_web_scraper.py | meobilivang/newsreader-cli | de45540b614a29c5e5ef0054444cfee376d11b2b | [
"Apache-2.0"
] | 2 | 2021-11-02T01:29:57.000Z | 2021-11-02T01:34:36.000Z | tests/test_web_scraper.py | meobilivang/newsreader-cli | de45540b614a29c5e5ef0054444cfee376d11b2b | [
"Apache-2.0"
] | null | null | null | import pytest
from newsreadercli import NewsScraper
from newsreadercli import Article
def test_web_scraper():
    """Scrape a saved VnExpress fixture page and check the parsed types."""
    scraper = NewsScraper()
    # Close the fixture file deterministically instead of leaking the handle.
    with open("tests/test_data/vnexpress-test.html", "r", encoding="utf-8") as test_page:
        scraper.page_scrape(None, test_page.read())
    for article in scraper.articles_current_page:
        # BUG FIX: the original asserted `isinstance(article) is Article`,
        # which raises TypeError at runtime (isinstance takes two arguments).
        assert isinstance(article, Article)
    for category in scraper.categories:
        assert isinstance(category, str)
c944b58bde40b2f07539d989f566a4e56c44027c | 1,261 | py | Python | get_checksums.py | bdclark/ansible-filebeat | 78ef3949db0e1c408eaeb06d19a386c5020cd984 | [
"MIT"
] | null | null | null | get_checksums.py | bdclark/ansible-filebeat | 78ef3949db0e1c408eaeb06d19a386c5020cd984 | [
"MIT"
] | null | null | null | get_checksums.py | bdclark/ansible-filebeat | 78ef3949db0e1c408eaeb06d19a386c5020cd984 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import requests
import yaml
import sys
# Filebeat releases for which checksums are fetched, newest first.
VERSIONS = [
    '7.0.1',
    '7.0.0',
    '6.7.2',
    '6.7.1',
    '6.7.0',
    '6.6.2',
    '6.6.1',
    '6.6.0',
    '6.5.4',
    '6.5.3',
    '6.5.2',
    '6.5.1',
    '6.5.0',
    '6.4.3',
    '6.4.2',
    '6.4.1',
    '6.4.0',
    '6.3.2',
    '6.3.1',
    '6.3.0',
    '6.2.4',
    '6.2.3',
    '6.2.2',
    '6.2.1',
    '6.2.0',
    '6.1.4',
    '6.1.3',
    '6.1.2',
    '6.1.1',
    '6.1.0',
    '6.0.1',
    '6.0.0',
]
def get_checksum(version, type):
    """Fetch the sha512 checksum for a filebeat package from elastic.co.

    Args:
        version: filebeat version string, e.g. "7.0.1".
        type: package type, "deb" or "rpm" (name kept for interface
            compatibility even though it shadows the builtin).

    Returns:
        The checksum string prefixed with "sha512:". Exits the program when
        the download fails.

    Raises:
        ValueError: if `type` is neither "deb" nor "rpm" (previously this
            crashed with an unrelated NameError on the unbound `url`).
    """
    if type == 'deb':
        url = "https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-{}-amd64.deb.sha512".format(version)
    elif type == 'rpm':
        url = "https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-{}-x86_64.rpm.sha512".format(version)
    else:
        raise ValueError("unknown package type: " + str(type))
    r = requests.get(url)
    if r.status_code == 200:
        # Checksum files look like "<hash>  <filename>"; keep only the hash.
        checksum = r.text.split(" ")[0]
        return "sha512:" + checksum
    # BUG FIX: `print "..."` is Python 2 statement syntax and a SyntaxError
    # under Python 3; the parenthesized form works under both interpreters.
    print("failed to get: " + url)
    sys.exit(1)
# Collect deb and rpm checksums per version, then dump them as an Ansible
# vars file.
data = {
    'filebeat_deb_checksums': {},
    'filebeat_rpm_checksums': {}
}
for v in VERSIONS:
    data['filebeat_deb_checksums'][v] = get_checksum(v, 'deb')
    data['filebeat_rpm_checksums'][v] = get_checksum(v, 'rpm')
# explicit_start=True emits the leading "---" YAML document marker.
with open('vars/checksums.yml', 'w') as yaml_file:
    yaml.safe_dump(data, yaml_file, default_flow_style=False, explicit_start=True)
c2d60afe478887a53b296db8213319f124a5f667 | 1,226 | py | Python | algo_archive/old_dataloading_algos/retrieval/pickle_sets.py | singator/urops | 20006b39b9e181227b0b5d7f0e885aa92e79aeed | [
"MIT"
] | 1 | 2018-04-04T17:41:45.000Z | 2018-04-04T17:41:45.000Z | algo_archive/old_dataloading_algos/retrieval/pickle_sets.py | singator/urops | 20006b39b9e181227b0b5d7f0e885aa92e79aeed | [
"MIT"
] | null | null | null | algo_archive/old_dataloading_algos/retrieval/pickle_sets.py | singator/urops | 20006b39b9e181227b0b5d7f0e885aa92e79aeed | [
"MIT"
] | 1 | 2018-07-26T01:30:43.000Z | 2018-07-26T01:30:43.000Z | """ Create and pickle feature and label matrices for the three lots under
current consideration: primis, secondus, and solum.
Intended working directory: ./data/
"""
import os
import glob
import cv2
import numpy as np
# os.chdir("/Users/nurmister/Documents/academic/urops/retrieval/")
import data_manager
# os.chdir("/Users/nurmister/Documents/academic/urops/data/")
# Lot names and their image directories.  Note the paths are relative:
# each later chdir is relative to the previous lot's directory.
names = ["solum", "primis", "secondus"]
num_spots = [100, 28, 40]  # NOTE(review): unused in this script -- confirm needed.
dirs = ["solum/", "../primis", "../secondus"]
for lot_index in range(3):
    os.chdir(dirs[lot_index])
    # Instance images are the files whose names contain '#'.
    all_instances = [file for file in glob.glob("*#*")]
    num_instances = len(all_instances)
    # y: one binary label per image; X: raw 32x32 colour images.
    y = np.empty(shape=(num_instances, 1), dtype="int32")
    X = np.empty(shape=(num_instances, 32, 32, 3), dtype="float32")
    for file_index in range(num_instances):
        fname = all_instances[file_index]
        X[file_index] = cv2.imread(fname) # Note: BGR, not RGB.
        # Files ending in "e.jpg" get label 0, everything else label 1
        # -- presumably "empty" vs "occupied"; confirm naming convention.
        if fname.endswith("e.jpg"):
            y[file_index] = 0
        else:
            y[file_index] = 1
    base_path = "../pickles/" + names[lot_index]
    path_X = base_path + "_X.npy"
    path_y = base_path + "_y.npy"
    data_manager.pickle_dump(X, path_X)
    data_manager.pickle_dump(y, path_y)
| 28.511628 | 73 | 0.659869 |
0044d3381bd175bc08b2b60102b3ed5b376cd6d4 | 680 | py | Python | LinearRegression/Bayesian_LR.py | roger8587/Machine_Learning | d435cece8993608f0da45580ab81fd6db61d345a | [
"MIT"
] | 1 | 2021-08-24T06:11:12.000Z | 2021-08-24T06:11:12.000Z | LinearRegression/Bayesian_LR.py | roger8587/Machine_Learning | d435cece8993608f0da45580ab81fd6db61d345a | [
"MIT"
] | null | null | null | LinearRegression/Bayesian_LR.py | roger8587/Machine_Learning | d435cece8993608f0da45580ab81fd6db61d345a | [
"MIT"
] | null | null | null | import numpy as np
class Bayesian_Linear_Reg(object):
    """Bayesian linear regression.

    A bias column of ones is prepended to the design matrix on
    construction; ``posterior`` computes the Gaussian posterior over the
    weights for an isotropic prior (precision ``alpha``) and noise
    precision ``beta``; ``posterior_predictive`` returns the predictive
    mean and variance for a test design matrix (which must already
    include the bias column).
    """
    def __init__(self, Data, Y):
        # Augment the inputs with a leading column of ones for the bias.
        bias_col = np.ones(Data.shape[0], dtype=int).reshape(-1, 1)
        self.X = np.concatenate([bias_col, Data], axis=1)
        self.Y = Y
    def posterior(self, alpha, beta):
        # S_N^{-1} = alpha*I + beta*X^T X,  m_N = beta * S_N X^T y
        self.beta = beta
        design = self.X
        precision = alpha * np.eye(design.shape[1]) + beta * design.T.dot(design)
        self.S_N_inv = precision
        self.S_N = np.linalg.inv(precision)
        self.m_N = beta * self.S_N.dot(design.T).dot(self.Y)
    def posterior_predictive(self, Phi_test):
        # Predictive mean and per-point variance (noise + weight uncertainty).
        mean = Phi_test.dot(self.m_N)
        variance = 1 / self.beta + np.sum(Phi_test.dot(self.S_N) * Phi_test, axis = 1)
        return mean, variance
| 35.789474 | 84 | 0.597059 |
c36f75c08eef20e52f563c4b155d66118e9f986f | 2,457 | py | Python | deeploglizer/models/ae.py | logpai/deep-loglizer- | 1069a1e0e9b000e1bc9b353fb01d3d451d9a6d5d | [
"Apache-2.0"
] | 55 | 2021-07-21T08:34:46.000Z | 2022-03-27T19:53:14.000Z | deeploglizer/models/ae.py | logpai/deep-loglizer- | 1069a1e0e9b000e1bc9b353fb01d3d451d9a6d5d | [
"Apache-2.0"
] | 10 | 2021-07-21T08:37:27.000Z | 2022-03-21T09:00:07.000Z | deeploglizer/models/ae.py | logpai/deep-loglizer- | 1069a1e0e9b000e1bc9b353fb01d3d451d9a6d5d | [
"Apache-2.0"
] | 18 | 2021-07-09T06:40:31.000Z | 2022-02-08T07:42:40.000Z | import torch
import torch.nn.functional as F
from torch import nn
from deeploglizer.models import ForcastBasedModel
class AutoEncoder(ForcastBasedModel):
    """LSTM autoencoder on top of ForcastBasedModel.

    The last RNN output is compressed to hidden_size // 2 features and
    reconstructed; the per-sample MSE between reconstruction and the RNN
    representation serves as the anomaly score ("y_pred") and its mean as
    the training loss.
    """
    def __init__(
        self,
        meta_data,
        hidden_size=100,
        num_layers=1,
        num_directions=2,
        embedding_dim=16,
        model_save_path="./ae_models",
        feature_type="sequentials",
        label_type="none",
        eval_type="session",
        topk=5,
        use_tfidf=False,
        freeze=False,
        gpu=-1,
        **kwargs
    ):
        super().__init__(
            meta_data=meta_data,
            model_save_path=model_save_path,
            feature_type=feature_type,
            label_type=label_type,
            eval_type=eval_type,
            topk=topk,
            use_tfidf=use_tfidf,
            embedding_dim=embedding_dim,
            freeze=freeze,
            gpu=gpu,
            **kwargs
        )
        # Cache the configuration on the instance.
        self.feature_type = feature_type
        self.label_type = label_type
        self.hidden_size = hidden_size
        self.num_directions = num_directions
        self.use_tfidf = use_tfidf
        self.embedding_dim = embedding_dim
        # Sequence encoder over the (embedded) feature sequence.
        self.rnn = nn.LSTM(
            input_size=embedding_dim,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            bidirectional=(num_directions == 2),
        )
        rnn_out_dim = hidden_size * num_directions
        bottleneck_dim = hidden_size // 2
        self.encoder = nn.Linear(rnn_out_dim, bottleneck_dim)
        self.decoder = nn.Linear(bottleneck_dim, rnn_out_dim)
        # reduction="none" keeps a per-element loss for per-sample scores.
        self.criterion = nn.MSELoss(reduction="none")
    def forward(self, input_dict):
        feats = input_dict["features"]
        self.batch_size = feats.size()[0]
        if self.embedding_dim == 1:
            # Scalar features: just add a trailing feature dimension.
            feats = feats.unsqueeze(-1)
        else:
            feats = self.embedder(feats)
            if self.feature_type == "semantics" and not self.use_tfidf:
                feats = feats.sum(dim=-2)  # add tf-idf
        rnn_out, _state = self.rnn(feats.float())
        # Use the last timestep as the sequence representation
        # (an alternative would be mean-pooling over time).
        representation = rnn_out[:, -1, :]
        latent = self.encoder(representation)
        reconstruction = self.decoder(latent)
        pred = self.criterion(reconstruction, representation).mean(dim=-1)
        loss = pred.mean()
        return {"loss": loss, "y_pred": pred}
| 28.905882 | 73 | 0.573057 |
f414e311ff00d83d57cc39d67aa9e40f55b05821 | 84 | py | Python | script_tarea2.py | AnaLopezP/warmupbien | d46883bc72f1d6aec2ab25672cfde26dfb182ede | [
"Apache-2.0"
] | null | null | null | script_tarea2.py | AnaLopezP/warmupbien | d46883bc72f1d6aec2ab25672cfde26dfb182ede | [
"Apache-2.0"
] | null | null | null | script_tarea2.py | AnaLopezP/warmupbien | d46883bc72f1d6aec2ab25672cfde26dfb182ede | [
"Apache-2.0"
] | null | null | null | import colorama
from colorama import init, Fore
# Print a green greeting.
# NOTE(review): ``init`` is imported but never called; colorama's init()
# is what enables ANSI colours on Windows terminals -- confirm intended.
print(Fore.GREEN + "Hola Mundo")
| 21 | 33 | 0.75 |
227042bf4474e38eaddb3526fab1c628dfdce495 | 7,715 | py | Python | solar-system.py | jacanchaplais/solar-system | 86e56d127af3f91234af3b9bd72888049fe6deed | [
"MIT"
] | null | null | null | solar-system.py | jacanchaplais/solar-system | 86e56d127af3f91234af3b9bd72888049fe6deed | [
"MIT"
] | null | null | null | solar-system.py | jacanchaplais/solar-system | 86e56d127af3f91234af3b9bd72888049fe6deed | [
"MIT"
] | null | null | null | ###############################################################################
## Solar System Simulation ##
## Written by Jacan Chaplais, 2019 ##
## jacan.chaplais@gmail.com ##
###############################################################################
# This code takes input of the positions and velocities of all bodies
# within a gravitationally interacting system and evolves them through
# time.
#
# The program reads and writes in CSV format using Pandas. Input data
# is provided in bodies.inp, output data is written to data/traj.out
import os
import sys
import numpy as np
import pandas as pd
# ----------------------- DEFINING ASTRONOMICAL UNITS ----------------------- #
# metres, kilograms, seconds:
# Unit sizes in SI: 1 astronomical unit (m), 1 solar mass (kg), 1 day (s).
au_len, au_mass, au_time = 1.495978707E+11, 1.98892E+30, 8.64E+4
# Newton's gravitational constant rescaled from SI into the units above.
grav_const = 6.67408E-11 * (au_mass * au_time ** 2) / (au_len ** 3)
# ------------------------ READING SOLAR SYSTEM DATA ------------------------ #
# bodies.inp: CSV indexed by (body name, date); columns include Mass and
# 3D position/velocity components.
data = pd.read_csv('bodies.inp', index_col=[0,1], parse_dates=True)
traj = data.drop(columns=['Mass','Z', 'VZ']) # DataFrame to stores calcs
index = traj.index.copy()
columns = traj.columns.copy()
start_date = index.levels[1][0] # first date from input file
# ------- CALCULATING THE ACCELERATIONS OF THESE BODIES DUE TO GRAVITY ------ #
def accelerate(position, mass, grav=None):
    """Calculates the cumulative Newtonian gravitational acceleration
    exerted on all n bodies within the system.

    Keyword arguments:
    position -- (n x 2 array) n rows of 2d position for each body
    mass -- (1-d array of length n) containing the masses of each body
    grav -- gravitational constant to use; defaults to the module-level
            grav_const (astronomical units) when None

    Returns:
    (n x 2 array) n rows of 2-dimensional acceleration for each body
    """
    if grav is None:
        grav = grav_const
    mass = mass[:, np.newaxis]  # formats as column vector
    # Subtracts position row vector from col vector, to form square
    # antisymm. matrix of displacements D_ij, from body i to j.
    displacement = position[:, np.newaxis] - position
    # Calc matrix of distance d_ij from displacement vectors in D_ij.
    distance = np.linalg.norm(displacement, axis=2)
    # Calc matrix of (1 / d_ij) ^ 3, except where d_ij = 0.  The `out`
    # buffer is required: with `where`, np.power leaves the skipped
    # (diagonal) entries uninitialised, which could inject garbage/NaN
    # into the sum below; zero-filling keeps them inert.
    inv_cube_dist = np.power(
        distance, -3, where=(distance != 0.0),
        out=np.zeros_like(distance))
    inv_cube_dist = inv_cube_dist[:, :, np.newaxis]
    # acc_j = -G * sum_i m_i (p_j - p_i) / d_ij^3  (pairwise attraction).
    acc = - grav * np.sum(
        inv_cube_dist * np.swapaxes(mass * displacement, 0, 1),
        axis=0)
    return acc
# --------------------------- FORMATTING THE DATA --------------------------- #
def format_data(pos_data, vel_data, index, column, num_iter, cur_date):
    """Takes position and velocity arrays of shape nbod x ndim x nt
    (bodies, spatial dimensions, time) and returns a DataFrame of the
    data, sorted column-wise.
    Keyword arguments:
    pos_data -- (nbod x ndim x nt array) position data
    vel_data -- (nbod x ndim x nt array) velocity data
    index -- Pandas.MultiIndex for a single date
    column -- (array-like) column headers for the formatted DataFrame
    num_iter -- (int) number of dates over which the data represents
    cur_date -- (Pandas.DateTime) the date at the start of the data
    Returns:
    cur_traj -- the formatted DataFrame
    """
    # Create a datetime array from start to finish.
    date_range = cur_date + pd.to_timedelta(np.arange(num_iter), 'D')
    # Remember the original body ordering so it can be restored after
    # sort_index() at the bottom.
    body_order = index.levels[0][index.codes[0]]
    # NOTE(review): MultiIndex.set_levels/set_codes with inplace=True are
    # deprecated and removed in modern pandas -- confirm pinned version.
    index.set_levels(date_range, level=1, inplace=True) # set dates new index
    index.set_codes(
        [
            np.tile( # map planet name to number
                index.codes[0],
                num_iter),
            np.repeat( # apply date to planets at each timestep
                np.arange(num_iter),
                len(index.codes[1]))
        ],
        verify_integrity=False, # allow index to be erroneous for now
        inplace=True
    )
    # Reshapes 3d arrays of to 2d, making time axis add repeating rows.
    # position:
    pos_data = pos_data.transpose(2,0,1).reshape( # moves t axis to front
        # rows are each body repeated for every timestep:
        pos_data.shape[0] * pos_data.shape[2],
        # columns give x & y pos for each body:
        pos_data.shape[1])
    # same for velocity:
    vel_data = vel_data.transpose(2,0,1).reshape(
        vel_data.shape[0] * vel_data.shape[2],
        vel_data.shape[1])
    traj_data = np.hstack((pos_data, vel_data)) # pos & vel data in adj cols
    # Puts it all together to create the DataFrame.
    cur_traj = pd.DataFrame(data=traj_data, index=index, columns=column)
    cur_traj.sort_index(inplace=True)
    cur_traj = cur_traj.reindex(body_order, level='Name')
    return cur_traj
# ---------------------- SETTING UP THE LOOP VARIABLES ---------------------- #
# Define length of the calc (days), & the num of calc steps to perform.
timespan = 60400.0
num_steps = 60400
time_change = timespan / float(num_steps) # resulting time diff between steps
# Set up the variables to give progress readouts during execution.
cntr = 0
pcnt = 0
pcnt_change = 10 # (int) set how much change in % of calc for readout update
cntr_change = int((pcnt_change / 100.0) * num_steps)
# -------- PREPARING THE PHYSICAL DATA STRUCTURES FROM THE INPUT FILE ------- #
# The calculation is in Numpy, formatted in Pandas after.
idx_slc = pd.IndexSlice # a pandas object to make index slicing easier
# Get initial 2D arrays for position and velocity:
pos = traj.loc[idx_slc[:, start_date], ['X','Y']].values
vel = traj.loc[idx_slc[:, start_date], ['VX','VY']].values
cur_pos = pos # store 2D array of pos at first timestep, for calcs
cur_vel = vel
pos = pos[:, :, np.newaxis] # add a time axis, for recording
vel = vel[:, :, np.newaxis]
mass = data['Mass'].values # get the mass as a 1D array
# ------------------- PERFORMING THE NUMERICAL INTEGRATION ------------------ #
# Create a data/ storage directory, if it does not already exist.
data_dir = 'data/'
fpath = data_dir + 'traj.out'
if not os.path.exists(data_dir):
    os.makedirs(data_dir)
elif (os.path.isfile(fpath)):
    os.remove(fpath)
for step in range(1, num_steps):
    # Apply euler method to calculate new velocities & positions.
    cur_vel = cur_vel + accelerate(cur_pos, mass) * time_change
    cur_pos = cur_pos + cur_vel * time_change
    # Record next 2D arrays of pos & vels along t axes of arrays.
    vel = np.dstack((vel, cur_vel))
    pos = np.dstack((pos, cur_pos))
# ----------------- WRITE TO OUTPUT FILE AND UPDATE PROGRESS ---------------- #
    cntr = cntr + 1
    last_step = step == num_steps - 1
    # Every cntr_change steps (and at the very end), flush the buffered
    # trajectory to disk and print a progress percentage.
    if (cntr == cntr_change or last_step):
        pcnt = pcnt + pcnt_change
        sys.stdout.write('\r{}% complete'.format(pcnt))
        sys.stdout.flush()
        # Record all but the last timestep of data.
        cur_date = start_date + pd.to_timedelta(step - cntr, 'D')
        if not last_step:
            cur_traj = format_data(
                pos[:, :, :-1], vel[:, :, :-1],
                index.copy(), columns,
                cntr_change, cur_date)
            # Keep only the newest slice (back to 2D); np.dstack above
            # regrows the time axis on the next iteration.
            pos = pos[:, :, -1]
            vel = vel[:, :, -1]
        else:
            cur_traj = format_data(
                pos, vel,
                index.copy(), columns,
                cntr_change, cur_date)
        file_exists = os.path.isfile(fpath)
        write_mode = 'a' if file_exists else 'w' # if store file exist, append
        with open(fpath, write_mode) as f: # save as csv
            cur_traj.to_csv(f, header=(not file_exists))
        cntr = 0
| 35.883721 | 79 | 0.594945 |
d0c3850f49c5024a1a1c4d870a767ab286729761 | 300 | py | Python | cf8.py | ammkazi/pythonB2 | 4ceba24f3dcd40f110b6fda8057d58ab1e4be88f | [
"MIT"
] | null | null | null | cf8.py | ammkazi/pythonB2 | 4ceba24f3dcd40f110b6fda8057d58ab1e4be88f | [
"MIT"
] | null | null | null | cf8.py | ammkazi/pythonB2 | 4ceba24f3dcd40f110b6fda8057d58ab1e4be88f | [
"MIT"
] | null | null | null | # wap to print fibonacci series upto nth term
# output : 0 1 1 2 3 5 8 13 21 34 55 89
# Read how many Fibonacci terms to print.
n = int(input("Enter the number of terms : "))
# The first two terms are printed unconditionally.
# NOTE(review): for n < 2 this still prints two terms -- confirm desired.
n1 = 0
n2 = 1
print(n1, n2, end=' ')
# Generate terms 3..n.  (The original pre-loop ``n3 = n1 + n2`` was dead
# code: the loop recomputes it before any use.)
i = 3
while i <= n:
    n3 = n1 + n2
    print(n3, end=' ')
    n1 = n2
    n2 = n3
    i = i + 1
| 16.666667 | 47 | 0.496667 |
e13d3eaea886ed397b12338c88d254bac98f92e4 | 777 | py | Python | src/constants.py | joehowells/critical-keep | 4aba3322a8582a2d06ab0d4b67028738249669e9 | [
"MIT"
] | 1 | 2019-04-27T22:39:33.000Z | 2019-04-27T22:39:33.000Z | src/constants.py | joehowells/critical-keep | 4aba3322a8582a2d06ab0d4b67028738249669e9 | [
"MIT"
] | null | null | null | src/constants.py | joehowells/critical-keep | 4aba3322a8582a2d06ab0d4b67028738249669e9 | [
"MIT"
] | null | null | null | DUNGEON_DEPTH = 7
DUNGEON_MIDBOSS = 3
DUNGEON_ENDBOSS = 7
# Rows of weapon-tier values, one row per progression step; presumably a
# per-depth pool that loot generation samples from -- confirm call sites.
WEAPON_TIER_SELECTION = [
    [0, 0, 0, 0, 1],
    [0, 0, 0, 1, 1],
    [0, 0, 1, 1, 1],
    [0, 1, 1, 1, 1],
    [1, 1, 1, 1, 2],
    [1, 1, 1, 2, 2],
    [1, 1, 2, 2, 2],
    [1, 2, 2, 2, 2],
]
# Upper limits for each stat.
CAP_HP = 80
CAP_ATTACK = 30
CAP_DEFEND = 30
CAP_HIT = 50
CAP_CRITICAL = 50
# Per-stat increments granted by tonic items.
TONIC_HP = 5
TONIC_ATTACK = 2
TONIC_DEFEND = 2
TONIC_HIT = 5
TONIC_CRITICAL = 1
ELIXIR_HP = 20
# RGB colour palette.
COLOR_BLUE = (51, 102, 255)
COLOR_RED = (255, 0, 51)
COLOR_YELLOW = (255, 204, 51)
COLOR_BLACK = (0, 0, 0)
COLOR_WHITE = (255, 255, 255)
COLOR_GRAY1 = (51, 51, 51)
COLOR_GRAY2 = (102, 102, 102)
COLOR_GRAY3 = (153, 153, 153)
COLOR_GRAY4 = (204, 204, 204)
# Semantic aliases for combat-outcome colours.
COLOR_MISS = COLOR_BLUE
COLOR_HIT = COLOR_WHITE
COLOR_CRITICAL = COLOR_RED
FOV_RADIUS = 10
| 16.891304 | 29 | 0.607465 |
aa48b5d0058e4d5c47e658ba0523f148dbfd35b5 | 1,844 | py | Python | baselines/gail/statistics.py | rwill128/baselines | 24dd0c80db01623bb1224ab044b64da3fbec63cc | [
"MIT"
] | null | null | null | baselines/gail/statistics.py | rwill128/baselines | 24dd0c80db01623bb1224ab044b64da3fbec63cc | [
"MIT"
] | null | null | null | baselines/gail/statistics.py | rwill128/baselines | 24dd0c80db01623bb1224ab044b64da3fbec63cc | [
"MIT"
] | null | null | null | '''
This code is highly based on https://github.com/carpedm20/deep-rl-tensorflow/blob/master/agents/statistic.py
'''
import tensorflow as tf
import numpy as np
import baselines.common.tf_util as U
class stats():
    """Builds TensorBoard summary placeholders/ops for named scalar and
    histogram values and writes them via ``add_all_summary``.
    """
    def __init__(self, scalar_keys=None, histogram_keys=None):
        # Avoid mutable default arguments: a ``=[]`` default is a single
        # shared list reused by every call of __init__.
        if scalar_keys is None:
            scalar_keys = []
        if histogram_keys is None:
            histogram_keys = []
        self.scalar_keys = scalar_keys
        self.histogram_keys = histogram_keys
        self.scalar_summaries = []
        self.scalar_summaries_ph = []
        self.histogram_summaries_ph = []
        self.histogram_summaries = []
        with tf.compat.v1.variable_scope('summary'):
            for k in scalar_keys:
                ph = tf.compat.v1.placeholder('float32', None, name=k + '.scalar.summary')
                sm = tf.summary.scalar(k + '.scalar.summary', ph)
                self.scalar_summaries_ph.append(ph)
                self.scalar_summaries.append(sm)
            for k in histogram_keys:
                ph = tf.compat.v1.placeholder('float32', None, name=k + '.histogram.summary')
                # NOTE(review): histogram keys are recorded with
                # tf.summary.scalar, not tf.summary.histogram -- confirm
                # this is intended before changing it.
                sm = tf.summary.scalar(k + '.histogram.summary', ph)
                self.histogram_summaries_ph.append(ph)
                self.histogram_summaries.append(sm)
        self.summaries = tf.summary.merge(self.scalar_summaries + self.histogram_summaries)
    def add_all_summary(self, writer, values, iter):
        # Note that the order of the incoming ```values``` should be the same as the that of the
        # ```scalar_keys``` given in ```__init__```
        # Silently skip the write if any value is NaN.
        if np.sum(np.isnan(values) + 0) != 0:
            return
        sess = U.get_session()
        keys = self.scalar_summaries_ph + self.histogram_summaries_ph
        feed_dict = {}
        for k, v in zip(keys, values):
            feed_dict.update({k: v})
        summaries_str = sess.run(self.summaries, feed_dict)
        writer.add_summary(summaries_str, iter)
| 40.086957 | 108 | 0.62039 |
b7d8acd7281637d8b990f697504bf8e9b4bf24e7 | 311 | py | Python | data/multilingual/Latn.OTE/Sun-ExtA_12/pdf_to_json_test_Latn.OTE_Sun-ExtA_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | 1 | 2021-09-19T19:47:35.000Z | 2021-09-19T19:47:35.000Z | data/multilingual/Latn.OTE/Sun-ExtA_12/pdf_to_json_test_Latn.OTE_Sun-ExtA_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | data/multilingual/Latn.OTE/Sun-ExtA_12/pdf_to_json_test_Latn.OTE_Sun-ExtA_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | import pdf_to_json as p2j
import json
# Source PDF: UDHR text for the Latn.OTE script, rendered with Sun-ExtA 12pt
# (per the path components).
url = "file:data/multilingual/Latn.OTE/Sun-ExtA_12/udhr_Latn.OTE_Sun-ExtA_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
# mImageHashOnly=True: presumably stores image hashes rather than image
# payloads in the output -- confirm in pdf_to_json.
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
# Pretty-print the result; ensure_ascii=False keeps non-ASCII text readable.
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 31.1 | 81 | 0.810289 |
703c2767588c8c8e47f73a47d7bda0631fa4fc82 | 4,077 | py | Python | osf/migrations/0183_populate_file_versions_through.py | gaybro8777/osf.io | 30408511510a40bc393565817b343ef5fd76ab14 | [
"Apache-2.0"
] | 628 | 2015-01-15T04:33:22.000Z | 2022-03-30T06:40:10.000Z | osf/migrations/0183_populate_file_versions_through.py | gaybro8777/osf.io | 30408511510a40bc393565817b343ef5fd76ab14 | [
"Apache-2.0"
] | 4,712 | 2015-01-02T01:41:53.000Z | 2022-03-30T14:18:40.000Z | osf/migrations/0183_populate_file_versions_through.py | Johnetordoff/osf.io | de10bf249c46cede04c78f7e6f7e352c69e6e6b5 | [
"Apache-2.0"
] | 371 | 2015-01-12T16:14:08.000Z | 2022-03-31T18:58:29.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-03-03 17:52
from __future__ import unicode_literals
import logging
from django.db import migrations, connection
logger = logging.getLogger(__file__)
def restore_default_through_table(state, schema):
    """Reverse migration step: rebuild the plain ``osf_basefilenode_versions``
    M2M table (basefilenode_id, fileversion_id) from
    ``osf_basefileversionsthrough``, dropping the version_name column, then
    restore its primary key, foreign keys, uniqueness constraint and indexes.
    Raw SQL is used instead of the ORM.
    """
    sql = """
    DROP TABLE osf_basefilenode_versions;
    CREATE TABLE osf_basefilenode_versions AS
        SELECT
           new_thru.basefilenode_id,
           new_thru.fileversion_id
        FROM
           osf_basefileversionsthrough AS new_thru;
    ALTER TABLE osf_basefilenode_versions ADD COLUMN id SERIAL PRIMARY KEY;
    ALTER TABLE osf_basefilenode_versions ADD CONSTRAINT osf_basefilenod_basefilenode_id_b0knah27_fk_osf_basefilenode_id FOREIGN KEY (basefilenode_id) REFERENCES osf_basefilenode DEFERRABLE INITIALLY DEFERRED;
    ALTER TABLE osf_basefilenode_versions ALTER COLUMN basefilenode_id
        SET
            DATA TYPE INTEGER;
    ALTER TABLE osf_basefilenode_versions ALTER COLUMN fileversion_id
        SET
            NOT NULL;
    ALTER TABLE osf_basefilenode_versions ALTER COLUMN fileversion_id
        SET
            DATA TYPE INTEGER;
    ALTER TABLE osf_basefilenode_versions ALTER COLUMN basefilenode_id
        SET
            NOT NULL;
    ALTER TABLE osf_basefilenode_versions ADD CONSTRAINT osf_basefilenode__fileversion_id_93etanfc_fk_osf_fileversion_id FOREIGN KEY (fileversion_id) REFERENCES osf_fileversion DEFERRABLE INITIALLY DEFERRED;
    ALTER TABLE osf_basefilenode_versions ADD CONSTRAINT osf_basefilenode__fileversion_uniq564 UNIQUE (basefilenode_id, fileversion_id);
    CREATE INDEX
        ON osf_basefilenode_versions (basefilenode_id, fileversion_id);
    CREATE INDEX
        ON osf_basefilenode_versions (basefilenode_id);
    CREATE INDEX
        ON osf_basefilenode_versions (fileversion_id);
    """
    # Single batched statement string executed on the default connection.
    with connection.cursor() as cursor:
        cursor.execute(sql)
def populate_fileversion_name(state, schema):
    """Forward migration step: replace ``osf_basefileversionsthrough``
    with a copy of ``osf_basefilenode_versions`` augmented with a
    ``version_name`` column taken from the owning file's name, then
    recreate the primary key, foreign keys, uniqueness constraint and
    indexes.  Raw SQL is used instead of the ORM.
    """
    sql = """
    DROP TABLE osf_basefileversionsthrough;
    CREATE TABLE osf_basefileversionsthrough AS
        SELECT
            obfv.basefilenode_id,
            obfv.fileversion_id,
            ob.name as version_name
        FROM
            osf_basefilenode_versions obfv
        LEFT JOIN
            osf_basefilenode ob
        ON obfv.basefilenode_id = ob.id;
    ALTER TABLE osf_basefileversionsthrough ADD COLUMN id SERIAL PRIMARY KEY;
    ALTER TABLE osf_basefileversionsthrough ADD CONSTRAINT osf_basefilenod_basefilenode_id_b0nwad27_fk_osf_basefilenode_id FOREIGN KEY (basefilenode_id) REFERENCES osf_basefilenode DEFERRABLE INITIALLY DEFERRED;
    ALTER TABLE osf_basefileversionsthrough ALTER COLUMN basefilenode_id
        SET
            DATA TYPE INTEGER;
    ALTER TABLE osf_basefileversionsthrough ALTER COLUMN fileversion_id
        SET
            NOT NULL;
    ALTER TABLE osf_basefileversionsthrough ALTER COLUMN fileversion_id
        SET
            DATA TYPE INTEGER;
    ALTER TABLE osf_basefileversionsthrough ALTER COLUMN basefilenode_id
        SET
            NOT NULL;
    ALTER TABLE osf_basefileversionsthrough ADD CONSTRAINT osf_basefilenode__fileversion_id_93nwadfc_fk_osf_fileversion_id FOREIGN KEY (fileversion_id) REFERENCES osf_fileversion DEFERRABLE INITIALLY DEFERRED;
    ALTER TABLE osf_basefileversionsthrough ADD CONSTRAINT osf_basefilenode__fileversion_uniq UNIQUE (basefilenode_id, fileversion_id);
    CREATE INDEX
        ON osf_basefileversionsthrough (basefilenode_id, fileversion_id);
    CREATE INDEX
        ON osf_basefileversionsthrough (basefilenode_id);
    CREATE INDEX
        ON osf_basefileversionsthrough (fileversion_id);
    """
    # Single batched statement string executed on the default connection.
    with connection.cursor() as cursor:
        cursor.execute(sql)
class Migration(migrations.Migration):
    # Forward: rebuild the through table with a version_name column.
    # Backward: restore the default through table without it.
    dependencies = [
        ('osf', '0182_add_custom_file_versions_through'),
    ]
    operations = [
        migrations.RunPython(populate_fileversion_name, restore_default_through_table)
    ]
| 41.181818 | 215 | 0.728232 |
faf3a88f1fd67f41f2c3a50b5211925d5643f800 | 88,634 | py | Python | remaster/usr/lib/live-installer/configobj.py | LanderU/Desktop-Tools | ad63c56457f14c8fb6a8ee990923cb8f41001834 | [
"Apache-2.0"
] | null | null | null | remaster/usr/lib/live-installer/configobj.py | LanderU/Desktop-Tools | ad63c56457f14c8fb6a8ee990923cb8f41001834 | [
"Apache-2.0"
] | null | null | null | remaster/usr/lib/live-installer/configobj.py | LanderU/Desktop-Tools | ad63c56457f14c8fb6a8ee990923cb8f41001834 | [
"Apache-2.0"
] | null | null | null | # configobj.py
# A config file reader/writer that supports nested sections in config files.
# Copyright (C) 2005-2008 Michael Foord, Nicola Larosa
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# nico AT tekNico DOT net
# ConfigObj 4
# http://www.voidspace.org.uk/python/configobj.html
# Released subject to the BSD License
# Please see http://www.voidspace.org.uk/python/license.shtml
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# For information about bugfixes, updates and support, please join the
# ConfigObj mailing list:
# http://lists.sourceforge.net/lists/listinfo/configobj-develop
# Comments, suggestions and bug reports welcome.
from __future__ import generators
import sys
# Interpreter version as a (major, minor) tuple; ConfigObj requires >= 2.2.
INTP_VER = sys.version_info[:2]
if INTP_VER < (2, 2):
    raise RuntimeError("Python v.2.2 or later needed")
import os, re
# Optional Python-2 ``compiler`` module, used by unrepr mode (getObj below);
# left as None on interpreters that lack it.
compiler = None
try:
    import compiler
except ImportError:
    # for IronPython
    pass
from types import StringTypes
from warnings import warn
# Byte-order marks: take them from ``codecs`` where available, otherwise
# define the literal byte sequences by hand (Python 2.2 fallback).
try:
    from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
except ImportError:
    # Python 2.2 does not have these
    # UTF-8
    BOM_UTF8 = '\xef\xbb\xbf'
    # UTF-16, little endian
    BOM_UTF16_LE = '\xff\xfe'
    # UTF-16, big endian
    BOM_UTF16_BE = '\xfe\xff'
    if sys.byteorder == 'little':
        # UTF-16, native endianness
        BOM_UTF16 = BOM_UTF16_LE
    else:
        # UTF-16, native endianness
        BOM_UTF16 = BOM_UTF16_BE
# A dictionary mapping BOM to
# the encoding to decode with, and what to set the
# encoding attribute to.
BOMS = {
    BOM_UTF8: ('utf_8', None),
    BOM_UTF16_BE: ('utf16_be', 'utf_16'),
    BOM_UTF16_LE: ('utf16_le', 'utf_16'),
    BOM_UTF16: ('utf_16', 'utf_16'),
    }
# All legal variants of the BOM codecs.
# TODO: the list of aliases is not meant to be exhaustive, is there a
# better way ?
# Maps each alias (as written by users) to the canonical codec name.
BOM_LIST = {
    'utf_16': 'utf_16',
    'u16': 'utf_16',
    'utf16': 'utf_16',
    'utf-16': 'utf_16',
    'utf16_be': 'utf16_be',
    'utf_16_be': 'utf16_be',
    'utf-16be': 'utf16_be',
    'utf16_le': 'utf16_le',
    'utf_16_le': 'utf16_le',
    'utf-16le': 'utf16_le',
    'utf_8': 'utf_8',
    'u8': 'utf_8',
    'utf': 'utf_8',
    'utf8': 'utf_8',
    'utf-8': 'utf_8',
    }
# Map of encodings to the BOM to write.
# The None key supplies the default BOM (UTF-8).
BOM_SET = {
    'utf_8': BOM_UTF8,
    'utf_16': BOM_UTF16,
    'utf16_be': BOM_UTF16_BE,
    'utf16_le': BOM_UTF16_LE,
    None: BOM_UTF8
    }
def match_utf8(encoding):
    """Return True if *encoding* names UTF-8 under any recognised alias."""
    normalised = encoding.lower()
    return BOM_LIST.get(normalised) == 'utf_8'
# Quote strings used for writing values
squot = "'%s'"    # single-quoted
dquot = '"%s"'    # double-quoted
noquot = "%s"     # unquoted
# Characters that force a value to be quoted when written.
wspace_plus = ' \r\t\n\v\t\'"'
# NOTE(review): tsquot holds triple *double* quotes and tdquot triple
# *single* quotes -- the names look swapped; confirm writer usage.
tsquot = '"""%s"""'
tdquot = "'''%s'''"
# Python 2.2 compatibility shims: provide ``enumerate`` and the boolean
# constants on interpreters that predate them.
try:
    enumerate
except NameError:
    def enumerate(obj):
        """enumerate for Python 2.2."""
        i = -1
        for item in obj:
            i += 1
            yield i, item
try:
    True, False
except NameError:
    True, False = 1, 0
__version__ = '4.5.3'
__revision__ = '$Id: configobj.py 156 2006-01-31 14:57:08Z fuzzyman $'
__docformat__ = "restructuredtext en"
__all__ = (
'__version__',
'DEFAULT_INDENT_TYPE',
'DEFAULT_INTERPOLATION',
'ConfigObjError',
'NestingError',
'ParseError',
'DuplicateError',
'ConfigspecError',
'ConfigObj',
'SimpleVal',
'InterpolationError',
'InterpolationLoopError',
'MissingInterpolationOption',
'RepeatSectionError',
'ReloadError',
'UnreprError',
'UnknownType',
'__docformat__',
'flatten_errors',
)
DEFAULT_INTERPOLATION = 'configparser'
DEFAULT_INDENT_TYPE = ' '
MAX_INTERPOL_DEPTH = 10
OPTION_DEFAULTS = {
'interpolation': True,
'raise_errors': False,
'list_values': True,
'create_empty': False,
'file_error': False,
'configspec': None,
'stringify': True,
# option may be set to one of ('', ' ', '\t')
'indent_type': None,
'encoding': None,
'default_encoding': None,
'unrepr': False,
'write_empty_values': False,
}
def getObj(s):
    """Parse *s* (a value expression) with the Python-2 ``compiler``
    module and return the AST node for the right-hand side of ``a=<s>``.
    Raises ImportError when the compiler module is unavailable."""
    source = "a=" + s
    if compiler is None:
        raise ImportError('compiler module not available')
    tree = compiler.parse(source)
    return tree.getChildren()[1].getChildren()[0].getChildren()[1]
class UnknownType(Exception):
    """Raised by ``Builder`` when an AST node cannot be converted."""
    pass
class Builder(object):
    """Convert a Python-2 ``compiler`` AST (as produced by ``getObj``)
    back into the equivalent Python object, dispatching on the node's
    class name.
    NOTE(review): ``i.next()`` and list-returning ``map`` are Python-2
    semantics -- this class is not Python-3 compatible as written.
    """
    def build(self, o):
        # Dispatch to build_<NodeClassName>; unknown node types are rejected.
        m = getattr(self, 'build_' + o.__class__.__name__, None)
        if m is None:
            raise UnknownType(o.__class__.__name__)
        return m(o)
    def build_List(self, o):
        return map(self.build, o.getChildren())
    def build_Const(self, o):
        return o.value
    def build_Dict(self, o):
        # Children alternate key, value, key, value, ...
        d = {}
        i = iter(map(self.build, o.getChildren()))
        for el in i:
            d[el] = i.next()
        return d
    def build_Tuple(self, o):
        return tuple(self.build_List(o))
    def build_Name(self, o):
        # Only the three constant names are permitted.
        if o.name == 'None':
            return None
        if o.name == 'True':
            return True
        if o.name == 'False':
            return False
        # An undefined Name
        raise UnknownType('Undefined Name')
    def build_Add(self, o):
        # Only "real + imaginary" complex literals are accepted.
        real, imag = map(self.build_Const, o.getChildren())
        try:
            real = float(real)
        except TypeError:
            raise UnknownType('Add')
        if not isinstance(imag, complex) or imag.real != 0.0:
            raise UnknownType('Add')
        return real+imag
    def build_Getattr(self, o):
        parent = self.build(o.expr)
        return getattr(parent, o.attrname)
    def build_UnarySub(self, o):
        return -self.build_Const(o.getChildren()[0])
    def build_UnaryAdd(self, o):
        return self.build_Const(o.getChildren()[0])
_builder = Builder()
def unrepr(s):
    """Turn a repr-style string back into a Python object.
    Empty/falsy input is returned unchanged."""
    if not s:
        return s
    parsed = getObj(s)
    return _builder.build(parsed)
class ConfigObjError(SyntaxError):
    """
    Root of the ConfigObj exception hierarchy (a ``SyntaxError``
    subclass).  Carries the offending line, its number and the message
    as attributes for callers that need parse context.
    """
    def __init__(self, message='', line_number=None, line=''):
        SyntaxError.__init__(self, message)
        self.message = message
        self.line_number = line_number
        self.line = line
class NestingError(ConfigObjError):
    """
    This error indicates a level of nesting that doesn't match:
    a section's depth is inconsistent with the surrounding sections.
    """
class ParseError(ConfigObjError):
    """
    This error indicates that a line is badly written.
    It is neither a valid ``key = value`` line,
    nor a valid section marker line.
    (Line context is available via the ConfigObjError attributes.)
    """
class ReloadError(IOError):
    """
    Raised when a 'reload' operation fails because the object's
    filename is not set.  Subclasses ``IOError``.
    """
    def __init__(self):
        # Fixed message: reload only fails this way when no filename exists.
        message = 'reload failed, filename is not set.'
        IOError.__init__(self, message)
class DuplicateError(ConfigObjError):
    """
    The keyword or section specified already exists.
    (Line context is available via the ConfigObjError attributes.)
    """
class ConfigspecError(ConfigObjError):
    """
    An error occurred whilst parsing a configspec.
    """
class InterpolationError(ConfigObjError):
    """Base class for the two interpolation errors (loop detection and
    missing options)."""
class InterpolationLoopError(InterpolationError):
    """Maximum interpolation depth exceeded in string interpolation
    (a value refers, directly or indirectly, back to itself)."""
    def __init__(self, option):
        InterpolationError.__init__(
            self,
            'interpolation loop detected in value "%s".' % option)
class RepeatSectionError(ConfigObjError):
    """
    This error indicates additional sections in a section with a
    ``__many__`` (repeated) section specification.
    """
class MissingInterpolationOption(InterpolationError):
    """A value specified for interpolation was missing
    (no matching option could be found during lookup)."""
    def __init__(self, option):
        InterpolationError.__init__(
            self,
            'missing option "%s" in interpolation.' % option)
class UnreprError(ConfigObjError):
    """An error occurred while parsing a value in unrepr mode."""
class InterpolationEngine(object):
    """
    A helper class to help perform string interpolation.
    This class is an abstract base class; its descendants perform
    the actual work.
    """
    # compiled regexp to use in self.interpolate()
    _KEYCRE = re.compile(r"%\(([^)]*)\)s")
    def __init__(self, section):
        # the Section instance that "owns" this engine
        self.section = section
    def interpolate(self, key, value):
        def recursive_interpolate(key, value, section, backtrail):
            """The function that does the actual work.
            ``value``: the string we're trying to interpolate.
            ``section``: the section in which that string was found
            ``backtrail``: a dict to keep track of where we've been,
            to detect and prevent infinite recursion loops
            This is similar to a depth-first-search algorithm.
            """
            # Have we been here already?
            # NOTE(review): dict.has_key is Python-2-only; use ``in``
            # if this is ever run under Python 3.
            if backtrail.has_key((key, section.name)):
                # Yes - infinite loop detected
                raise InterpolationLoopError(key)
            # Place a marker on our backtrail so we won't come back here again
            backtrail[(key, section.name)] = 1
            # Now start the actual work
            match = self._KEYCRE.search(value)
            while match:
                # The actual parsing of the match is implementation-dependent,
                # so delegate to our helper function
                k, v, s = self._parse_match(match)
                if k is None:
                    # That's the signal that no further interpolation is needed
                    replacement = v
                else:
                    # Further interpolation may be needed to obtain final value
                    replacement = recursive_interpolate(k, v, s, backtrail)
                # Replace the matched string with its final value
                start, end = match.span()
                value = ''.join((value[:start], replacement, value[end:]))
                new_search_start = start + len(replacement)
                # Pick up the next interpolation key, if any, for next time
                # through the while loop
                match = self._KEYCRE.search(value, new_search_start)
            # Now safe to come back here again; remove marker from backtrail
            del backtrail[(key, section.name)]
            return value
        # Back in interpolate(), all we have to do is kick off the recursive
        # function with appropriate starting values
        value = recursive_interpolate(key, value, self.section, {})
        return value
    def _fetch(self, key):
        """Helper function to fetch values from owning section.
        Returns a 2-tuple: the value, and the section where it was found.
        """
        # switch off interpolation before we try and fetch anything !
        save_interp = self.section.main.interpolation
        self.section.main.interpolation = False
        # Start at section that "owns" this InterpolationEngine
        current_section = self.section
        while True:
            # try the current section first
            val = current_section.get(key)
            if val is not None:
                break
            # try "DEFAULT" next
            val = current_section.get('DEFAULT', {}).get(key)
            if val is not None:
                break
            # move up to parent and try again
            # top-level's parent is itself
            if current_section.parent is current_section:
                # reached top level, time to give up
                break
            current_section = current_section.parent
        # restore interpolation to previous value before returning
        self.section.main.interpolation = save_interp
        if val is None:
            raise MissingInterpolationOption(key)
        return val, current_section
    def _parse_match(self, match):
        """Implementation-dependent helper function.
        Will be passed a match object corresponding to the interpolation
        key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
        key in the appropriate config file section (using the ``_fetch()``
        helper function) and return a 3-tuple: (key, value, section)
        ``key`` is the name of the key we're looking for
        ``value`` is the value found for that key
        ``section`` is a reference to the section where it was found
        ``key`` and ``section`` should be None if no further
        interpolation should be performed on the resulting value
        (e.g., if we interpolated "$$" and returned "$").
        """
        raise NotImplementedError()
class ConfigParserInterpolation(InterpolationEngine):
    """Behaves like ConfigParser: interpolation syntax is ``%(key)s``."""
    _KEYCRE = re.compile(r"%\(([^)]*)\)s")
    def _parse_match(self, match):
        # The single capture group holds the key name, e.g. 'foo' in '%(foo)s'.
        requested = match.group(1)
        resolved, owner = self._fetch(requested)
        # returning the key and owning section lets the engine recurse
        # into the fetched value
        return requested, resolved, owner
class TemplateInterpolation(InterpolationEngine):
    """Behaves like string.Template: ``$name``, ``${name}`` and ``$$``."""
    _delimiter = '$'
    _KEYCRE = re.compile(r"""
        \$(?:
            (?P<escaped>\$) | # Two $ signs
            (?P<named>[_a-z][_a-z0-9]*) | # $name format
            {(?P<braced>[^}]*)} # ${name} format
            )
        """, re.IGNORECASE | re.VERBOSE)
    def _parse_match(self, match):
        name = match.group('named')
        if name is None:
            name = match.group('braced')
        if name is not None:
            # $name or ${name}: resolve it through the section hierarchy
            resolved, owner = self._fetch(name)
            return name, resolved, owner
        if match.group('escaped') is not None:
            # '$$' collapses to a single literal delimiter; None for key
            # and section tells the engine not to interpolate further
            return None, self._delimiter, None
        # any other use of '$' is passed through completely unchanged
        return None, match.group(), None
# Registry mapping the user-visible ``interpolation`` option value to the
# engine class that implements it (consulted by ``Section._interpolate``).
interpolation_engines = {
    'configparser': ConfigParserInterpolation,
    'template': TemplateInterpolation,
    }
class Section(dict):
    """
    A dictionary-like object that represents a section in a config file.
    It does string interpolation if the 'interpolation' attribute
    of the 'main' object is set to True.
    Interpolation is tried first from this object, then from the 'DEFAULT'
    section of this object, next from the parent and its 'DEFAULT' section,
    and so on until the main object is reached.
    A Section will behave like an ordered dictionary - following the
    order of the ``scalars`` and ``sections`` attributes.
    You can use this to change the order of members.
    Iteration follows the order: scalars, then sections.
    """
    def __init__(self, parent, depth, main, indict=None, name=None):
        """
        * parent is the section above
        * depth is the depth level of this section
        * main is the main ConfigObj
        * indict is a dictionary to initialise the section with
        * name is the section's own name (purely informational)
        """
        if indict is None:
            indict = {}
        dict.__init__(self)
        # used for nesting level *and* interpolation
        self.parent = parent
        # used for the interpolation attribute
        self.main = main
        # level of nesting depth of this Section
        self.depth = depth
        # purely for information
        self.name = name
        #
        self._initialise()
        # we do this explicitly so that __setitem__ is used properly
        # (rather than just passing to ``dict.__init__``)
        for entry, value in indict.iteritems():
            self[entry] = value
    def _initialise(self):
        # Reset all bookkeeping state; called from __init__ and also by
        # ConfigObj._initialise when an instance is re-initialised.
        # the sequence of scalar values in this Section
        self.scalars = []
        # the sequence of sections in this Section
        self.sections = []
        # for comments :-)
        self.comments = {}
        self.inline_comments = {}
        # for the configspec
        self.configspec = {}
        self._order = []
        self._configspec_comments = {}
        self._configspec_inline_comments = {}
        self._cs_section_comments = {}
        self._cs_section_inline_comments = {}
        # for defaults
        self.defaults = []
        self.default_values = {}
    def _interpolate(self, key, value):
        # Lazily choose and cache the interpolation engine, then delegate
        # the actual substitution work to it.
        try:
            # do we already have an interpolation engine?
            engine = self._interpolation_engine
        except AttributeError:
            # not yet: first time running _interpolate(), so pick the engine
            name = self.main.interpolation
            if name == True:  # note that "if name:" would be incorrect here
                # backwards-compatibility: interpolation=True means use default
                name = DEFAULT_INTERPOLATION
            name = name.lower()  # so that "Template", "template", etc. all work
            class_ = interpolation_engines.get(name, None)
            if class_ is None:
                # invalid value for self.main.interpolation
                self.main.interpolation = False
                return value
            else:
                # save reference to engine so we don't have to do this again
                engine = self._interpolation_engine = class_(self)
        # let the engine do the actual work
        return engine.interpolate(key, value)
    def __getitem__(self, key):
        """Fetch the item and do string interpolation."""
        val = dict.__getitem__(self, key)
        if self.main.interpolation and isinstance(val, StringTypes):
            return self._interpolate(key, val)
        return val
    def __setitem__(self, key, value, unrepr=False):
        """
        Correctly set a value.
        Making dictionary values Section instances.
        (We have to special case 'Section' instances - which are also dicts)
        Keys must be strings.
        Values need only be strings (or lists of strings) if
        ``main.stringify`` is set.
        ``unrepr`` must be set when setting a value to a dictionary, without
        creating a new sub-section.
        """
        if not isinstance(key, StringTypes):
            raise ValueError('The key "%s" is not a string.' % key)
        # add the comment
        if not self.comments.has_key(key):
            self.comments[key] = []
            self.inline_comments[key] = ''
        # remove the entry from defaults
        if key in self.defaults:
            self.defaults.remove(key)
        #
        if isinstance(value, Section):
            if not self.has_key(key):
                self.sections.append(key)
            dict.__setitem__(self, key, value)
        elif isinstance(value, dict) and not unrepr:
            # First create the new depth level,
            # then create the section
            if not self.has_key(key):
                self.sections.append(key)
            new_depth = self.depth + 1
            dict.__setitem__(
                self,
                key,
                Section(
                    self,
                    new_depth,
                    self.main,
                    indict=value,
                    name=key))
        else:
            if not self.has_key(key):
                self.scalars.append(key)
            if not self.main.stringify:
                # stringify off: only strings (or lists of strings) allowed
                if isinstance(value, StringTypes):
                    pass
                elif isinstance(value, (list, tuple)):
                    for entry in value:
                        if not isinstance(entry, StringTypes):
                            raise TypeError('Value is not a string "%s".' % entry)
                else:
                    raise TypeError('Value is not a string "%s".' % value)
            dict.__setitem__(self, key, value)
    def __delitem__(self, key):
        """Remove items from the sequence when deleting."""
        # a KeyError from the dict raises before any list state is touched
        dict. __delitem__(self, key)
        if key in self.scalars:
            self.scalars.remove(key)
        else:
            self.sections.remove(key)
        del self.comments[key]
        del self.inline_comments[key]
    def get(self, key, default=None):
        """A version of ``get`` that doesn't bypass string interpolation."""
        try:
            return self[key]
        except KeyError:
            return default
    def update(self, indict):
        """
        A version of update that uses our ``__setitem__``.
        """
        for entry in indict:
            self[entry] = indict[entry]
    def pop(self, key, *args):
        """
        'D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised'
        """
        val = dict.pop(self, key, *args)
        if key in self.scalars:
            del self.comments[key]
            del self.inline_comments[key]
            self.scalars.remove(key)
        elif key in self.sections:
            del self.comments[key]
            del self.inline_comments[key]
            self.sections.remove(key)
        # the popped value is interpolated, just like __getitem__
        if self.main.interpolation and isinstance(val, StringTypes):
            return self._interpolate(key, val)
        return val
    def popitem(self):
        """Pops the first (key,val)"""
        sequence = (self.scalars + self.sections)
        if not sequence:
            raise KeyError(": 'popitem(): dictionary is empty'")
        key = sequence[0]
        val = self[key]
        del self[key]
        return key, val
    def clear(self):
        """
        A version of clear that also affects scalars/sections
        Also clears comments and configspec.
        Leaves other attributes alone :
            depth/main/parent are not affected
        """
        dict.clear(self)
        self.scalars = []
        self.sections = []
        self.comments = {}
        self.inline_comments = {}
        self.configspec = {}
    def setdefault(self, key, default=None):
        """A version of setdefault that sets sequence if appropriate."""
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return self[key]
    def items(self):
        """D.items() -> list of D's (key, value) pairs, as 2-tuples"""
        return zip((self.scalars + self.sections), self.values())
    def keys(self):
        """D.keys() -> list of D's keys"""
        return (self.scalars + self.sections)
    def values(self):
        """D.values() -> list of D's values"""
        return [self[key] for key in (self.scalars + self.sections)]
    def iteritems(self):
        """D.iteritems() -> an iterator over the (key, value) items of D"""
        return iter(self.items())
    def iterkeys(self):
        """D.iterkeys() -> an iterator over the keys of D"""
        return iter((self.scalars + self.sections))
    __iter__ = iterkeys
    def itervalues(self):
        """D.itervalues() -> an iterator over the values of D"""
        return iter(self.values())
    def __repr__(self):
        """x.__repr__() <==> repr(x)"""
        return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(self[key])))
            for key in (self.scalars + self.sections)])
    __str__ = __repr__
    __str__.__doc__ = "x.__str__() <==> str(x)"
    # Extra methods - not in a normal dictionary
    def dict(self):
        """
        Return a deepcopy of self as a dictionary.
        All members that are ``Section`` instances are recursively turned to
        ordinary dictionaries - by calling their ``dict`` method.
        >>> n = a.dict()
        >>> n == a
        1
        >>> n is a
        0
        """
        newdict = {}
        for entry in self:
            this_entry = self[entry]
            if isinstance(this_entry, Section):
                this_entry = this_entry.dict()
            elif isinstance(this_entry, list):
                # create a copy rather than a reference
                this_entry = list(this_entry)
            elif isinstance(this_entry, tuple):
                # create a copy rather than a reference
                this_entry = tuple(this_entry)
            newdict[entry] = this_entry
        return newdict
    def merge(self, indict):
        """
        A recursive update - useful for merging config files.
        >>> a = '''[section1]
        ...     option1 = True
        ...     [[subsection]]
        ...     more_options = False
        ...     # end of file'''.splitlines()
        >>> b = '''# File is user.ini
        ...     [section1]
        ...     option1 = False
        ...     # end of file'''.splitlines()
        >>> c1 = ConfigObj(b)
        >>> c2 = ConfigObj(a)
        >>> c2.merge(c1)
        >>> c2
        {'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}}
        """
        for key, val in indict.items():
            if (key in self and isinstance(self[key], dict) and
                                isinstance(val, dict)):
                # both sides are mappings - recurse rather than overwrite
                self[key].merge(val)
            else:
                self[key] = val
    def rename(self, oldkey, newkey):
        """
        Change a keyname to another, without changing position in sequence.
        Implemented so that transformations can be made on keys,
        as well as on values. (used by encode and decode)
        Also renames comments.
        """
        if oldkey in self.scalars:
            the_list = self.scalars
        elif oldkey in self.sections:
            the_list = self.sections
        else:
            raise KeyError('Key "%s" not found.' % oldkey)
        pos = the_list.index(oldkey)
        #
        val = self[oldkey]
        dict.__delitem__(self, oldkey)
        dict.__setitem__(self, newkey, val)
        the_list.remove(oldkey)
        the_list.insert(pos, newkey)
        comm = self.comments[oldkey]
        inline_comment = self.inline_comments[oldkey]
        del self.comments[oldkey]
        del self.inline_comments[oldkey]
        self.comments[newkey] = comm
        self.inline_comments[newkey] = inline_comment
    def walk(self, function, raise_errors=True,
            call_on_sections=False, **keywargs):
        """
        Walk every member and call a function on the keyword and value.
        Return a dictionary of the return values
        If the function raises an exception, raise the errror
        unless ``raise_errors=False``, in which case set the return value to
        ``False``.
        Any unrecognised keyword arguments you pass to walk, will be pased on
        to the function you pass in.
        Note: if ``call_on_sections`` is ``True`` then - on encountering a
        subsection, *first* the function is called for the *whole* subsection,
        and then recurses into it's members. This means your function must be
        able to handle strings, dictionaries and lists. This allows you
        to change the key of subsections as well as for ordinary members. The
        return value when called on the whole subsection has to be discarded.
        See  the encode and decode methods for examples, including functions.
        .. caution::
            You can use ``walk`` to transform the names of members of a section
            but you mustn't add or delete members.
        >>> config = '''[XXXXsection]
        ... XXXXkey = XXXXvalue'''.splitlines()
        >>> cfg = ConfigObj(config)
        >>> cfg
        {'XXXXsection': {'XXXXkey': 'XXXXvalue'}}
        >>> def transform(section, key):
        ...     val = section[key]
        ...     newkey = key.replace('XXXX', 'CLIENT1')
        ...     section.rename(key, newkey)
        ...     if isinstance(val, (tuple, list, dict)):
        ...         pass
        ...     else:
        ...         val = val.replace('XXXX', 'CLIENT1')
        ...         section[newkey] = val
        >>> cfg.walk(transform, call_on_sections=True)
        {'CLIENT1section': {'CLIENT1key': None}}
        >>> cfg
        {'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}}
        """
        out = {}
        # scalars first
        for i in range(len(self.scalars)):
            entry = self.scalars[i]
            try:
                val = function(self, entry, **keywargs)
                # bound again in case name has changed
                entry = self.scalars[i]
                out[entry] = val
            except Exception:
                if raise_errors:
                    raise
                else:
                    entry = self.scalars[i]
                    out[entry] = False
        # then sections
        for i in range(len(self.sections)):
            entry = self.sections[i]
            if call_on_sections:
                try:
                    function(self, entry, **keywargs)
                except Exception:
                    if raise_errors:
                        raise
                    else:
                        entry = self.sections[i]
                        out[entry] = False
                # bound again in case name has changed
                entry = self.sections[i]
            # previous result is discarded
            out[entry] = self[entry].walk(
                function,
                raise_errors=raise_errors,
                call_on_sections=call_on_sections,
                **keywargs)
        return out
    def decode(self, encoding):
        """
        Decode all strings and values to unicode, using the specified encoding.
        Works with subsections and list values.
        Uses the ``walk`` method.
        Testing ``encode`` and ``decode``.
        >>> m = ConfigObj(a)
        >>> m.decode('ascii')
        >>> def testuni(val):
        ...     for entry in val:
        ...         if not isinstance(entry, unicode):
        ...             print >> sys.stderr, type(entry)
        ...             raise AssertionError, 'decode failed.'
        ...         if isinstance(val[entry], dict):
        ...             testuni(val[entry])
        ...         elif not isinstance(val[entry], unicode):
        ...             raise AssertionError, 'decode failed.'
        >>> testuni(m)
        >>> m.encode('ascii')
        >>> a == m
        1
        """
        warn('use of ``decode`` is deprecated.', DeprecationWarning)
        def decode(section, key, encoding=encoding, warn=True):
            """ """
            val = section[key]
            if isinstance(val, (list, tuple)):
                newval = []
                for entry in val:
                    newval.append(entry.decode(encoding))
            elif isinstance(val, dict):
                newval = val
            else:
                newval = val.decode(encoding)
            newkey = key.decode(encoding)
            section.rename(key, newkey)
            section[newkey] = newval
        # using ``call_on_sections`` allows us to modify section names
        self.walk(decode, call_on_sections=True)
    def encode(self, encoding):
        """
        Encode all strings and values from unicode,
        using the specified encoding.
        Works with subsections and list values.
        Uses the ``walk`` method.
        """
        warn('use of ``encode`` is deprecated.', DeprecationWarning)
        def encode(section, key, encoding=encoding):
            """ """
            val = section[key]
            if isinstance(val, (list, tuple)):
                newval = []
                for entry in val:
                    newval.append(entry.encode(encoding))
            elif isinstance(val, dict):
                newval = val
            else:
                newval = val.encode(encoding)
            newkey = key.encode(encoding)
            section.rename(key, newkey)
            section[newkey] = newval
        self.walk(encode, call_on_sections=True)
    def istrue(self, key):
        """A deprecated version of ``as_bool``."""
        warn('use of ``istrue`` is deprecated. Use ``as_bool`` method '
                'instead.', DeprecationWarning)
        return self.as_bool(key)
    def as_bool(self, key):
        """
        Accepts a key as input. The corresponding value must be a string or
        the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
        retain compatibility with Python 2.2.
        If the string is one of  ``True``, ``On``, ``Yes``, or ``1`` it returns
        ``True``.
        If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns
        ``False``.
        ``as_bool`` is not case sensitive.
        Any other input will raise a ``ValueError``.
        >>> a = ConfigObj()
        >>> a['a'] = 'fish'
        >>> a.as_bool('a')
        Traceback (most recent call last):
        ValueError: Value "fish" is neither True nor False
        >>> a['b'] = 'True'
        >>> a.as_bool('b')
        1
        >>> a['b'] = 'off'
        >>> a.as_bool('b')
        0
        """
        val = self[key]
        if val == True:
            return True
        elif val == False:
            return False
        else:
            try:
                if not isinstance(val, StringTypes):
                    # TODO: Why do we raise a KeyError here?
                    raise KeyError()
                else:
                    # lookup in the shared truth table on the main ConfigObj
                    return self.main._bools[val.lower()]
            except KeyError:
                raise ValueError('Value "%s" is neither True nor False' % val)
    def as_int(self, key):
        """
        A convenience method which coerces the specified value to an integer.
        If the value is an invalid literal for ``int``, a ``ValueError`` will
        be raised.
        >>> a = ConfigObj()
        >>> a['a'] = 'fish'
        >>> a.as_int('a')
        Traceback (most recent call last):
        ValueError: invalid literal for int(): fish
        >>> a['b'] = '1'
        >>> a.as_int('b')
        1
        >>> a['b'] = '3.2'
        >>> a.as_int('b')
        Traceback (most recent call last):
        ValueError: invalid literal for int(): 3.2
        """
        return int(self[key])
    def as_float(self, key):
        """
        A convenience method which coerces the specified value to a float.
        If the value is an invalid literal for ``float``, a ``ValueError`` will
        be raised.
        >>> a = ConfigObj()
        >>> a['a'] = 'fish'
        >>> a.as_float('a')
        Traceback (most recent call last):
        ValueError: invalid literal for float(): fish
        >>> a['b'] = '1'
        >>> a.as_float('b')
        1.0
        >>> a['b'] = '3.2'
        >>> a.as_float('b')
        3.2000000000000002
        """
        return float(self[key])
    def restore_default(self, key):
        """
        Restore (and return) default value for the specified key.
        This method will only work for a ConfigObj that was created
        with a configspec and has been validated.
        If there is no default value for this key, ``KeyError`` is raised.
        """
        default = self.default_values[key]
        dict.__setitem__(self, key, default)
        if key not in self.defaults:
            self.defaults.append(key)
        return default
    def restore_defaults(self):
        """
        Recursively restore default values to all members
        that have them.
        This method will only work for a ConfigObj that was created
        with a configspec and has been validated.
        It doesn't delete or modify entries without default values.
        """
        for key in self.default_values:
            self.restore_default(key)
        for section in self.sections:
            self[section].restore_defaults()
class ConfigObj(Section):
    """An object to read, create, and write config files."""
    # matches a ``key = value`` line; groups: indentation, key, raw value
    _keyword = re.compile(r'''^ # line start
        (\s*)                   # indentation
        (                       # keyword
            (?:".*?")|          # double quotes
            (?:'.*?')|          # single quotes
            (?:[^'"=].*?)       # no quotes
        )
        \s*=\s*                 # divider
        (.*)                    # value (including list values and comments)
        $   # line end
        ''',
        re.VERBOSE)
    # matches a ``[section]`` / ``[[subsection]]`` marker line
    _sectionmarker = re.compile(r'''^
        (\s*)                     # 1: indentation
        ((?:\[\s*)+)              # 2: section marker open
        (                         # 3: section name open
            (?:"\s*\S.*?\s*")|    # at least one non-space with double quotes
            (?:'\s*\S.*?\s*')|    # at least one non-space with single quotes
            (?:[^'"\s].*?)        # at least one non-space unquoted
        )                         # section name close
        ((?:\s*\])+)              # 4: section marker close
        \s*(\#.*)?                # 5: optional comment
        $''',
        re.VERBOSE)
    # this regexp pulls list values out as a single string
    # or single values and comments
    # FIXME: this regex adds a '' to the end of comma terminated lists
    #   workaround in ``_handle_value``
    _valueexp = re.compile(r'''^
        (?:
            (?:
                (
                    (?:
                        (?:
                            (?:".*?")|              # double quotes
                            (?:'.*?')|              # single quotes
                            (?:[^'",\#][^,\#]*?)    # unquoted
                        )
                        \s*,\s*                     # comma
                    )*      # match all list items ending in a comma (if any)
                )
                (
                    (?:".*?")|                      # double quotes
                    (?:'.*?')|                      # single quotes
                    (?:[^'",\#\s][^,]*?)|           # unquoted
                    (?:(?<!,))                      # Empty value
                )?          # last item in a list - or string value
            )|
            (,)             # alternatively a single comma - empty list
        )
        \s*(\#.*)?          # optional comment
        $''',
        re.VERBOSE)
    # use findall to get the members of a list value
    _listvalueexp = re.compile(r'''
        (
            (?:".*?")|          # double quotes
            (?:'.*?')|          # single quotes
            (?:[^'",\#].*?)       # unquoted
        )
        \s*,\s*                 # comma
        ''',
        re.VERBOSE)
    # this regexp is used for the value
    # when lists are switched off
    _nolistvalue = re.compile(r'''^
        (
            (?:".*?")|          # double quotes
            (?:'.*?')|          # single quotes
            (?:[^'"\#].*?)|     # unquoted
            (?:)                # Empty value
        )
        \s*(\#.*)?              # optional comment
        $''',
        re.VERBOSE)
    # regexes for finding triple quoted values on one line
    _single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
    _single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
    _multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
    _multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')
    # maps the opening quote style to its (single-line, multi-line) regexes
    _triple_quote = {
        "'''": (_single_line_single, _multi_line_single),
        '"""': (_single_line_double, _multi_line_double),
    }
    # Used by the ``istrue`` Section method
    _bools = {
        'yes': True, 'no': False,
        'on': True, 'off': False,
        '1': True, '0': False,
        'true': True, 'false': False,
        }
def __init__(self, infile=None, options=None, **kwargs):
"""
Parse a config file or create a config file object.
``ConfigObj(infile=None, options=None, **kwargs)``
"""
# init the superclass
Section.__init__(self, self, 0, self)
if infile is None:
infile = []
if options is None:
options = {}
else:
options = dict(options)
# keyword arguments take precedence over an options dictionary
options.update(kwargs)
defaults = OPTION_DEFAULTS.copy()
# TODO: check the values too.
for entry in options:
if entry not in defaults:
raise TypeError('Unrecognised option "%s".' % entry)
# Add any explicit options to the defaults
defaults.update(options)
self._initialise(defaults)
configspec = defaults['configspec']
self._original_configspec = configspec
self._load(infile, configspec)
    def _load(self, infile, configspec):
        """
        Read, decode and parse ``infile``, then handle ``configspec``.
        ``infile`` may be a filename, a list/tuple of lines, a dict (or
        ConfigObj) to initialise from, or a file-like object with ``read``.
        Raises the collected parse errors (if any) once parsing finishes.
        """
        if isinstance(infile, StringTypes):
            self.filename = infile
            if os.path.isfile(infile):
                h = open(infile, 'rb')
                infile = h.read() or []
                h.close()
            elif self.file_error:
                # raise an error if the file doesn't exist
                raise IOError('Config file not found: "%s".' % self.filename)
            else:
                # file doesn't already exist
                if self.create_empty:
                    # this is a good test that the filename specified
                    # isn't impossible - like on a non-existent device
                    h = open(infile, 'w')
                    h.write('')
                    h.close()
                infile = []
        elif isinstance(infile, (list, tuple)):
            infile = list(infile)
        elif isinstance(infile, dict):
            # initialise self
            # the Section class handles creating subsections
            if isinstance(infile, ConfigObj):
                # get a copy of our ConfigObj
                infile = infile.dict()
            for entry in infile:
                self[entry] = infile[entry]
            del self._errors
            if configspec is not None:
                self._handle_configspec(configspec)
            else:
                self.configspec = None
            # nothing to parse when initialised from a dict
            return
        elif hasattr(infile, 'read'):
            # This supports file like objects
            infile = infile.read() or []
            # needs splitting into lines - but needs doing *after* decoding
            # in case it's not an 8 bit encoding
        else:
            raise TypeError('infile must be a filename, file like object, or list of lines.')
        if infile:
            # don't do it for the empty ConfigObj
            infile = self._handle_bom(infile)
            # infile is now *always* a list
            #
            # Set the newlines attribute (first line ending it finds)
            # and strip trailing '\n' or '\r' from lines
            for line in infile:
                if (not line) or (line[-1] not in ('\r', '\n', '\r\n')):
                    continue
                for end in ('\r\n', '\n', '\r'):
                    if line.endswith(end):
                        self.newlines = end
                        break
                break
        infile = [line.rstrip('\r\n') for line in infile]
        self._parse(infile)
        # if we had any errors, now is the time to raise them
        if self._errors:
            info = "at line %s." % self._errors[0].line_number
            if len(self._errors) > 1:
                msg = "Parsing failed with several errors.\nFirst error %s" % info
                error = ConfigObjError(msg)
            else:
                error = self._errors[0]
            # set the errors attribute; it's a list of tuples:
            # (error_type, message, line_number)
            error.errors = self._errors
            # set the config attribute
            error.config = self
            raise error
        # delete private attributes
        del self._errors
        if configspec is None:
            self.configspec = None
        else:
            self._handle_configspec(configspec)
def _initialise(self, options=None):
if options is None:
options = OPTION_DEFAULTS
# initialise a few variables
self.filename = None
self._errors = []
self.raise_errors = options['raise_errors']
self.interpolation = options['interpolation']
self.list_values = options['list_values']
self.create_empty = options['create_empty']
self.file_error = options['file_error']
self.stringify = options['stringify']
self.indent_type = options['indent_type']
self.encoding = options['encoding']
self.default_encoding = options['default_encoding']
self.BOM = False
self.newlines = None
self.write_empty_values = options['write_empty_values']
self.unrepr = options['unrepr']
self.initial_comment = []
self.final_comment = []
self.configspec = {}
# Clear section attributes as well
Section._initialise(self)
def __repr__(self):
return ('ConfigObj({%s})' %
', '.join([('%s: %s' % (repr(key), repr(self[key])))
for key in (self.scalars + self.sections)]))
    def _handle_bom(self, infile):
        """
        Handle any BOM, and decode if necessary.
        If an encoding is specified, that *must* be used - but the BOM should
        still be removed (and the BOM attribute set).
        (If the encoding is wrongly specified, then a BOM for an alternative
        encoding won't be discovered or removed.)
        If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
        removed. The BOM attribute will be set. UTF16 will be decoded to
        unicode.
        NOTE: This method must not be called with an empty ``infile``.
        Specifying the *wrong* encoding is likely to cause a
        ``UnicodeDecodeError``.
        ``infile`` must always be returned as a list of lines, but may be
        passed in as a single string.
        """
        if ((self.encoding is not None) and
            (self.encoding.lower() not in BOM_LIST)):
            # No need to check for a BOM
            # the encoding specified doesn't have one
            # just decode
            return self._decode(infile, self.encoding)
        # only the first line can carry a BOM
        if isinstance(infile, (list, tuple)):
            line = infile[0]
        else:
            line = infile
        if self.encoding is not None:
            # encoding explicitly supplied
            # And it could have an associated BOM
            # TODO: if encoding is just UTF16 - we ought to check for both
            # TODO: big endian and little endian versions.
            enc = BOM_LIST[self.encoding.lower()]
            if enc == 'utf_16':
                # For UTF16 we try big endian and little endian
                for BOM, (encoding, final_encoding) in BOMS.items():
                    if not final_encoding:
                        # skip UTF8
                        continue
                    if infile.startswith(BOM):
                        ### BOM discovered
                        ##self.BOM = True
                        # Don't need to remove BOM
                        return self._decode(infile, encoding)
                # If we get this far, will *probably* raise a DecodeError
                # As it doesn't appear to start with a BOM
                return self._decode(infile, self.encoding)
            # Must be UTF8
            BOM = BOM_SET[enc]
            if not line.startswith(BOM):
                return self._decode(infile, self.encoding)
            newline = line[len(BOM):]
            # BOM removed
            if isinstance(infile, (list, tuple)):
                infile[0] = newline
            else:
                infile = newline
            self.BOM = True
            return self._decode(infile, self.encoding)
        # No encoding specified - so we need to check for UTF8/UTF16
        for BOM, (encoding, final_encoding) in BOMS.items():
            if not line.startswith(BOM):
                continue
            else:
                # BOM discovered
                self.encoding = final_encoding
                if not final_encoding:
                    self.BOM = True
                    # UTF8
                    # remove BOM
                    newline = line[len(BOM):]
                    if isinstance(infile, (list, tuple)):
                        infile[0] = newline
                    else:
                        infile = newline
                    # UTF8 - don't decode
                    if isinstance(infile, StringTypes):
                        return infile.splitlines(True)
                    else:
                        return infile
                # UTF16 - have to decode
                return self._decode(infile, encoding)
        # No BOM discovered and no encoding specified, just return
        if isinstance(infile, StringTypes):
            # infile read from a file will be a single string
            return infile.splitlines(True)
        return infile
def _a_to_u(self, aString):
"""Decode ASCII strings to unicode if a self.encoding is specified."""
if self.encoding:
return aString.decode('ascii')
else:
return aString
    def _decode(self, infile, encoding):
        """
        Decode infile to unicode, using the specified encoding.
        If infile is a string, it also needs converting to a list of lines.
        """
        if isinstance(infile, StringTypes):
            # can't be unicode
            # NOTE: Could raise a ``UnicodeDecodeError``
            return infile.decode(encoding).splitlines(True)
        # a list: decode each byte-string line in place
        for i, line in enumerate(infile):
            if not isinstance(line, unicode):
                # NOTE: The isinstance test here handles mixed lists of unicode/string
                # NOTE: But the decode will break on any non-string values
                # NOTE: Or could raise a ``UnicodeDecodeError``
                infile[i] = line.decode(encoding)
        return infile
def _decode_element(self, line):
"""Decode element to unicode if necessary."""
if not self.encoding:
return line
if isinstance(line, str) and self.default_encoding:
return line.decode(self.default_encoding)
return line
def _str(self, value):
"""
Used by ``stringify`` within validate, to turn non-string values
into strings.
"""
if not isinstance(value, StringTypes):
return str(value)
else:
return value
    def _parse(self, infile):
        """
        Actually parse the config file.
        ``infile`` is a list of (already decoded, newline-stripped) lines.
        Populates self (sections, scalars, comments); errors are routed
        through ``_handle_error`` and re-raised collectively by ``_load``.
        """
        # list values are not parsed in unrepr mode - restore at the end
        temp_list_values = self.list_values
        if self.unrepr:
            self.list_values = False
        comment_list = []
        done_start = False
        this_section = self
        maxline = len(infile) - 1
        cur_index = -1
        reset_comment = False
        while cur_index < maxline:
            if reset_comment:
                comment_list = []
            cur_index += 1
            line = infile[cur_index]
            sline = line.strip()
            # do we have anything on the line ?
            if not sline or sline.startswith('#'):
                reset_comment = False
                comment_list.append(line)
                continue
            if not done_start:
                # preserve initial comment
                self.initial_comment = comment_list
                comment_list = []
                done_start = True
            reset_comment = True
            # first we check if it's a section marker
            mat = self._sectionmarker.match(line)
            if mat is not None:
                # is a section line
                (indent, sect_open, sect_name, sect_close, comment) = mat.groups()
                if indent and (self.indent_type is None):
                    self.indent_type = indent
                # nesting depth == number of '[' in the marker
                cur_depth = sect_open.count('[')
                if cur_depth != sect_close.count(']'):
                    self._handle_error("Cannot compute the section depth at line %s.",
                                       NestingError, infile, cur_index)
                    continue
                if cur_depth < this_section.depth:
                    # the new section is dropping back to a previous level
                    try:
                        parent = self._match_depth(this_section,
                                                   cur_depth).parent
                    except SyntaxError:
                        self._handle_error("Cannot compute nesting level at line %s.",
                                           NestingError, infile, cur_index)
                        continue
                elif cur_depth == this_section.depth:
                    # the new section is a sibling of the current section
                    parent = this_section.parent
                elif cur_depth == this_section.depth + 1:
                    # the new section is a child the current section
                    parent = this_section
                else:
                    self._handle_error("Section too nested at line %s.",
                                       NestingError, infile, cur_index)
                sect_name = self._unquote(sect_name)
                if parent.has_key(sect_name):
                    self._handle_error('Duplicate section name at line %s.',
                                       DuplicateError, infile, cur_index)
                    continue
                # create the new section
                this_section = Section(
                    parent,
                    cur_depth,
                    self,
                    name=sect_name)
                parent[sect_name] = this_section
                parent.inline_comments[sect_name] = comment
                parent.comments[sect_name] = comment_list
                continue
            #
            # it's not a section marker,
            # so it should be a valid ``key = value`` line
            mat = self._keyword.match(line)
            if mat is None:
                # it neither matched as a keyword
                # or a section marker
                self._handle_error(
                    'Invalid line at line "%s".',
                    ParseError, infile, cur_index)
            else:
                # is a keyword value
                # value will include any inline comment
                (indent, key, value) = mat.groups()
                if indent and (self.indent_type is None):
                    self.indent_type = indent
                # check for a multiline value
                if value[:3] in ['"""', "'''"]:
                    try:
                        (value, comment, cur_index) = self._multiline(
                            value, infile, cur_index, maxline)
                    except SyntaxError:
                        self._handle_error(
                            'Parse error in value at line %s.',
                            ParseError, infile, cur_index)
                        continue
                    else:
                        if self.unrepr:
                            comment = ''
                            try:
                                value = unrepr(value)
                            except Exception, e:
                                # NOTE(review): this branch tests
                                # ``type(e) == UnknownType`` while the
                                # parallel branch below uses isinstance -
                                # confirm whether the difference is intended
                                if type(e) == UnknownType:
                                    msg = 'Unknown name or type in value at line %s.'
                                else:
                                    msg = 'Parse error in value at line %s.'
                                self._handle_error(msg, UnreprError, infile,
                                    cur_index)
                                continue
                else:
                    if self.unrepr:
                        comment = ''
                        try:
                            value = unrepr(value)
                        except Exception, e:
                            if isinstance(e, UnknownType):
                                msg = 'Unknown name or type in value at line %s.'
                            else:
                                msg = 'Parse error in value at line %s.'
                            self._handle_error(msg, UnreprError, infile,
                                cur_index)
                            continue
                    else:
                        # extract comment and lists
                        try:
                            (value, comment) = self._handle_value(value)
                        except SyntaxError:
                            self._handle_error(
                                'Parse error in value at line %s.',
                                ParseError, infile, cur_index)
                            continue
                #
                key = self._unquote(key)
                if this_section.has_key(key):
                    self._handle_error(
                        'Duplicate keyword name at line %s.',
                        DuplicateError, infile, cur_index)
                    continue
                # add the key.
                # we set unrepr because if we have got this far we will never
                # be creating a new section
                this_section.__setitem__(key, value, unrepr=True)
                this_section.inline_comments[key] = comment
                this_section.comments[key] = comment_list
                continue
        #
        if self.indent_type is None:
            # no indentation used, set the type accordingly
            self.indent_type = ''
        # preserve the final comment
        if not self and not self.initial_comment:
            self.initial_comment = comment_list
        elif not reset_comment:
            self.final_comment = comment_list
        self.list_values = temp_list_values
def _match_depth(self, sect, depth):
"""
Given a section and a depth level, walk back through the sections
parents to see if the depth level matches a previous section.
Return a reference to the right section,
or raise a SyntaxError.
"""
while depth < sect.depth:
if sect is sect.parent:
# we've reached the top level already
raise SyntaxError()
sect = sect.parent
if sect.depth == depth:
return sect
# shouldn't get here
raise SyntaxError()
def _handle_error(self, text, ErrorClass, infile, cur_index):
"""
Handle an error according to the error settings.
Either raise the error or store it.
The error will have occured at ``cur_index``
"""
line = infile[cur_index]
cur_index += 1
message = text % cur_index
error = ErrorClass(message, cur_index, line)
if self.raise_errors:
# raise the error - parsing stops here
raise error
# store the error
# reraise when parsing has finished
self._errors.append(error)
def _unquote(self, value):
"""Return an unquoted version of a value"""
if (value[0] == value[-1]) and (value[0] in ('"', "'")):
value = value[1:-1]
return value
    def _quote(self, value, multiline=True):
        """
        Return a safely quoted version of a value.
        Raise a ConfigObjError if the value cannot be safely quoted.
        If multiline is ``True`` (default) then use triple quotes
        if necessary.
        Don't quote values that don't need it.
        Recursively quote members of a list and return a comma joined list.
        Multiline is ``False`` for lists.
        Obey list syntax for empty and single member lists.
        If ``list_values=False`` then the value is only quoted if it contains
        a ``\n`` (is multiline) or '#'.
        If ``write_empty_values`` is set, and the value is an empty string, it
        won't be quoted.
        """
        if multiline and self.write_empty_values and value == '':
            # Only if multiline is set, so that it is used for values not
            # keys, and not values that are part of a list
            return ''
        if multiline and isinstance(value, (list, tuple)):
            if not value:
                # an empty list is written as a lone comma
                return ','
            elif len(value) == 1:
                # a single member list keeps a trailing comma to stay a list
                return self._quote(value[0], multiline=False) + ','
            # quote each member individually and comma-join them
            return ', '.join([self._quote(val, multiline=False)
                              for val in value])
        if not isinstance(value, StringTypes):
            if self.stringify:
                value = str(value)
            else:
                raise TypeError('Value "%s" is not a string.' % value)
        if not value:
            return '""'
        # work out whether no quotes / single-line quotes are sufficient,
        # or whether triple quotes are required
        no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
        need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
        hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
        check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
        if check_for_single:
            if not self.list_values:
                # we don't quote if ``list_values=False``
                quot = noquot
            # for normal values either single or double quotes will do
            elif '\n' in value:
                # will only happen if multiline is off - e.g. '\n' in key
                raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
            elif ((value[0] not in wspace_plus) and
                  (value[-1] not in wspace_plus) and
                  (',' not in value)):
                quot = noquot
            else:
                quot = self._get_single_quote(value)
        else:
            # if value has '\n' or "'" *and* '"', it will need triple quotes
            quot = self._get_triple_quote(value)
        if quot == noquot and '#' in value and self.list_values:
            # an unquoted '#' would start a comment - force quoting
            quot = self._get_single_quote(value)
        return quot % value
def _get_single_quote(self, value):
if ("'" in value) and ('"' in value):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif '"' in value:
quot = squot
else:
quot = dquot
return quot
def _get_triple_quote(self, value):
if (value.find('"""') != -1) and (value.find("'''") != -1):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
if value.find('"""') == -1:
quot = tdquot
else:
quot = tsquot
return quot
    def _handle_value(self, value):
        """
        Given a value string, unquote, remove comment,
        handle lists. (including empty and single member lists)

        Returns a ``(value, comment)`` tuple where ``value`` is a string or
        a list of strings and ``comment`` is the trailing comment text.
        Raises SyntaxError for badly constructed values.
        """
        # do we look for lists in values ?
        if not self.list_values:
            mat = self._nolistvalue.match(value)
            if mat is None:
                raise SyntaxError()
            # NOTE: we don't unquote here
            return mat.groups()
        #
        mat = self._valueexp.match(value)
        if mat is None:
            # the value is badly constructed, probably badly quoted,
            # or an invalid list
            raise SyntaxError()
        # regex groups: leading list members, final single value,
        # lone-comma empty-list marker, trailing comment
        (list_values, single, empty_list, comment) = mat.groups()
        if (list_values == '') and (single is None):
            # change this if you want to accept empty values
            raise SyntaxError()
        # NOTE: note there is no error handling from here if the regex
        # is wrong: then incorrect values will slip through
        if empty_list is not None:
            # the single comma - meaning an empty list
            return ([], comment)
        if single is not None:
            # handle empty values
            if list_values and not single:
                # FIXME: the '' is a workaround because our regex now matches
                # '' at the end of a list if it has a trailing comma
                single = None
            else:
                single = single or '""'
                single = self._unquote(single)
        if list_values == '':
            # not a list value
            return (single, comment)
        # a real (multi-member) list - unquote each member
        the_list = self._listvalueexp.findall(list_values)
        the_list = [self._unquote(val) for val in the_list]
        if single is not None:
            # the last member came through the ``single`` group
            the_list += [single]
        return (the_list, comment)
    def _multiline(self, value, infile, cur_index, maxline):
        """Extract the value, where we are in a multiline situation.

        Returns ``(value, comment, cur_index)`` where ``cur_index`` is the
        index of the line the multiline value finished on.
        Raises SyntaxError if the closing triple quote is malformed or
        missing before the end of the input.
        """
        # the first three characters are the triple-quote style in use
        quot = value[:3]
        newvalue = value[3:]
        single_line = self._triple_quote[quot][0]
        multi_line = self._triple_quote[quot][1]
        mat = single_line.match(value)
        if mat is not None:
            # the value opens and closes on the same line
            retval = list(mat.groups())
            retval.append(cur_index)
            return retval
        elif newvalue.find(quot) != -1:
            # somehow the triple quote is missing
            raise SyntaxError()
        #
        # accumulate whole lines until one contains the closing quote
        while cur_index < maxline:
            cur_index += 1
            newvalue += '\n'
            line = infile[cur_index]
            if line.find(quot) == -1:
                newvalue += line
            else:
                # end of multiline, process it
                break
        else:
            # we've got to the end of the config, oops...
            raise SyntaxError()
        mat = multi_line.match(line)
        if mat is None:
            # a badly formed line
            raise SyntaxError()
        (value, comment) = mat.groups()
        return (newvalue + value, comment, cur_index)
    def _handle_configspec(self, configspec):
        """Parse the configspec.

        ``configspec`` may already be a ``ConfigObj``; anything else is
        handed to the ``ConfigObj`` constructor (so a filename, list of
        lines or file-like object will do).  Errors are re-raised as
        ``ConfigspecError`` / ``IOError`` with a clarifying message.
        """
        # FIXME: Should we check that the configspec was created with the
        # correct settings ? (i.e. ``list_values=False``)
        if not isinstance(configspec, ConfigObj):
            try:
                configspec = ConfigObj(configspec,
                                       raise_errors=True,
                                       file_error=True,
                                       list_values=False)
            except ConfigObjError, e:
                # FIXME: Should these errors have a reference
                # to the already parsed ConfigObj ?
                raise ConfigspecError('Parsing configspec failed: %s' % e)
            except IOError, e:
                raise IOError('Reading configspec failed: %s' % e)
        self._set_configspec_value(configspec, self)
def _set_configspec_value(self, configspec, section):
"""Used to recursively set configspec values."""
if '__many__' in configspec.sections:
section.configspec['__many__'] = configspec['__many__']
if len(configspec.sections) > 1:
# FIXME: can we supply any useful information here ?
raise RepeatSectionError()
if hasattr(configspec, 'initial_comment'):
section._configspec_initial_comment = configspec.initial_comment
section._configspec_final_comment = configspec.final_comment
section._configspec_encoding = configspec.encoding
section._configspec_BOM = configspec.BOM
section._configspec_newlines = configspec.newlines
section._configspec_indent_type = configspec.indent_type
for entry in configspec.scalars:
section._configspec_comments[entry] = configspec.comments[entry]
section._configspec_inline_comments[entry] = configspec.inline_comments[entry]
section.configspec[entry] = configspec[entry]
section._order.append(entry)
for entry in configspec.sections:
if entry == '__many__':
continue
section._cs_section_comments[entry] = configspec.comments[entry]
section._cs_section_inline_comments[entry] = configspec.inline_comments[entry]
if not section.has_key(entry):
section[entry] = {}
self._set_configspec_value(configspec[entry], section[entry])
def _handle_repeat(self, section, configspec):
"""Dynamically assign configspec for repeated section."""
try:
section_keys = configspec.sections
scalar_keys = configspec.scalars
except AttributeError:
section_keys = [entry for entry in configspec
if isinstance(configspec[entry], dict)]
scalar_keys = [entry for entry in configspec
if not isinstance(configspec[entry], dict)]
if '__many__' in section_keys and len(section_keys) > 1:
# FIXME: can we supply any useful information here ?
raise RepeatSectionError()
scalars = {}
sections = {}
for entry in scalar_keys:
val = configspec[entry]
scalars[entry] = val
for entry in section_keys:
val = configspec[entry]
if entry == '__many__':
scalars[entry] = val
continue
sections[entry] = val
section.configspec = scalars
for entry in sections:
if not section.has_key(entry):
section[entry] = {}
self._handle_repeat(section[entry], sections[entry])
def _write_line(self, indent_string, entry, this_entry, comment):
"""Write an individual line, for the write method"""
# NOTE: the calls to self._quote here handles non-StringType values.
if not self.unrepr:
val = self._decode_element(self._quote(this_entry))
else:
val = repr(this_entry)
return '%s%s%s%s%s' % (indent_string,
self._decode_element(self._quote(entry, multiline=False)),
self._a_to_u(' = '),
val,
self._decode_element(comment))
def _write_marker(self, indent_string, depth, entry, comment):
"""Write a section marker line"""
return '%s%s%s%s%s' % (indent_string,
self._a_to_u('[' * depth),
self._quote(self._decode_element(entry), multiline=False),
self._a_to_u(']' * depth),
self._decode_element(comment))
def _handle_comment(self, comment):
"""Deal with a comment."""
if not comment:
return ''
start = self.indent_type
if not comment.startswith('#'):
start += self._a_to_u(' # ')
return (start + comment)
# Public methods
    def write(self, outfile=None, section=None):
        """
        Write the current ConfigObj as a file

        Recursive calls (``section`` set) return a list of output lines;
        the top-level call writes to ``outfile``/``self.filename`` or
        returns the list of lines when neither is set.

        tekNico: FIXME: use StringIO instead of real files
        >>> filename = a.filename
        >>> a.filename = 'test.ini'
        >>> a.write()
        >>> a.filename = filename
        >>> a == ConfigObj('test.ini', raise_errors=True)
        1
        """
        if self.indent_type is None:
            # this can be true if initialised from a dictionary
            self.indent_type = DEFAULT_INDENT_TYPE
        out = []
        cs = self._a_to_u('#')
        csp = self._a_to_u('# ')
        if section is None:
            # top level call - switch off interpolation so the raw
            # (uninterpolated) values are written out
            int_val = self.interpolation
            self.interpolation = False
            section = self
            for line in self.initial_comment:
                line = self._decode_element(line)
                stripped_line = line.strip()
                if stripped_line and not stripped_line.startswith(cs):
                    # ensure non-empty comment lines carry a comment marker
                    line = csp + line
                out.append(line)
        indent_string = self.indent_type * section.depth
        for entry in (section.scalars + section.sections):
            if entry in section.defaults:
                # don't write out default values
                continue
            for comment_line in section.comments[entry]:
                comment_line = self._decode_element(comment_line.lstrip())
                if comment_line and not comment_line.startswith(cs):
                    comment_line = csp + comment_line
                out.append(indent_string + comment_line)
            this_entry = section[entry]
            comment = self._handle_comment(section.inline_comments[entry])
            if isinstance(this_entry, dict):
                # a section
                out.append(self._write_marker(
                    indent_string,
                    this_entry.depth,
                    entry,
                    comment))
                out.extend(self.write(section=this_entry))
            else:
                out.append(self._write_line(
                    indent_string,
                    entry,
                    this_entry,
                    comment))
        if section is self:
            # top level - append the final comment and restore interpolation
            for line in self.final_comment:
                line = self._decode_element(line)
                stripped_line = line.strip()
                if stripped_line and not stripped_line.startswith(cs):
                    line = csp + line
                out.append(line)
            self.interpolation = int_val
        if section is not self:
            # recursive call - hand the lines back to the parent call
            return out
        if (self.filename is None) and (outfile is None):
            # output a list of lines
            # might need to encode
            # NOTE: This will *screw* UTF16, each line will start with the BOM
            if self.encoding:
                out = [l.encode(self.encoding) for l in out]
            if (self.BOM and ((self.encoding is None) or
                (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
                # Add the UTF8 BOM
                if not out:
                    out.append('')
                out[0] = BOM_UTF8 + out[0]
            return out
        # Turn the list to a string, joined with correct newlines
        newline = self.newlines or os.linesep
        output = self._a_to_u(newline).join(out)
        if self.encoding:
            output = output.encode(self.encoding)
        if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
            # Add the UTF8 BOM
            output = BOM_UTF8 + output
        if not output.endswith(newline):
            output += newline
        if outfile is not None:
            outfile.write(output)
        else:
            h = open(self.filename, 'wb')
            h.write(output)
            h.close()
    def validate(self, validator, preserve_errors=False, copy=False,
                 section=None):
        """
        Test the ConfigObj against a configspec.
        It uses the ``validator`` object from *validate.py*.
        To run ``validate`` on the current ConfigObj, call: ::
            test = config.validate(validator)
        (Normally having previously passed in the configspec when the ConfigObj
        was created - you can dynamically assign a dictionary of checks to the
        ``configspec`` attribute of a section though).
        It returns ``True`` if everything passes, or a dictionary of
        pass/fails (True/False). If every member of a subsection passes, it
        will just have the value ``True``. (It also returns ``False`` if all
        members fail).
        In addition, it converts the values from strings to their native
        types if their checks pass (and ``stringify`` is set).
        If ``preserve_errors`` is ``True`` (``False`` is default) then instead
        of a marking a fail with a ``False``, it will preserve the actual
        exception object. This can contain info about the reason for failure.
        For example the ``VdtValueTooSmallError`` indicates that the value
        supplied was too small. If a value (or section) is missing it will
        still be marked as ``False``.
        You must have the validate module to use ``preserve_errors=True``.
        You can then use the ``flatten_errors`` function to turn your nested
        results dictionary into a flattened list of failures - useful for
        displaying meaningful error messages.
        """
        if section is None:
            # top level call - set things up before recursing
            if self.configspec is None:
                raise ValueError('No configspec supplied.')
            if preserve_errors:
                # We do this once to remove a top level dependency on the validate module
                # Which makes importing configobj faster
                from validate import VdtMissingValue
                self._vdtMissingValue = VdtMissingValue
            section = self
        #
        spec_section = section.configspec
        if copy and hasattr(section, '_configspec_initial_comment'):
            # copy mode: take the file attributes from the configspec
            section.initial_comment = section._configspec_initial_comment
            section.final_comment = section._configspec_final_comment
            section.encoding = section._configspec_encoding
            section.BOM = section._configspec_BOM
            section.newlines = section._configspec_newlines
            section.indent_type = section._configspec_indent_type
        if '__many__' in section.configspec:
            many = spec_section['__many__']
            # dynamically assign the configspecs
            # for the sections below
            for entry in section.sections:
                self._handle_repeat(section[entry], many)
        #
        # ret_true/ret_false track whether *every* entry passed / failed,
        # so the aggregate result can collapse to a single bool
        out = {}
        ret_true = True
        ret_false = True
        order = [k for k in section._order if k in spec_section]
        order += [k for k in spec_section if k not in order]
        for entry in order:
            if entry == '__many__':
                continue
            if (not entry in section.scalars) or (entry in section.defaults):
                # missing entries
                # or entries from defaults
                missing = True
                val = None
                if copy and not entry in section.scalars:
                    # copy comments
                    section.comments[entry] = (
                        section._configspec_comments.get(entry, []))
                    section.inline_comments[entry] = (
                        section._configspec_inline_comments.get(entry, ''))
                #
            else:
                missing = False
                val = section[entry]
            try:
                check = validator.check(spec_section[entry],
                                        val,
                                        missing=missing
                                        )
            except validator.baseErrorClass, e:
                if not preserve_errors or isinstance(e, self._vdtMissingValue):
                    out[entry] = False
                else:
                    # preserve the error
                    out[entry] = e
                    ret_false = False
                ret_true = False
            else:
                # the check passed - record any default value the
                # validator can supply for this entry
                try:
                    section.default_values.pop(entry, None)
                except AttributeError:
                    # For Python 2.2 compatibility
                    try:
                        del section.default_values[entry]
                    except KeyError:
                        pass
                if hasattr(validator, 'get_default_value'):
                    try:
                        section.default_values[entry] = validator.get_default_value(spec_section[entry])
                    except KeyError:
                        # No default
                        pass
                ret_false = False
                out[entry] = True
                if self.stringify or missing:
                    # if we are doing type conversion
                    # or the value is a supplied default
                    if not self.stringify:
                        if isinstance(check, (list, tuple)):
                            # preserve lists
                            check = [self._str(item) for item in check]
                        elif missing and check is None:
                            # convert the None from a default to a ''
                            check = ''
                        else:
                            check = self._str(check)
                    if (check != val) or missing:
                        section[entry] = check
                if not copy and missing and entry not in section.defaults:
                    section.defaults.append(entry)
        # Missing sections will have been created as empty ones when the
        # configspec was read.
        for entry in section.sections:
            # FIXME: this means DEFAULT is not copied in copy mode
            if section is self and entry == 'DEFAULT':
                continue
            if copy:
                section.comments[entry] = section._cs_section_comments.get(entry, [])
                section.inline_comments[entry] = section._cs_section_inline_comments.get(entry, '')
            check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry])
            out[entry] = check
            if check == False:
                ret_true = False
            elif check == True:
                ret_false = False
            else:
                ret_true = False
                ret_false = False
        #
        # collapse to a single bool when everything passed or failed
        if ret_true:
            return True
        elif ret_false:
            return False
        return out
    def reset(self):
        """Clear ConfigObj instance and restore to 'freshly created' state."""
        # empty the underlying dict, then re-run default initialisation
        self.clear()
        self._initialise()
        # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
        # requires an empty dictionary
        self.configspec = None
        # Just to be sure ;-)
        self._original_configspec = None
def reload(self):
"""
Reload a ConfigObj from file.
This method raises a ``ReloadError`` if the ConfigObj doesn't have
a filename attribute pointing to a file.
"""
if not isinstance(self.filename, StringTypes):
raise ReloadError()
filename = self.filename
current_options = {}
for entry in OPTION_DEFAULTS:
if entry == 'configspec':
continue
current_options[entry] = getattr(self, entry)
configspec = self._original_configspec
current_options['configspec'] = configspec
self.clear()
self._initialise(current_options)
self._load(filename, configspec)
class SimpleVal(object):
    """
    A simple validator.
    Can be used to check that all members expected are present.

    To use it, provide a configspec with all your members in (the value given
    will be ignored). Pass an instance of ``SimpleVal`` to the ``validate``
    method of your ``ConfigObj``. ``validate`` will return ``True`` if all
    members are present, or a dictionary with True/False meaning
    present/missing. (Whole missing sections will be replaced with ``False``)
    """

    def __init__(self):
        self.baseErrorClass = ConfigObjError

    def check(self, check, member, missing=False):
        """A dummy check method, always returns the value unchanged."""
        if not missing:
            return member
        # a missing member is the only kind of failure
        raise self.baseErrorClass()
# Check / processing functions for options
def flatten_errors(cfg, res, levels=None, results=None):
    """
    Turn a nested dictionary of results (as returned by
    ``ConfigObj.validate``) into a flat list of failures.

    ``cfg`` is the ConfigObj instance being checked, ``res`` is the results
    dictionary returned by ``validate``.

    Returns a list of keys that failed; each member is a tuple ::

        ([list of sections...], key, result)

    *list of sections* is the (flattened) path of sections the key was
    found in.  If a whole section was missing then ``key`` is ``None``.
    If the value (or section) was missing then ``result`` is ``False``.
    If ``validate`` was called with ``preserve_errors=True`` and the value
    was present but failed its check, ``result`` is the exception object
    returned - usable as a descriptive string, e.g.
    *The value "3" is of the wrong type*.

    (This is a recursive function; the ``levels`` and ``results``
    arguments are internal bookkeeping - don't pass them yourself.)
    """
    if levels is None:
        # first (non-recursive) call
        levels = []
        results = []
    if res is True:
        # everything from this point down passed
        return results
    if res is False:
        # the whole section is missing
        results.append((levels[:], None, False))
        if levels:
            levels.pop()
        return results
    for (key, val) in res.items():
        if val == True:
            continue
        if isinstance(cfg.get(key), dict):
            # a subsection with failures - recurse one level down
            levels.append(key)
            flatten_errors(cfg[key], val, levels, results)
        else:
            results.append((levels[:], key, val))
    # done with this level - go back up one
    if levels:
        levels.pop()
    return results
"""*A programming language is a medium of expression.* - Paul Graham"""
| 35.4536 | 114 | 0.530451 |
f029eeefad4504ada84a96950c92a809d782769c | 3,135 | py | Python | resource/pypi/cryptography-1.7.1/src/cryptography/hazmat/primitives/keywrap.py | hipnusleo/Laserjet | f53e0b740f48f2feb0c0bb285ec6728b313b4ccc | [
"Apache-2.0"
] | null | null | null | resource/pypi/cryptography-1.7.1/src/cryptography/hazmat/primitives/keywrap.py | hipnusleo/Laserjet | f53e0b740f48f2feb0c0bb285ec6728b313b4ccc | [
"Apache-2.0"
] | null | null | null | resource/pypi/cryptography-1.7.1/src/cryptography/hazmat/primitives/keywrap.py | hipnusleo/Laserjet | f53e0b740f48f2feb0c0bb285ec6728b313b4ccc | [
"Apache-2.0"
] | null | null | null | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import struct
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import ECB
from cryptography.hazmat.primitives.constant_time import bytes_eq
def aes_key_wrap(wrapping_key, key_to_wrap, backend):
    """AES Key Wrap (RFC 3394, section 2.2.1, index-based variant).

    ``wrapping_key`` is the KEK (16/24/32 bytes); ``key_to_wrap`` must be
    at least 16 bytes and a multiple of 8.  Returns the wrapped key, which
    is 8 bytes longer than the input (the integrity value is prepended).
    """
    if len(wrapping_key) not in [16, 24, 32]:
        raise ValueError("The wrapping key must be a valid AES key length")
    if len(key_to_wrap) < 16:
        raise ValueError("The key to wrap must be at least 16 bytes")
    if len(key_to_wrap) % 8 != 0:
        raise ValueError("The key to wrap must be a multiple of 8 bytes")
    # RFC 3394 Key Wrap - 2.2.1 (index method)
    encryptor = Cipher(AES(wrapping_key), ECB(), backend).encryptor()
    # 'a' starts as the RFC 3394 initial value A6A6A6A6A6A6A6A6
    a = b"\xa6\xa6\xa6\xa6\xa6\xa6\xa6\xa6"
    # split the key data into 64-bit blocks R[1]..R[n]
    r = [key_to_wrap[i:i + 8] for i in range(0, len(key_to_wrap), 8)]
    n = len(r)
    for j in range(6):
        for i in range(n):
            # every encryption operation is a discrete 16 byte chunk (because
            # AES has a 128-bit block size) and since we're using ECB it is
            # safe to reuse the encryptor for the entire operation
            b = encryptor.update(a + r[i])
            # pack/unpack are safe as these are always 64-bit chunks
            a = struct.pack(
                ">Q", struct.unpack(">Q", b[:8])[0] ^ ((n * j) + i + 1)
            )
            r[i] = b[-8:]
    # ECB produces no buffered output; finalize() must be empty
    assert encryptor.finalize() == b""
    return a + b"".join(r)
def aes_key_unwrap(wrapping_key, wrapped_key, backend):
    """AES Key Unwrap (RFC 3394, section 2.2.2, index-based variant).

    Inverse of ``aes_key_wrap``.  Raises ``InvalidUnwrap`` when the
    recovered integrity value does not match the RFC 3394 IV.
    """
    if len(wrapped_key) < 24:
        raise ValueError("Must be at least 24 bytes")
    if len(wrapped_key) % 8 != 0:
        raise ValueError("The wrapped key must be a multiple of 8 bytes")
    if len(wrapping_key) not in [16, 24, 32]:
        raise ValueError("The wrapping key must be a valid AES key length")
    # Implement RFC 3394 Key Unwrap - 2.2.2 (index method)
    decryptor = Cipher(AES(wrapping_key), ECB(), backend).decryptor()
    # the expected integrity value (RFC 3394 IV)
    aiv = b"\xa6\xa6\xa6\xa6\xa6\xa6\xa6\xa6"
    r = [wrapped_key[i:i + 8] for i in range(0, len(wrapped_key), 8)]
    # the first 64-bit block carries the transformed integrity value
    a = r.pop(0)
    n = len(r)
    for j in reversed(range(6)):
        for i in reversed(range(n)):
            # pack/unpack are safe as these are always 64-bit chunks
            atr = struct.pack(
                ">Q", struct.unpack(">Q", a)[0] ^ ((n * j) + i + 1)
            ) + r[i]
            # every decryption operation is a discrete 16 byte chunk so
            # it is safe to reuse the decryptor for the entire operation
            b = decryptor.update(atr)
            a = b[:8]
            r[i] = b[-8:]
    # ECB produces no buffered output; finalize() must be empty
    assert decryptor.finalize() == b""
    # bytes_eq gives a constant-time comparison of the recovered IV
    if not bytes_eq(a, aiv):
        raise InvalidUnwrap()
    return b"".join(r)
class InvalidUnwrap(Exception):
    """Raised by ``aes_key_unwrap`` when the integrity check fails."""
    pass
| 36.453488 | 80 | 0.606699 |
3326990f5d431b40ba473e37466e486d973603e6 | 2,875 | py | Python | imcsdk/imcbasetype.py | ecoen66/imcsdk | b10eaa926a5ee57cea7182ae0adc8dd1c818b0ab | [
"Apache-2.0"
] | 31 | 2016-06-14T07:23:59.000Z | 2021-09-12T17:17:26.000Z | imcsdk/imcbasetype.py | sthagen/imcsdk | 1831eaecb5960ca03a8624b1579521749762b932 | [
"Apache-2.0"
] | 109 | 2016-05-25T03:56:56.000Z | 2021-10-18T02:58:12.000Z | imcsdk/imcbasetype.py | sthagen/imcsdk | 1831eaecb5960ca03a8624b1579521749762b932 | [
"Apache-2.0"
] | 67 | 2016-05-17T05:53:56.000Z | 2022-03-24T15:52:53.000Z | # Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is an auto-generated module.
It contains supporting classes for Filter and External Method.
"""
from .imccore import BaseObject
class Method(BaseObject):
    """Supporting class for the external-method element ``method``."""

    def __init__(self, **kwargs):
        BaseObject.__init__(self, "Method", "method")
        for attr_name, attr_value in kwargs.items():
            self.attr_set(attr_name, attr_value)
class ConfigConfig(BaseObject):
    """Supporting class for the external-method element ``configConfig``."""

    def __init__(self, **kwargs):
        BaseObject.__init__(self, "ConfigConfig", "configConfig")
        for attr_name, attr_value in kwargs.items():
            self.attr_set(attr_name, attr_value)
class ConfigMap(BaseObject):
    """Supporting class for the external-method element ``configMap``."""

    def __init__(self, **kwargs):
        BaseObject.__init__(self, "ConfigMap", "configMap")
        for attr_name, attr_value in kwargs.items():
            self.attr_set(attr_name, attr_value)
class ConfigSet(BaseObject):
    """Supporting class for the external-method element ``configSet``."""

    def __init__(self, **kwargs):
        BaseObject.__init__(self, "ConfigSet", "configSet")
        for attr_name, attr_value in kwargs.items():
            self.attr_set(attr_name, attr_value)
class FailedMo(BaseObject):
    """Supporting class for the external-method element ``failedMo``."""

    def __init__(self, **kwargs):
        BaseObject.__init__(self, "FailedMo", "failedMo")
        # identification and error details; None until explicitly set
        self.dn = None
        self.error_code = None
        self.error_descr = None
        self.name = None
        for attr_name, attr_value in kwargs.items():
            self.attr_set(attr_name, attr_value)
class FailedMos(BaseObject):
    """Supporting class for the external-method element ``failedMos``."""

    def __init__(self, **kwargs):
        BaseObject.__init__(self, "FailedMos", "failedMos")
        for attr_name, attr_value in kwargs.items():
            self.attr_set(attr_name, attr_value)
class FilterFilter(BaseObject):
    """Supporting class for the ``filter`` element."""

    def __init__(self, **kwargs):
        BaseObject.__init__(self, "FilterFilter", "filter")
        for attr_name, attr_value in kwargs.items():
            self.attr_set(attr_name, attr_value)
class Pair(BaseObject):
    """Supporting class for the external-method element ``pair``."""

    def __init__(self, **kwargs):
        BaseObject.__init__(self, "Pair", "pair")
        # the pair's key; None until explicitly set
        self.key = None
        for attr_name, attr_value in kwargs.items():
            self.attr_set(attr_name, attr_value)
2984b93d5704706985fe48a7d7d829b341c12c0f | 10,734 | py | Python | reveal_graph_embedding/experiments/utility.py | MKLab-ITI/reveal-graph-embedding | 72d4af794536f97b8ede06c0f27f261ea85d8c4b | [
"Apache-2.0"
] | 31 | 2015-07-14T16:21:25.000Z | 2021-06-30T14:10:44.000Z | reveal_graph_embedding/experiments/utility.py | MKLab-ITI/reveal-graph-embedding | 72d4af794536f97b8ede06c0f27f261ea85d8c4b | [
"Apache-2.0"
] | null | null | null | reveal_graph_embedding/experiments/utility.py | MKLab-ITI/reveal-graph-embedding | 72d4af794536f97b8ede06c0f27f261ea85d8c4b | [
"Apache-2.0"
] | 11 | 2016-08-21T03:07:20.000Z | 2020-03-07T03:17:05.000Z | __author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import time
import numpy as np
from scipy.sparse import issparse
from sklearn.multiclass import OneVsRestClassifier
from sklearn import svm
from sklearn.preprocessing import normalize
from reveal_graph_embedding.datautil.snow_datautil import snow_read_data
from reveal_graph_embedding.datautil.asu_datautil import asu_read_data
from reveal_graph_embedding.datautil.insight_datautil import insight_read_data
from reveal_graph_embedding.embedding.arcte.arcte import arcte
from reveal_graph_embedding.embedding.competing_methods import laplacian_eigenmaps, replicator_eigenmaps, louvain,\
mroc, base_communities
from reveal_graph_embedding.embedding.common import normalize_columns
from reveal_graph_embedding.learning.holdout import generate_folds
from reveal_graph_embedding.embedding.community_weighting import chi2_contingency_matrix,\
peak_snr_weight_aggregation, community_weighting
from reveal_graph_embedding.learning import evaluation
def run_experiment(dataset_name,
                   dataset_folder,
                   feature_extraction_method_name,
                   percentages,
                   trial_num,
                   thread_num,
                   feature_extraction_parameters,
                   classifier_parameters):
    """
    Run a multi-label node classification experiment on a graph dataset.

    dataset_name: one of "snow2014", "flickr", "youtube", "politicsuk".
    dataset_folder: folder holding the dataset's graph/label files.
    feature_extraction_method_name: passed through to ``feature_extraction``.
    percentages: numpy array of labelled-node percentages to evaluate.
    trial_num: number of random train/test folds per percentage.
    thread_num: degree of parallelism for extraction and classification.
    feature_extraction_parameters: dict; must contain "community_weighting"
        (None, "chi2" or "ivf") plus method-specific parameters.
    classifier_parameters: dict with "C" and "fit_intercept" for LinearSVC.

    Prints mean and standard deviation of macro-/micro-F1 per percentage.
    Raises RuntimeError for an unknown dataset or weighting selection.
    """
    # Read the adjacency matrix and node-label matrix for the dataset.
    if dataset_name == "snow2014":
        adjacency_matrix,\
        node_label_matrix,\
        labelled_node_indices,\
        number_of_categories = read_snow2014graph_data(dataset_folder)
    elif dataset_name == "flickr":
        adjacency_matrix,\
        node_label_matrix,\
        labelled_node_indices,\
        number_of_categories = read_asu_data(dataset_folder)
    elif dataset_name == "youtube":
        adjacency_matrix,\
        node_label_matrix,\
        labelled_node_indices,\
        number_of_categories = read_asu_data(dataset_folder)
    elif dataset_name == "politicsuk":
        adjacency_matrix,\
        node_label_matrix,\
        labelled_node_indices,\
        number_of_categories = read_insight_data(dataset_folder)
    else:
        print("Invalid dataset name.")
        raise RuntimeError
    print("Graphs and labels read.")
    feature_matrix,\
    feature_extraction_elapsed_time = feature_extraction(adjacency_matrix,
                                                         feature_extraction_method_name,
                                                         thread_num,
                                                         feature_extraction_parameters)
    print("Feature extraction elapsed time: ", feature_extraction_elapsed_time)
    # Column-normalize the features when a community weighting is selected.
    if feature_extraction_parameters["community_weighting"] is None:
        pass
    elif feature_extraction_parameters["community_weighting"] == "chi2":
        feature_matrix = normalize_columns(feature_matrix)
    elif feature_extraction_parameters["community_weighting"] == "ivf":
        feature_matrix = normalize_columns(feature_matrix)
    else:
        print("Invalid community weighting selection.")
        raise RuntimeError
    C = classifier_parameters["C"]
    fit_intercept = classifier_parameters["fit_intercept"]
    for p in np.arange(percentages.size):
        percentage = percentages[p]
        # Initialize the metric storage arrays to zero.
        # NOTE: np.float64 (not the removed ``np.float`` alias) - same dtype.
        macro_F1 = np.zeros(trial_num, dtype=np.float64)
        micro_F1 = np.zeros(trial_num, dtype=np.float64)
        folds = generate_folds(node_label_matrix,
                               labelled_node_indices,
                               number_of_categories,
                               percentage,
                               trial_num)
        for trial in np.arange(trial_num):
            train, test = next(folds)
            ########################################################################################################
            # Separate train and test sets
            ########################################################################################################
            X_train, X_test, y_train, y_test = feature_matrix[train, :],\
                                               feature_matrix[test, :],\
                                               node_label_matrix[train, :],\
                                               node_label_matrix[test, :]
            if issparse(feature_matrix):
                # chi2 weighting is fitted on the training fold only
                if feature_extraction_parameters["community_weighting"] == "chi2":
                    contingency_matrix = chi2_contingency_matrix(X_train, y_train)
                    community_weights = peak_snr_weight_aggregation(contingency_matrix)
                    X_train, X_test = community_weighting(X_train, X_test, community_weights)
            else:
                X_train = normalize(X_train, norm="l2")
                X_test = normalize(X_test, norm="l2")
            ############################################################################################################
            # Train model
            ############################################################################################################
            # Train classifier.
            start_time = time.time()
            model = OneVsRestClassifier(svm.LinearSVC(C=C,
                                                      random_state=None,
                                                      dual=False,
                                                      fit_intercept=fit_intercept),
                                        n_jobs=thread_num)
            model.fit(X_train, y_train)
            hypothesis_training_time = time.time() - start_time
            print('Model fitting time: ', hypothesis_training_time)
            ############################################################################################################
            # Make predictions
            ############################################################################################################
            start_time = time.time()
            y_pred = model.decision_function(X_test)
            prediction_time = time.time() - start_time
            print('Prediction time: ', prediction_time)
            ############################################################################################################
            # Calculate measures
            ############################################################################################################
            y_pred = evaluation.form_node_label_prediction_matrix(y_pred, y_test)
            measures = evaluation.calculate_measures(y_pred, y_test)
            macro_F1[trial] = measures[4]
            micro_F1[trial] = measures[5]
            # print('Trial ', trial+1, ':')
            # print('  Macro-F1:        ', macro_F1[trial])
            # print('  Micro-F1:        ', micro_F1[trial])
            # print('\n')
        ################################################################################################################
        # Experiment results
        ################################################################################################################
        print(percentage)
        print('\n')
        print('Macro F1 average: ', np.mean(macro_F1))
        print('Micro F1 average: ', np.mean(micro_F1))
        print('Macro F1 std: ', np.std(macro_F1))
        print('Micro F1 std: ', np.std(micro_F1))
def read_snow2014graph_data(dataset_folder):
    """Read the SNOW 2014 graph dataset rooted at dataset_folder.

    Returns a tuple of (adjacency_matrix, node_label_matrix,
    labelled_node_indices, number_of_categories).
    """
    graph_path = dataset_folder + "/men_ret_graph.tsv"
    labels_path = dataset_folder + "/user_label_matrix.tsv"
    adjacency_matrix = snow_read_data.read_adjacency_matrix(file_path=graph_path,
                                                            separator="\t")
    (node_label_matrix,
     labelled_node_indices,
     number_of_categories) = snow_read_data.read_node_label_matrix(file_path=labels_path,
                                                                   separator="\t")
    return (adjacency_matrix,
            node_label_matrix,
            labelled_node_indices,
            number_of_categories)
def read_asu_data(dataset_folder):
    """Read an ASU-format graph dataset rooted at dataset_folder.

    Returns a tuple of (adjacency_matrix, node_label_matrix,
    labelled_node_indices, number_of_categories).
    """
    edges_path = dataset_folder + "/edges.csv"
    groups_path = dataset_folder + "/group-edges.csv"
    adjacency_matrix = asu_read_data.read_adjacency_matrix(file_path=edges_path,
                                                           separator=",")
    node_count = adjacency_matrix.shape[0]
    (node_label_matrix,
     labelled_node_indices,
     number_of_categories) = asu_read_data.read_node_label_matrix(file_path=groups_path,
                                                                  separator=",",
                                                                  number_of_nodes=node_count)
    return (adjacency_matrix,
            node_label_matrix,
            labelled_node_indices,
            number_of_categories)
def read_insight_data(dataset_folder):
    """Read an Insight-format graph dataset rooted at dataset_folder.

    Returns a tuple of (adjacency_matrix, node_label_matrix,
    labelled_node_indices, number_of_categories).
    """
    graph_path = dataset_folder + "/men_ret_graph.tsv"
    labels_path = dataset_folder + "/user_label_matrix.tsv"
    adjacency_matrix = insight_read_data.read_adjacency_matrix(file_path=graph_path,
                                                               separator="\t")
    (node_label_matrix,
     labelled_node_indices,
     number_of_categories) = insight_read_data.read_node_label_matrix(file_path=labels_path,
                                                                      separator="\t")
    return (adjacency_matrix,
            node_label_matrix,
            labelled_node_indices,
            number_of_categories)
def feature_extraction(adjacency_matrix,
                       feature_extraction_method_name,
                       thread_num,
                       feature_extraction_parameters):
    """Dispatch to the requested graph feature-extraction method and time it.

    Parameters:
        adjacency_matrix: Adjacency matrix of the graph.
        feature_extraction_method_name: One of "arcte", "mroc", "louvain",
            "basecomm", "lapeig" or "repeig".
        thread_num: Number of worker threads (only used by "arcte").
        feature_extraction_parameters: Dict of method-specific parameters
            ("epsilon"/"rho" for arcte, "alpha" for mroc, "dimensionality"
            for the eigenmap methods).

    Returns:
        (feature_matrix, elapsed_time) where elapsed_time is in seconds.

    Raises:
        RuntimeError: If the method name is not recognized.
    """
    start_time = time.time()
    if feature_extraction_method_name == "arcte":
        epsilon = feature_extraction_parameters["epsilon"]
        rho = feature_extraction_parameters["rho"]
        feature_matrix = arcte(adjacency_matrix, rho, epsilon, thread_num)
    elif feature_extraction_method_name == "mroc":
        alpha = feature_extraction_parameters["alpha"]
        feature_matrix = mroc(adjacency_matrix, alpha)
    elif feature_extraction_method_name == "louvain":
        feature_matrix = louvain(adjacency_matrix)
    elif feature_extraction_method_name == "basecomm":
        feature_matrix = base_communities(adjacency_matrix)
    elif feature_extraction_method_name == "lapeig":
        dimensionality = feature_extraction_parameters["dimensionality"]
        feature_matrix = laplacian_eigenmaps(adjacency_matrix, dimensionality)
    elif feature_extraction_method_name == "repeig":
        dimensionality = feature_extraction_parameters["dimensionality"]
        feature_matrix = replicator_eigenmaps(adjacency_matrix, dimensionality)
    else:
        # Raise with the offending name instead of printing to stdout and
        # raising a bare exception class (same exception type as before).
        raise RuntimeError("Invalid feature extraction name: %r"
                           % (feature_extraction_method_name,))
    elapsed_time = time.time() - start_time
    return feature_matrix, elapsed_time
| 46.873362 | 120 | 0.548351 |
5a091bbab763b3f308bd3181be7744ac8a99f868 | 792 | py | Python | app/helpers/__init__.py | geoadmin/service-alti | 813f3271ddef8d6075954b5800a0f99c865f3fef | [
"BSD-3-Clause"
] | 1 | 2017-02-17T21:34:23.000Z | 2017-02-17T21:34:23.000Z | app/helpers/__init__.py | geoadmin/service-alti | 813f3271ddef8d6075954b5800a0f99c865f3fef | [
"BSD-3-Clause"
] | 48 | 2016-07-18T08:59:40.000Z | 2022-02-24T13:16:38.000Z | app/helpers/__init__.py | geoadmin/service-alti | 813f3271ddef8d6075954b5800a0f99c865f3fef | [
"BSD-3-Clause"
] | 3 | 2017-01-22T09:59:23.000Z | 2021-04-29T07:21:58.000Z | # -*- coding: utf-8 -*-
import logging
import logging.config
import os
from os import path
import yaml
from flask import jsonify
from flask import make_response
from app.settings import LOGGING_CFG
logger = logging.getLogger(__name__)
def make_error_msg(code, msg):
    """Build a Flask JSON error response carrying the given HTTP status code."""
    payload = {'success': False, 'error': {'code': code, 'message': msg}}
    return make_response(jsonify(payload), code)
def get_logging_cfg():
    """Load the logging configuration dict from LOGGING_CFG.

    Environment variables referenced in the YAML file are expanded before
    parsing.
    """
    print(f"LOGS_DIR is {os.environ['LOGS_DIR']}")
    print(f"LOGGING_CFG is {LOGGING_CFG}")
    with open(LOGGING_CFG, 'rt') as fd:
        raw = fd.read()
    config = yaml.safe_load(path.expandvars(raw))
    logger.debug('Load logging configuration from file %s', LOGGING_CFG)
    return config
def init_logging():
    """Configure the logging module from the YAML logging configuration file."""
    logging.config.dictConfig(get_logging_cfg())
| 22 | 100 | 0.707071 |
7287e6ab6a921b52630e04331947971b300a6171 | 24,115 | py | Python | Lib/site-packages/win32com/client/build.py | egorcompany/telegram-chat-members | 19a7c2bffe2fb832b79a4475ca324c438d5f548d | [
"MIT"
] | 3 | 2016-11-24T03:57:22.000Z | 2019-02-27T15:19:50.000Z | Lib/site-packages/win32com/client/build.py | egorcompany/telegram-chat-members | 19a7c2bffe2fb832b79a4475ca324c438d5f548d | [
"MIT"
] | 67 | 2016-10-19T01:23:47.000Z | 2016-12-14T04:30:38.000Z | Lib/site-packages/win32com/client/build.py | egorcompany/telegram-chat-members | 19a7c2bffe2fb832b79a4475ca324c438d5f548d | [
"MIT"
] | 1 | 2020-08-08T12:44:48.000Z | 2020-08-08T12:44:48.000Z | """Contains knowledge to build a COM object definition.
This module is used by both the @dynamic@ and @makepy@ modules to build
all knowledge of a COM object.
This module contains classes which contain the actual knowledge of the object.
This include parameter and return type information, the COM dispid and CLSID, etc.
Other modules may use this information to generate .py files, use the information
dynamically, or possibly even generate .html documentation for objects.
"""
#
# NOTES: DispatchItem and MapEntry used by dynamic.py.
# the rest is used by makepy.py
#
# OleItem, DispatchItem, MapEntry, BuildCallList() is used by makepy
import sys
import string
from keyword import iskeyword
import pythoncom
from pywintypes import TimeType
import winerror
import datetime
# It isn't really clear what the quoting rules are in a C/IDL string and
# literals like a quote char and backslashes makes life a little painful to
# always render the string perfectly - so just punt and fall-back to a repr()
def _makeDocString(s):
if sys.version_info < (3,):
s = s.encode("mbcs")
return repr(s)
# Historic module-level error string (pre-exception-class convention).
error = "PythonCOM.Client.Build error"

class NotSupportedException(Exception):
    """Raised when we cant support a param type."""

# Sentinel used by type resolution when a pointer indirection is dropped.
DropIndirection = "DropIndirection"

# Variant types whose values need no runtime translation by generated code.
# (VT_NULL appears twice in the original list - harmless, kept for fidelity.)
NoTranslateTypes = [
    pythoncom.VT_BOOL, pythoncom.VT_CLSID, pythoncom.VT_CY,
    pythoncom.VT_DATE, pythoncom.VT_DECIMAL, pythoncom.VT_EMPTY,
    pythoncom.VT_ERROR, pythoncom.VT_FILETIME, pythoncom.VT_HRESULT,
    pythoncom.VT_I1, pythoncom.VT_I2, pythoncom.VT_I4,
    pythoncom.VT_I8, pythoncom.VT_INT, pythoncom.VT_NULL,
    pythoncom.VT_R4, pythoncom.VT_R8, pythoncom.VT_NULL,
    pythoncom.VT_STREAM,
    pythoncom.VT_UI1, pythoncom.VT_UI2, pythoncom.VT_UI4,
    pythoncom.VT_UI8, pythoncom.VT_UINT, pythoncom.VT_VOID,
]

# Map form for O(1) membership tests; values are unused (all None).
# dict.fromkeys replaces the manual loop (which also leaked the loop
# variable 'v' into module scope).
NoTranslateMap = dict.fromkeys(NoTranslateTypes)
class MapEntry:
    """Simple holder for named attributes - items in a map."""

    def __init__(self, desc_or_id, names=None, doc=None, resultCLSID=pythoncom.IID_NULL, resultDoc=None, hidden=0):
        # desc_or_id may be either a bare dispid (an int) or a full
        # function/variable description tuple whose first item is the dispid.
        if type(desc_or_id) == int:
            self.dispid = desc_or_id
            self.desc = None
        else:
            self.dispid = desc_or_id[0]
            self.desc = desc_or_id
        self.names = names
        self.doc = doc
        self.resultCLSID = resultCLSID
        self.resultDocumentation = resultDoc
        # Set when a property is transformed into a function so it can take args.
        self.wasProperty = 0
        self.hidden = hidden

    def GetResultCLSID(self):
        """Return the result CLSID, or None when it is the null IID."""
        if self.resultCLSID == pythoncom.IID_NULL:
            return None
        return self.resultCLSID

    def GetResultCLSIDStr(self):
        """Return a source literal for the result CLSID - "'{...}'" or "None"."""
        clsid = self.GetResultCLSID()
        if clsid is None:
            return "None"
        # Convert the IID object to a string, then to a string in a string.
        return repr(str(clsid))

    def GetResultName(self):
        """Return the name from the result documentation tuple, if any."""
        doc = self.resultDocumentation
        return None if doc is None else doc[0]
class OleItem:
    """Base class for items built from a type library."""
    typename = "OleItem"

    def __init__(self, doc=None):
        # doc is the (name, docstring, ...) documentation tuple, or None.
        self.doc = doc
        self.python_name = MakePublicAttributeName(doc[0]) if doc else None
        # Flags used by the code generator / builders.
        self.bWritten = 0
        self.bIsDispatch = 0
        self.bIsSink = 0
        self.clsid = None
        self.co_class = None
class DispatchItem(OleItem):
    """Describes an IDispatch-capable COM object built from an ITypeInfo.

    Builds maps of callable methods (mapFuncs) and properties (propMap,
    propMapGet, propMapPut), and can generate the Python source code for
    dispatch-based method wrappers.
    """
    typename = "DispatchItem"
    def __init__(self, typeinfo=None, attr=None, doc=None, bForUser=1):
        OleItem.__init__(self,doc)
        self.propMap = {}
        self.propMapGet = {}
        self.propMapPut = {}
        self.mapFuncs = {}
        self.defaultDispatchName = None
        self.hidden = 0

        if typeinfo:
            self.Build(typeinfo, attr, bForUser)

    def _propMapPutCheck_(self,key,item):
        """Convert a "put" property needing extra args into a Set method."""
        ins, outs, opts = self.CountInOutOptArgs(item.desc[2])
        if ins>1: # if a Put property takes more than 1 arg:
            if opts+1==ins or ins==item.desc[6]+1:
                newKey = "Set" + key
                deleteExisting = 0 # This one is still OK
            else:
                deleteExisting = 1 # No good to us
                if key in self.mapFuncs or key in self.propMapGet:
                    newKey = "Set" + key
                else:
                    newKey = key
            item.wasProperty = 1
            self.mapFuncs[newKey] = item
            if deleteExisting:
                del self.propMapPut[key]

    def _propMapGetCheck_(self,key,item):
        """Convert a "get" property needing extra args into a Get method."""
        ins, outs, opts = self.CountInOutOptArgs(item.desc[2])
        if ins > 0: # if a Get property takes _any_ in args:
            if item.desc[6]==ins or ins==opts:
                newKey = "Get" + key
                deleteExisting = 0 # This one is still OK
            else:
                deleteExisting = 1 # No good to us
                if key in self.mapFuncs:
                    newKey = "Get" + key
                else:
                    newKey = key
            item.wasProperty = 1
            self.mapFuncs[newKey] = item
            if deleteExisting:
                del self.propMapGet[key]

    def _AddFunc_(self,typeinfo,fdesc,bForUser):
        """Add one FUNCDESC from the typeinfo to the appropriate map.

        Returns (name, map) for dispatchable entries, else None.
        """
        id = fdesc.memid
        funcflags = fdesc.wFuncFlags
        try:
            names = typeinfo.GetNames(id)
            name=names[0]
        except pythoncom.ole_error:
            name = ""
            names = None

        doc = None
        try:
            if bForUser:
                doc = typeinfo.GetDocumentation(id)
        except pythoncom.ole_error:
            pass

        # dispid 0 is DISPID_VALUE - the object's default member.
        if id==0 and name:
            self.defaultDispatchName = name

        invkind = fdesc.invkind

        # We need to translate any Alias', Enums, structs etc in result and args
        typerepr, flag, defval = fdesc.rettype
        # sys.stderr.write("%s result - %s -> " % (name, typerepr))
        typerepr, resultCLSID, resultDoc = _ResolveType(typerepr, typeinfo)
        # sys.stderr.write("%s\n" % (typerepr,))

        fdesc.rettype = typerepr, flag, defval, resultCLSID
        # Translate any Alias or Enums in argument list.
        argList = []
        for argDesc in fdesc.args:
            typerepr, flag, defval = argDesc
            # sys.stderr.write("%s arg - %s -> " % (name, typerepr))
            arg_type, arg_clsid, arg_doc = _ResolveType(typerepr, typeinfo)
            argDesc = arg_type, flag, defval, arg_clsid
            # sys.stderr.write("%s\n" % (argDesc[0],))
            argList.append(argDesc)
        fdesc.args = tuple(argList)

        hidden = (funcflags & pythoncom.FUNCFLAG_FHIDDEN) != 0
        if invkind == pythoncom.INVOKE_PROPERTYGET:
            map = self.propMapGet
        # This is not the best solution, but I dont think there is
        # one without specific "set" syntax.
        # If there is a single PUT or PUTREF, it will function as a property.
        # If there are both, then the PUT remains a property, and the PUTREF
        # gets transformed into a function.
        # (in vb, PUT=="obj=other_obj", PUTREF="set obj=other_obj
        elif invkind in (pythoncom.INVOKE_PROPERTYPUT, pythoncom.INVOKE_PROPERTYPUTREF):
            # Special case
            existing = self.propMapPut.get(name, None)
            if existing is not None:
                if existing.desc[4]==pythoncom.INVOKE_PROPERTYPUT: # Keep this one
                    map = self.mapFuncs
                    name = "Set"+name
                else: # Existing becomes a func.
                    existing.wasProperty = 1
                    self.mapFuncs["Set"+name]=existing
                    map = self.propMapPut # existing gets overwritten below.
            else:
                map = self.propMapPut # first time weve seen it.

        elif invkind == pythoncom.INVOKE_FUNC:
            map = self.mapFuncs
        else:
            map = None
        if not map is None:
#            if map.has_key(name):
#                sys.stderr.write("Warning - overwriting existing method/attribute %s\n" % name)
            map[name] = MapEntry(tuple(fdesc), names, doc, resultCLSID, resultDoc, hidden)
            # any methods that can't be reached via DISPATCH we return None
            # for, so dynamic dispatch doesnt see it.
            if fdesc.funckind != pythoncom.FUNC_DISPATCH:
                return None
            return (name,map)
        return None

    def _AddVar_(self,typeinfo,fdesc,bForUser):
        """Add one VARDESC (a dispatch property) from the typeinfo to propMap.

        Returns (name, map) for dispatch variables, else None.
        """
        ### need pythoncom.VARFLAG_FRESTRICTED ...
        ### then check it

        if fdesc.varkind == pythoncom.VAR_DISPATCH:
            id = fdesc.memid
            names = typeinfo.GetNames(id)
            # Translate any Alias or Enums in result.
            typerepr, flags, defval = fdesc.elemdescVar
            typerepr, resultCLSID, resultDoc = _ResolveType(typerepr, typeinfo)
            fdesc.elemdescVar = typerepr, flags, defval
            doc = None
            try:
                if bForUser: doc = typeinfo.GetDocumentation(id)
            except pythoncom.ole_error:
                pass

            # handle the enumerator specially
            map = self.propMap
            # Check if the element is hidden.
            hidden = 0
            if hasattr(fdesc,"wVarFlags"):
                hidden = (fdesc.wVarFlags & 0x40) != 0 # VARFLAG_FHIDDEN
            map[names[0]] = MapEntry(tuple(fdesc), names, doc, resultCLSID, resultDoc, hidden)
            return (names[0],map)
        else:
            return None

    def Build(self, typeinfo, attr, bForUser = 1):
        """Populate the method/property maps from the typeinfo and TYPEATTR."""
        self.clsid = attr[0]
        self.bIsDispatch = (attr.wTypeFlags & pythoncom.TYPEFLAG_FDISPATCHABLE) != 0
        if typeinfo is None: return
        # Loop over all methods
        for j in range(attr[6]):
            fdesc = typeinfo.GetFuncDesc(j)
            self._AddFunc_(typeinfo,fdesc,bForUser)

        # Loop over all variables (ie, properties)
        for j in range(attr[7]):
            fdesc = typeinfo.GetVarDesc(j)
            self._AddVar_(typeinfo,fdesc,bForUser)

        # Now post-process the maps.  For any "Get" or "Set" properties
        # that have arguments, we must turn them into methods.  If a method
        # of the same name already exists, change the name.
        for key, item in list(self.propMapGet.items()):
            self._propMapGetCheck_(key,item)

        for key, item in list(self.propMapPut.items()):
            self._propMapPutCheck_(key,item)

    def CountInOutOptArgs(self, argTuple):
        "Return tuple counting in/outs/OPTS.  Sum of result may not be len(argTuple), as some args may be in/out."
        ins = out = opts = 0
        for argCheck in argTuple:
            inOut = argCheck[1]
            if inOut==0:
                ins = ins + 1
                out = out + 1
            else:
                if inOut & pythoncom.PARAMFLAG_FIN:
                    ins = ins + 1
                if inOut & pythoncom.PARAMFLAG_FOPT:
                    opts = opts + 1
                if inOut & pythoncom.PARAMFLAG_FOUT:
                    out = out + 1
        return ins, out, opts

    def MakeFuncMethod(self, entry, name, bMakeClass = 1):
        """Return source lines for a method wrapper for the given map entry."""
        # If we have a type description, and not varargs...
        if entry.desc is not None and (len(entry.desc) < 6 or entry.desc[6]!=-1):
            return self.MakeDispatchFuncMethod(entry, name, bMakeClass)
        else:
            return self.MakeVarArgsFuncMethod(entry, name, bMakeClass)

    def MakeDispatchFuncMethod(self, entry, name, bMakeClass = 1):
        """Return source lines for a typed (InvokeTypes-based) method wrapper."""
        fdesc = entry.desc
        doc = entry.doc
        names = entry.names
        ret = []
        if bMakeClass:
            linePrefix = "\t"
            defNamedOptArg = "defaultNamedOptArg"
            defNamedNotOptArg = "defaultNamedNotOptArg"
            defUnnamedArg = "defaultUnnamedArg"
        else:
            linePrefix = ""
            defNamedOptArg = "pythoncom.Missing"
            defNamedNotOptArg = "pythoncom.Missing"
            defUnnamedArg = "pythoncom.Missing"
        defOutArg = "pythoncom.Missing"
        id = fdesc[0]

        s = linePrefix + 'def ' + name + '(self' + BuildCallList(fdesc, names, defNamedOptArg, defNamedNotOptArg, defUnnamedArg, defOutArg) + '):'
        ret.append(s)
        if doc and doc[1]:
            ret.append(linePrefix + '\t' + _makeDocString(doc[1]))

        # print "fdesc is ", fdesc

        resclsid = entry.GetResultCLSID()
        if resclsid:
            resclsid = "'%s'" % resclsid
        else:
            resclsid = 'None'
        # Strip the default values from the arg desc
        retDesc = fdesc[8][:2]
        argsDesc = tuple([what[:2] for what in fdesc[2]])
        # The runtime translation of the return types is expensive, so when we know the
        # return type of the function, there is no need to check the type at runtime.
        # To qualify, this function must return a "simple" type, and have no byref args.
        # Check if we have byrefs or anything in the args which mean we still need a translate.
        param_flags = [what[1] for what in fdesc[2]]
        bad_params = [flag for flag in param_flags if flag & (pythoncom.PARAMFLAG_FOUT | pythoncom.PARAMFLAG_FRETVAL)!=0]
        s = None
        if len(bad_params)==0 and len(retDesc)==2 and retDesc[1]==0:
            rd = retDesc[0]
            if rd in NoTranslateMap:
                s = '%s\treturn self._oleobj_.InvokeTypes(%d, LCID, %s, %s, %s%s)' % (linePrefix, id, fdesc[4], retDesc, argsDesc, _BuildArgList(fdesc, names))
            elif rd in [pythoncom.VT_DISPATCH, pythoncom.VT_UNKNOWN]:
                s = '%s\tret = self._oleobj_.InvokeTypes(%d, LCID, %s, %s, %s%s)\n' % (linePrefix, id, fdesc[4], retDesc, repr(argsDesc), _BuildArgList(fdesc, names))
                s = s + '%s\tif ret is not None:\n' % (linePrefix,)
                if rd == pythoncom.VT_UNKNOWN:
                    s = s + "%s\t\t# See if this IUnknown is really an IDispatch\n" % (linePrefix,)
                    s = s + "%s\t\ttry:\n" % (linePrefix,)
                    s = s + "%s\t\t\tret = ret.QueryInterface(pythoncom.IID_IDispatch)\n" % (linePrefix,)
                    s = s + "%s\t\texcept pythoncom.error:\n" % (linePrefix,)
                    s = s + "%s\t\t\treturn ret\n" % (linePrefix,)
                s = s + '%s\t\tret = Dispatch(ret, %s, %s)\n' % (linePrefix,repr(name), resclsid)
                s = s + '%s\treturn ret' % (linePrefix)
            elif rd == pythoncom.VT_BSTR:
                s = "%s\t# Result is a Unicode object\n" % (linePrefix,)
                s = s + '%s\treturn self._oleobj_.InvokeTypes(%d, LCID, %s, %s, %s%s)' % (linePrefix, id, fdesc[4], retDesc, repr(argsDesc), _BuildArgList(fdesc, names))
            # else s remains None
        if s is None:
            s = '%s\treturn self._ApplyTypes_(%d, %s, %s, %s, %s, %s%s)' % (linePrefix, id, fdesc[4], retDesc, argsDesc, repr(name), resclsid, _BuildArgList(fdesc, names))

        ret.append(s)
        ret.append("")
        return ret

    def MakeVarArgsFuncMethod(self, entry, name, bMakeClass = 1):
        """Return source lines for a varargs (raw Invoke-based) method wrapper."""
        fdesc = entry.desc
        names = entry.names
        doc = entry.doc
        ret = []
        argPrefix = "self"
        if bMakeClass:
            linePrefix = "\t"
        else:
            linePrefix = ""
        ret.append(linePrefix + 'def ' + name + '(' + argPrefix + ', *args):')
        if doc and doc[1]: ret.append(linePrefix + '\t' + _makeDocString(doc[1]))
        if fdesc:
            invoketype = fdesc[4]
        else:
            invoketype = pythoncom.DISPATCH_METHOD
        s = linePrefix + '\treturn self._get_good_object_(self._oleobj_.Invoke(*(('
        ret.append(s + str(entry.dispid) + ",0,%d,1)+args)),'%s')" % (invoketype, names[0]))
        ret.append("")
        return ret
# Note - "DispatchItem" poorly named - need a new intermediate class.
class VTableItem(DispatchItem):
    """DispatchItem that also builds an ordered vtable description."""

    def Build(self, typeinfo, attr, bForUser = 1):
        DispatchItem.Build(self, typeinfo, attr, bForUser)
        assert typeinfo is not None, "Cant build vtables without type info!"

        # Gather every method/property entry, then order them by their
        # vtable offset (desc[7]).  Concatenation order matches the base
        # class maps so equal offsets keep a stable, predictable order.
        entries = []
        for table in (self.mapFuncs, self.propMapGet, self.propMapPut):
            entries.extend(table.values())
        entries.sort(key=lambda entry: entry.desc[7])

        # Now turn this list into the run-time representation
        # (ready for immediate use or writing to gencache)
        self.vtableFuncs = [(entry.names, entry.dispid, entry.desc)
                            for entry in entries]
# A Lazy dispatch item - builds an item on request using info from
# an ITypeComp. The dynamic module makes the called to build each item,
# and also holds the references to the typeinfo and typecomp.
class LazyDispatchItem(DispatchItem):
    """A DispatchItem whose members are built on demand via an ITypeComp.

    The dynamic module makes the calls to build each item, and also holds
    the references to the typeinfo and typecomp.
    """
    typename = "LazyDispatchItem"
    def __init__(self, attr, doc):
        self.clsid = attr[0]
        # No typeinfo passed - members are resolved lazily, one at a time.
        DispatchItem.__init__(self, None, attr, doc, 0)
# Variant types substituted before use: INT/UINT map to their fixed 4-byte
# forms, and HRESULT is marshalled as a plain 4-byte integer.
typeSubstMap = {
    pythoncom.VT_INT: pythoncom.VT_I4,
    pythoncom.VT_UINT: pythoncom.VT_UI4,
    pythoncom.VT_HRESULT: pythoncom.VT_I4,
}
def _ResolveType(typerepr, itypeinfo):
    """Resolve a raw type description to (variant_type, result_clsid, result_doc).

    Drills through VT_PTR / VT_SAFEARRAY / VT_CARRAY indirections and
    VT_USERDEFINED references (aliases, enums, interfaces, coclasses,
    records).  For object types, the CLSID and documentation tuple of the
    resolved interface are also returned; otherwise they are None.
    """
    # Resolve VT_USERDEFINED (often aliases or typed IDispatches)

    if type(typerepr)==tuple:
        indir_vt, subrepr = typerepr
        if indir_vt == pythoncom.VT_PTR:
            # If it is a VT_PTR to a VT_USERDEFINED that is an IDispatch/IUnknown,
            # then it resolves to simply the object.
            # Otherwise, it becomes a ByRef of the resolved type
            # We need to drop an indirection level on pointer to user defined interfaces.
            # eg, (VT_PTR, (VT_USERDEFINED, somehandle)) needs to become VT_DISPATCH
            # only when "somehandle" is an object.
            # but (VT_PTR, (VT_USERDEFINED, otherhandle)) doesnt get the indirection dropped.
            was_user = type(subrepr)==tuple and subrepr[0]==pythoncom.VT_USERDEFINED
            subrepr, sub_clsid, sub_doc = _ResolveType(subrepr, itypeinfo)
            if was_user and subrepr in [pythoncom.VT_DISPATCH, pythoncom.VT_UNKNOWN, pythoncom.VT_RECORD]:
                # Drop the VT_PTR indirection
                return subrepr, sub_clsid, sub_doc
            # Change PTR indirection to byref
            return subrepr | pythoncom.VT_BYREF, sub_clsid, sub_doc
        if indir_vt == pythoncom.VT_SAFEARRAY:
            # resolve the array element, and convert to VT_ARRAY
            subrepr, sub_clsid, sub_doc = _ResolveType(subrepr, itypeinfo)
            return pythoncom.VT_ARRAY | subrepr, sub_clsid, sub_doc
        if indir_vt == pythoncom.VT_CARRAY: # runtime has no support for this yet.
            # resolve the array element, and convert to VT_CARRAY
            # sheesh - return _something_
            return pythoncom.VT_CARRAY, None, None
        if indir_vt == pythoncom.VT_USERDEFINED:
            try:
                resultTypeInfo = itypeinfo.GetRefTypeInfo(subrepr)
            except pythoncom.com_error as details:
                if details.hresult in [winerror.TYPE_E_CANTLOADLIBRARY, winerror.TYPE_E_LIBNOTREGISTERED]:
                    # an unregistered interface
                    return pythoncom.VT_UNKNOWN, None, None
                raise

            resultAttr = resultTypeInfo.GetTypeAttr()
            typeKind = resultAttr.typekind
            if typeKind == pythoncom.TKIND_ALIAS:
                tdesc = resultAttr.tdescAlias
                return _ResolveType(tdesc, resultTypeInfo)
            elif typeKind in [pythoncom.TKIND_ENUM, pythoncom.TKIND_MODULE]:
                # For now, assume Long
                return pythoncom.VT_I4, None, None

            elif typeKind == pythoncom.TKIND_DISPATCH:
                clsid = resultTypeInfo.GetTypeAttr()[0]
                retdoc = resultTypeInfo.GetDocumentation(-1)
                return pythoncom.VT_DISPATCH, clsid, retdoc

            elif typeKind in [pythoncom.TKIND_INTERFACE,
                              pythoncom.TKIND_COCLASS]:
                # XXX - should probably get default interface for CO_CLASS???
                clsid = resultTypeInfo.GetTypeAttr()[0]
                retdoc = resultTypeInfo.GetDocumentation(-1)
                return pythoncom.VT_UNKNOWN, clsid, retdoc

            elif typeKind == pythoncom.TKIND_RECORD:
                return pythoncom.VT_RECORD, None, None
            raise NotSupportedException("Can not resolve alias or user-defined type")
    return typeSubstMap.get(typerepr,typerepr), None, None
def _BuildArgList(fdesc, names):
    """Build the argument-list string passed to the underlying Invoke method.

    Returns a string of the form ", arg0, arg1, ..." with a newline inserted
    after every 5th name to keep generated lines short.
    """
    # Word has TypeInfo for Insert() method, but says "no args"
    numArgs = max(fdesc[6], len(fdesc[2]))
    # Replace any missing names with positional placeholders.
    names = [n if n is not None else "arg%d" % (i,)
             for i, n in enumerate(names)]
    # We've seen 'source safe' libraries offer the name of 'ret' params in
    # 'names' - although we can't reproduce this, it would be insane to offer
    # more args than we have arg infos for - hence the upper limit on names...
    # (names[0] is the function name itself - param names start at 1.)
    names = [MakePublicAttributeName(n) for n in names[1:(numArgs + 1)]]
    while len(names) < numArgs:
        names.append("arg%d" % (len(names),))
    # As per BuildCallList(), avoid huge lines.
    # Hack a "\n" at the end of every 5th name - "strides" would be handy
    # here but don't exist in 2.2
    for i in range(0, len(names), 5):
        names[i] = names[i] + "\n\t\t\t"
    return "," + ", ".join(names)
# Characters allowed in a generated Python identifier.
valid_identifier_chars = string.ascii_letters + string.digits + "_"

def demunge_leading_underscores(className):
    """Rotate one leading underscore of a class-private munged name to the end.

    e.g. "__foo" -> "_foo_".  Callers guarantee at least two leading
    underscores (asserted below).
    """
    # Count leading underscores via lstrip instead of indexing character by
    # character - the original loop raised IndexError for an all-underscore
    # name; this form is safe for any input.
    stripped = className.lstrip("_")
    num_leading = len(className) - len(stripped)
    assert num_leading >= 2, "Should only be here with names starting with '__'"
    return className[num_leading - 1:] + className[:num_leading - 1]
# Given a "public name" (eg, the name of a class, function, etc)
# make sure it is a legal (and reasonable!) Python name.
def MakePublicAttributeName(className, is_global = False):
    """Convert className into a legal (and reasonable!) public Python name.

    Munges leading double-underscore names, keywords and (when is_global is
    True) builtin clashes, and strips characters that are not valid in an
    identifier.
    """
    # Given a class attribute that needs to be public, convert it to a
    # reasonable name.
    # Also need to be careful that the munging doesnt
    # create duplicates - eg, just removing a leading "_" is likely to cause
    # a clash.
    # if is_global is True, then the name is a global variable that may
    # overwrite a builtin - eg, "None"
    if className[:2]=='__':
        return demunge_leading_underscores(className)
    elif className == 'None':
        # assign to None is evil (and SyntaxError in 2.4, even though
        # iskeyword says False there) - note that if it was a global
        # it would get picked up below
        className = 'NONE'
    elif iskeyword(className):
        # most keywords are lower case (except True, False etc in py3k)
        ret = className.capitalize()
        # but those which aren't get forced upper.
        if ret == className:
            ret = ret.upper()
        return ret
    elif is_global and hasattr(__builtins__, className):
        # NOTE(review): in an imported module __builtins__ is typically a
        # dict, so hasattr() here checks dict *attributes*, not builtin
        # names - confirm this branch behaves as intended outside __main__.
        # builtins may be mixed case.  If capitalizing it doesn't change it,
        # force to all uppercase (eg, "None", "True" become "NONE", "TRUE"
        ret = className.capitalize()
        if ret==className: # didn't change - force all uppercase.
            ret = ret.upper()
        return ret
    # Strip non printable chars
    return ''.join([char for char in className if char in valid_identifier_chars])
# Given a default value passed by a type library, return a string with
# an appropriate repr() for the type.
# Takes a raw ELEMDESC and returns a repr string, or None
# (NOTE: The string itself may be '"None"', which is valid, and different to None.
# XXX - To do: Dates are probably screwed, but can they come in?
def MakeDefaultArgRepr(defArgVal):
    """Return a repr string for an argument's IDL default value, or None.

    defArgVal is a raw ELEMDESC tuple.  Note the result may be the string
    "None" (a valid default value) - distinct from returning None, which
    means "no default".  Date/time values get special handling.
    """
    try:
        inOut = defArgVal[1]
    except IndexError:
        # something strange - assume is in param.
        inOut = pythoncom.PARAMFLAG_FIN

    if inOut & pythoncom.PARAMFLAG_FHASDEFAULT:
        # times need special handling...
        val = defArgVal[2]
        if isinstance(val, datetime.datetime):
            # VARIANT <-> SYSTEMTIME conversions always lose any sub-second
            # resolution, so just use a 'timetuple' here.
            return repr(tuple(val.utctimetuple()))
        if type(val) is TimeType:
            # must be the 'old' pywintypes time object...
            year=val.year; month=val.month; day=val.day; hour=val.hour; minute=val.minute; second=val.second; msec=val.msec
            return "pywintypes.Time((%(year)d, %(month)d, %(day)d, %(hour)d, %(minute)d, %(second)d,0,0,0,%(msec)d))" % locals()
        return repr(val)
    return None
def BuildCallList(fdesc, names, defNamedOptArg, defNamedNotOptArg, defUnnamedArg, defOutArg, is_comment = False):
    """Builds a Python declaration for a method.

    The def* parameters are source-code strings used as the default value
    for named-optional, named-required, unnamed and pure-out parameters
    respectively.  Returns a string beginning with ", ", suitable for
    appending after "def name(self".
    """
    # Names[0] is the func name - param names are from 1.
    numArgs = len(fdesc[2])
    numOptArgs = fdesc[6]
    strval = ''
    if numOptArgs==-1: # Special value that says "var args after here"
        firstOptArg = numArgs
        numArgs = numArgs - 1
    else:
        firstOptArg = numArgs - numOptArgs
    for arg in range(numArgs):
        try:
            argName = names[arg+1]
            namedArg = argName is not None
        except IndexError:
            namedArg = 0
        if not namedArg: argName = "arg%d" % (arg)
        thisdesc = fdesc[2][arg]
        # See if the IDL specified a default value
        defArgVal = MakeDefaultArgRepr(thisdesc)
        if defArgVal is None:
            # Out params always get their special default
            if thisdesc[1] & (pythoncom.PARAMFLAG_FOUT | pythoncom.PARAMFLAG_FIN) == pythoncom.PARAMFLAG_FOUT:
                defArgVal = defOutArg
            else:
                # Unnamed arg - always allow default values.
                if namedArg:
                    # Is a named argument
                    if arg >= firstOptArg:
                        defArgVal = defNamedOptArg
                    else:
                        defArgVal = defNamedNotOptArg
                else:
                    defArgVal = defUnnamedArg
        argName = MakePublicAttributeName(argName)
        # insanely long lines with an 'encoding' flag crashes python 2.4.0
        # keep 5 args per line
        # This may still fail if the arg names are insane, but that seems
        # unlikely.  See also _BuildArgList()
        if (arg+1) % 5 == 0:
            strval = strval + "\n"
            if is_comment:
                strval = strval + "#"
            strval = strval + "\t\t\t"
        strval = strval + ", " + argName
        if defArgVal:
            strval = strval + "=" + defArgVal
    if numOptArgs==-1:
        strval = strval + ", *" + names[-1]
    return strval
# This module is a library used by makepy/dynamic - it has no standalone use.
if __name__=='__main__':
    print("Use 'makepy.py' to generate Python code - this module is just a helper")
| 38.217116 | 163 | 0.672569 |
fcfdf96381b919d6f93357a8a19aaa4ee15fab71 | 2,487 | py | Python | catalog/aws/ml-ops/lambda-python/cloudwatch_model_metrics.py | JamesWoolfenden/dataops-infra | 151477a3dc5c3b600cdf2138926726a889fef231 | [
"MIT"
] | null | null | null | catalog/aws/ml-ops/lambda-python/cloudwatch_model_metrics.py | JamesWoolfenden/dataops-infra | 151477a3dc5c3b600cdf2138926726a889fef231 | [
"MIT"
] | null | null | null | catalog/aws/ml-ops/lambda-python/cloudwatch_model_metrics.py | JamesWoolfenden/dataops-infra | 151477a3dc5c3b600cdf2138926726a889fef231 | [
"MIT"
] | null | null | null | """ This class implements a function to send evaluation metrics to CloudWatch using Boto3 """
import boto3
# "${var.job_name}" (and "${var.environment.aws_region}" below) are Terraform
# template placeholders, interpolated when this Lambda source is rendered.
modelname = "${var.job_name}"
client = boto3.client("cloudwatch")
class CWEvalMetrics:
    """Sends model evaluation metrics (accuracy / f1 / auc) to CloudWatch."""

    # initialize the region and the model name with the class instantiation
    def __init__(self, region="${var.environment.aws_region}", model_name=modelname):
        self.region = region
        self.model_name = model_name

    # A function to send the training evaluation metrics
    # the metric_type parameters will determine whether the data sent is for training or validation.
    def CW_eval(self, is_training, **kwargs):
        """Publish Accuracy/f1/auc metrics via put_metric_data.

        is_training selects the "Training" vs "Validation" metric-name
        prefix.  Metric values are read from kwargs keys "Accuracy", "f1"
        and "auc".

        NOTE(review): missing kwargs default to None, so the f-strings below
        send the literal string "None"; CloudWatch expects a numeric Value -
        verify callers always supply these keys.
        """
        # collecting the loss and accuracy values
        # loss = kwargs.get('Loss', 0)
        accuracy = kwargs.get("Accuracy")
        f1 = kwargs.get("f1")
        auc = kwargs.get("auc")

        # determine if the passed values are for training or validation
        if is_training:
            metric_type = "Training"
        else:
            metric_type = "Validation"

        # Collecting the hyperparameters to be used as the metrics dimensions
        # hyperparameter = kwargs.get('hyperparameters')
        # optimizer = str(hyperparameter.get('optimizer'))
        # epochs = str(hyperparameter.get('epochs'))
        # learning_rate = str(hyperparameter.get('learning_rate'))
        response = client.put_metric_data(
            MetricData=[
                {
                    "MetricName": f"{metric_type} Accuracy",
                    "Value": f"{accuracy}",
                    "Unit": "Percent",
                    "StorageResolution": 1,
                },
                {
                    "MetricName": f"{metric_type} f1",
                    "Value": f"{f1}",
                    "Unit": "Percent",
                    "StorageResolution": 1,
                },
                {
                    "MetricName": f"{metric_type} auc",
                    "Value": f"{auc}",
                    "Unit": "Percent",
                    "StorageResolution": 1,
                },
            ],
        )
        return response
def performance_watch():
    """Publish one round of training metrics and return the CloudWatch response."""
    metrics = CWEvalMetrics()
    return metrics.CW_eval(model_name=modelname, is_training=True)
def lambda_handler(event, context):
    """AWS Lambda entry point: publish metrics and wrap the response."""
    return {"performance evaluation results": performance_watch()}
def main():
    """Script entry point: publish the metrics once and return the response."""
    return performance_watch()
# Allow running this module directly, outside the Lambda runtime.
if __name__ == "__main__":
    main()
| 31.0875 | 100 | 0.567752 |
7ebb5f25ebd5803fc65a8483de77a67f92044fe0 | 451 | py | Python | tests/util/test_util_c.py | philihp/openskill.py | 657a7ddeb81564a23b9aaf19ba225d82b1193046 | [
"MIT"
] | 120 | 2021-09-03T03:06:11.000Z | 2022-03-28T05:54:54.000Z | tests/util/test_util_c.py | philihp/openskill.py | 657a7ddeb81564a23b9aaf19ba225d82b1193046 | [
"MIT"
] | 48 | 2021-09-23T07:15:13.000Z | 2022-03-31T14:47:25.000Z | tests/util/test_util_c.py | philihp/openskill.py | 657a7ddeb81564a23b9aaf19ba225d82b1193046 | [
"MIT"
] | 6 | 2022-01-20T16:45:28.000Z | 2022-03-28T23:48:07.000Z | import pytest
from openskill import Rating, team_rating
from openskill.util import util_c
# A single default rating shared by all players, and two teams of
# differing sizes used by the test below.
r = Rating()
team_1 = [r]
team_2 = [r, r]
def test_util_c():
    """util_c should match known constants for 1v2 and 5v5 team setups."""
    ratings = team_rating([team_1, team_2])
    assert util_c(ratings) == pytest.approx(15.590239, 0.00000001)

    # Compute 5 v 5
    ratings = team_rating([[r] * 5, [r] * 5])
    assert util_c(ratings) == pytest.approx(27.003, 0.00001)
f8708ac4e7924ae7df29e73e62f8a9e8b050b83a | 26,083 | py | Python | IPython/frontend/html/notebook/notebookapp.py | gitforhf/ipython | 48b63c19a53f9e26dc4f64150641c730cb6a2305 | [
"BSD-3-Clause-Clear"
] | 1 | 2019-04-19T23:00:22.000Z | 2019-04-19T23:00:22.000Z | IPython/frontend/html/notebook/notebookapp.py | kod3r/ipython | 48b63c19a53f9e26dc4f64150641c730cb6a2305 | [
"BSD-3-Clause-Clear"
] | null | null | null | IPython/frontend/html/notebook/notebookapp.py | kod3r/ipython | 48b63c19a53f9e26dc4f64150641c730cb6a2305 | [
"BSD-3-Clause-Clear"
] | 1 | 2020-05-03T10:25:12.000Z | 2020-05-03T10:25:12.000Z | # coding: utf-8
"""A tornado based IPython notebook server.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import errno
import logging
import os
import random
import re
import select
import signal
import socket
import sys
import threading
import time
import uuid
import webbrowser
# Third party
import zmq
from jinja2 import Environment, FileSystemLoader
# Install the pyzmq ioloop. This has to be done before anything else from
# tornado is imported.
from zmq.eventloop import ioloop
ioloop.install()
from tornado import httpserver
from tornado import web
# Our own libraries
from .kernelmanager import MappingKernelManager
from .handlers import (LoginHandler, LogoutHandler,
ProjectDashboardHandler, NewHandler, NamedNotebookHandler,
MainKernelHandler, KernelHandler, KernelActionHandler, IOPubHandler,
ShellHandler, NotebookRootHandler, NotebookHandler, NotebookCopyHandler,
RSTHandler, AuthenticatedFileHandler, PrintNotebookHandler,
MainClusterHandler, ClusterProfileHandler, ClusterActionHandler,
FileFindHandler,
)
from .nbmanager import NotebookManager
from .filenbmanager import FileNotebookManager
from .clustermanager import ClusterManager
from IPython.config.application import catch_config_error, boolean_flag
from IPython.core.application import BaseIPythonApplication
from IPython.core.profiledir import ProfileDir
from IPython.frontend.consoleapp import IPythonConsoleApp
from IPython.kernel import swallow_argv
from IPython.kernel.zmq.session import Session, default_secure
from IPython.kernel.zmq.zmqshell import ZMQInteractiveShell
from IPython.kernel.zmq.kernelapp import (
kernel_flags,
kernel_aliases,
IPKernelApp
)
from IPython.utils.importstring import import_item
from IPython.utils.localinterfaces import LOCALHOST
from IPython.utils.traitlets import (
Dict, Unicode, Integer, List, Enum, Bool,
DottedObjectName
)
from IPython.utils import py3compat
from IPython.utils.path import filefind
#-----------------------------------------------------------------------------
# Module globals
#-----------------------------------------------------------------------------
_kernel_id_regex = r"(?P<kernel_id>\w+-\w+-\w+-\w+-\w+)"
_kernel_action_regex = r"(?P<action>restart|interrupt)"
_notebook_id_regex = r"(?P<notebook_id>\w+-\w+-\w+-\w+-\w+)"
_profile_regex = r"(?P<profile>[^\/]+)" # there is almost no text that is invalid
_cluster_action_regex = r"(?P<action>start|stop)"
_examples = """
ipython notebook # start the notebook
ipython notebook --profile=sympy # use the sympy profile
ipython notebook --pylab=inline # pylab in inline plotting mode
ipython notebook --certfile=mycert.pem # use SSL/TLS certificate
ipython notebook --port=5555 --ip=* # Listen on port 5555, all interfaces
"""
# Packagers: modify this line if you store the notebook static files elsewhere
DEFAULT_STATIC_FILES_PATH = os.path.join(os.path.dirname(__file__), "static")
#-----------------------------------------------------------------------------
# Helper functions
#-----------------------------------------------------------------------------
def url_path_join(a, b):
    """Join two URL path segments with exactly one ``/`` between them.

    Collapses a duplicated separator (``a`` ends with '/' AND ``b`` starts
    with '/') and inserts one when neither side supplies it.  Previously the
    neither-side case returned ``a+b`` with no separator at all; all call
    sites in this file pass a ``b`` beginning with '/', so this change is
    backward compatible here while making the helper safe for general use.

    :param a: left URL path segment
    :param b: right URL path segment
    :returns: the joined path
    """
    a_slash = a.endswith('/')
    b_slash = b.startswith('/')
    if a_slash and b_slash:
        # drop one of the two separators
        return a[:-1] + b
    if not a_slash and not b_slash:
        # neither side supplies the separator; add one
        return a + '/' + b
    return a + b
def random_ports(port, n):
    """Yield n candidate ports near *port*.

    The first up-to-5 candidates are sequential (port, port+1, ...); each
    remaining candidate is drawn uniformly at random from the interval
    [port - 2*n, port + 2*n].
    """
    sequential = min(5, n)
    for offset in range(sequential):
        yield port + offset
    spread = 2 * n
    for _ in range(n - 5):
        yield port + random.randint(-spread, spread)
#-----------------------------------------------------------------------------
# The Tornado web application
#-----------------------------------------------------------------------------
class NotebookWebApplication(web.Application):
    """Tornado application serving the notebook UI and kernel/cluster APIs.

    Builds the URL handler table, prefixes every pattern with
    ``base_project_url``, and exposes the managers as application
    attributes so request handlers can reach them.
    """
    def __init__(self, ipython_app, kernel_manager, notebook_manager,
                 cluster_manager, log,
                 base_project_url, settings_overrides):
        # (URL pattern, handler class[, handler init kwargs]) table.
        handlers = [
            (r"/", ProjectDashboardHandler),
            (r"/login", LoginHandler),
            (r"/logout", LogoutHandler),
            (r"/new", NewHandler),
            (r"/%s" % _notebook_id_regex, NamedNotebookHandler),
            (r"/%s/copy" % _notebook_id_regex, NotebookCopyHandler),
            (r"/%s/print" % _notebook_id_regex, PrintNotebookHandler),
            (r"/kernels", MainKernelHandler),
            (r"/kernels/%s" % _kernel_id_regex, KernelHandler),
            (r"/kernels/%s/%s" % (_kernel_id_regex, _kernel_action_regex), KernelActionHandler),
            (r"/kernels/%s/iopub" % _kernel_id_regex, IOPubHandler),
            (r"/kernels/%s/shell" % _kernel_id_regex, ShellHandler),
            (r"/notebooks", NotebookRootHandler),
            (r"/notebooks/%s" % _notebook_id_regex, NotebookHandler),
            (r"/rstservice/render", RSTHandler),
            (r"/files/(.*)", AuthenticatedFileHandler, {'path' : notebook_manager.notebook_dir}),
            (r"/clusters", MainClusterHandler),
            (r"/clusters/%s/%s" % (_profile_regex, _cluster_action_regex), ClusterActionHandler),
            (r"/clusters/%s" % _profile_regex, ClusterProfileHandler),
        ]
        # Python < 2.6.5 doesn't accept unicode keys in f(**kwargs), and
        # base_project_url will always be unicode, which will in turn
        # make the patterns unicode, and ultimately result in unicode
        # keys in kwargs to handler._execute(**kwargs) in tornado.
        # This enforces that base_project_url be ascii in that situation.
        #
        # Note that the URLs these patterns check against are escaped,
        # and thus guaranteed to be ASCII: 'héllo' is really 'h%C3%A9llo'.
        base_project_url = py3compat.unicode_to_str(base_project_url, 'ascii')
        settings = dict(
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=ipython_app.static_file_path,
            static_handler_class = FileFindHandler,
            static_url_prefix = url_path_join(base_project_url,'/static/'),
            # fresh random secret/cookie name per server start, so stale
            # auth cookies from a previous run cannot be replayed
            cookie_secret=os.urandom(1024),
            login_url=url_path_join(base_project_url,'/login'),
            cookie_name='username-%s' % uuid.uuid4(),
        )
        # allow custom overrides for the tornado web app.
        settings.update(settings_overrides)
        # prepend base_project_url onto the patterns that we match
        new_handlers = []
        for handler in handlers:
            pattern = url_path_join(base_project_url, handler[0])
            new_handler = tuple([pattern]+list(handler[1:]))
            new_handlers.append( new_handler )
        super(NotebookWebApplication, self).__init__(new_handlers, **settings)
        # Expose collaborators/configuration to the request handlers.
        self.kernel_manager = kernel_manager
        self.notebook_manager = notebook_manager
        self.cluster_manager = cluster_manager
        self.ipython_app = ipython_app
        self.read_only = self.ipython_app.read_only
        self.config = self.ipython_app.config
        self.use_less = self.ipython_app.use_less
        self.log = log
        self.jinja2_env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), "templates")))
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(kernel_flags)
flags['no-browser']=(
{'NotebookApp' : {'open_browser' : False}},
"Don't open the notebook in a browser after startup."
)
flags['no-mathjax']=(
{'NotebookApp' : {'enable_mathjax' : False}},
"""Disable MathJax
MathJax is the javascript library IPython uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
flags['read-only'] = (
{'NotebookApp' : {'read_only' : True}},
"""Allow read-only access to notebooks.
When using a password to protect the notebook server, this flag
allows unauthenticated clients to view the notebook list, and
individual notebooks, but not edit them, start kernels, or run
code.
If no password is set, the server will be entirely read-only.
"""
)
# Add notebook manager flags
flags.update(boolean_flag('script', 'FileNotebookManager.save_script',
'Auto-save a .py script everytime the .ipynb notebook is saved',
'Do not auto-save .py scripts for every notebook'))
# the flags that are specific to the frontend
# these must be scrubbed before being passed to the kernel,
# or it will raise an error on unrecognized flags
notebook_flags = ['no-browser', 'no-mathjax', 'read-only', 'script', 'no-script']
aliases = dict(kernel_aliases)
aliases.update({
'ip': 'NotebookApp.ip',
'port': 'NotebookApp.port',
'port-retries': 'NotebookApp.port_retries',
'transport': 'KernelManager.transport',
'keyfile': 'NotebookApp.keyfile',
'certfile': 'NotebookApp.certfile',
'notebook-dir': 'NotebookManager.notebook_dir',
'browser': 'NotebookApp.browser',
})
# remove ipkernel flags that are singletons, and don't make sense in
# multi-kernel evironment:
aliases.pop('f', None)
notebook_aliases = [u'port', u'port-retries', u'ip', u'keyfile', u'certfile',
u'notebook-dir']
#-----------------------------------------------------------------------------
# NotebookApp
#-----------------------------------------------------------------------------
class NotebookApp(BaseIPythonApplication):
    """The IPython Notebook server application.

    Responsible for:

    * parsing command-line options and scrubbing frontend-only flags
      before they are forwarded to kernels,
    * building the kernel/notebook/cluster managers,
    * configuring and starting the Tornado HTTP(S) server,
    * SIGINT/SIGTERM handling with an interactive shutdown prompt,
    * optionally opening a web browser at the server URL.
    """

    name = 'ipython-notebook'
    default_config_file_name='ipython_notebook_config.py'
    description = """
        The IPython HTML Notebook.
        This launches a Tornado based HTML Notebook Server that serves up an
        HTML5/Javascript Notebook client.
    """
    examples = _examples
    classes = IPythonConsoleApp.classes + [MappingKernelManager, NotebookManager,
        FileNotebookManager]
    flags = Dict(flags)
    aliases = Dict(aliases)
    kernel_argv = List(Unicode)
    log_level = Enum((0,10,20,30,40,50,'DEBUG','INFO','WARN','ERROR','CRITICAL'),
                     default_value=logging.INFO,
                     config=True,
                     help="Set the log level by value or name.")
    # create requested profiles by default, if they don't exist:
    auto_create = Bool(True)
    # file to be opened in the notebook server
    file_to_run = Unicode('')
    # Network related information.
    ip = Unicode(LOCALHOST, config=True,
        help="The IP address the notebook server will listen on."
    )
    def _ip_changed(self, name, old, new):
        # '*' means "listen on all interfaces"; tornado expects '' for that
        if new == u'*': self.ip = u''
    port = Integer(8888, config=True,
        help="The port the notebook server will listen on."
    )
    port_retries = Integer(50, config=True,
        help="The number of additional ports to try if the specified port is not available."
    )
    certfile = Unicode(u'', config=True,
        help="""The full path to an SSL/TLS certificate file."""
    )
    keyfile = Unicode(u'', config=True,
        help="""The full path to a private key file for usage with SSL/TLS."""
    )
    password = Unicode(u'', config=True,
        help="""Hashed password to use for web authentication.
        To generate, type in a python/IPython shell:
        from IPython.lib import passwd; passwd()
        The string should be of the form type:salt:hashed-password.
        """
    )
    open_browser = Bool(True, config=True,
        help="""Whether to open in a browser after starting.
        The specific browser used is platform dependent and
        determined by the python standard library `webbrowser`
        module, unless it is overridden using the --browser
        (NotebookApp.browser) configuration option.
        """)
    browser = Unicode(u'', config=True,
        help="""Specify what command to use to invoke a web
        browser when opening the notebook. If not specified, the
        default browser will be determined by the `webbrowser`
        standard library module, which allows setting of the
        BROWSER environment variable to override it.
        """)
    read_only = Bool(False, config=True,
        help="Whether to prevent editing/execution of notebooks."
    )
    use_less = Bool(False, config=True,
        help="""Whether to use Browser Side less-css parsing
        instead of compiled css version in templates that allows
        it. This is mainly convenient when working on the less
        file to avoid a build step, or if user want to overwrite
        some of the less variables without having to recompile
        everything.
        You will need to install the less.js component in the static directory
        either in the source tree or in your profile folder.
        """)
    webapp_settings = Dict(config=True,
        help="Supply overrides for the tornado.web.Application that the "
             "IPython notebook uses.")
    enable_mathjax = Bool(True, config=True,
        help="""Whether to enable MathJax for typesetting math/TeX
        MathJax is the javascript library IPython uses to render math/LaTeX. It is
        very large, so you may want to disable it if you have a slow internet
        connection, or for offline use of the notebook.
        When disabled, equations etc. will appear as their untransformed TeX source.
        """
    )
    def _enable_mathjax_changed(self, name, old, new):
        """set mathjax url to empty if mathjax is disabled"""
        if not new:
            self.mathjax_url = u''
    base_project_url = Unicode('/', config=True,
        help='''The base URL for the notebook server.
        Leading and trailing slashes can be omitted,
        and will automatically be added.
        ''')
    def _base_project_url_changed(self, name, old, new):
        # normalize: guarantee a leading slash, then a trailing slash
        if not new.startswith('/'):
            self.base_project_url = '/'+new
        elif not new.endswith('/'):
            self.base_project_url = new+'/'
    base_kernel_url = Unicode('/', config=True,
        help='''The base URL for the kernel server
        Leading and trailing slashes can be omitted,
        and will automatically be added.
        ''')
    def _base_kernel_url_changed(self, name, old, new):
        # same normalization as base_project_url
        if not new.startswith('/'):
            self.base_kernel_url = '/'+new
        elif not new.endswith('/'):
            self.base_kernel_url = new+'/'
    websocket_host = Unicode("", config=True,
        help="""The hostname for the websocket server."""
    )
    extra_static_paths = List(Unicode, config=True,
        help="""Extra paths to search for serving static files.
        This allows adding javascript/css to be available from the notebook server machine,
        or overriding individual files in the IPython"""
    )
    def _extra_static_paths_default(self):
        # by default, allow per-profile static overrides
        return [os.path.join(self.profile_dir.location, 'static')]
    @property
    def static_file_path(self):
        """return extra paths + the default location"""
        return self.extra_static_paths + [DEFAULT_STATIC_FILES_PATH]
    mathjax_url = Unicode("", config=True,
        help="""The url for MathJax.js."""
    )
    def _mathjax_url_default(self):
        """Locate MathJax locally, falling back to a CDN if not installed."""
        if not self.enable_mathjax:
            return u''
        static_url_prefix = self.webapp_settings.get("static_url_prefix",
                                                     "/static/")
        try:
            mathjax = filefind(os.path.join('mathjax', 'MathJax.js'), self.static_file_path)
        except IOError:
            if self.certfile:
                # HTTPS: load from Rackspace CDN, because SSL certificate requires it
                base = u"https://c328740.ssl.cf1.rackcdn.com"
            else:
                base = u"http://cdn.mathjax.org"
            url = base + u"/mathjax/latest/MathJax.js"
            self.log.info("Using MathJax from CDN: %s", url)
            return url
        else:
            self.log.info("Using local MathJax from %s" % mathjax)
            return static_url_prefix+u"mathjax/MathJax.js"
    def _mathjax_url_changed(self, name, old, new):
        if new and not self.enable_mathjax:
            # enable_mathjax=False overrides mathjax_url
            self.mathjax_url = u''
        else:
            self.log.info("Using MathJax: %s", new)
    notebook_manager_class = DottedObjectName('IPython.frontend.html.notebook.filenbmanager.FileNotebookManager',
        config=True,
        help='The notebook manager class to use.')
    def parse_command_line(self, argv=None):
        """Parse CLI args, keeping frontend-only flags out of kernel_argv."""
        super(NotebookApp, self).parse_command_line(argv)
        if argv is None:
            argv = sys.argv[1:]
        # Scrub frontend-specific flags
        self.kernel_argv = swallow_argv(argv, notebook_aliases, notebook_flags)
        # Kernel should inherit default config file from frontend
        self.kernel_argv.append("--IPKernelApp.parent_appname='%s'" % self.name)
        if self.extra_args:
            # a positional arg is either a notebook dir, or a notebook file
            f = os.path.abspath(self.extra_args[0])
            if os.path.isdir(f):
                nbdir = f
            else:
                self.file_to_run = f
                nbdir = os.path.dirname(f)
            self.config.NotebookManager.notebook_dir = nbdir
    def init_configurables(self):
        """Construct the kernel, notebook and cluster managers."""
        # force Session default to be secure
        default_secure(self.config)
        self.kernel_manager = MappingKernelManager(
            config=self.config, log=self.log, kernel_argv=self.kernel_argv,
            connection_dir = self.profile_dir.security_dir,
        )
        kls = import_item(self.notebook_manager_class)
        self.notebook_manager = kls(config=self.config, log=self.log)
        self.notebook_manager.log_info()
        self.notebook_manager.load_notebook_names()
        self.cluster_manager = ClusterManager(config=self.config, log=self.log)
        self.cluster_manager.update_profiles()
    def init_logging(self):
        # This prevents double log messages because tornado use a root logger
        # that self.log is a child of. The logging module dispatches log
        # messages to a log and all of its ancestors until propagate is set
        # to False.
        self.log.propagate = False
    def init_webapp(self):
        """initialize tornado webapp and httpserver"""
        self.web_app = NotebookWebApplication(
            self, self.kernel_manager, self.notebook_manager,
            self.cluster_manager, self.log,
            self.base_project_url, self.webapp_settings
        )
        if self.certfile:
            ssl_options = dict(certfile=self.certfile)
            if self.keyfile:
                ssl_options['keyfile'] = self.keyfile
        else:
            ssl_options = None
        self.web_app.password = self.password
        self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options)
        if not self.ip:
            warning = "WARNING: The notebook server is listening on all IP addresses"
            if ssl_options is None:
                # FIX: add the missing spaces between concatenated fragments
                # (previously rendered "...encryption. Thisis not recommended.")
                self.log.critical(warning + " and not using encryption. This "
                    "is not recommended.")
            if not self.password and not self.read_only:
                # FIX: add the missing spaces between concatenated fragments
                self.log.critical(warning + " and not using authentication. "
                    "This is highly insecure and not recommended.")
        success = None
        # try the configured port first, then nearby/random fallbacks
        for port in random_ports(self.port, self.port_retries+1):
            try:
                self.http_server.listen(port, self.ip)
            except socket.error as e:
                if e.errno != errno.EADDRINUSE:
                    raise
                self.log.info('The port %i is already in use, trying another random port.' % port)
            else:
                self.port = port
                success = True
                break
        if not success:
            self.log.critical('ERROR: the notebook server could not be started because '
                              'no available port could be found.')
            self.exit(1)
    def init_signal(self):
        # FIXME: remove this check when pyzmq dependency is >= 2.1.11
        # safely extract zmq version info:
        try:
            zmq_v = zmq.pyzmq_version_info()
        except AttributeError:
            zmq_v = [ int(n) for n in re.findall(r'\d+', zmq.__version__) ]
            if 'dev' in zmq.__version__:
                zmq_v.append(999)
            zmq_v = tuple(zmq_v)
        if zmq_v >= (2,1,9) and not sys.platform.startswith('win'):
            # This won't work with 2.1.7 and
            # 2.1.9-10 will log ugly 'Interrupted system call' messages,
            # but it will work
            signal.signal(signal.SIGINT, self._handle_sigint)
        signal.signal(signal.SIGTERM, self._signal_stop)
    def _handle_sigint(self, sig, frame):
        """SIGINT handler spawns confirmation dialog"""
        # register more forceful signal handler for ^C^C case
        signal.signal(signal.SIGINT, self._signal_stop)
        # request confirmation dialog in bg thread, to avoid
        # blocking the App
        thread = threading.Thread(target=self._confirm_exit)
        thread.daemon = True
        thread.start()
    def _restore_sigint_handler(self):
        """callback for restoring original SIGINT handler"""
        signal.signal(signal.SIGINT, self._handle_sigint)
    def _confirm_exit(self):
        """confirm shutdown on ^C
        A second ^C, or answering 'y' within 5s will cause shutdown,
        otherwise original SIGINT handler will be restored.
        This doesn't work on Windows.
        """
        # FIXME: remove this delay when pyzmq dependency is >= 2.1.11
        time.sleep(0.1)
        sys.stdout.write("Shutdown Notebook Server (y/[n])? ")
        sys.stdout.flush()
        r,w,x = select.select([sys.stdin], [], [], 5)
        if r:
            line = sys.stdin.readline()
            if line.lower().startswith('y'):
                self.log.critical("Shutdown confirmed")
                ioloop.IOLoop.instance().stop()
                return
        else:
            # FIX: use sys.stdout.write (consistent with the prompt above)
            # instead of the Python-2-only ``print`` statement; output is
            # identical to the original two comma-joined print statements.
            sys.stdout.write("No answer for 5s: resuming operation...\n")
        # no answer, or answer is no:
        # set it back to original SIGINT handler
        # use IOLoop.add_callback because signal.signal must be called
        # from main thread
        ioloop.IOLoop.instance().add_callback(self._restore_sigint_handler)
    def _signal_stop(self, sig, frame):
        self.log.critical("received signal %s, stopping", sig)
        ioloop.IOLoop.instance().stop()
    @catch_config_error
    def initialize(self, argv=None):
        """Full startup sequence short of entering the event loop."""
        self.init_logging()
        super(NotebookApp, self).initialize(argv)
        self.init_configurables()
        self.init_webapp()
        self.init_signal()
    def cleanup_kernels(self):
        """Shutdown all kernels.
        The kernels will shutdown themselves when this process no longer exists,
        but explicit shutdown allows the KernelManagers to cleanup the connection files.
        """
        self.log.info('Shutting down kernels')
        self.kernel_manager.shutdown_all()
    def start(self):
        """Announce the server URL, optionally open a browser, run the loop."""
        ip = self.ip if self.ip else '[all ip addresses on your system]'
        proto = 'https' if self.certfile else 'http'
        info = self.log.info
        info("The IPython Notebook is running at: %s://%s:%i%s" %
             (proto, ip, self.port,self.base_project_url) )
        info("Use Control-C to stop this server and shut down all kernels.")
        if self.open_browser or self.file_to_run:
            ip = self.ip or LOCALHOST
            try:
                browser = webbrowser.get(self.browser or None)
            except webbrowser.Error as e:
                self.log.warn('No web browser found: %s.' % e)
                browser = None
            if self.file_to_run:
                # deep-link directly to the requested notebook
                name, _ = os.path.splitext(os.path.basename(self.file_to_run))
                url = self.notebook_manager.rev_mapping.get(name, '')
            else:
                url = ''
            if browser:
                # open in a background thread so startup isn't blocked
                b = lambda : browser.open("%s://%s:%i%s%s" % (proto, ip,
                    self.port, self.base_project_url, url), new=2)
                threading.Thread(target=b).start()
        try:
            ioloop.IOLoop.instance().start()
        except KeyboardInterrupt:
            info("Interrupted...")
        finally:
            self.cleanup_kernels()
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
def launch_new_instance():
    """Create the singleton NotebookApp, initialize it from config/argv,
    and start serving.  This is the ``ipython notebook`` entry point.
    """
    app = NotebookApp.instance()
    app.initialize()
    app.start()
| 39.519697 | 116 | 0.595484 |
4128d0ccc3f5257701c702bb896cfebf70a4aaaa | 404 | py | Python | 06/01/TextCalendar/formatmonth.py | pylangstudy/201709 | 53d868786d7327a83bfa7f4149549c6f9855a6c6 | [
"CC0-1.0"
] | null | null | null | 06/01/TextCalendar/formatmonth.py | pylangstudy/201709 | 53d868786d7327a83bfa7f4149549c6f9855a6c6 | [
"CC0-1.0"
] | 32 | 2017-09-01T00:52:17.000Z | 2017-10-01T00:30:02.000Z | 06/01/TextCalendar/formatmonth.py | pylangstudy/201709 | 53d868786d7327a83bfa7f4149549c6f9855a6c6 | [
"CC0-1.0"
] | null | null | null | import calendar
# Demo of calendar.TextCalendar and TextCalendar.formatmonth.
print(calendar.TextCalendar())
print(calendar.TextCalendar(firstweekday=0))# Monday
print(calendar.TextCalendar(firstweekday=6))# Sunday
for firstweekday in [0, 6]:# Monday, Sunday
    c = calendar.TextCalendar(firstweekday=firstweekday)
    print(c.formatmonth(2017, 9))
# Same months, but with explicit column width (w) and lines per week (l).
for firstweekday in [0, 6]:# Monday, Sunday
    c = calendar.TextCalendar(firstweekday=firstweekday)
    print(c.formatmonth(2017, 9, w=4, l=2))
| 28.857143 | 56 | 0.74505 |
d978beb4625e0712ec2b3dd1369b085d21426a2c | 370 | py | Python | backend/core/services/__init__.py | ethosdev/poap-fun | a4ef4f8f0fd9ad1b3dc925c7024078783ae9e3c5 | [
"MIT"
] | 8 | 2020-10-20T04:49:29.000Z | 2022-02-27T00:03:49.000Z | backend/core/services/__init__.py | ethosdev/poap-fun | a4ef4f8f0fd9ad1b3dc925c7024078783ae9e3c5 | [
"MIT"
] | 17 | 2021-03-19T15:08:08.000Z | 2021-09-22T19:37:31.000Z | backend/core/services/__init__.py | ethosdev/poap-fun | a4ef4f8f0fd9ad1b3dc925c7024078783ae9e3c5 | [
"MIT"
] | 8 | 2021-04-28T22:29:26.000Z | 2022-03-28T16:47:47.000Z | from ._poap_integration_service import PoapIntegrationService
from ._raffle_results_service import RaffleResultsService
from ._email_service import EmailService
from ._raffle_multi_join import RaffleMultipleJoinService
poap_integration_service = PoapIntegrationService()
raffle_results_service = RaffleResultsService()
raffle_multi_join = RaffleMultipleJoinService()
| 33.636364 | 61 | 0.889189 |
552414d89e83f3119d580c4fcc98578d08a15b9c | 1,790 | py | Python | unsdgbot/message_processor/bot_response_text.py | btaba/UN-SDG-bot | 46fde9a866cefe37b8e600e87b37ed0f73bc94a2 | [
"MIT"
] | null | null | null | unsdgbot/message_processor/bot_response_text.py | btaba/UN-SDG-bot | 46fde9a866cefe37b8e600e87b37ed0f73bc94a2 | [
"MIT"
] | null | null | null | unsdgbot/message_processor/bot_response_text.py | btaba/UN-SDG-bot | 46fde9a866cefe37b8e600e87b37ed0f73bc94a2 | [
"MIT"
] | null | null | null | """
Some generic text that the bot will return
"""
def get_sdg_quick_replies(sequence_number=1):
    """Build the Continue/Quit quick-reply buttons for SDG suggestions.

    The Continue button's payload carries *sequence_number* so the bot
    can resume the suggestion flow at the right step.
    """
    def quick_reply(title, payload):
        # every quick reply is a plain text button
        return {"content_type": "text", "title": title, "payload": payload}

    continue_payload = 'SDG_QUICK_REPLY_CONTINUE' + str(sequence_number)
    return [
        quick_reply('Continue', continue_payload),
        quick_reply('Quit', 'SDG_QUICK_REPLY_QUIT'),
    ]
# Fallback reply when no suggestion matches the user's keywords.
help_text = u"I didn't find any suggestions in my database. Please check out the SDGs in the Menu. \U0001f30e"
# Two-message help sequence sent in order for the Help postback.
help_postback_text = [
    (
        u"I can help inform you about the UN Sustainable Development Goals (SDGs)."
        u" Check out my Menu for Latest articles on sustainability or to read more about the SDGs.\n"
    ),
    (
        u"You can also type keywords or sentences about activities you do, and I'll help suggest simple ways "
        u"you can contribute to the SDGs!"
    )
]
# Short blurb introducing the SDGs.
sdg_intro = "SDGs are the Sustainable Development Goals set by the United Nations. Click around to learn more!"
# Welcome sequence: a Messenger generic-template card followed by two
# plain-text messages.
welcome_message = [
    {
        "type": "template",
        "payload": {
            "template_type": "generic",
            "elements": [
                {
                    "title": "Welcome to the UN Sustainable Development Goals bot!",
                    "item_url": "https://www.facebook.com/climatechangebot/",
                    "image_url": "",
                    "subtitle": "I'll help you understand the Sustainable Development Goals!",
                }
            ]
        }
    },
    (
        u"Hey there! Use the menu bar to learn about the Sustainable Development Goals."
    ),
    u"You can also type a word and I can tell you how you can incorporate the SDGs into your daily life."
]
| 30.862069 | 111 | 0.580447 |
8b724cfadcf14ad99db7518bab7a67136a3a67c8 | 1,740 | py | Python | Researchforum/researchforum/config/environment.py | Pratikgit/Researchforum | 3f86d1d959ac51ca224faba302092c23e805cfb4 | [
"Apache-2.0"
] | null | null | null | Researchforum/researchforum/config/environment.py | Pratikgit/Researchforum | 3f86d1d959ac51ca224faba302092c23e805cfb4 | [
"Apache-2.0"
] | null | null | null | Researchforum/researchforum/config/environment.py | Pratikgit/Researchforum | 3f86d1d959ac51ca224faba302092c23e805cfb4 | [
"Apache-2.0"
] | null | null | null | """Pylons environment configuration"""
import os
from mako.lookup import TemplateLookup
from pylons.configuration import PylonsConfig
from pylons.error import handle_mako_error
import researchforum.lib.app_globals as app_globals
import researchforum.lib.helpers
from researchforum.config.routing import make_map
def load_environment(global_conf, app_conf):
    """Configure the Pylons environment via the ``pylons.config``
    object
    """
    config = PylonsConfig()
    # Pylons paths
    root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    paths = dict(root=root,
                 controllers=os.path.join(root, 'controllers'),
                 static_files=os.path.join(root, 'public'),
                 templates=[os.path.join(root, 'templates')])
    # Initialize config with the basic options
    config.init_app(global_conf, app_conf, package='researchforum', paths=paths)
    config['routes.map'] = make_map(config)
    config['pylons.app_globals'] = app_globals.Globals(config)
    config['pylons.h'] = researchforum.lib.helpers
    # Setup cache object as early as possible
    # NOTE(review): pylons is imported here rather than at module top —
    # presumably to avoid an import cycle at startup; confirm before moving.
    import pylons
    pylons.cache._push_object(config['pylons.app_globals'].cache)
    # Create the Mako TemplateLookup, with the default auto-escaping
    config['pylons.app_globals'].mako_lookup = TemplateLookup(
        directories=paths['templates'],
        error_handler=handle_mako_error,
        module_directory=os.path.join(app_conf['cache_dir'], 'templates'),
        input_encoding='utf-8', default_filters=['escape'],
        imports=['from markupsafe import escape'])
    # CONFIGURATION OPTIONS HERE (note: all config options will override
    # any Pylons config options)
    return config
| 35.510204 | 80 | 0.709195 |
a7389c573e3553cd8fc6d587c7545e9a38b1141e | 9,075 | py | Python | msc_pygeoapi/connector/elasticsearch_.py | Dukestep/msc-pygeoapi | a7376ed0031f2cb4675ebd9f17d802c1783a2941 | [
"MIT"
] | 9 | 2020-12-11T04:21:42.000Z | 2022-01-04T19:00:22.000Z | msc_pygeoapi/connector/elasticsearch_.py | Dukestep/msc-pygeoapi | a7376ed0031f2cb4675ebd9f17d802c1783a2941 | [
"MIT"
] | 25 | 2020-04-22T22:17:06.000Z | 2022-01-31T12:12:40.000Z | msc_pygeoapi/connector/elasticsearch_.py | Dukestep/msc-pygeoapi | a7376ed0031f2cb4675ebd9f17d802c1783a2941 | [
"MIT"
] | 18 | 2020-04-22T23:40:40.000Z | 2022-02-07T14:07:19.000Z | # =================================================================
#
# Author: Etienne <etienne.pelletier@canada.ca>
#
# Copyright (c) 2021 Etienne Pelletier
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
from urllib.parse import urlparse
from elasticsearch import Elasticsearch
from elasticsearch.helpers import streaming_bulk, BulkIndexError
from msc_pygeoapi.connector.base import BaseConnector
from msc_pygeoapi.env import (
MSC_PYGEOAPI_ES_USERNAME,
MSC_PYGEOAPI_ES_PASSWORD,
MSC_PYGEOAPI_ES_URL,
MSC_PYGEOAPI_ES_TIMEOUT,
)
LOGGER = logging.getLogger(__name__)
class ElasticsearchConnector(BaseConnector):
"""Elasticsearch Connector"""
def __init__(self, connector_def={}):
"""
Elasticticsearch connection initialization
:param connector_def: connection definition dictionnary
:returns: msc_pygeoapi.connector.elasticsearch_.ElasticsearchConnector
"""
self.url = connector_def.get('url', MSC_PYGEOAPI_ES_URL)
# if no URL passed in connector def or env variable not set default
# to default ES port on localhost
if not self.url:
self.url = 'http://localhost:9200'
self.verify_certs = connector_def.get('verify_certs', True)
if 'auth' in connector_def:
self.auth = connector_def['auth']
elif all([MSC_PYGEOAPI_ES_USERNAME, MSC_PYGEOAPI_ES_PASSWORD]):
self.auth = (MSC_PYGEOAPI_ES_USERNAME, MSC_PYGEOAPI_ES_PASSWORD)
else:
self.auth = None
self.Elasticsearch = self.connect()
def connect(self):
"""create Elasticsearch connection"""
LOGGER.debug('Connecting to Elasticsearch')
# if no protocol specified in url append http:// by default
if not self.url.startswith('http'):
self.url = 'http://{}'.format(self.url)
url_parsed = urlparse(self.url)
url_settings = {'host': url_parsed.hostname}
if url_parsed.port is None: # proxy to default HTTP(S) port
if url_parsed.scheme == 'https':
url_settings['port'] = 443
url_settings['scheme'] = url_parsed.scheme
else:
url_settings['port'] = 80
else: # was set explictly
url_settings['port'] = url_parsed.port
if url_parsed.path:
url_settings['url_prefix'] = url_parsed.path
LOGGER.debug('URL settings: {}'.format(url_settings))
es_args = {
'hosts': [url_settings],
'verify_certs': self.verify_certs,
'retry_on_timeout': True,
'max_retries': 3
}
if self.auth:
es_args['http_auth'] = self.auth
return Elasticsearch(**es_args)
def create(self, index_name, mapping, overwrite=False):
"""
create an Elaticsearch index
:param index_name: name of in index to create
:mapping: `dict` mapping of index to create
:overwrite: `bool` indicating whether to overwrite index if it already
exists
:returns: `bool` of creation status
"""
# if overwrite is False, do not recreate an existing index
if self.Elasticsearch.indices.exists(index_name) and not overwrite:
LOGGER.info('{} index already exists.')
return False
elif self.Elasticsearch.indices.exists(index_name) and overwrite:
self.Elasticsearch.indices.delete(index_name)
LOGGER.info('Deleted existing {} index.'.format(index_name))
self.Elasticsearch.indices.create(
index=index_name,
body=mapping,
request_timeout=MSC_PYGEOAPI_ES_TIMEOUT
)
return True
def get(self, pattern):
"""
get list of Elaticsearch index matching a pattern
:param pattern: `str` of pattern to match
:returns: `list` of index names matching patterns
"""
return list(self.Elasticsearch.indices.get(pattern).keys())
def delete(self, indexes):
"""
delete ES index(es)
:param indexes: indexes to delete, comma-seperated if multiple.
:returns: `bool` of deletion status
"""
if indexes in ['*', '_all'] or not self.Elasticsearch.indices.exists(
indexes
):
msg = (
'Cannot delete {}. '.format(indexes),
'Either the index does not exist or an unaccepted index ',
'pattern was given (\'*\' or \'_all\')',
)
LOGGER.error(msg)
raise ValueError(msg)
LOGGER.info('Deleting indexes {}'.format(indexes))
self.Elasticsearch.indices.delete(indexes)
return True
def create_template(self, name, settings):
"""
create an Elasticsearch index template
:param name: `str` index template name
:param settings: `dict` settings dictionnary for index template
:return: `bool` of index template creation status
"""
if not self.Elasticsearch.indices.exists_template(name):
self.Elasticsearch.indices.put_template(name, settings)
return True
def delete_template(self, name):
"""
delete an Elasticsearch index template
:param name: `str` index template name
:return: `bool` of index template deletion status
"""
if self.Elasticsearch.indices.exists_template(name):
self.Elasticsearch.indices.delete_template(name)
return True
    def submit_elastic_package(self, package, request_size=10000):
        """
        helper function to send an update request to Elasticsearch and
        log the status of the request. Returns True if the upload succeeded.

        :param package: Iterable of bulk API update actions.
        :param request_size: Number of documents to upload per request.

        :returns: `bool` of whether the operation was successful.
        """

        inserts = 0
        updates = 0
        noops = 0
        errors = []

        try:
            # raise_on_error=False lets us collect per-document failures in
            # `errors` instead of aborting the whole stream on the first one
            for ok, response in streaming_bulk(
                self.Elasticsearch,
                package,
                chunk_size=request_size,
                request_timeout=MSC_PYGEOAPI_ES_TIMEOUT,
                raise_on_error=False,
            ):
                if not ok:
                    errors.append(response)
                else:
                    # NOTE(review): assumes every action in `package` is a
                    # bulk 'update' op (response keyed by 'update') -- confirm
                    status = response['update']['result']

                    if status == 'created':
                        inserts += 1
                    elif status == 'updated':
                        updates += 1
                    elif status == 'noop':
                        noops += 1
                    else:
                        LOGGER.error('Unhandled status code {}'.format(status))
                        errors.append(response)
        except BulkIndexError as err:
            # malformed actions can still raise despite raise_on_error=False
            LOGGER.error(
                'Unable to perform bulk insert due to: {}'.format(err.errors)
            )
            return False

        total = inserts + updates + noops
        LOGGER.info(
            'Inserted package of {} documents ({} inserts, {} updates,'
            ' {} no-ops)'.format(total, inserts, updates, noops)
        )

        # any per-document error makes the whole upload count as failed
        if len(errors) > 0:
            LOGGER.warning(
                '{} errors encountered in bulk insert: {}'.format(
                    len(errors), errors
                )
            )
            return False

        return True
def update_by_query(self, query, name):
"""
update an Elasticsearch feature
:param query: `str` query template
:param name: `str` index name
:return: `bool` of index update status
"""
self.Elasticsearch.update_by_query(body=query, index=name)
return True
def __repr__(self):
return '<ElasticsearchConnector> {}'.format(self.url)
| 31.842105 | 79 | 0.598457 |
480eb035c11c58128e23a94857ff0e46bd61d84d | 339 | py | Python | jsp_fwk/__init__.py | dothinking/job_shop_schedule | ad61467b37471a5debdb59a96aa9c7c3b2eef55a | [
"Apache-1.1"
] | null | null | null | jsp_fwk/__init__.py | dothinking/job_shop_schedule | ad61467b37471a5debdb59a96aa9c7c3b2eef55a | [
"Apache-1.1"
] | null | null | null | jsp_fwk/__init__.py | dothinking/job_shop_schedule | ad61467b37471a5debdb59a96aa9c7c3b2eef55a | [
"Apache-1.1"
] | 1 | 2022-03-15T06:18:06.000Z | 2022-03-15T06:18:06.000Z | # domain class
from .model.domain import (Job, Machine, Operation)
# variable
from .model.variable import (JobStep, MachineStep, OperationStep)
# problem, solution and solver
from .model.problem import JSProblem
from .model.solution import JSSolution
from .model.solver import JSSolver
# benchmark
from .model.benchmark import BenchMark | 26.076923 | 65 | 0.80236 |
5773000f9f98a75234e9668d7a8cc8e22b7e8e16 | 365 | py | Python | src/test/tinc/tinctest/test/dryrun/mocktests/dryrun_test_load_failure.py | rodel-talampas/gpdb | 9c955e350334abbd922102f289f782697eb52069 | [
"PostgreSQL",
"Apache-2.0"
] | 9 | 2018-04-20T03:31:01.000Z | 2020-05-13T14:10:53.000Z | src/test/tinc/tinctest/test/dryrun/mocktests/dryrun_test_load_failure.py | rodel-talampas/gpdb | 9c955e350334abbd922102f289f782697eb52069 | [
"PostgreSQL",
"Apache-2.0"
] | 36 | 2017-09-21T09:12:27.000Z | 2020-06-17T16:40:48.000Z | src/test/tinc/tinctest/test/dryrun/mocktests/dryrun_test_load_failure.py | rodel-talampas/gpdb | 9c955e350334abbd922102f289f782697eb52069 | [
"PostgreSQL",
"Apache-2.0"
] | 32 | 2017-08-31T12:50:52.000Z | 2022-03-01T07:34:53.000Z | from tinctest.case import TINCTestCase
class Sample2TINCTests(TINCTestCase):
    """Mock TINC test case used to exercise dry-run test loading.

    The fixture methods only print markers so the loader's call order can
    be observed; test_021 is an intentional no-op.
    """

    @classmethod
    def setUpClass(cls):
        # print() calls work under both Python 2 and Python 3; the previous
        # ``print "..."`` statement form is a SyntaxError on Python 3
        print("SetupClass")

    def setUp(self):
        print("setup")

    def test_021(self):
        pass

    def tearDown(self):
        print("teardown")

    @classmethod
    def tearDownClass(cls):
        print("teardown class")
| 17.380952 | 38 | 0.619178 |
417d717d58cd0463499394f2b91c2deaf9fe3ebc | 943 | py | Python | braggi-rest-api/braggi_rest_api/dashboard/views.py | SushritPasupuleti/Braggi-A-Python-Based-Contextual-Chatbot-Framework | c81fa4bb0551d2d32fedc5c5d5fbf17940f5d2b1 | [
"MIT"
] | 6 | 2018-07-01T14:55:50.000Z | 2020-11-02T13:22:07.000Z | braggi-rest-api/braggi_rest_api/dashboard/views.py | SushritPasupuleti/Braggi-A-Python-Based-Contextual-Chatbot-Framework | c81fa4bb0551d2d32fedc5c5d5fbf17940f5d2b1 | [
"MIT"
] | null | null | null | braggi-rest-api/braggi_rest_api/dashboard/views.py | SushritPasupuleti/Braggi-A-Python-Based-Contextual-Chatbot-Framework | c81fa4bb0551d2d32fedc5c5d5fbf17940f5d2b1 | [
"MIT"
] | 2 | 2018-09-10T11:45:22.000Z | 2019-11-21T11:40:20.000Z | from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
from rest_api.Braggi_Engine import Run
from django.template import Context, loader
from django.http import HttpResponse
class DashboardHomeView(LoginRequiredMixin, TemplateView):
    """Dashboard landing page; requires an authenticated session."""

    # Unauthenticated users are redirected here by LoginRequiredMixin.
    login_url = '/api-admin/login/'
    # NOTE(review): Django's redirect_field_name is normally a query-string
    # parameter name (default 'next'), not a URL -- confirm this is intended.
    redirect_field_name = '/api-admin/login/'
class TrainView(TemplateView):
    """Triggers a (re)training run of the Braggi model on GET and renders
    a success page."""

    template_name="dashboard/success.html"
    login_url = '/api-admin/login/'
    redirect_field_name = '/api-admin/login/'

    def get(self, request):
        # run training synchronously, then report completion
        self.train()
        context = {'Operation': 'Training'}
        return render(request, 'dashboard/success.html', context)

    def train(self):
        # Trained=False forces the engine to retrain rather than load a model
        Run.Run_Model('admin-override-input=null', Trained=False)
class ChatListView(TemplateView):
    """Renders the chat log listing page."""

    template_name="dashboard/chat_logs.html"
    # NOTE(review): login_url/redirect_field_name have no effect without
    # LoginRequiredMixin -- confirm whether this view should be protected.
    login_url = '/api-admin/login/'
    redirect_field_name = '/api-admin/login/'
40e4d567f6bc1aa42c47cadefd89a79bf3bdd4f6 | 10,674 | py | Python | sdk/python/pulumi_aws/cloudhsmv2/cluster.py | sibuthomasmathew/pulumi-aws | 6351f2182eb6f693d4e09e4136c385adfa0ab674 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/cloudhsmv2/cluster.py | sibuthomasmathew/pulumi-aws | 6351f2182eb6f693d4e09e4136c385adfa0ab674 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/cloudhsmv2/cluster.py | sibuthomasmathew/pulumi-aws | 6351f2182eb6f693d4e09e4136c385adfa0ab674 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Cluster']
class Cluster(pulumi.CustomResource):
    # NOTE: auto-generated by the Pulumi Terraform Bridge (see file header);
    # behaviour-affecting changes should be made in the generator, not here.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 hsm_type: Optional[pulumi.Input[str]] = None,
                 source_backup_identifier: Optional[pulumi.Input[str]] = None,
                 subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Creates an Amazon CloudHSM v2 cluster.

        For information about CloudHSM v2, see the
        [AWS CloudHSM User Guide](https://docs.aws.amazon.com/cloudhsm/latest/userguide/introduction.html) and the [Amazon
        CloudHSM API Reference][2].

        > **NOTE:** A CloudHSM Cluster can take several minutes to set up.
        Practically no single attribute can be updated, except for `tags`.
        If you need to delete a cluster, you have to remove its HSM modules first.
        To initialize cluster, you have to add an HSM instance to the cluster, then sign CSR and upload it.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] hsm_type: The type of HSM module in the cluster. Currently, only `hsm1.medium` is supported.
        :param pulumi.Input[str] source_backup_identifier: The id of Cloud HSM v2 cluster backup to be restored.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subnet_ids: The IDs of subnets in which cluster will operate.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
        """
        # legacy dunder-style arguments are accepted for backward
        # compatibility but emit deprecation warnings
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # creating a new resource: validate required inputs and seed
            # output-only properties with None placeholders
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            # required unless looking up an existing resource by URN
            if hsm_type is None and not opts.urn:
                raise TypeError("Missing required property 'hsm_type'")
            __props__['hsm_type'] = hsm_type
            __props__['source_backup_identifier'] = source_backup_identifier
            if subnet_ids is None and not opts.urn:
                raise TypeError("Missing required property 'subnet_ids'")
            __props__['subnet_ids'] = subnet_ids
            __props__['tags'] = tags
            # output-only attributes, populated by the provider
            __props__['cluster_certificates'] = None
            __props__['cluster_id'] = None
            __props__['cluster_state'] = None
            __props__['security_group_id'] = None
            __props__['vpc_id'] = None
        super(Cluster, __self__).__init__(
            'aws:cloudhsmv2/cluster:Cluster',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            cluster_certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterClusterCertificateArgs']]]]] = None,
            cluster_id: Optional[pulumi.Input[str]] = None,
            cluster_state: Optional[pulumi.Input[str]] = None,
            hsm_type: Optional[pulumi.Input[str]] = None,
            security_group_id: Optional[pulumi.Input[str]] = None,
            source_backup_identifier: Optional[pulumi.Input[str]] = None,
            subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            vpc_id: Optional[pulumi.Input[str]] = None) -> 'Cluster':
        """
        Get an existing Cluster resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterClusterCertificateArgs']]]] cluster_certificates: The list of cluster certificates.
               * `cluster_certificates.0.cluster_certificate` - The cluster certificate issued (signed) by the issuing certificate authority (CA) of the cluster's owner.
               * `cluster_certificates.0.cluster_csr` - The certificate signing request (CSR). Available only in `UNINITIALIZED` state after an HSM instance is added to the cluster.
               * `cluster_certificates.0.aws_hardware_certificate` - The HSM hardware certificate issued (signed) by AWS CloudHSM.
               * `cluster_certificates.0.hsm_certificate` - The HSM certificate issued (signed) by the HSM hardware.
               * `cluster_certificates.0.manufacturer_hardware_certificate` - The HSM hardware certificate issued (signed) by the hardware manufacturer.
        :param pulumi.Input[str] cluster_id: The id of the CloudHSM cluster.
        :param pulumi.Input[str] cluster_state: The state of the CloudHSM cluster.
        :param pulumi.Input[str] hsm_type: The type of HSM module in the cluster. Currently, only `hsm1.medium` is supported.
        :param pulumi.Input[str] security_group_id: The ID of the security group associated with the CloudHSM cluster.
        :param pulumi.Input[str] source_backup_identifier: The id of Cloud HSM v2 cluster backup to be restored.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subnet_ids: The IDs of subnets in which cluster will operate.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
        :param pulumi.Input[str] vpc_id: The id of the VPC that the CloudHSM cluster resides in.
        """
        # attach the provider id so the engine performs a lookup (read)
        # instead of a create
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        __props__["cluster_certificates"] = cluster_certificates
        __props__["cluster_id"] = cluster_id
        __props__["cluster_state"] = cluster_state
        __props__["hsm_type"] = hsm_type
        __props__["security_group_id"] = security_group_id
        __props__["source_backup_identifier"] = source_backup_identifier
        __props__["subnet_ids"] = subnet_ids
        __props__["tags"] = tags
        __props__["vpc_id"] = vpc_id
        return Cluster(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="clusterCertificates")
    def cluster_certificates(self) -> pulumi.Output[Sequence['outputs.ClusterClusterCertificate']]:
        """
        The list of cluster certificates.
        * `cluster_certificates.0.cluster_certificate` - The cluster certificate issued (signed) by the issuing certificate authority (CA) of the cluster's owner.
        * `cluster_certificates.0.cluster_csr` - The certificate signing request (CSR). Available only in `UNINITIALIZED` state after an HSM instance is added to the cluster.
        * `cluster_certificates.0.aws_hardware_certificate` - The HSM hardware certificate issued (signed) by AWS CloudHSM.
        * `cluster_certificates.0.hsm_certificate` - The HSM certificate issued (signed) by the HSM hardware.
        * `cluster_certificates.0.manufacturer_hardware_certificate` - The HSM hardware certificate issued (signed) by the hardware manufacturer.
        """
        return pulumi.get(self, "cluster_certificates")

    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> pulumi.Output[str]:
        """
        The id of the CloudHSM cluster.
        """
        return pulumi.get(self, "cluster_id")

    @property
    @pulumi.getter(name="clusterState")
    def cluster_state(self) -> pulumi.Output[str]:
        """
        The state of the CloudHSM cluster.
        """
        return pulumi.get(self, "cluster_state")

    @property
    @pulumi.getter(name="hsmType")
    def hsm_type(self) -> pulumi.Output[str]:
        """
        The type of HSM module in the cluster. Currently, only `hsm1.medium` is supported.
        """
        return pulumi.get(self, "hsm_type")

    @property
    @pulumi.getter(name="securityGroupId")
    def security_group_id(self) -> pulumi.Output[str]:
        """
        The ID of the security group associated with the CloudHSM cluster.
        """
        return pulumi.get(self, "security_group_id")

    @property
    @pulumi.getter(name="sourceBackupIdentifier")
    def source_backup_identifier(self) -> pulumi.Output[Optional[str]]:
        """
        The id of Cloud HSM v2 cluster backup to be restored.
        """
        return pulumi.get(self, "source_backup_identifier")

    @property
    @pulumi.getter(name="subnetIds")
    def subnet_ids(self) -> pulumi.Output[Sequence[str]]:
        """
        The IDs of subnets in which cluster will operate.
        """
        return pulumi.get(self, "subnet_ids")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A map of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="vpcId")
    def vpc_id(self) -> pulumi.Output[str]:
        """
        The id of the VPC that the CloudHSM cluster resides in.
        """
        return pulumi.get(self, "vpc_id")

    # map camelCase provider property names to snake_case Python attributes
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 49.646512 | 181 | 0.664512 |
1c87d3c9874cf7783f10ad214ee5d216c5c0123c | 6,743 | py | Python | tfkit/model/clas/model.py | fossabot/TFkit | aade08634171eaee41e3d687b0f65259bef8fe43 | [
"Apache-2.0"
] | null | null | null | tfkit/model/clas/model.py | fossabot/TFkit | aade08634171eaee41e3d687b0f65259bef8fe43 | [
"Apache-2.0"
] | null | null | null | tfkit/model/clas/model.py | fossabot/TFkit | aade08634171eaee41e3d687b0f65259bef8fe43 | [
"Apache-2.0"
] | null | null | null | import sys
import os
import torch
from torch import nn
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.abspath(os.path.join(dir_path, os.pardir)))
from torch import softmax, sigmoid
from tfkit.model.clas.dataloader import get_feature_from_data
from tfkit.utility.loss import FocalLoss, BCEFocalLoss
from torch.distributions import Categorical
class Model(nn.Module):
    """Multi-task sequence classification head: a shared pretrained
    transformer encoder, mean-pooled, with one linear classifier per task.

    Tasks whose name contains 'multi_label' are treated as multi-label
    (sigmoid + BCE focal loss); all others as single-label
    (softmax + focal loss).
    """

    def __init__(self, tokenizer, pretrained, tasks_detail, maxlen=512, dropout=0.1, **kwargs):
        super().__init__()
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print('Using device:', self.device)
        self.tokenizer = tokenizer
        self.pretrained = pretrained
        self.dropout = nn.Dropout(dropout)
        # single-label loss and multi-label (multi-task) loss
        self.loss_fct = FocalLoss()
        self.loss_fct_mt = BCEFocalLoss()

        # task name -> index into classifier_list
        self.tasks = dict()
        self.tasks_detail = tasks_detail

        # one linear head per task, sized to that task's label set
        self.classifier_list = nn.ModuleList()
        for task, labels in tasks_detail.items():
            self.classifier_list.append(nn.Linear(self.pretrained.config.hidden_size, len(labels)).to(self.device))
            self.tasks[task] = len(self.classifier_list) - 1
        self.maxlen = maxlen

        self.pretrained = self.pretrained.to(self.device)
        self.classifier_list = self.classifier_list.to(self.device)
        self.loss_fct = self.loss_fct.to(self.device)
        self.loss_fct_mt = self.loss_fct_mt.to(self.device)

    # from https://github.com/UKPLab/sentence-transformers
    # Mean Pooling - Take attention mask into account for correct averaging
    # modify - mask from -1 to 0
    def mean_pooling(self, model_output, attention_mask):
        # expand mask to hidden size; negative mask values (padding marker
        # used by this codebase) are zeroed before averaging
        input_mask_expanded = attention_mask.unsqueeze(-1).expand(model_output.size()).float()
        input_mask_expanded[input_mask_expanded < 0] = 0
        sum_embeddings = torch.sum(model_output * input_mask_expanded, 1)
        # clamp avoids division by zero on fully-masked rows
        sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
        return sum_embeddings / sum_mask

    def forward(self, batch_data, eval=False, **args):
        """Run the batch; returns a summed loss when training, or a dict of
        per-example label probabilities/predictions when eval=True.

        NOTE(review): assumes batch_data['input'/'target'/'mask'] are
        parallel sequences, one row per example -- confirm against the
        dataloader.
        """
        tasks = batch_data['task']
        inputs = torch.as_tensor(batch_data['input']).to(self.device)
        targets = torch.as_tensor(batch_data['target']).to(self.device)
        masks = torch.as_tensor(batch_data['mask']).to(self.device)

        result_dict = {
            'label_prob_all': [],
            'label_map': []
        }
        result_logits = []
        result_labels = []

        # examples are processed one at a time (unsqueeze to batch of 1)
        for p, zin in enumerate(zip(tasks, inputs, masks)):
            task, input, mask = zin
            task_id = self.tasks[task]
            task_lables = self.tasks_detail[task]

            output = self.pretrained(input.unsqueeze(0), mask.unsqueeze(0))[0]
            pooled_output = self.dropout(self.mean_pooling(output, mask.unsqueeze(0)))
            classifier_output = self.classifier_list[task_id](pooled_output)
            reshaped_logits = classifier_output.view(-1, len(task_lables))  # 0 for cls position
            result_logits.append(reshaped_logits)
            if eval is False:
                target = targets[p]
                result_labels.append(target)
            else:
                # convert logits to probabilities per task type
                if 'multi_label' in task:
                    reshaped_logits = sigmoid(reshaped_logits)
                else:
                    reshaped_logits = softmax(reshaped_logits, dim=1)
                logit_prob = reshaped_logits[0].data.tolist()
                logit_label = dict(zip(task_lables, logit_prob))
                result_dict['label_prob_all'].append({task: logit_label})
                # multi-label: threshold 0.5; single-label: argmax
                if 'multi_label' in task:
                    result_dict['label_map'].append({task: [k for k, v in logit_label.items() if v > 0.5]})
                else:
                    result_dict['label_map'].append({task: [task_lables[logit_prob.index(max(logit_prob))]]})

        if eval:
            outputs = result_dict
        else:
            # sum per-example losses, picking the loss matching the task type
            loss = 0
            for logits, labels, task in zip(result_logits, result_labels, tasks):
                if 'multi_label' in task:
                    loss += self.loss_fct_mt(logits, labels.type_as(logits))
                else:
                    loss += self.loss_fct(logits, labels)
            outputs = loss

        return outputs

    def get_all_task(self):
        return list(self.tasks.keys())

    # NOTE(review): `task=get_all_task` deliberately defaults to the unbound
    # method object; the `callable(task)` check below resolves it to the
    # first known task at call time.
    def predict(self, input='', topk=1, task=get_all_task, handle_exceed='slide',
                merge_strategy=['minentropy', 'maxcount', 'maxprob']):
        """Predict labels for a single input string, merging predictions
        across sliding windows by the chosen strategy."""
        topk = int(topk)
        if callable(task):
            task = task(self)
        # list-valued options collapse to their first element
        task = task[0] if isinstance(task, list) else task
        handle_exceed = handle_exceed[0] if isinstance(handle_exceed, list) else handle_exceed
        merge_strategy = merge_strategy[0] if isinstance(merge_strategy, list) else merge_strategy
        self.eval()
        with torch.no_grad():
            ret_result = []
            ret_detail = []
            # one feature per sliding window over an over-length input
            for feature in get_feature_from_data(tokenizer=self.tokenizer, maxlen=self.maxlen,
                                                 tasks=self.tasks_detail[task], task=task, input=input,
                                                 handle_exceed=handle_exceed):
                for k, v in feature.items():
                    feature[k] = [v]
                result = self.forward(feature, eval=True)
                if topk < 2:
                    ret_result.append([i[task] for i in result['label_map'] if task in i][0])
                    ret_detail.append(result)
                else:
                    # top-k labels ranked by probability
                    task_map = [i[task] for i in result['label_prob_all'] if task in i][0]
                    ret_result.append(sorted(task_map, key=task_map.get, reverse=True)[:topk])
                    ret_detail.append(result)

            # apply different strategy to merge result after sliding windows
            if merge_strategy == 'maxcount':
                ret_result = max(ret_result, key=ret_result.count)
            else:
                # score each window by prediction entropy / max probability
                results_prob = []
                results_entropy = []
                for detail in ret_detail:
                    prob_map = detail['label_prob_all'][0][task]
                    result_value = [v for _, v in prob_map.items()]
                    results_entropy.append(Categorical(probs=torch.tensor(result_value)).entropy().data.tolist())
                    results_prob.append(max(result_value))
                min_entropy_index = results_entropy.index(min(results_entropy))
                max_prob_index = results_prob.index(max(results_prob))
                if merge_strategy == 'minentropy':
                    ret_result = ret_result[min_entropy_index]
                if merge_strategy == 'maxprob':
                    ret_result = ret_result[max_prob_index]
            return ret_result, ret_detail
| 44.361842 | 115 | 0.607148 |
63d32294b1f786161a3afaa769057710173b69a7 | 21,252 | py | Python | autoscraper/auto_scraper.py | hicala/autoscraper | deb8a297d35bc5725b30219df31a09efb936cffb | [
"MIT"
] | 1 | 2020-11-11T19:46:21.000Z | 2020-11-11T19:46:21.000Z | autoscraper/auto_scraper.py | hicala/autoscraper | deb8a297d35bc5725b30219df31a09efb936cffb | [
"MIT"
] | null | null | null | autoscraper/auto_scraper.py | hicala/autoscraper | deb8a297d35bc5725b30219df31a09efb936cffb | [
"MIT"
] | null | null | null | import hashlib
import json
import unicodedata
from collections import defaultdict
from html import unescape
from urllib.parse import urljoin, urlparse
import requests
from bs4 import BeautifulSoup
from autoscraper.utils import get_random_str, unique_hashable, unique_stack_list, \
ResultItem, FuzzyText
class AutoScraper(object):
"""
AutoScraper : A Smart, Automatic, Fast and Lightweight Web Scraper for Python.
AutoScraper automatically learns a set of rules required to extract the needed content
from a web page. So the programmer doesn't need to explicitly construct the rules.
Attributes
----------
stack_list: list
List of rules learned by AutoScraper
Methods
-------
build() - Learns a set of rules represented as stack_list based on the wanted_list,
which can be reused for scraping similar elements from other web pages in the future.
get_result_similar() - Gets similar results based on the previously learned rules.
get_result_exact() - Gets exact results based on the previously learned rules.
get_results() - Gets exact and similar results based on the previously learned rules.
save() - Serializes the stack_list as JSON and saves it to disk.
load() - De-serializes the JSON representation of the stack_list and loads it back.
remove_rules() - Removes one or more learned rule[s] from the stack_list.
keep_rules() - Keeps only the specified learned rules in the stack_list and removes the others.
"""
request_headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36'
}
def __init__(self, stack_list=None):
self.stack_list = stack_list or []
def save(self, file_path):
"""
Serializes the stack_list as JSON and saves it to the disk.
Parameters
----------
file_path: str
Path of the JSON output
Returns
-------
None
"""
data = dict(stack_list=self.stack_list)
with open(file_path, 'w') as f:
json.dump(data, f)
def load(self, file_path):
"""
De-serializes the JSON representation of the stack_list and loads it back.
Parameters
----------
file_path: str
Path of the JSON file to load stack_list from.
Returns
-------
None
"""
with open(file_path, 'r') as f:
data = json.load(f)
# for backward compatibility
if isinstance(data, list):
self.stack_list = data
return
self.stack_list = data['stack_list']
    @classmethod
    def _get_soup(cls, url=None, html=None, request_args=None):
        """Return a BeautifulSoup tree for either a raw HTML string or a
        fetched URL; a given `html` takes precedence over `url`."""
        request_args = request_args or {}

        if html:
            # NFKD-normalize and unescape entities so rule text comparisons
            # match what build()/get_result_* see
            html = unicodedata.normalize("NFKD", unescape(html))
            return BeautifulSoup(html, 'lxml')

        headers = dict(cls.request_headers)
        if url:
            headers['Host'] = urlparse(url).netloc

        # caller-supplied headers override the defaults
        user_headers = request_args.pop('headers', {})
        headers.update(user_headers)
        html = requests.get(url, headers=headers, **request_args).text
        html = unicodedata.normalize("NFKD", unescape(html))

        return BeautifulSoup(html, 'lxml')
@staticmethod
def _get_valid_attrs(item):
key_attrs = {'class', 'style'}
attrs = {
k: v if v != [] else '' for k, v in item.attrs.items() if k in key_attrs
}
for attr in key_attrs:
if attr not in attrs:
attrs[attr] = ''
return attrs
    @staticmethod
    def _child_has_text(child, text, url):
        """Return True if `child` carries `text` as its own text or as an
        attribute value; on success, annotates the element in place with
        `wanted_attr` (and `is_full_url` for href/src URL matches)."""
        child_text = child.getText().strip()

        if text == child_text:
            parent_text = child.parent.getText().strip()
            # skip elements whose text is identical to their parent's --
            # prefer the outermost element carrying this exact text
            if child_text == parent_text:
                return False
            child.wanted_attr = None
            return True

        for key, value in child.attrs.items():
            if not isinstance(value, str):
                continue

            value = value.strip()
            if text == value:
                child.wanted_attr = key
                return True

            # href/src may match only after resolving against the page URL
            if key in {'href', 'src'}:
                full_url = urljoin(url, value)
                if text == full_url:
                    child.wanted_attr = key
                    child.is_full_url = True
                    return True

        return False
def _get_children(self, soup, text, url):
text = text.strip()
children = reversed(soup.findChildren())
children = [x for x in children if self._child_has_text(x, text, url)]
return children
    def build(self, url=None, wanted_list=None, wanted_dict=None, html=None, request_args=None, update=False):
        """
        Automatically constructs a set of rules to scrape the specified target[s] from a web page.
            The rules are represented as stack_list.

        Parameters:
        ----------
        url: str, optional
            URL of the target web page. You should either pass url or html or both.

        wanted_list: list, optional
            A list of needed contents to be scraped.
                AutoScraper learns a set of rules to scrape these targets. If specified,
                wanted_dict will be ignored.

        wanted_dict: dict, optional
            A dict of needed contents to be scraped. Keys are aliases and values are list of target texts.
                AutoScraper learns a set of rules to scrape these targets and sets its aliases.

        html: str, optional
            An HTML string can also be passed instead of URL.
                You should either pass url or html or both.

        request_args: dict, optional
            A dictionary used to specify a set of additional request parameters used by requests
                module. You can specify proxy URLs, custom headers etc.

        update: bool, optional, defaults to False
            If True, new learned rules will be added to the previous ones.
            If False, all previously learned rules will be removed.

        Returns:
        --------
        None
        """

        soup = self._get_soup(url=url, html=html, request_args=request_args)

        result_list = []

        # update=False discards previously learned rules
        if update is False:
            self.stack_list = []

        # a plain wanted_list becomes a single anonymous-alias group
        if wanted_list:
            wanted_dict = {'': wanted_list}

        wanted_list = []

        for alias, wanted_items in wanted_dict.items():
            # normalize targets the same way _get_soup normalizes the page
            wanted_items = [unicodedata.normalize("NFKD", w) for w in wanted_items]
            wanted_list += wanted_items

            for wanted in wanted_items:
                # every element carrying the target text yields one rule
                children = self._get_children(soup, wanted, url)

                for child in children:
                    result, stack = self._get_result_for_child(child, soup, url)
                    stack['alias'] = alias
                    result_list += result
                    self.stack_list.append(stack)

        result_list = [item.text for item in result_list]
        result_list = unique_hashable(result_list)

        # success only if every wanted target is reproduced by the rules;
        # otherwise learned rules are kept but None is returned
        if all(w in result_list for w in wanted_list):
            self.stack_list = unique_stack_list(self.stack_list)
            return result_list

        return None
    @classmethod
    def _build_stack(cls, child, url):
        """Walk from `child` up to the <html> root and record, per level,
        the tag name, its filtered attributes and the element's sibling
        index; returns the rule ('stack') dict."""
        # leaf entry has no sibling index: (name, attrs)
        content = [(child.name, cls._get_valid_attrs(child))]

        parent = child
        while True:
            grand_parent = parent.findParent()
            if not grand_parent:
                break

            # find parent's position among same-tag/same-attr siblings so
            # the rule can be replayed deterministically
            children = grand_parent.findAll(parent.name, cls._get_valid_attrs(parent),
                                            recursive=False)
            for i, c in enumerate(children):
                if c == parent:
                    # ancestor entries are 3-tuples: (name, attrs, index)
                    content.insert(
                        0, (grand_parent.name, cls._get_valid_attrs(grand_parent), i))
                    break

            if grand_parent.name == 'html':
                break

            parent = grand_parent

        wanted_attr = getattr(child, 'wanted_attr', None)
        is_full_url = getattr(child, 'is_full_url', False)
        stack = dict(content=content, wanted_attr=wanted_attr, is_full_url=is_full_url)
        # original page URL kept only for re-resolving relative href/src
        stack['url'] = url if is_full_url else ''
        # content hash for rule de-duplication; random id for user reference
        stack['hash'] = hashlib.sha256(str(stack).encode('utf-8')).hexdigest()
        stack['stack_id'] = 'rule_' + get_random_str(4)
        return stack
def _get_result_for_child(self, child, soup, url):
stack = self._build_stack(child, url)
result = self._get_result_with_stack(stack, soup, url, 1.0)
return result, stack
@staticmethod
def _fetch_result_from_child(child, wanted_attr, is_full_url, url):
if wanted_attr is None:
return child.getText().strip()
if wanted_attr not in child.attrs:
return None
if is_full_url:
return urljoin(url, child.attrs[wanted_attr])
return child.attrs[wanted_attr]
@staticmethod
def _get_fuzzy_attrs(attrs, attr_fuzz_ratio):
attrs = dict(attrs)
for key, val in attrs.items():
if isinstance(val, str) and val:
val = FuzzyText(val, attr_fuzz_ratio)
elif isinstance(val, (list, tuple)):
val = [FuzzyText(x, attr_fuzz_ratio) if x else x for x in val]
attrs[key] = val
return attrs
    def _get_result_with_stack(self, stack, soup, url, attr_fuzz_ratio, **kwargs):
        """Replay a learned rule level-by-level, collecting ALL elements
        matching each level's tag/attrs (similar-results mode)."""
        parents = [soup]
        stack_content = stack['content']
        contain_sibling_leaves = kwargs.get('contain_sibling_leaves', False)
        for index, item in enumerate(stack_content):
            children = []
            for parent in parents:

                attrs = item[1]
                if attr_fuzz_ratio < 1.0:
                    attrs = self._get_fuzzy_attrs(attrs, attr_fuzz_ratio)

                found = parent.findAll(item[0], attrs, recursive=False)
                if not found:
                    continue

                if not contain_sibling_leaves and index == len(stack_content) - 1:
                    # at the leaf level, keep only the sibling at the recorded
                    # index (clamped) unless sibling leaves were requested.
                    # NOTE(review): for a single-level stack this reads
                    # stack_content[-1], a 2-tuple with no index -- confirm
                    # that case cannot occur in practice.
                    idx = min(len(found) - 1, stack_content[index - 1][2])
                    found = [found[idx]]

                children += found

            parents = children

        wanted_attr = stack['wanted_attr']
        is_full_url = stack['is_full_url']
        # child_index is set by _get_result_by_func only when ordering or
        # alias-grouping is requested; defaults to 0 otherwise
        result = [ResultItem(self._fetch_result_from_child(i, wanted_attr, is_full_url, url),
                             getattr(i, 'child_index', 0)) for i in parents]
        result = [x for x in result if x.text]
        return result
    def _get_result_with_stack_index_based(self, stack, soup, url, attr_fuzz_ratio, **kwargs):
        """Replay a learned rule following the exact recorded sibling index
        at every level (exact-results mode), yielding at most one match."""
        # start from the document's single root element
        p = soup.findChildren(recursive=False)[0]
        stack_content = stack['content']
        for index, item in enumerate(stack_content[:-1]):
            # item carries this level's recorded sibling index; the next
            # entry carries the tag/attrs to descend into
            content = stack_content[index + 1]
            attrs = content[1]
            if attr_fuzz_ratio < 1.0:
                attrs = self._get_fuzzy_attrs(attrs, attr_fuzz_ratio)
            p = p.findAll(content[0], attrs, recursive=False)
            if not p:
                return []
            # clamp in case the page now has fewer siblings than recorded
            idx = min(len(p) - 1, item[2])
            p = p[idx]

        result = [ResultItem(self._fetch_result_from_child(
            p, stack['wanted_attr'], stack['is_full_url'], url), getattr(p, 'child_index', 0))]
        result = [x for x in result if x.text]
        return result
    def _get_result_by_func(self, func, url, html, soup, request_args, grouped,
                            group_by_alias, unique, attr_fuzz_ratio, **kwargs):
        """Apply every learned rule via `func` and post-process the matches
        (grouping, ordering, de-duplication) through _clean_result."""
        if not soup:
            soup = self._get_soup(url=url, html=html, request_args=request_args)

        keep_order = kwargs.get('keep_order', False)

        if group_by_alias or (keep_order and not grouped):
            # tag every element with its document order so results can be
            # sorted back into page order later
            for index, child in enumerate(soup.findChildren()):
                setattr(child, 'child_index', index)

        result_list = []
        grouped_result = defaultdict(list)
        for stack in self.stack_list:
            if not url:
                # fall back to the URL recorded at rule-learning time;
                # NOTE(review): once set from the first rule, this url is
                # reused for all remaining rules -- confirm intent
                url = stack.get('url', '')

            result = func(stack, soup, url, attr_fuzz_ratio, **kwargs)

            if not grouped and not group_by_alias:
                result_list += result
                continue

            # group either by user-facing alias or by internal rule id
            group_id = stack.get('alias', '') if group_by_alias else stack['stack_id']
            grouped_result[group_id] += result

        return self._clean_result(result_list, grouped_result, grouped, group_by_alias,
                                  unique, keep_order)
@staticmethod
def _clean_result(result_list, grouped_result, grouped, grouped_by_alias, unique, keep_order):
if not grouped and not grouped_by_alias:
if unique is None:
unique = True
if keep_order:
result_list = sorted(result_list, key=lambda x: x.index)
result = [x.text for x in result_list]
if unique:
result = unique_hashable(result)
return result
for k, val in grouped_result.items():
if grouped_by_alias:
val = sorted(val, key=lambda x: x.index)
val = [x.text for x in val]
if unique:
val = unique_hashable(val)
grouped_result[k] = val
return dict(grouped_result)
def get_result_similar(self, url=None, html=None, soup=None, request_args=None,
grouped=False, group_by_alias=False, unique=None, attr_fuzz_ratio=1.0,
keep_order=False, contain_sibling_leaves=False):
"""
Gets similar results based on the previously learned rules.
Parameters:
----------
url: str, optional
URL of the target web page. You should either pass url or html or both.
html: str, optional
An HTML string can also be passed instead of URL.
You should either pass url or html or both.
request_args: dict, optional
A dictionary used to specify a set of additional request parameters used by requests
module. You can specify proxy URLs, custom headers etc.
grouped: bool, optional, defaults to False
If set to True, the result will be a dictionary with the rule_ids as keys
and a list of scraped data per rule as values.
group_by_alias: bool, optional, defaults to False
If set to True, the result will be a dictionary with the rule alias as keys
and a list of scraped data per alias as values.
unique: bool, optional, defaults to True for non grouped results and
False for grouped results.
If set to True, will remove duplicates from returned result list.
attr_fuzz_ratio: float in range [0, 1], optional, defaults to 1.0
The fuzziness ratio threshold for matching html tag attributes.
keep_order: bool, optional, defaults to False
If set to True, the results will be ordered as they are present on the web page.
contain_sibling_leaves: bool, optional, defaults to False
If set to True, the results will also contain the sibling leaves of the wanted elements.
Returns:
--------
List of similar results scraped from the web page.
Dictionary if grouped=True or group_by_alias=True.
"""
func = self._get_result_with_stack
return self._get_result_by_func(func, url, html, soup, request_args, grouped,
group_by_alias, unique, attr_fuzz_ratio,
keep_order=keep_order,
contain_sibling_leaves=contain_sibling_leaves)
def get_result_exact(self, url=None, html=None, soup=None, request_args=None,
grouped=False, group_by_alias=False, unique=None, attr_fuzz_ratio=1.0):
"""
Gets exact results based on the previously learned rules.
Parameters:
----------
url: str, optional
URL of the target web page. You should either pass url or html or both.
html: str, optional
An HTML string can also be passed instead of URL.
You should either pass url or html or both.
request_args: dict, optional
A dictionary used to specify a set of additional request parameters used by requests
module. You can specify proxy URLs, custom headers etc.
grouped: bool, optional, defaults to False
If set to True, the result will be a dictionary with the rule_ids as keys
and a list of scraped data per rule as values.
group_by_alias: bool, optional, defaults to False
If set to True, the result will be a dictionary with the rule alias as keys
and a list of scraped data per alias as values.
unique: bool, optional, defaults to True for non grouped results and
False for grouped results.
If set to True, will remove duplicates from returned result list.
attr_fuzz_ratio: float in range [0, 1], optional, defaults to 1.0
The fuzziness ratio threshold for matching html tag attributes.
Returns:
--------
List of exact results scraped from the web page.
Dictionary if grouped=True or group_by_alias=True.
"""
func = self._get_result_with_stack_index_based
return self._get_result_by_func(func, url, html, soup, request_args, grouped,
group_by_alias, unique, attr_fuzz_ratio)
def get_result(self, url=None, html=None, request_args=None, grouped=False,
group_by_alias=False, unique=None, attr_fuzz_ratio=1.0):
"""
Gets similar and exact results based on the previously learned rules.
Parameters:
----------
url: str, optional
URL of the target web page. You should either pass url or html or both.
html: str, optional
An HTML string can also be passed instead of URL.
You should either pass url or html or both.
request_args: dict, optional
A dictionary used to specify a set of additional request parameters used by requests
module. You can specify proxy URLs, custom headers etc.
grouped: bool, optional, defaults to False
If set to True, the result will be dictionaries with the rule_ids as keys
and a list of scraped data per rule as values.
group_by_alias: bool, optional, defaults to False
If set to True, the result will be a dictionary with the rule alias as keys
and a list of scraped data per alias as values.
unique: bool, optional, defaults to True for non grouped results and
False for grouped results.
If set to True, will remove duplicates from returned result list.
attr_fuzz_ratio: float in range [0, 1], optional, defaults to 1.0
The fuzziness ratio threshold for matching html tag attributes.
Returns:
--------
Pair of (similar, exact) results.
See get_result_similar and get_result_exact methods.
"""
soup = self._get_soup(url=url, html=html, request_args=request_args)
args = dict(url=url, soup=soup, grouped=grouped, group_by_alias=group_by_alias,
unique=unique, attr_fuzz_ratio=attr_fuzz_ratio)
similar = self.get_result_similar(**args)
exact = self.get_result_exact(**args)
return similar, exact
def remove_rules(self, rules):
"""
Removes a list of learned rules from stack_list.
Parameters:
----------
rules : list
A list of rules to be removed
Returns:
--------
None
"""
self.stack_list = [x for x in self.stack_list if x['stack_id'] not in rules]
def keep_rules(self, rules):
"""
Removes all other rules except the specified ones.
Parameters:
----------
rules : list
A list of rules to keep in stack_list and removing the rest.
Returns:
--------
None
"""
self.stack_list = [x for x in self.stack_list if x['stack_id'] in rules]
def set_rule_aliases(self, rule_aliases):
"""
Sets the specified alias for each rule
Parameters:
----------
rule_aliases : dict
A dictionary with keys of rule_id and values of alias
Returns:
--------
None
"""
id_to_stack = {stack['stack_id']: stack for stack in self.stack_list}
for rule_id, alias in rule_aliases.items():
id_to_stack[rule_id]['alias'] = alias
    def generate_python_code(self):
        """Deprecated no-op kept for backward compatibility; prints a notice."""
        # deprecated
        print('This function is deprecated. Please use save() and load() instead.')
| 36.515464 | 110 | 0.595191 |
2dbc9d6967d9da83edb2e46827f1095b68c4962a | 414 | py | Python | meiduo_mall/apps/verifications/urls.py | MarioKarting/Django_meiduo_project | ef06e70b1ddb6709983ebb644452c980afc29000 | [
"MIT"
] | null | null | null | meiduo_mall/apps/verifications/urls.py | MarioKarting/Django_meiduo_project | ef06e70b1ddb6709983ebb644452c980afc29000 | [
"MIT"
] | null | null | null | meiduo_mall/apps/verifications/urls.py | MarioKarting/Django_meiduo_project | ef06e70b1ddb6709983ebb644452c980afc29000 | [
"MIT"
] | null | null | null | # !/usr/bin/env python
# _*_ coding:utf-8 _*_
from django.conf.urls import url
from . import views
urlpatterns = [
    # 1. Fetch the graphical (image) captcha: image_codes/(?P<uuid>[\w-]+)/
    url(r'^image_codes/(?P<uuid>[\w-]+)/$', views.ImageCodeView.as_view(), name='img_code'),
    # 2. Request an SMS verification code: sms_codes/(?P<mobile>1[3-9]\d{9})/
    url(r'^sms_codes/(?P<mobile>1[3-9]\d{9})/$', views.SMSCodeView.as_view(), name='sms_code'),
]
| 23 | 95 | 0.608696 |
397cf01a005b89afc96e3869078734e3ebb23018 | 55 | py | Python | FSWDF-Project/main.py | chrischism8063/Mongo-py | c531f93cd37cd9d9420e39811a37c8bcfc1e5d1c | [
"MIT"
] | null | null | null | FSWDF-Project/main.py | chrischism8063/Mongo-py | c531f93cd37cd9d9420e39811a37c8bcfc1e5d1c | [
"MIT"
] | null | null | null | FSWDF-Project/main.py | chrischism8063/Mongo-py | c531f93cd37cd9d9420e39811a37c8bcfc1e5d1c | [
"MIT"
] | null | null | null | # main entry to the system
from application import app
| 18.333333 | 27 | 0.8 |
16f389e5ce30ba25217b58ac39ef7ace30f1f583 | 1,252 | py | Python | tests/test_vbox.py | swsachith/cm | d99837917f0dafe60c25829cf78ae77bbe02bd85 | [
"Apache-2.0"
] | null | null | null | tests/test_vbox.py | swsachith/cm | d99837917f0dafe60c25829cf78ae77bbe02bd85 | [
"Apache-2.0"
] | null | null | null | tests/test_vbox.py | swsachith/cm | d99837917f0dafe60c25829cf78ae77bbe02bd85 | [
"Apache-2.0"
] | null | null | null | import time
#from cm4.vm.Vm import Vm
from cm4.configuration.config import Config
# from cm4.mongo.mongoDB import MongoDB
from cm4.common.debug import HEADING, myself
import subprocess
# nosetest -v --nopature
# nosetests -v --nocapture tests/test_vbox.py
class Test_vagrant:
    """Integration tests driving the `cms vbox` CLI through a shell.

    These tests shell out to the installed `cms` command and therefore
    require a working cloudmesh + vagrant/VirtualBox environment.
    """
    def setup(self):
        """No per-test fixtures are needed; kept for the nose/pytest hook."""
        pass
    def rprint(self, r):
        """Print command output `r` framed by Begin/End marker lines."""
        print (". Begin .", 70 * ".")
        print (r)
        print (". End .", 70 * ".")
    def test_01_image_add(self):
        """Add the ubuntu/bionic64 vagrant box and check it is reported."""
        HEADING(myself())
        r = subprocess.check_output("cms vbox image add ubuntu/bionic64", shell=True).decode("utf-8")
        self.rprint(r)
        assert "ubuntu/bionic64" in r
    def test_02_image_list(self):
        """List the available images and expect the added box to appear."""
        HEADING(myself())
        r = subprocess.check_output("cms vbox image list", shell=True).decode("utf-8")
        self.rprint (r)
        assert "ubuntu/bionic64" in r
    def test_03_image_boot(self):
        """Create and then boot a VM from the added image."""
        HEADING(myself())
        r = subprocess.check_output("cms vbox vm create test-bionic", shell=True).decode("utf-8")
        self.rprint (r)
        assert "ubuntu/bionic64" in r
        r = subprocess.check_output("cms vbox vm boot test-bionic", shell=True).decode("utf-8")
        self.rprint (r)
        assert "ubuntu/bionic64" in r
9376e3bc4fca50d0e4bb00b25f621c99d359cb87 | 15,735 | py | Python | qiskit_machine_learning/algorithms/distribution_learners/qgan/numpy_discriminator.py | Cryoris/qiskit-machine-learning | 140ab993c9f80e513fea896d9d67459ed20d1b60 | [
"Apache-2.0"
] | null | null | null | qiskit_machine_learning/algorithms/distribution_learners/qgan/numpy_discriminator.py | Cryoris/qiskit-machine-learning | 140ab993c9f80e513fea896d9d67459ed20d1b60 | [
"Apache-2.0"
] | null | null | null | qiskit_machine_learning/algorithms/distribution_learners/qgan/numpy_discriminator.py | Cryoris/qiskit-machine-learning | 140ab993c9f80e513fea896d9d67459ed20d1b60 | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Discriminator
The neural network is based on a neural network introduced in:
https://towardsdatascience.com/lets-code-a-neural-network-in-plain-numpy-ae7e74410795
"""
from typing import Dict, Any
import os
import logging
import numpy as np
from qiskit.utils import algorithm_globals
from qiskit.algorithms.optimizers import ADAM
from .discriminative_network import DiscriminativeNetwork
logger = logging.getLogger(__name__)
# pylint: disable=invalid-name
class DiscriminatorNet:
    """
    Discriminator
    The neural network is based on a neural network introduced in:
    https://towardsdatascience.com/lets-code-a-neural-network-in-plain-numpy-ae7e74410795
    """
    def __init__(self, n_features=1, n_out=1):
        """
        Initialize the discriminator network.
        Args:
            n_features (int): Dimension of input data samples.
            n_out (int): Dimension of the network output.
        """
        # Fixed 3-layer fully-connected topology: n_features -> 50 -> 20 -> n_out.
        self.architecture = [
            {"input_dim": n_features, "output_dim": 50, "activation": "leaky_relu"},
            {"input_dim": 50, "output_dim": 20, "activation": "leaky_relu"},
            {"input_dim": 20, "output_dim": n_out, "activation": "sigmoid"},
        ]
        # All layer weights live in one flat 1-D array; forward()/backward()
        # slice it layer by layer using the architecture dimensions.
        self.parameters = []
        # Cache of per-layer inputs ("a<i>") and pre-activations ("z<i>")
        # filled by forward() and consumed by backward().
        self.memory = {}
        for _, layer in enumerate(self.architecture):
            activ_function_curr = layer["activation"]
            layer_input_size = layer["input_dim"]
            layer_output_size = layer["output_dim"]
            # Draw uniform [0, 1) weights, then rescale to a symmetric range
            # whose width depends on the layer's activation function.
            params_layer = algorithm_globals.random.random(layer_output_size * layer_input_size)
            if activ_function_curr == "leaky_relu":
                params_layer = (params_layer * 2 - np.ones(np.shape(params_layer))) * 0.7
            elif activ_function_curr == "sigmoid":
                params_layer = (params_layer * 2 - np.ones(np.shape(params_layer))) * 0.2
            else:
                params_layer = params_layer * 2 - np.ones(np.shape(params_layer))
            self.parameters = np.append(self.parameters, params_layer)
        # NOTE(review): flatten() returns a copy that is discarded here;
        # parameters is already 1-D after np.append, so this line is a no-op.
        self.parameters.flatten()
    def forward(self, x):
        """
        Forward propagation.
        Args:
            x (numpy.ndarray): Discriminator input, i.e. data sample.
        Returns:
            list: Discriminator output, i.e. data label.
        """
        def sigmoid(z):
            # Logistic activation used by the output layer.
            sig = 1 / (1 + np.exp(-z))
            return sig
        def leaky_relu(z, slope=0.2):
            # max(0, z) + slope * min(0, z): identity for z >= 0, slope*z otherwise.
            return np.maximum(
                np.zeros(np.shape(z)), z) + slope * np.minimum(np.zeros(np.shape(z)), z)
        def single_layer_forward_propagation(x_old, w_new, activation="leaky_relu"):
            # Affine transform followed by the layer activation; also returns
            # the pre-activation z so backward() can reuse it.
            z_curr = np.dot(w_new, x_old)
            if activation == "leaky_relu":
                activation_func = leaky_relu
            elif activation == "sigmoid":
                activation_func = sigmoid
            else:
                raise Exception('Non-supported activation function')
            return activation_func(z_curr), z_curr
        x_new = x
        pointer = 0  # read offset into the flat parameter vector
        for idx, layer in enumerate(self.architecture):
            layer_idx = idx + 1
            activ_function_curr = layer["activation"]
            layer_input_size = layer["input_dim"]
            layer_output_size = layer["output_dim"]
            if idx == 0:
                # Reshape the raw input into (features, batch) column form.
                x_old = np.reshape(x_new, (layer_input_size, len(x_new)))
            else:
                x_old = x_new
            # Slice this layer's weight matrix out of the flat parameter array.
            pointer_next = pointer + (layer_output_size * layer_input_size)
            w_curr = self.parameters[pointer:pointer_next]
            w_curr = np.reshape(w_curr, (layer_output_size, layer_input_size))
            pointer = pointer_next
            x_new, z_curr = single_layer_forward_propagation(x_old, w_curr, activ_function_curr)
            # Remember inputs/pre-activations for backpropagation.
            self.memory["a" + str(idx)] = x_old
            self.memory["z" + str(layer_idx)] = z_curr
        return x_new
    def backward(self, x, y, weights=None):
        """
        Backward propagation.
        Args:
            x (numpy.ndarray): sample label (equivalent to discriminator output)
            y (numpy.ndarray): array, target label
            weights (numpy.ndarray): customized scaling for each sample (optional)
        Returns:
            tuple(numpy.ndarray, numpy.ndarray): parameter gradients
        """
        def sigmoid_backward(da, z):
            # d/dz sigmoid(z) = sigmoid(z) * (1 - sigmoid(z)), chained with da.
            sig = 1 / (1 + np.exp(-z))
            return da * sig * (1 - sig)
        def leaky_relu_backward(da, z, slope=0.2):
            # Gradient is 1 for z >= 0 and `slope` for z < 0.
            # NOTE(review): this elementwise Python loop could be replaced by
            # the vectorized np.where(z < 0, da * slope, da) — verify and fix.
            dz = np.array(da, copy=True)
            for i, line in enumerate(z):
                for j, element in enumerate(line):
                    if element < 0:
                        dz[i, j] = dz[i, j] * slope
            return dz
        def single_layer_backward_propagation(da_curr,
                                              w_curr, z_curr, a_prev, activation="leaky_relu"):
            # m = a_prev.shape[1]
            if activation == "leaky_relu":
                backward_activation_func = leaky_relu_backward
            elif activation == "sigmoid":
                backward_activation_func = sigmoid_backward
            else:
                raise Exception('Non-supported activation function')
            # Chain rule through the activation, then through the affine layer.
            dz_curr = backward_activation_func(da_curr, z_curr)
            dw_curr = np.dot(dz_curr, a_prev.T)
            da_prev = np.dot(w_curr.T, dz_curr)
            return da_prev, dw_curr
        grads_values = np.array([])
        m = y.shape[1]  # batch size; averages the unweighted loss gradient below
        y = y.reshape(np.shape(x))
        # Derivative of the binary cross-entropy w.r.t. the network output;
        # the max(..., 1e-4) clamps guard against division by zero.
        if weights is not None:
            da_prev = - np.multiply(
                weights,
                np.divide(y, np.maximum(np.ones(np.shape(x)) * 1e-4, x))
                - np.divide(1 - y, np.maximum(np.ones(np.shape(x)) * 1e-4, 1 - x)))
        else:
            da_prev = - (np.divide(y, np.maximum(np.ones(np.shape(x)) * 1e-4, x))
                         - np.divide(1 - y, np.maximum(np.ones(np.shape(x)) * 1e-4, 1 - x))) / m
        # Walk the layers in reverse; `pointer` is an offset from the END of
        # the flat parameter vector (0 means "up to the end").
        pointer = 0
        for layer_idx_prev, layer in reversed(list(enumerate(self.architecture))):
            layer_idx_curr = layer_idx_prev + 1
            activ_function_curr = layer["activation"]
            da_curr = da_prev
            # Values cached by forward() for this layer.
            a_prev = self.memory["a" + str(layer_idx_prev)]
            z_curr = self.memory["z" + str(layer_idx_curr)]
            layer_input_size = layer["input_dim"]
            layer_output_size = layer["output_dim"]
            pointer_prev = pointer - (layer_output_size * layer_input_size)
            if pointer == 0:
                w_curr = self.parameters[pointer_prev:]
            else:
                w_curr = self.parameters[pointer_prev:pointer]
            w_curr = np.reshape(w_curr, (layer_output_size, layer_input_size))
            pointer = pointer_prev
            da_prev, dw_curr = single_layer_backward_propagation(da_curr,
                                                                 np.array(w_curr), z_curr, a_prev,
                                                                 activ_function_curr)
            # Prepend so the final gradient vector matches the parameter layout.
            grads_values = np.append([dw_curr], grads_values)
        return grads_values
class NumPyDiscriminator(DiscriminativeNetwork):
    """
    Discriminator based on NumPy
    """
    def __init__(self, n_features: int = 1, n_out: int = 1) -> None:
        """
        Args:
            n_features: Dimension of input data vector.
            n_out: Dimension of the discriminator's output vector.
        """
        super().__init__()
        self._n_features = n_features
        self._n_out = n_out
        # Plain-NumPy feed-forward network that does the actual classification.
        self._discriminator = DiscriminatorNet(self._n_features, self._n_out)
        # ADAM with maxiter=1: train() performs exactly one optimization step
        # per call, as the GAN training loop alternates networks.
        self._optimizer = ADAM(maxiter=1, tol=1e-6, lr=1e-3, beta_1=0.7, beta_2=0.99,
                               noise_factor=1e-4,
                               eps=1e-6, amsgrad=True)
        self._ret = {}  # type: Dict[str, Any]
    def set_seed(self, seed):
        """
        Set seed.
        Args:
            seed (int): seed
        """
        algorithm_globals.random_seed = seed
    def save_model(self, snapshot_dir):
        """
        Save discriminator model
        Args:
            snapshot_dir (str): directory path for saving the model
        """
        # save self._discriminator.params_values
        # NOTE(review): np.save appends ".npy" to these names, so the files
        # on disk end up as e.g. "np_discriminator_params.csv.npy" — confirm
        # load_model agrees with that naming.
        np.save(os.path.join(snapshot_dir, 'np_discriminator_architecture.csv'),
                self._discriminator.architecture)
        np.save(os.path.join(snapshot_dir,
                             'np_discriminator_memory.csv'), self._discriminator.memory)
        np.save(os.path.join(snapshot_dir,
                             'np_discriminator_params.csv'), self._discriminator.parameters)
        self._optimizer.save_params(snapshot_dir)
    def load_model(self, load_dir):
        """
        Load discriminator model
        Args:
            load_dir (str): directory with a stored discriminator model to be loaded
        """
        # NOTE(review): these paths lack the ".npy" suffix np.save adds, and
        # object arrays need allow_pickle=True on NumPy >= 1.16.3 — verify.
        self._discriminator.architecture = \
            np.load(os.path.join(load_dir, 'np_discriminator_architecture.csv'))
        self._discriminator.memory = np.load(os.path.join(load_dir,
                                                          'np_discriminator_memory.csv'))
        self._discriminator.parameters = np.load(os.path.join(load_dir,
                                                              'np_discriminator_params.csv'))
        self._optimizer.load_params(load_dir)
    @property
    def discriminator_net(self):
        """
        Get discriminator
        Returns:
            DiscriminatorNet: discriminator object
        """
        return self._discriminator
    @discriminator_net.setter
    def discriminator_net(self, net):
        # Replace the underlying network (e.g. with a restored instance).
        self._discriminator = net
    def get_label(self, x, detach=False):  # pylint: disable=arguments-differ,unused-argument
        """
        Get data sample labels, i.e. true or fake.
        Args:
            x (numpy.ndarray): Discriminator input, i.e. data sample.
            detach (bool): depreciated for numpy network
        Returns:
            numpy.ndarray: Discriminator output, i.e. data label
        """
        return self._discriminator.forward(x)
    def loss(self, x, y, weights=None):
        """
        Loss function
        Args:
            x (numpy.ndarray): sample label (equivalent to discriminator output)
            y (numpy.ndarray): target label
            weights (numpy.ndarray): customized scaling for each sample (optional)
        Returns:
            float: loss function
        """
        # Binary cross-entropy; the max(..., 1e-4) clamps avoid log(0).
        if weights is not None:
            # Use weights as scaling factors for the samples and compute the sum
            return (-1) * np.dot(np.multiply(y,
                                             np.log(np.maximum(np.ones(np.shape(x)) * 1e-4, x)))
                                 + np.multiply(np.ones(np.shape(y)) - y,
                                               np.log(np.maximum(np.ones(np.shape(x)) * 1e-4,
                                                                 np.ones(np.shape(x)) - x))),
                                 weights)
        else:
            # Compute the mean
            return (-1) * np.mean(np.multiply(y,
                                              np.log(np.maximum(np.ones(np.shape(x)) * 1e-4, x)))
                                  + np.multiply(np.ones(np.shape(y)) - y,
                                                np.log(np.maximum(np.ones(np.shape(x)) * 1e-4,
                                                                  np.ones(np.shape(x)) - x))))
    def _get_objective_function(self, data, weights):
        """
        Get the objective function
        Args:
            data (tuple): training and generated data
            weights (numpy.ndarray): weights corresponding to training resp. generated data
        Returns:
            objective_function: objective function for the optimization
        """
        real_batch = data[0]
        real_prob = weights[0]
        generated_batch = data[1]
        generated_prob = weights[1]
        def objective_function(params):
            # Evaluate the discriminator loss for a candidate parameter vector.
            self._discriminator.parameters = params
            # Train on Real Data
            prediction_real = self.get_label(real_batch)
            loss_real = self.loss(prediction_real, np.ones(np.shape(prediction_real)), real_prob)
            prediction_fake = self.get_label(generated_batch)
            loss_fake = self.loss(prediction_fake,
                                  np.zeros(np.shape(prediction_fake)), generated_prob)
            # Average of the real and fake weighted losses (1-element arrays).
            return 0.5 * (loss_real[0] + loss_fake[0])
        return objective_function
    def _get_gradient_function(self, data, weights):
        """
        Get the gradient function
        Args:
            data (tuple): training and generated data
            weights (numpy.ndarray): weights corresponding to training resp. generated data
        Returns:
            gradient_function: Gradient function for the optimization
        """
        real_batch = data[0]
        real_prob = weights[0]
        generated_batch = data[1]
        generated_prob = weights[1]
        def gradient_function(params):
            # Analytic gradient of the discriminator loss via backpropagation,
            # summed over the real and generated batches.
            self._discriminator.parameters = params
            prediction_real = self.get_label(real_batch)
            grad_real = self._discriminator.backward(prediction_real,
                                                     np.ones(np.shape(prediction_real)), real_prob)
            prediction_generated = self.get_label(generated_batch)
            grad_generated = self._discriminator.backward(prediction_generated, np.zeros(
                np.shape(prediction_generated)), generated_prob)
            return np.add(grad_real, grad_generated)
        return gradient_function
    def train(self, data, weights, penalty=False,
              quantum_instance=None, shots=None) -> Dict[str, Any]:
        """
        Perform one training step w.r.t to the discriminator's parameters
        Args:
            data (tuple(numpy.ndarray, numpy.ndarray)):
                  real_batch: array, Training data batch.
                  generated_batch: array, Generated data batch.
            weights (tuple): real problem, generated problem
            penalty (bool): Depreciated for classical networks.
            quantum_instance (QuantumInstance): Depreciated for classical networks.
            shots (int): Number of shots for hardware or qasm execution.
                Ignored for classical networks.
        Returns:
            dict: with Discriminator loss and updated parameters.
        """
        # Train on Generated Data
        # Force single optimization iteration
        # NOTE(review): pokes ADAM's private attributes to reset its step
        # counter each call — relies on qiskit ADAM internals; verify on upgrade.
        self._optimizer._maxiter = 1
        self._optimizer._t = 0
        objective = self._get_objective_function(data, weights)
        gradient = self._get_gradient_function(data, weights)
        self._discriminator.parameters, loss, _ = \
            self._optimizer.optimize(num_vars=len(self._discriminator.parameters),
                                     objective_function=objective,
                                     initial_point=np.array(self._discriminator.parameters),
                                     gradient_function=gradient)
        self._ret['loss'] = loss
        self._ret['params'] = self._discriminator.parameters
        return self._ret
| 37.915663 | 99 | 0.572609 |
b573b06042927b4f47ee115d8e793754e5c21466 | 1,392 | py | Python | DS-Unit-3-Sprint-1-Software-Engineering/acme_report.py | matthew-sessions/DS-Unit-3-Sprint-1-Software-Engineering | 8acef8ef1348b49b3f5410d75592c3e7b8161b53 | [
"MIT"
] | null | null | null | DS-Unit-3-Sprint-1-Software-Engineering/acme_report.py | matthew-sessions/DS-Unit-3-Sprint-1-Software-Engineering | 8acef8ef1348b49b3f5410d75592c3e7b8161b53 | [
"MIT"
] | null | null | null | DS-Unit-3-Sprint-1-Software-Engineering/acme_report.py | matthew-sessions/DS-Unit-3-Sprint-1-Software-Engineering | 8acef8ef1348b49b3f5410d75592c3e7b8161b53 | [
"MIT"
] | null | null | null | from acme import Product
import random
from random import randint, sample, uniform
# Word pools used to compose random product names ("<adjective> <noun>").
ADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']
NOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']
def generate_products(num_products=30):
    """Return a list of ``num_products`` randomly generated Product objects.

    Names are "<adjective> <noun>" drawn from the module word pools; price
    and weight are uniform integers in [5, 100] and flammability a uniform
    float in [0.0, 2.5].
    """
    inventory = []
    for _ in range(num_products):
        descriptor = random.choice(ADJECTIVES)
        item = random.choice(NOUNS)
        label = f"{descriptor} {item}"
        cost = random.randint(5, 100)
        heft = random.randint(5, 100)
        burn = random.uniform(0.0, 2.5)
        inventory.append(Product(label, cost, heft, burn))
    return inventory
def inventory_report(products):
    """Print summary statistics for a list of products.

    Reports the number of unique product names and the average price,
    weight and flammability.  Handles an empty product list gracefully
    (the original crashed with ZeroDivisionError).

    Args:
        products: iterable of objects exposing ``name``, ``price``,
            ``weight`` and ``flammability`` attributes.
    """
    print('ACME CORPORATION OFFICIAL INVENTORY REPORT')
    if not products:
        # Guard the averages below against division by zero.
        print('No products to report.')
        return
    # A set collects unique names in O(n) instead of the O(n^2) list scan.
    unique_names = {p.name for p in products}
    prices = [p.price for p in products]
    weights = [p.weight for p in products]
    flammabilities = [p.flammability for p in products]
    print(f'Unique product names: {len(unique_names)}')
    print(f'Average price: {sum(prices)/len(prices)}')
    print(f'Average weight: {sum(weights)/len(weights)}')
    print(f'Average flammability: {sum(flammabilities)/len(flammabilities)}')
if __name__ == '__main__':
    # Script entry point: generate a fresh batch of products and report on it.
    inventory_report(generate_products())
| 32.372093 | 73 | 0.656609 |
19c7f23c1f8fd727b8a05be2d2460580ec1bc551 | 9,115 | py | Python | custom_components/thermiagenesis/climate.py | CJNE/thermiagenesis | 14b87d4469bd11ebd8bef03d41d44029ebfe871f | [
"MIT"
] | 7 | 2020-10-16T09:12:35.000Z | 2022-03-01T06:52:33.000Z | custom_components/thermiagenesis/climate.py | CJNE/thermiagenesis | 14b87d4469bd11ebd8bef03d41d44029ebfe871f | [
"MIT"
] | 37 | 2021-12-07T09:01:49.000Z | 2022-03-30T13:27:07.000Z | custom_components/thermiagenesis/climate.py | CJNE/thermiagenesis | 14b87d4469bd11ebd8bef03d41d44029ebfe871f | [
"MIT"
] | 1 | 2021-12-10T20:19:12.000Z | 2021-12-10T20:19:12.000Z | """ Thermia Genesis climate sensors."""
import logging
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import ATTR_CURRENT_TEMPERATURE
from homeassistant.components.climate.const import ATTR_MAX_TEMP
from homeassistant.components.climate.const import ATTR_MIN_TEMP
from homeassistant.components.climate.const import ATTR_TARGET_TEMP_HIGH
from homeassistant.components.climate.const import ATTR_TARGET_TEMP_LOW
from homeassistant.components.climate.const import ATTR_TARGET_TEMP_STEP
from homeassistant.components.climate.const import CURRENT_HVAC_HEAT
from homeassistant.components.climate.const import CURRENT_HVAC_IDLE
from homeassistant.components.climate.const import CURRENT_HVAC_OFF
from homeassistant.components.climate.const import HVAC_MODE_AUTO
from homeassistant.components.climate.const import HVAC_MODE_HEAT
from homeassistant.components.climate.const import HVAC_MODE_OFF
from homeassistant.components.climate.const import SUPPORT_TARGET_TEMPERATURE
from homeassistant.components.climate.const import SUPPORT_TARGET_TEMPERATURE_RANGE
from homeassistant.const import ATTR_TEMPERATURE
from homeassistant.const import TEMP_CELSIUS
from homeassistant.util.temperature import convert as convert_temperature
from .const import ATTR_DEFAULT_ENABLED
from .const import ATTR_ENABLED
from .const import ATTR_LABEL
from .const import ATTR_MANUFACTURER
from .const import ATTR_STATUS
from .const import CLIMATE_TYPES
from .const import DOMAIN
from .const import KEY_STATUS_VALUE
ATTR_FIRMWARE = "firmware"
ATTR_MODEL = "Diplomat Inverter Duo"
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Thermia climate entities for a config entry."""
    coordinator = hass.data[DOMAIN][config_entry.entry_id]
    # Every entity shares the same parent device description.
    shared_device_info = {
        "identifiers": {(DOMAIN, ATTR_MODEL)},
        "name": ATTR_MODEL,
        "manufacturer": ATTR_MANUFACTURER,
        "model": ATTR_MODEL,
        "sw_version": coordinator.data.get(ATTR_FIRMWARE),
    }
    entities = [
        ThermiaClimateSensor(coordinator, kind, shared_device_info)
        for kind in CLIMATE_TYPES
    ]
    async_add_entities(entities, False)
class ThermiaClimateSensor(ClimateEntity):
    """Define a Thermia climate sensor."""
    def __init__(self, coordinator, kind, device_info):
        """Initialize.

        Args:
            coordinator: data-update coordinator holding the heat pump state.
            kind: key into CLIMATE_TYPES selecting this sensor's metadata.
            device_info: shared Home Assistant device registry info.
        """
        self.kind = kind
        # Static metadata (labels, register keys, limits) for this sensor kind.
        self.meta = CLIMATE_TYPES[kind]
        self._name = f"{self.meta[ATTR_LABEL]}"
        # self._name = f"{coordinator.data[ATTR_MODEL]} {SENSOR_TYPES[kind][ATTR_LABEL]}"
        self._unique_id = f"thermiagenesis_{kind}"
        self._device_info = device_info
        self.coordinator = coordinator
        self._hvac_mode = CURRENT_HVAC_IDLE
        self._attrs = {}
    @property
    def temperature_unit(self) -> str:
        """Return the unit of measurement used by the platform."""
        return TEMP_CELSIUS
    @property
    def supported_features(self) -> int:
        """Return the bitmask of supported features.

        Derived from which temperature keys this sensor kind declares in
        its CLIMATE_TYPES metadata.
        """
        ret = 0
        if ATTR_TEMPERATURE in self.meta:
            ret = ret | SUPPORT_TARGET_TEMPERATURE
        if ATTR_TARGET_TEMP_HIGH in self.meta and ATTR_TARGET_TEMP_LOW in self.meta:
            ret = ret | SUPPORT_TARGET_TEMPERATURE_RANGE
        return ret
    @property
    def name(self):
        """Return the name."""
        return self._name
    @property
    def current_temperature(self):
        """Return the current temperature."""
        val = self.coordinator.data.get(self.meta[ATTR_CURRENT_TEMPERATURE])
        return val
    @property
    def target_temperature_low(self):
        """Return the target low temperature, or None if unsupported."""
        if ATTR_TARGET_TEMP_LOW not in self.meta:
            return None
        val = self.coordinator.data.get(self.meta[ATTR_TARGET_TEMP_LOW])
        return val
    @property
    def min_temp(self) -> float:
        """Return the minimum temperature."""
        return convert_temperature(
            self.meta[ATTR_MIN_TEMP], TEMP_CELSIUS, self.temperature_unit
        )
    @property
    def max_temp(self) -> float:
        """Return the maximum temperature."""
        return convert_temperature(
            self.meta[ATTR_MAX_TEMP], TEMP_CELSIUS, self.temperature_unit
        )
    @property
    def target_temperature_high(self):
        """Return the target high temperature, or None if unsupported."""
        if ATTR_TARGET_TEMP_HIGH not in self.meta:
            return None
        val = self.coordinator.data.get(self.meta[ATTR_TARGET_TEMP_HIGH])
        return val
    @property
    def target_temperature_step(self):
        """Return the target temperature step (defaults to 1)."""
        if ATTR_TARGET_TEMP_STEP not in self.meta:
            return 1
        val = self.meta[ATTR_TARGET_TEMP_STEP]
        return val
    @property
    def target_temperature(self):
        """Return the target temperature, or None if unsupported."""
        if ATTR_TEMPERATURE not in self.meta:
            return None
        val = self.coordinator.data.get(self.meta[ATTR_TEMPERATURE])
        return val
    @property
    def unique_id(self):
        """Return a unique_id for this entity."""
        return self._unique_id
    @property
    def available(self):
        """Return True if entity is available."""
        return self.coordinator.last_update_success
    @property
    def should_poll(self):
        """Return the polling requirement of the entity."""
        return False
    @property
    def device_info(self):
        """Return the device info."""
        return self._device_info
    @property
    def entity_registry_enabled_default(self):
        """Return if the entity should be enabled when first added to the entity registry."""
        return self.meta[ATTR_DEFAULT_ENABLED]
    @property
    def hvac_action(self):
        """Return the current running hvac operation if supported.
        Need to be one of CURRENT_HVAC_*.
        """
        isEnabled = self.coordinator.data.get(self.meta[ATTR_ENABLED])
        if not isEnabled:
            return CURRENT_HVAC_OFF
        # Heat pump reports a single status word; heating only when it
        # matches this sensor kind's configured status value.
        val = self.coordinator.data.get(ATTR_STATUS)
        if val == self.meta[KEY_STATUS_VALUE]:
            return CURRENT_HVAC_HEAT
        return CURRENT_HVAC_IDLE
    @property
    def hvac_mode(self):
        """Return the current hvac mode (OFF, HEAT while active, else AUTO)."""
        isEnabled = self.coordinator.data.get(self.meta[ATTR_ENABLED])
        if not isEnabled:
            return HVAC_MODE_OFF
        val = self.coordinator.data.get(ATTR_STATUS)
        if val == self.meta[KEY_STATUS_VALUE]:
            return HVAC_MODE_HEAT
        return HVAC_MODE_AUTO
    @property
    def hvac_modes(self):
        """Return the list of user-selectable hvac modes."""
        return [HVAC_MODE_OFF, HVAC_MODE_AUTO]
    async def async_set_hvac_mode(self, hvac_mode: str):
        """Set new target hvac mode."""
        # NOTE(review): debug print left in; consider _LOGGER.debug instead.
        print(f"Set hvac mode {hvac_mode}")
        if hvac_mode == HVAC_MODE_OFF:
            await self.coordinator._async_set_data(self.meta[ATTR_ENABLED], False)
        if hvac_mode == HVAC_MODE_AUTO:
            await self.coordinator._async_set_data(self.meta[ATTR_ENABLED], True)
    async def async_turn_on(self):
        """Enable this climate function on the heat pump."""
        await self.coordinator._async_set_data(self.meta[ATTR_ENABLED], True)
    async def async_turn_off(self):
        """Disable this climate function on the heat pump."""
        await self.coordinator._async_set_data(self.meta[ATTR_ENABLED], False)
    def async_write_ha_state(self):
        """Write state to Home Assistant, logging which sensor updated."""
        # NOTE(review): debug print left in; consider _LOGGER.debug instead.
        print(f"Writing state for {self.kind}: {self.state} ")
        super().async_write_ha_state()
    async def async_added_to_hass(self):
        """Register the attributes this entity needs with the coordinator."""
        register_attr = []
        if ATTR_TEMPERATURE in self.meta:
            register_attr.append(self.meta[ATTR_TEMPERATURE])
        if ATTR_CURRENT_TEMPERATURE in self.meta:
            register_attr.append(self.meta[ATTR_CURRENT_TEMPERATURE])
        if ATTR_TARGET_TEMP_HIGH in self.meta:
            register_attr.append(self.meta[ATTR_TARGET_TEMP_HIGH])
        if ATTR_TARGET_TEMP_LOW in self.meta:
            register_attr.append(self.meta[ATTR_TARGET_TEMP_LOW])
        if ATTR_ENABLED in self.meta:
            register_attr.append(self.meta[ATTR_ENABLED])
        self.coordinator.registerAttribute(register_attr)
        """Connect to dispatcher listening for entity data notifications."""
        self.async_on_remove(
            self.coordinator.async_add_listener(self.async_write_ha_state)
        )
    async def async_update(self):
        """Update Thermia entity."""
        await self.coordinator.wantsRefresh(self.kind)
    async def async_set_temperature(self, **kwargs):
        """Set new target temperature."""
        # NOTE(review): debug prints left in; consider _LOGGER.debug instead.
        print("Set temperature")
        print(kwargs)
        # Map each provided HA temperature attribute to its device register.
        writes = {}
        if ATTR_TARGET_TEMP_LOW in kwargs:
            writes[self.meta[ATTR_TARGET_TEMP_LOW]] = kwargs[ATTR_TARGET_TEMP_LOW]
        if ATTR_TARGET_TEMP_HIGH in kwargs:
            writes[self.meta[ATTR_TARGET_TEMP_HIGH]] = kwargs[ATTR_TARGET_TEMP_HIGH]
        if ATTR_TEMPERATURE in kwargs:
            writes[self.meta[ATTR_TEMPERATURE]] = kwargs[ATTR_TEMPERATURE]
        for i, (reg, val) in enumerate(writes.items()):
            print(f"Write {reg} value {val}")
            await self.coordinator._async_set_data(reg, val)
62895c8e59bae96876957bc323411b1fbb27003a | 3,705 | py | Python | main.py | evanloshin/CarND-Advanced-Lane-Lines | 36fdf250694105fa0ed557e911ac9105348cab4d | [
"MIT"
] | null | null | null | main.py | evanloshin/CarND-Advanced-Lane-Lines | 36fdf250694105fa0ed557e911ac9105348cab4d | [
"MIT"
] | null | null | null | main.py | evanloshin/CarND-Advanced-Lane-Lines | 36fdf250694105fa0ed557e911ac9105348cab4d | [
"MIT"
] | null | null | null | # import python libraries
print('Importing dependencies...')
import numpy as np
import cv2
from matplotlib import pyplot as plt
from moviepy.editor import VideoFileClip
import sys
# import project dependencies
import functions as fn
from classes import undistorter, transformer, laneFinder
def main(argv):
    """Run the advanced lane-finding pipeline over a video.

    Parameters
    ----------
    argv : list of str
        Command-line arguments *excluding* the program name (the caller
        passes ``sys.argv[1:]``); ``argv[0]`` is the input video path and
        ``argv[1]`` the output video path.
    """
    ######################## HYPERPARAMETERS ########################
    nx_corners = 9 # number of chessboard corners in horizontal direction
    ny_corners = 6 # number of chessboard corners in vertical direction
    x_offset = 400 # horizontal offset for perspective transformation
    y_offset = -10 # vertical offset for perspective transformation
    sobelx_thresh = [20, 255] # pixel thresholds for horizontal color gradient
    sobely_thresh = [35, 255] # pixel thresholds for vertical color gradient
    saturation_thresh = [60, 255] # pixel thresholds for saturation channel in HLS color space
    value_thresh = [40, 255] # pixel thresholds for value channel in HSV color space
    n_windows = 9 # number of cross-sliding windows
    window_width = 150 # width (pixels) of cross-sliding windows
    min_pix = 40 # minimum number of activated pixels required to recenter window
    curve_margin = 50 # margin around last polynomial fit for including new lane pixels
    smooth_factor = 8 # moving average period for last n polynomial fits
    max_diff = 50 # allowable percent difference between new and previous coefficients
    ###################### END HYPERPARAMETERS ######################
    # take video i/o filenames from terminal
    # BUG FIX: the caller passes sys.argv[1:], so the paths live at indices
    # 0 and 1; the original read argv[1]/argv[2] and therefore always fell
    # back to the defaults when exactly two arguments were supplied.
    try:
        input_file = argv[0]
        output_file = argv[1]
    except IndexError:
        print("Invalid arguments passed to main.py. Using default i/o filenames.")
        input_file = 'test_videos/project_video.mp4'
        output_file = 'output_videos/output_project_video.mp4'
    # package up hyperparameters for passing to video pipeline
    hyperparameters = [n_windows, window_width, min_pix, curve_margin, smooth_factor, max_diff]
    # grab example driving images and calibration chessboard images
    print('Calibrating camera...')
    Examples = fn.read_image_dir('test_images/')
    Chessboards = fn.read_image_dir('camera_cal/')
    # measure camera distortion
    undist = undistorter(Chessboards, nx_corners, ny_corners)
    # create perspective transformation matrix
    print('Creating detection pipeline...')
    cal = undist.undistort(np.copy(Examples[0]))
    mask = np.int32([[[597, 450], [680, 450], [1020, 675], [290, 675]]])
    txf = transformer(cal, mask, x_offset, y_offset)
    txf.create_transformation()
    #txf.plot_mask() # visual aid for determining coordinates
    # undistort and warp perspective
    undistorted = undist.undistort(cal)
    warped = txf.warp(undistorted)
    #plt.imsave('temp.png', warped)
    # create a binarized image to filter out noise
    thresholds = [sobelx_thresh, sobely_thresh, saturation_thresh, value_thresh]
    binarized = fn.binarize(warped, sobelx_thresh, sobely_thresh, saturation_thresh, value_thresh)
    # pixel to meter conversion
    lanes = laneFinder()
    lanes.generate_unit_conversion(binarized)
    # generate video
    # BUG FIX: debug leftovers used to overwrite the chosen filenames with
    # hard-coded paths and trim the clip with subclip(5, 7); process the
    # requested video in full instead.
    clip = VideoFileClip(input_file)
    result = clip.fl_image(lambda frame: fn.video_pipeline(frame, undist, txf, thresholds, lanes, hyperparameters))
    result.write_videofile(output_file, audio=False)
    # result = fn.video_pipeline(Examples[0], undist, txf, thresholds, lanes, hyperparameters)
    # plt.imsave('test.png', result)
if __name__ == '__main__': main(sys.argv[1:]) | 44.638554 | 115 | 0.716059 |
8ef158ace90ba28e36d7fe10f23d5ec9230b2ff3 | 855 | py | Python | tutorials_for_myself/dataclass.py | pestun/ultimate-utils | 676002e80422067256c43172a78825ed12954bcb | [
"MIT"
] | 5 | 2021-03-13T16:07:26.000Z | 2021-09-09T17:00:36.000Z | tutorials_for_myself/dataclass.py | pestun/ultimate-utils | 676002e80422067256c43172a78825ed12954bcb | [
"MIT"
] | 8 | 2021-03-09T21:52:09.000Z | 2021-12-02T17:23:33.000Z | tutorials_for_myself/dataclass.py | pestun/ultimate-utils | 676002e80422067256c43172a78825ed12954bcb | [
"MIT"
] | 5 | 2021-03-24T20:38:43.000Z | 2022-03-17T07:54:12.000Z | # https://realpython.com/python-data-classes/
from dataclasses import dataclass
@dataclass
class DataClassCard:
    """A playing card; ``@dataclass`` auto-generates ``__init__``/``__repr__``/``__eq__``."""
    rank: str
    suit: str
    # Fields with defaults must come after the required positional fields.
    default_test: str = 'default'
# Positional arguments map onto the declared fields: rank='Q', suit='Hearts'.
card = DataClassCard('Q', 'Hearts')
print(card)
# Dataclasses get a field-by-field __eq__ for free, so this prints True.
print(card == DataClassCard('Q', 'Hearts'))
#%%
from dataclasses import dataclass, field
def get_str():
    """Default-value factory: always returns the string ``'default'``."""
    value = 'default'
    return value
def get_lst():
    """Default-value factory: returns a *fresh* ``[1, 2]`` list on every call."""
    return list((1, 2))
@dataclass
class DataClassCard:
    """Card demonstrating safe mutable defaults via ``field(default_factory=...)``."""
    rank: str
    suit: str
    # - you need to be careful with mutable objects: a plain mutable default
    #   would be shared by every instance if it were allowed!
    #   Luckily Python will throw an error if you try to do that:
    # default_test: str = field(default_factory=get_str)
    # default_test: str = [1, 2]
    default_test: list = field(default_factory=get_lst)
# default_test is built by calling get_lst(), so each card owns its own list.
card = DataClassCard('Q', 'Hearts')
print(card)
print(card.default_test)
e67cdbd4bc550a44580b4a16a330135360d6cbef | 1,456 | py | Python | app/api/v2/views/rsvps.py | MaggieKimani1/QUESTIONER-API-V2 | e1440ce75a52757278b48b19223f81b95f0b1eee | [
"MIT"
] | null | null | null | app/api/v2/views/rsvps.py | MaggieKimani1/QUESTIONER-API-V2 | e1440ce75a52757278b48b19223f81b95f0b1eee | [
"MIT"
] | null | null | null | app/api/v2/views/rsvps.py | MaggieKimani1/QUESTIONER-API-V2 | e1440ce75a52757278b48b19223f81b95f0b1eee | [
"MIT"
] | null | null | null | from flask import Flask, request, jsonify, json
from flask_restful import Resource
from app.api.v2.models.meetupmodel import Meetups
from app.api.v2.models.rsvpmodel import Rsvps
from flask_expects_json import expects_json
from app.api.v2.utils.json_schema import rsvp_schema
from flask_jwt_extended import get_jwt_identity, jwt_required
# Module-level data-access helpers shared by the resource below.
rsvp = Rsvps()
meetup = Meetups()  # NOTE(review): unused in the visible code -- verify before removing
class RSVPEndpoint(Resource):
    '''Endpoint for all RSVP functionality.'''

    @expects_json(rsvp_schema)
    # @jwt_required
    def post(self, meetup_id):
        '''Post an RSVP for the meetup identified by ``meetup_id``.

        Returns 400 for a non-integer id, an unknown meetup, a missing
        payload or an invalid response value; 201 on success.
        '''
        try:
            meetup_id = int(meetup_id)
        except (TypeError, ValueError):
            # Narrowed from a bare ``except:`` -- int() only raises these.
            return{"message": "The id has to be an integer"}, 400
        meetup_available = Meetups().get_specific_meetup(meetup_id)
        if not meetup_available:
            return {"message": "You cannot RSVP an unavailable meetup"}, 400
        data = request.get_json()
        if not data:
            # BUG FIX: the original built this response tuple but never
            # returned it, so an empty payload fell through and crashed on
            # data['user_id'] below.
            return {"message": "Please submit your RSVP", "status": 400}, 400
        user_id = data['user_id']
        response = data['response']
        if response in ("yes", "no", "maybe"):
            new_rsvp = rsvp.create_rsvp(user_id, meetup_id, response)
            return {"status": 201, "data": new_rsvp, "message": "RSVP saved for this meetup"}, 201
        return {"message": "response should be a yes, no or maybe", "status": 400}, 400
| 36.4 | 98 | 0.653846 |
b2cae7f552f2d2d0b4d2d33295b8ceff37e94207 | 7,694 | py | Python | etna/transforms/timestamp/time_flags.py | martins0n/etna | 51e9cec5183da2499ca247b0e2db215507246ceb | [
"Apache-2.0"
] | 326 | 2021-11-18T15:30:50.000Z | 2022-03-31T09:44:15.000Z | etna/transforms/timestamp/time_flags.py | martins0n/etna | 51e9cec5183da2499ca247b0e2db215507246ceb | [
"Apache-2.0"
] | 305 | 2021-11-17T10:28:31.000Z | 2022-03-31T18:05:03.000Z | etna/transforms/timestamp/time_flags.py | martins0n/etna | 51e9cec5183da2499ca247b0e2db215507246ceb | [
"Apache-2.0"
] | 29 | 2021-11-21T12:10:48.000Z | 2022-03-31T22:55:06.000Z | from copy import deepcopy
from typing import Optional
import numpy as np
import pandas as pd
from etna.transforms.base import FutureMixin
from etna.transforms.base import Transform
class TimeFlagsTransform(Transform, FutureMixin):
    """TimeFlagsTransform is a class that implements extraction of the main time-based features from datetime column."""

    def __init__(
        self,
        minute_in_hour_number: bool = True,
        fifteen_minutes_in_hour_number: bool = False,
        hour_number: bool = True,
        half_hour_number: bool = False,
        half_day_number: bool = False,
        one_third_day_number: bool = False,
        out_column: Optional[str] = None,
    ):
        """Initialise class attributes.

        Parameters
        ----------
        minute_in_hour_number:
            if True: add column with minute number to feature dataframe in transform
        fifteen_minutes_in_hour_number:
            if True: add column with number of fifteen-minute interval within hour with numeration from 0
            to feature dataframe in transform
        hour_number:
            if True: add column with hour number to feature dataframe in transform
        half_hour_number:
            if True: add column with 0 for the first half of the hour and 1 for the second
            to feature dataframe in transform
        half_day_number:
            if True: add column with 0 for the first half of the day and 1 for the second
            to feature dataframe in transform
        one_third_day_number:
            if True: add column with number of 8-hour interval within day with numeration from 0
            to feature dataframe in transform
        out_column:
            base for the name of created columns;
            * if set the final name is '{out_column}_{feature_name}';
            * if don't set, name will be ``transform.__repr__()``,
              repr will be made for transform that creates exactly this column

        Raises
        ------
        ValueError: if feature has invalid initial params
        """
        # At least one flag must be enabled, otherwise the transform is a no-op.
        if not any(
            [
                minute_in_hour_number,
                fifteen_minutes_in_hour_number,
                hour_number,
                half_hour_number,
                half_day_number,
                one_third_day_number,
            ]
        ):
            raise ValueError(
                f"{type(self).__name__} feature does nothing with given init args configuration, "
                f"at least one of minute_in_hour_number, fifteen_minutes_in_hour_number, hour_number, "
                f"half_hour_number, half_day_number, one_third_day_number should be True."
            )
        self.date_column_name = None
        self.minute_in_hour_number: bool = minute_in_hour_number
        self.fifteen_minutes_in_hour_number: bool = fifteen_minutes_in_hour_number
        self.hour_number: bool = hour_number
        self.half_hour_number: bool = half_hour_number
        self.half_day_number: bool = half_day_number
        self.one_third_day_number: bool = one_third_day_number
        self.out_column = out_column
        # create empty init parameters; used by _get_column_name to build a
        # repr of a transform configured to emit exactly one feature
        self._empty_parameters = dict(
            minute_in_hour_number=False,
            fifteen_minutes_in_hour_number=False,
            hour_number=False,
            half_hour_number=False,
            half_day_number=False,
            one_third_day_number=False,
        )

    def _get_column_name(self, feature_name: str) -> str:
        """Return the output column name for *feature_name*.

        Without ``out_column`` the name is the repr of a transform producing
        only this single feature; otherwise it is '{out_column}_{feature_name}'.
        """
        if self.out_column is None:
            init_parameters = deepcopy(self._empty_parameters)
            init_parameters[feature_name] = self.__dict__[feature_name]
            temp_transform = TimeFlagsTransform(**init_parameters, out_column=self.out_column)  # type: ignore
            return repr(temp_transform)
        else:
            return f"{self.out_column}_{feature_name}"

    def fit(self, *args, **kwargs) -> "TimeFlagsTransform":
        """Fit datetime model (stateless: nothing is learned)."""
        return self

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Transform method for features based on time.

        Parameters
        ----------
        df:
            Features dataframe with time

        Returns
        -------
        result: pd.DataFrame
            Dataframe with extracted features
        """
        # Compute each enabled flag from the dataframe's (datetime) index.
        features = pd.DataFrame(index=df.index)
        timestamp_series = pd.Series(df.index)
        if self.minute_in_hour_number:
            minute_in_hour_number = self._get_minute_number(timestamp_series=timestamp_series)
            features[self._get_column_name("minute_in_hour_number")] = minute_in_hour_number
        if self.fifteen_minutes_in_hour_number:
            fifteen_minutes_in_hour_number = self._get_period_in_hour(
                timestamp_series=timestamp_series, period_in_minutes=15
            )
            features[self._get_column_name("fifteen_minutes_in_hour_number")] = fifteen_minutes_in_hour_number
        if self.hour_number:
            hour_number = self._get_hour_number(timestamp_series=timestamp_series)
            features[self._get_column_name("hour_number")] = hour_number
        if self.half_hour_number:
            half_hour_number = self._get_period_in_hour(timestamp_series=timestamp_series, period_in_minutes=30)
            features[self._get_column_name("half_hour_number")] = half_hour_number
        if self.half_day_number:
            half_day_number = self._get_period_in_day(timestamp_series=timestamp_series, period_in_hours=12)
            features[self._get_column_name("half_day_number")] = half_day_number
        if self.one_third_day_number:
            one_third_day_number = self._get_period_in_day(timestamp_series=timestamp_series, period_in_hours=8)
            features[self._get_column_name("one_third_day_number")] = one_third_day_number
        # All generated flags are categorical, not numeric.
        for feature in features.columns:
            features[feature] = features[feature].astype("category")
        # Attach the new features to every segment of the wide dataframe,
        # rebuilding the (segment, feature) MultiIndex on the columns.
        dataframes = []
        for seg in df.columns.get_level_values("segment").unique():
            tmp = df[seg].join(features)
            _idx = tmp.columns.to_frame()
            _idx.insert(0, "segment", seg)
            tmp.columns = pd.MultiIndex.from_frame(_idx)
            dataframes.append(tmp)
        result = pd.concat(dataframes, axis=1).sort_index(axis=1)
        result.columns.names = ["segment", "feature"]
        return result

    @staticmethod
    def _get_minute_number(timestamp_series: pd.Series) -> np.ndarray:
        """Generate array with the minute number in the hour."""
        return timestamp_series.apply(lambda x: x.minute).values

    @staticmethod
    def _get_period_in_hour(timestamp_series: pd.Series, period_in_minutes: int = 15) -> np.ndarray:
        """Generate an array with the period number in the hour.

        Accepts a period length in minutes as input and returns array where timestamps marked by period number.
        """
        return timestamp_series.apply(lambda x: x.minute // period_in_minutes).values

    @staticmethod
    def _get_hour_number(timestamp_series: pd.Series) -> np.ndarray:
        """Generate an array with the hour number in the day."""
        return timestamp_series.apply(lambda x: x.hour).values

    @staticmethod
    def _get_period_in_day(timestamp_series: pd.Series, period_in_hours: int = 12) -> np.ndarray:
        """Generate an array with the period number in the day.

        Accepts a period length in hours as input and returns array where timestamps marked by period number.
        """
        return timestamp_series.apply(lambda x: x.hour // period_in_hours).values
__all__ = ["TimeFlagsTransform"]
| 40.282723 | 120 | 0.659865 |
f21ede9bd6e772b8bcf1f85e87d157c687b22489 | 1,297 | py | Python | kgl_deepfake/pytorch_retinaface/utils/timer.py | qAp/kgl_deepfake | d3ee36d704d82d5d72068ea16276a88b5746c8de | [
"Apache-2.0"
] | null | null | null | kgl_deepfake/pytorch_retinaface/utils/timer.py | qAp/kgl_deepfake | d3ee36d704d82d5d72068ea16276a88b5746c8de | [
"Apache-2.0"
] | null | null | null | kgl_deepfake/pytorch_retinaface/utils/timer.py | qAp/kgl_deepfake | d3ee36d704d82d5d72068ea16276a88b5746c8de | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/12h_pytorch_retinaface.utils.timer.ipynb (unless otherwise specified).
__all__ = ['Timer']
# Cell
# %load Pytorch_Retinaface/utils/timer.py
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import time
class Timer(object):
    """Accumulating stopwatch: tracks total, last-interval and average elapsed time."""

    def __init__(self):
        # All counters start at zero; clear() holds the single definition.
        self.clear()

    def tic(self):
        """Start (or restart) timing."""
        # using time.time instead of time.clock because time time.clock
        # does not normalize for multithreading
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop timing and return the average elapsed time (or the last interval)."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff

    def clear(self):
        """Reset all accumulated statistics."""
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.
| 27.020833 | 118 | 0.561295 |
b69b6b72f7037628cfc1a2f1056170d837474838 | 437 | py | Python | pytest_djangoapp/compat.py | jayvdb/pytest-djangoapp | 271df3cd57045961aadeb1ee4433de3f4fcfd2ef | [
"BSD-3-Clause"
] | 34 | 2018-06-18T12:18:13.000Z | 2021-05-14T14:15:39.000Z | pytest_djangoapp/compat.py | jayvdb/pytest-djangoapp | 271df3cd57045961aadeb1ee4433de3f4fcfd2ef | [
"BSD-3-Clause"
] | 25 | 2019-05-21T23:13:13.000Z | 2022-02-10T10:49:50.000Z | pytest_djangoapp/compat.py | jayvdb/pytest-djangoapp | 271df3cd57045961aadeb1ee4433de3f4fcfd2ef | [
"BSD-3-Clause"
] | 4 | 2018-06-25T03:59:22.000Z | 2021-05-27T04:09:57.000Z | from django import VERSION
def get_urlpatterns(patterns_list):
    """Returns object suitable to use as urlpatterns in `urls.py`.

    Example::

        urlpatterns = get_urlpatterns([
            url(r'^index/$', index, name='index'),
        ])

    :param patterns_list:
    :return:
    """
    if VERSION < (1, 9):
        # Old Django still requires the list to be wrapped by patterns().
        from django.conf.urls import patterns
        return patterns('', *patterns_list)
    return patterns_list
| 19.863636 | 66 | 0.617849 |
0298c309bebdf6ba9ce0b82bf5b8c05a550be290 | 7,574 | py | Python | rdr_service/alembic/versions/f2aa951ca1a7_add_summary_columns.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | [
"BSD-3-Clause"
] | 39 | 2017-10-13T19:16:27.000Z | 2021-09-24T16:58:21.000Z | rdr_service/alembic/versions/f2aa951ca1a7_add_summary_columns.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | [
"BSD-3-Clause"
] | 312 | 2017-09-08T15:42:13.000Z | 2022-03-23T18:21:40.000Z | rdr_service/alembic/versions/f2aa951ca1a7_add_summary_columns.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | [
"BSD-3-Clause"
] | 19 | 2017-09-15T13:58:00.000Z | 2022-02-07T18:33:20.000Z | """Add summary columns
Revision ID: f2aa951ca1a7
Revises: 7e250583b9cb
Create Date: 2017-10-23 16:50:06.586388
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import OrderStatus
# revision identifiers, used by Alembic.
revision = "f2aa951ca1a7"        # this migration's id (matches the filename)
down_revision = "7e250583b9cb"   # migration this one builds on
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function ``upgrade_<engine_name>``."""
    handler = globals()["upgrade_" + engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function ``downgrade_<engine_name>``."""
    handler = globals()["downgrade_" + engine_name]
    handler()
def upgrade_metrics():
    """Schema upgrade for the ``metrics`` engine -- no changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """Schema downgrade for the ``metrics`` engine -- no changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def upgrade_rdr():
    """Schema upgrade for the ``rdr`` engine.

    Adds biospecimen and physical-measurement summary columns, per-sample
    order-status columns, and site foreign keys to ``participant_summary``,
    plus a ``finalized`` timestamp on ``physical_measurements``.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("participant_summary", sa.Column("biospecimen_collected_site_id", sa.Integer(), nullable=True))
    op.add_column("participant_summary", sa.Column("biospecimen_finalized_site_id", sa.Integer(), nullable=True))
    op.add_column("participant_summary", sa.Column("biospecimen_order_time", model.utils.UTCDateTime(), nullable=True))
    op.add_column("participant_summary", sa.Column("biospecimen_processed_site_id", sa.Integer(), nullable=True))
    op.add_column("participant_summary", sa.Column("biospecimen_source_site_id", sa.Integer(), nullable=True))
    op.add_column("participant_summary", sa.Column("biospecimen_status", model.utils.Enum(OrderStatus), nullable=True))
    op.add_column(
        "participant_summary", sa.Column("physical_measurements_created_site_id", sa.Integer(), nullable=True)
    )
    op.add_column(
        "participant_summary", sa.Column("physical_measurements_finalized_site_id", sa.Integer(), nullable=True)
    )
    op.add_column(
        "participant_summary",
        sa.Column("physical_measurements_finalized_time", model.utils.UTCDateTime(), nullable=True),
    )
    # One status + one timestamp column per sample type (1ed04 ... 2ed10).
    op.add_column(
        "participant_summary", sa.Column("sample_order_status_1ed04", model.utils.Enum(OrderStatus), nullable=True)
    )
    op.add_column(
        "participant_summary", sa.Column("sample_order_status_1ed04_time", model.utils.UTCDateTime(), nullable=True)
    )
    op.add_column(
        "participant_summary", sa.Column("sample_order_status_1ed10", model.utils.Enum(OrderStatus), nullable=True)
    )
    op.add_column(
        "participant_summary", sa.Column("sample_order_status_1ed10_time", model.utils.UTCDateTime(), nullable=True)
    )
    op.add_column(
        "participant_summary", sa.Column("sample_order_status_1hep4", model.utils.Enum(OrderStatus), nullable=True)
    )
    op.add_column(
        "participant_summary", sa.Column("sample_order_status_1hep4_time", model.utils.UTCDateTime(), nullable=True)
    )
    op.add_column(
        "participant_summary", sa.Column("sample_order_status_1pst8", model.utils.Enum(OrderStatus), nullable=True)
    )
    op.add_column(
        "participant_summary", sa.Column("sample_order_status_1pst8_time", model.utils.UTCDateTime(), nullable=True)
    )
    op.add_column(
        "participant_summary", sa.Column("sample_order_status_1sal", model.utils.Enum(OrderStatus), nullable=True)
    )
    op.add_column(
        "participant_summary", sa.Column("sample_order_status_1sal_time", model.utils.UTCDateTime(), nullable=True)
    )
    op.add_column(
        "participant_summary", sa.Column("sample_order_status_1sst8", model.utils.Enum(OrderStatus), nullable=True)
    )
    op.add_column(
        "participant_summary", sa.Column("sample_order_status_1sst8_time", model.utils.UTCDateTime(), nullable=True)
    )
    op.add_column(
        "participant_summary", sa.Column("sample_order_status_1ur10", model.utils.Enum(OrderStatus), nullable=True)
    )
    op.add_column(
        "participant_summary", sa.Column("sample_order_status_1ur10_time", model.utils.UTCDateTime(), nullable=True)
    )
    op.add_column(
        "participant_summary", sa.Column("sample_order_status_2ed10", model.utils.Enum(OrderStatus), nullable=True)
    )
    op.add_column(
        "participant_summary", sa.Column("sample_order_status_2ed10_time", model.utils.UTCDateTime(), nullable=True)
    )
    # NOTE(review): the FK name is None (auto-generated by the backend); the
    # matching drop_constraint(None, ...) calls in downgrade_rdr need a real
    # name -- verify these have ever been exercised.
    op.create_foreign_key(None, "participant_summary", "site", ["biospecimen_processed_site_id"], ["site_id"])
    op.create_foreign_key(
        None, "participant_summary", "site", ["physical_measurements_finalized_site_id"], ["site_id"]
    )
    op.create_foreign_key(None, "participant_summary", "site", ["physical_measurements_created_site_id"], ["site_id"])
    op.create_foreign_key(None, "participant_summary", "site", ["biospecimen_collected_site_id"], ["site_id"])
    op.create_foreign_key(None, "participant_summary", "site", ["biospecimen_source_site_id"], ["site_id"])
    op.create_foreign_key(None, "participant_summary", "site", ["biospecimen_finalized_site_id"], ["site_id"])
    op.add_column("physical_measurements", sa.Column("finalized", model.utils.UTCDateTime(), nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Schema downgrade for the ``rdr`` engine: reverse of ``upgrade_rdr``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("physical_measurements", "finalized")
    # NOTE(review): Alembic's drop_constraint requires the constraint *name*;
    # passing None here (as autogenerate emitted) is expected to fail at
    # runtime -- confirm whether this downgrade was ever run.
    op.drop_constraint(None, "participant_summary", type_="foreignkey")
    op.drop_constraint(None, "participant_summary", type_="foreignkey")
    op.drop_constraint(None, "participant_summary", type_="foreignkey")
    op.drop_constraint(None, "participant_summary", type_="foreignkey")
    op.drop_constraint(None, "participant_summary", type_="foreignkey")
    op.drop_constraint(None, "participant_summary", type_="foreignkey")
    op.drop_column("participant_summary", "sample_order_status_2ed10_time")
    op.drop_column("participant_summary", "sample_order_status_2ed10")
    op.drop_column("participant_summary", "sample_order_status_1ur10_time")
    op.drop_column("participant_summary", "sample_order_status_1ur10")
    op.drop_column("participant_summary", "sample_order_status_1sst8_time")
    op.drop_column("participant_summary", "sample_order_status_1sst8")
    op.drop_column("participant_summary", "sample_order_status_1sal_time")
    op.drop_column("participant_summary", "sample_order_status_1sal")
    op.drop_column("participant_summary", "sample_order_status_1pst8_time")
    op.drop_column("participant_summary", "sample_order_status_1pst8")
    op.drop_column("participant_summary", "sample_order_status_1hep4_time")
    op.drop_column("participant_summary", "sample_order_status_1hep4")
    op.drop_column("participant_summary", "sample_order_status_1ed10_time")
    op.drop_column("participant_summary", "sample_order_status_1ed10")
    op.drop_column("participant_summary", "sample_order_status_1ed04_time")
    op.drop_column("participant_summary", "sample_order_status_1ed04")
    op.drop_column("participant_summary", "physical_measurements_finalized_time")
    op.drop_column("participant_summary", "physical_measurements_finalized_site_id")
    op.drop_column("participant_summary", "physical_measurements_created_site_id")
    op.drop_column("participant_summary", "biospecimen_status")
    op.drop_column("participant_summary", "biospecimen_source_site_id")
    op.drop_column("participant_summary", "biospecimen_processed_site_id")
    op.drop_column("participant_summary", "biospecimen_order_time")
    op.drop_column("participant_summary", "biospecimen_finalized_site_id")
    op.drop_column("participant_summary", "biospecimen_collected_site_id")
    # ### end Alembic commands ###
| 49.181818 | 119 | 0.746633 |
b614d97625fcda8cccccac9ff6ae6c80f3e48f4b | 3,409 | py | Python | model_YYMetRod.py | LeiDai/meep_metamaterials | 2ca51c861e1f69e638e920c9bdacc7c583e22aed | [
"MIT"
] | 6 | 2019-03-18T07:00:51.000Z | 2021-09-01T12:42:13.000Z | model_YYMetRod.py | LeiDai/meep_metamaterials | 2ca51c861e1f69e638e920c9bdacc7c583e22aed | [
"MIT"
] | null | null | null | model_YYMetRod.py | LeiDai/meep_metamaterials | 2ca51c861e1f69e638e920c9bdacc7c583e22aed | [
"MIT"
] | 5 | 2017-02-20T12:00:46.000Z | 2021-05-29T23:57:07.000Z | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import meep_utils, meep_materials
import numpy as np
from meep_utils import in_sphere, in_xcyl, in_ycyl, in_zcyl, in_xslab, in_yslab, in_zslab
"""
"""
class YYMR(meep_utils.AbstractMeepModel): #{{{
    """
         MMxxxxxxMM
         MMxxxxxxMM
    ^ z  MMxxxxxxMM
    |    MMxxxxxxMM
    |    MMxxxxxxMM
    +-----> y
    FD 2014-02
    """
    def cell_centers(self):
        """ Helper function for stacked multilayered metamaterials """
        return np.arange(-self.monzd*(self.cells-1)/2, self.monzd*(self.cells-1)/2+1e-12, self.monzd)
        ## alternatively: add surrounding two cells to compare the propagation _inside_ a metamaterial
        #return np.arange(-self.monzd*(self.cells+1)/2, self.monzd*(self.cells+1)/2+1e-12, self.monzd)

    # BUG FIX: the original file lacked the ``def __init__`` header entirely
    # (the body below started right after cell_centers()).  The signature is
    # reconstructed from the names the body uses -- TODO confirm the defaults
    # against the project's sibling models.
    def __init__(self, comment="", simtime=50e-12, resolution=4e-6, cells=1,
                 monzc=0e-6, monzd=100e-6, padding=50e-6, radius=10e-6,
                 yspacing=100e-6, epsilon=100.0, Kx=0, Ky=0):
        meep_utils.AbstractMeepModel.__init__(self)        ## Base class initialisation
        self.simulation_name = "XCylWire"
        self.register_locals(locals())          ## Remember the parameters
        ## Initialization of materials used
        self.materials = [meep_materials.material_dielectric(where = self.where_wire, eps=epsilon, loss=0.01),
                meep_materials.material_Metal_THz(where = self.where_metal)
                ]
        # NOTE(review): self.where_wire is not defined in this class -- verify
        # it is provided by a base class or rename to self.where_diel.
        #if 'TiO2' in comment:
        #self.materials = [meep_materials.material_TiO2_THz(where = self.where_wire)]
        #elif 'STO' in comment:
        #self.materials = [meep_materials.material_STO_THz(where = self.where_wire)]
        #elif 'DielLossless' in comment:
        #self.materials = [meep_materials.material_dielectric(where = self.where_wire, eps=epsilon, loss=0.0)]
        #elif 'Diel' in comment:
        #self.materials = [meep_materials.material_dielectric(where = self.where_wire, eps=epsilon, loss=0.01)]
        #else:
        #self.materials = [meep_materials.material_Metal_THz(where = self.where_wire)]
        #
        ## Dimension constants for the simulation
        #self.size_x, self.size_y, self.size_z = xyspacing, xyspacing, 400e-6+cells*self.monzd
        self.pml_thickness = 20e-6
        self.size_x = resolution/1.8
        self.size_y = yspacing
        self.size_z = 200e-6+cells*self.monzd + 2*self.padding + 2*self.pml_thickness
        ## constants for the simulation
        (self.monitor_z1, self.monitor_z2) = (-(monzd*cells)/2+monzc - padding, (monzd*cells)/2+monzc + padding)
        self.simtime = simtime      # [s]
        self.srcWidth = 5000e9
        self.srcFreq = 4e9 + self.srcWidth/2 + (Ky**2+Kx**2)**.5 / (2*np.pi/3e8)    ## cutoff for oblique incidence
        self.interesting_frequencies = (0., 2000e9)
        #meep_utils.plot_eps(self.materials, freq_range=(1e10, 1e14), plot_conductivity=True)
        self.TestMaterials() ## catches (most) errors in structure syntax, before they crash the callback

    def where_diel(self, r):
        for cellz in self.cell_centers():
            # BUG FIX: the original condition had unbalanced parentheses.
            if in_zslab(r, cz=cellz, d=self.radius) and in_yslab(r, cy=.7e-6, d=60e-6-self.radius):  # XXX
                return self.return_value
        return 0

    def where_metal(self, r):
        # BUG FIX: the original called the bare name ``where_diel(self, r)``,
        # which is a NameError at runtime; it must go through ``self``.
        if self.where_diel(r):
            return 0
        for cellz in self.cell_centers():
            #if (in_zslab(r, cz=cellz, d=self.radius)) and in_yslab(r, cy=.7e-6, d=60e-6-self.radius): # XXX
                #return self.return_value
            return 0
#}}}
| 44.855263 | 115 | 0.63127 |
bef0b396db1f0d92743bab2d85c4369ff0db45fb | 371 | py | Python | dist/weewx-3.9.2/bin/weeplot/__init__.py | v0rts/docker-weewx | 70b2f252051dfead4fcb74e74662b297831e6342 | [
"Apache-2.0"
] | 10 | 2017-01-05T17:30:48.000Z | 2021-09-18T15:04:20.000Z | dist/weewx-3.9.2/bin/weeplot/__init__.py | v0rts/docker-weewx | 70b2f252051dfead4fcb74e74662b297831e6342 | [
"Apache-2.0"
] | 2 | 2019-07-21T10:48:42.000Z | 2022-02-16T20:36:45.000Z | dist/weewx-3.9.2/bin/weeplot/__init__.py | v0rts/docker-weewx | 70b2f252051dfead4fcb74e74662b297831e6342 | [
"Apache-2.0"
] | 12 | 2017-01-05T18:50:30.000Z | 2021-10-05T07:35:45.000Z | #
# Copyright (c) 2009-2015 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
"""Package weeplot. A set of modules for doing simple plots
"""
# Define possible exceptions that could get thrown.
class ViolatedPrecondition(StandardError):
    """Exception thrown when a function is called with violated preconditions.
    """
    # NOTE(review): StandardError exists only in Python 2; importing this
    # module under Python 3 would raise NameError -- confirm the supported
    # runtime before porting.
| 24.733333 | 78 | 0.716981 |
276ca847c4b55092ff6718ca7d1390a93ee237fd | 2,166 | py | Python | nuagevsdsim/simentities/nusimcloudmgmtsystem.py | pdellaert/vspk-sim | 459a84366a9bdde82d74aca18ea866e3d55d62ee | [
"BSD-3-Clause"
] | null | null | null | nuagevsdsim/simentities/nusimcloudmgmtsystem.py | pdellaert/vspk-sim | 459a84366a9bdde82d74aca18ea866e3d55d62ee | [
"BSD-3-Clause"
] | null | null | null | nuagevsdsim/simentities/nusimcloudmgmtsystem.py | pdellaert/vspk-sim | 459a84366a9bdde82d74aca18ea866e3d55d62ee | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017, Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
NUSimCloudMgmtSystem
"""
from vspk import v5_0 as vsdk
from nuagevsdsim.simentities.nusimresource import NUSimResource
class NUSimCloudMgmtSystem(NUSimResource):
    """ Represents a CloudMgmtSystem

        Notes:
            Object that identifies a cloud management system.
    """
    # vspk entity class this simulated resource mirrors
    __vspk_class__ = vsdk.NUCloudMgmtSystem
    # fields that must be unique across instances
    __unique_fields__ = ['externalID']
    # fields required on creation (none for this entity)
    __mandatory_fields__ = []
    # default values applied on creation (none for this entity)
    __default_fields__ = {
    }
    # parent resources from which this entity can be fetched / created
    __get_parents__ = ['me']
    __create_parents__ = ['me']

    def __init__(self):
        # No entity-specific state; all behaviour comes from NUSimResource.
        super(NUSimCloudMgmtSystem, self).__init__()
52eab244dd28a96397d7ff83d1bed8847384b762 | 628 | py | Python | pomegranate/__init__.py | jkleckner/pomegranate | c91c80b50c2e9ea4c5edcadad7e314c1abbb3178 | [
"MIT"
] | 1 | 2018-12-29T04:43:30.000Z | 2018-12-29T04:43:30.000Z | pomegranate/__init__.py | jkleckner/pomegranate | c91c80b50c2e9ea4c5edcadad7e314c1abbb3178 | [
"MIT"
] | null | null | null | pomegranate/__init__.py | jkleckner/pomegranate | c91c80b50c2e9ea4c5edcadad7e314c1abbb3178 | [
"MIT"
] | null | null | null | # __init__.py: pomegranate
# Contact: Jacob Schreiber <jmschreiber91@gmail.com>
"""
For detailed documentation and examples, see the README.
"""
import os
from .base import *
from .parallel import *
from .distributions import *
from .kmeans import Kmeans
from .gmm import GeneralMixtureModel
from .NaiveBayes import NaiveBayes
from .BayesClassifier import BayesClassifier
from .MarkovChain import MarkovChain
from .hmm import HiddenMarkovModel
from .BayesianNetwork import BayesianNetwork
from .FactorGraph import FactorGraph
# Package version string, exposed as pomegranate.__version__.
__version__ = '0.10.0'

# Pin BLAS/OpenMP worker pools to one thread so pomegranate's own
# process-level parallelism does not oversubscribe CPU cores.
# NOTE(review): these are set after the numpy-dependent imports above; some
# BLAS builds read these variables only at import time — confirm they take
# effect in this position.
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
| 22.428571 | 56 | 0.789809 |
ee7b0459ef0e8b4a8711a64029779af69688da5e | 1,398 | py | Python | venv/Lib/site-packages/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py | richung99/digitizePlots | 6b408c820660a415a289726e3223e8f558d3e18b | [
"MIT"
] | 585 | 2015-01-12T16:06:47.000Z | 2022-03-26T14:51:08.000Z | nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py | tamires-consulting/nipype | b7879d75a63b6500b2e7d2c3eba5aa7670339274 | [
"Apache-2.0"
] | 2,329 | 2015-01-01T09:56:41.000Z | 2022-03-30T14:24:49.000Z | nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py | tamires-consulting/nipype | b7879d75a63b6500b2e7d2c3eba5aa7670339274 | [
"Apache-2.0"
] | 487 | 2015-01-20T01:04:52.000Z | 2022-03-21T21:22:47.000Z | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..convert import CFFConverter
def test_CFFConverter_inputs():
    """Regression-check the metadata declared on CFFConverter's input spec.

    Every trait listed below must exist on the input spec and carry exactly
    the recorded metadata values (auto-generated spec test).
    """
    expected = dict(
        creator=dict(),
        data_files=dict(),
        description=dict(
            usedefault=True,
        ),
        email=dict(),
        gifti_labels=dict(),
        gifti_surfaces=dict(),
        gpickled_networks=dict(),
        graphml_networks=dict(),
        license=dict(),
        nifti_volumes=dict(),
        out_file=dict(
            extensions=None,
            usedefault=True,
        ),
        publisher=dict(),
        references=dict(),
        relation=dict(),
        rights=dict(),
        script_files=dict(),
        species=dict(
            usedefault=True,
        ),
        timeseries_files=dict(),
        title=dict(),
        tract_files=dict(),
    )
    spec = CFFConverter.input_spec()
    traits = spec.traits()
    for trait_name, metadata in expected.items():
        for meta_key, expected_value in metadata.items():
            assert getattr(traits[trait_name], meta_key) == expected_value
def test_CFFConverter_outputs():
    """Regression-check the metadata declared on CFFConverter's output spec."""
    expected = dict(
        connectome_file=dict(
            extensions=None,
        ),
    )
    spec = CFFConverter.output_spec()
    traits = spec.traits()
    for trait_name, metadata in expected.items():
        for meta_key, expected_value in metadata.items():
            assert getattr(traits[trait_name], meta_key) == expected_value
| 26.377358 | 67 | 0.572961 |
49059ef86ac659f49bc90661f08b5970671685cd | 2,563 | py | Python | transformer_pytorch/trainer/metrics.py | walkacross/transformer-pytorch | 6aa06889f6d10fceb5587b47aba15e65fa305074 | [
"Apache-2.0"
] | 1 | 2022-01-05T08:59:30.000Z | 2022-01-05T08:59:30.000Z | transformer_pytorch/trainer/metrics.py | walkacross/transformer-pytorch | 6aa06889f6d10fceb5587b47aba15e65fa305074 | [
"Apache-2.0"
] | null | null | null | transformer_pytorch/trainer/metrics.py | walkacross/transformer-pytorch | 6aa06889f6d10fceb5587b47aba15e65fa305074 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import pdb
class TokenCrossEntropyLoss(nn.Module):
    """Summed token-level cross entropy that skips padding positions.

    Returns both the summed loss and the number of non-padding tokens so the
    caller can normalise the loss per token.
    """

    def __init__(self, pad_idx=0):
        super().__init__()
        self.pad_idx = pad_idx
        # reduction="sum" plus ignore_index makes padded targets contribute
        # nothing to the summed loss.
        self.loss_function = nn.CrossEntropyLoss(reduction="sum", ignore_index=pad_idx)

    def forward(self, input, target):
        # Collapse (batch, seq, vocab) -> (batch*seq, vocab) and flatten the
        # targets to match.
        vocab_size = input.size(-1)
        flat_logits = input.contiguous().view(-1, vocab_size)
        flat_targets = target.contiguous().view(-1)
        batch_loss_sum = self.loss_function(flat_logits, flat_targets)
        token_count = (flat_targets != self.pad_idx).sum().item()
        return batch_loss_sum, token_count
class LabelSmoothingLoss(nn.Module):
    """KL-divergence loss against a label-smoothed target distribution.

    For each non-pad target token the one-hot distribution is softened: the
    true class receives ``1 - label_smoothing`` probability mass and the rest
    is spread uniformly over the remaining non-pad vocabulary entries.
    Rows whose target is the padding index are zeroed out entirely so they
    contribute no loss.
    """

    def __init__(self, size, label_smoothing, pad_idx):
        # size: vocabulary size; label_smoothing must lie in (0, 1].
        assert 0.0 < label_smoothing <= 1.0
        super().__init__()
        self.pad_idx = pad_idx
        self.log_softmax = nn.LogSoftmax(dim=-1)
        self.criterion = nn.KLDivLoss(reduction="sum")
        self.smoothing = label_smoothing
        # NOTE(review): "condidence" is a typo for "confidence"; the name is
        # kept because external code may read this attribute.
        self.condidence = 1.0 - label_smoothing
        self.size = size
        self.true_dist = None

    def forward(self, input, target):
        log_probs = self.log_softmax(input)
        # Flatten to (batch*seq, vocab) / (batch*seq,).
        log_probs = log_probs.contiguous().view(-1, log_probs.size(-1))
        flat_target = target.contiguous().view(-1)

        # Build the smoothed target distribution, detached from the graph.
        smoothed = log_probs.data.clone()
        smoothed.requires_grad = False
        # size - 2 excludes the true class and the pad column from the
        # uniform share.
        smoothed.fill_(self.smoothing / (self.size - 2))
        smoothed.scatter_(1, flat_target.unsqueeze(1), self.condidence)
        smoothed[:, self.pad_idx] = 0
        # Zero whole rows whose target token is padding.
        smoothed.masked_fill_((flat_target == self.pad_idx).unsqueeze(1), 0)

        self.true_dist = smoothed  # kept around for inspection/debugging
        loss = self.criterion(log_probs, smoothed)
        count = (flat_target != self.pad_idx).sum().item()
        return loss, count
class AccuracyMetric(nn.Module):
    """Token-level accuracy counts that exclude padding positions.

    Returns ``(correct_count, total_count)`` where both counts consider only
    positions whose target is not the padding index.
    """

    def __init__(self, pad_idx=0):
        super().__init__()
        self.pad_idx = pad_idx

    def forward(self, input, target):
        batch_size, seq_len, vocab_size = input.size()
        # Flatten to (batch*seq, vocab) / (batch*seq,).
        flat_logits = input.contiguous().view(batch_size * seq_len, vocab_size)
        flat_targets = target.contiguous().view(batch_size * seq_len)
        predictions = flat_logits.argmax(dim=1)
        non_pad = flat_targets != self.pad_idx
        # A hit only counts when the position is not padding.
        hits = (predictions == flat_targets) & non_pad
        return hits.sum().item(), non_pad.sum().item()
| 32.443038 | 86 | 0.611783 |
739ce7b2ecb4d72cb424909a9e08c3f9a2164104 | 14,689 | py | Python | python/ccxt/async_support/__init__.py | npomfret/ccxt | 55e64d28c1a4a4925353076af9880dc18101fb73 | [
"MIT"
] | null | null | null | python/ccxt/async_support/__init__.py | npomfret/ccxt | 55e64d28c1a4a4925353076af9880dc18101fb73 | [
"MIT"
] | null | null | null | python/ccxt/async_support/__init__.py | npomfret/ccxt | 55e64d28c1a4a4925353076af9880dc18101fb73 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""CCXT: CryptoCurrency eXchange Trading Library (Async)"""
# -----------------------------------------------------------------------------
__version__ = '1.64.30'
# -----------------------------------------------------------------------------
from ccxt.async_support.base.exchange import Exchange # noqa: F401
from ccxt.base.decimal_to_precision import decimal_to_precision # noqa: F401
from ccxt.base.decimal_to_precision import TRUNCATE # noqa: F401
from ccxt.base.decimal_to_precision import ROUND # noqa: F401
from ccxt.base.decimal_to_precision import TICK_SIZE # noqa: F401
from ccxt.base.decimal_to_precision import DECIMAL_PLACES # noqa: F401
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS # noqa: F401
from ccxt.base.decimal_to_precision import NO_PADDING # noqa: F401
from ccxt.base.decimal_to_precision import PAD_WITH_ZERO # noqa: F401
from ccxt.base import errors # noqa: F401
from ccxt.base.errors import BaseError # noqa: F401
from ccxt.base.errors import ExchangeError # noqa: F401
from ccxt.base.errors import AuthenticationError # noqa: F401
from ccxt.base.errors import PermissionDenied # noqa: F401
from ccxt.base.errors import AccountSuspended # noqa: F401
from ccxt.base.errors import ArgumentsRequired # noqa: F401
from ccxt.base.errors import BadRequest # noqa: F401
from ccxt.base.errors import BadSymbol # noqa: F401
from ccxt.base.errors import BadResponse # noqa: F401
from ccxt.base.errors import NullResponse # noqa: F401
from ccxt.base.errors import InsufficientFunds # noqa: F401
from ccxt.base.errors import InvalidAddress # noqa: F401
from ccxt.base.errors import AddressPending # noqa: F401
from ccxt.base.errors import InvalidOrder # noqa: F401
from ccxt.base.errors import OrderNotFound # noqa: F401
from ccxt.base.errors import OrderNotCached # noqa: F401
from ccxt.base.errors import CancelPending # noqa: F401
from ccxt.base.errors import OrderImmediatelyFillable # noqa: F401
from ccxt.base.errors import OrderNotFillable # noqa: F401
from ccxt.base.errors import DuplicateOrderId # noqa: F401
from ccxt.base.errors import NotSupported # noqa: F401
from ccxt.base.errors import NetworkError # noqa: F401
from ccxt.base.errors import DDoSProtection # noqa: F401
from ccxt.base.errors import RateLimitExceeded # noqa: F401
from ccxt.base.errors import ExchangeNotAvailable # noqa: F401
from ccxt.base.errors import OnMaintenance # noqa: F401
from ccxt.base.errors import InvalidNonce # noqa: F401
from ccxt.base.errors import RequestTimeout # noqa: F401
from ccxt.base.errors import error_hierarchy # noqa: F401
from ccxt.async_support.aax import aax # noqa: F401
from ccxt.async_support.ascendex import ascendex # noqa: F401
from ccxt.async_support.bequant import bequant # noqa: F401
from ccxt.async_support.bibox import bibox # noqa: F401
from ccxt.async_support.bigone import bigone # noqa: F401
from ccxt.async_support.binance import binance # noqa: F401
from ccxt.async_support.binancecoinm import binancecoinm # noqa: F401
from ccxt.async_support.binanceus import binanceus # noqa: F401
from ccxt.async_support.binanceusdm import binanceusdm # noqa: F401
from ccxt.async_support.bit2c import bit2c # noqa: F401
from ccxt.async_support.bitbank import bitbank # noqa: F401
from ccxt.async_support.bitbay import bitbay # noqa: F401
from ccxt.async_support.bitbns import bitbns # noqa: F401
from ccxt.async_support.bitcoincom import bitcoincom # noqa: F401
from ccxt.async_support.bitfinex import bitfinex # noqa: F401
from ccxt.async_support.bitfinex2 import bitfinex2 # noqa: F401
from ccxt.async_support.bitflyer import bitflyer # noqa: F401
from ccxt.async_support.bitforex import bitforex # noqa: F401
from ccxt.async_support.bitget import bitget # noqa: F401
from ccxt.async_support.bithumb import bithumb # noqa: F401
from ccxt.async_support.bitmart import bitmart # noqa: F401
from ccxt.async_support.bitmex import bitmex # noqa: F401
from ccxt.async_support.bitpanda import bitpanda # noqa: F401
from ccxt.async_support.bitrue import bitrue # noqa: F401
from ccxt.async_support.bitso import bitso # noqa: F401
from ccxt.async_support.bitstamp import bitstamp # noqa: F401
from ccxt.async_support.bitstamp1 import bitstamp1 # noqa: F401
from ccxt.async_support.bittrex import bittrex # noqa: F401
from ccxt.async_support.bitvavo import bitvavo # noqa: F401
from ccxt.async_support.bl3p import bl3p # noqa: F401
from ccxt.async_support.btcalpha import btcalpha # noqa: F401
from ccxt.async_support.btcbox import btcbox # noqa: F401
from ccxt.async_support.btcmarkets import btcmarkets # noqa: F401
from ccxt.async_support.btctradeua import btctradeua # noqa: F401
from ccxt.async_support.btcturk import btcturk # noqa: F401
from ccxt.async_support.buda import buda # noqa: F401
from ccxt.async_support.bw import bw # noqa: F401
from ccxt.async_support.bybit import bybit # noqa: F401
from ccxt.async_support.bytetrade import bytetrade # noqa: F401
from ccxt.async_support.cdax import cdax # noqa: F401
from ccxt.async_support.cex import cex # noqa: F401
from ccxt.async_support.coinbase import coinbase # noqa: F401
from ccxt.async_support.coinbaseprime import coinbaseprime # noqa: F401
from ccxt.async_support.coinbasepro import coinbasepro # noqa: F401
from ccxt.async_support.coincheck import coincheck # noqa: F401
from ccxt.async_support.coinex import coinex # noqa: F401
from ccxt.async_support.coinfalcon import coinfalcon # noqa: F401
from ccxt.async_support.coinmate import coinmate # noqa: F401
from ccxt.async_support.coinone import coinone # noqa: F401
from ccxt.async_support.coinspot import coinspot # noqa: F401
from ccxt.async_support.crex24 import crex24 # noqa: F401
from ccxt.async_support.currencycom import currencycom # noqa: F401
from ccxt.async_support.delta import delta # noqa: F401
from ccxt.async_support.deribit import deribit # noqa: F401
from ccxt.async_support.digifinex import digifinex # noqa: F401
from ccxt.async_support.eqonex import eqonex # noqa: F401
from ccxt.async_support.equos import equos # noqa: F401
from ccxt.async_support.exmo import exmo # noqa: F401
from ccxt.async_support.flowbtc import flowbtc # noqa: F401
from ccxt.async_support.ftx import ftx # noqa: F401
from ccxt.async_support.ftxus import ftxus # noqa: F401
from ccxt.async_support.gateio import gateio # noqa: F401
from ccxt.async_support.gemini import gemini # noqa: F401
from ccxt.async_support.hitbtc import hitbtc # noqa: F401
from ccxt.async_support.hitbtc3 import hitbtc3 # noqa: F401
from ccxt.async_support.hollaex import hollaex # noqa: F401
from ccxt.async_support.huobi import huobi # noqa: F401
from ccxt.async_support.huobijp import huobijp # noqa: F401
from ccxt.async_support.huobipro import huobipro # noqa: F401
from ccxt.async_support.idex import idex # noqa: F401
from ccxt.async_support.independentreserve import independentreserve # noqa: F401
from ccxt.async_support.indodax import indodax # noqa: F401
from ccxt.async_support.itbit import itbit # noqa: F401
from ccxt.async_support.kraken import kraken # noqa: F401
from ccxt.async_support.kucoin import kucoin # noqa: F401
from ccxt.async_support.kuna import kuna # noqa: F401
from ccxt.async_support.latoken import latoken # noqa: F401
from ccxt.async_support.latoken1 import latoken1 # noqa: F401
from ccxt.async_support.lbank import lbank # noqa: F401
from ccxt.async_support.liquid import liquid # noqa: F401
from ccxt.async_support.luno import luno # noqa: F401
from ccxt.async_support.lykke import lykke # noqa: F401
from ccxt.async_support.mercado import mercado # noqa: F401
from ccxt.async_support.mexc import mexc # noqa: F401
from ccxt.async_support.ndax import ndax # noqa: F401
from ccxt.async_support.novadax import novadax # noqa: F401
from ccxt.async_support.oceanex import oceanex # noqa: F401
from ccxt.async_support.okcoin import okcoin # noqa: F401
from ccxt.async_support.okex import okex # noqa: F401
from ccxt.async_support.okex3 import okex3 # noqa: F401
from ccxt.async_support.okex5 import okex5 # noqa: F401
from ccxt.async_support.paymium import paymium # noqa: F401
from ccxt.async_support.phemex import phemex # noqa: F401
from ccxt.async_support.poloniex import poloniex # noqa: F401
from ccxt.async_support.probit import probit # noqa: F401
from ccxt.async_support.qtrade import qtrade # noqa: F401
from ccxt.async_support.ripio import ripio # noqa: F401
from ccxt.async_support.stex import stex # noqa: F401
from ccxt.async_support.therock import therock # noqa: F401
from ccxt.async_support.tidebit import tidebit # noqa: F401
from ccxt.async_support.tidex import tidex # noqa: F401
from ccxt.async_support.timex import timex # noqa: F401
from ccxt.async_support.upbit import upbit # noqa: F401
from ccxt.async_support.vcc import vcc # noqa: F401
from ccxt.async_support.wavesexchange import wavesexchange # noqa: F401
from ccxt.async_support.whitebit import whitebit # noqa: F401
from ccxt.async_support.xena import xena # noqa: F401
from ccxt.async_support.yobit import yobit # noqa: F401
from ccxt.async_support.zaif import zaif # noqa: F401
from ccxt.async_support.zb import zb # noqa: F401
from ccxt.async_support.zipmex import zipmex # noqa: F401
from ccxt.async_support.zonda import zonda # noqa: F401
exchanges = [
'aax',
'ascendex',
'bequant',
'bibox',
'bigone',
'binance',
'binancecoinm',
'binanceus',
'binanceusdm',
'bit2c',
'bitbank',
'bitbay',
'bitbns',
'bitcoincom',
'bitfinex',
'bitfinex2',
'bitflyer',
'bitforex',
'bitget',
'bithumb',
'bitmart',
'bitmex',
'bitpanda',
'bitrue',
'bitso',
'bitstamp',
'bitstamp1',
'bittrex',
'bitvavo',
'bl3p',
'btcalpha',
'btcbox',
'btcmarkets',
'btctradeua',
'btcturk',
'buda',
'bw',
'bybit',
'bytetrade',
'cdax',
'cex',
'coinbase',
'coinbaseprime',
'coinbasepro',
'coincheck',
'coinex',
'coinfalcon',
'coinmate',
'coinone',
'coinspot',
'crex24',
'currencycom',
'delta',
'deribit',
'digifinex',
'eqonex',
'equos',
'exmo',
'flowbtc',
'ftx',
'ftxus',
'gateio',
'gemini',
'hitbtc',
'hitbtc3',
'hollaex',
'huobi',
'huobijp',
'huobipro',
'idex',
'independentreserve',
'indodax',
'itbit',
'kraken',
'kucoin',
'kuna',
'latoken',
'latoken1',
'lbank',
'liquid',
'luno',
'lykke',
'mercado',
'mexc',
'ndax',
'novadax',
'oceanex',
'okcoin',
'okex',
'okex3',
'okex5',
'paymium',
'phemex',
'poloniex',
'probit',
'qtrade',
'ripio',
'stex',
'therock',
'tidebit',
'tidex',
'timex',
'upbit',
'vcc',
'wavesexchange',
'whitebit',
'xena',
'yobit',
'zaif',
'zb',
'zipmex',
'zonda',
]
base = [
'Exchange',
'exchanges',
'decimal_to_precision',
]
__all__ = base + errors.__all__ + exchanges
| 50.82699 | 86 | 0.557015 |
e69684e13ea956e9fb0141c56ff363a4cb5b3201 | 2,155 | py | Python | scripts/ntrip_client_node.py | jewos0127/ntrip_client | 0989fb9d15e2a83acbc36fa9cc721060303ab4aa | [
"MIT"
] | null | null | null | scripts/ntrip_client_node.py | jewos0127/ntrip_client | 0989fb9d15e2a83acbc36fa9cc721060303ab4aa | [
"MIT"
] | null | null | null | scripts/ntrip_client_node.py | jewos0127/ntrip_client | 0989fb9d15e2a83acbc36fa9cc721060303ab4aa | [
"MIT"
] | null | null | null | #!/usr/bin/python
import rospy
import socket
from base64 import b64encode
from rtcm_msgs.msg import Message
def rtcm_run():
    """Connect to an NTRIP caster and republish its RTCM stream on a ROS topic.

    Reads connection settings from private ROS parameters, performs the NTRIP
    HTTP handshake, then forwards every received chunk as an
    ``rtcm_msgs/Message`` on the ``rtcm_topic`` topic until ROS shuts down or
    the caster rejects the request.

    Bug fix: the socket is now always closed in a ``finally`` block.
    Previously it was closed only on the handshake-failure branch and inside
    the ``ROSInterruptException`` handler, so it leaked on normal shutdown and
    on any other exception.
    """
    # Init ROS Node
    rospy.init_node('ntrip_client_node', anonymous=True)

    # NTRIP connection settings (private parameters with fallback defaults).
    ntrip_server = rospy.get_param('~ntrip_server', 'ntrip.server.com')
    ntrip_mountpoint = rospy.get_param('~ntrip_mountpoint', 'mountpoint')
    ntrip_port = rospy.get_param('~ntrip_port', 2101)
    ntrip_user = rospy.get_param('~ntrip_user', 'user')
    ntrip_pass = rospy.get_param('~ntrip_pass', 'pass')
    ntrip_bufSize = rospy.get_param('~ntrip_bufSize', 1024)
    ntrip_stop = False

    # Publisher for the raw RTCM correction stream.
    rtcm_pub = rospy.Publisher("rtcm_topic", Message, queue_size=1)

    # Build the NTRIP request header with HTTP Basic authentication.
    # NOTE(review): b64encode/send operate on str here, which targets
    # Python 2; running under Python 3 would require bytes — confirm the
    # target interpreter.
    pwd = b64encode("{}:{}".format(ntrip_user, ntrip_pass))
    header = \
        "GET /{} HTTP/1.1\r\n".format(ntrip_mountpoint) +\
        "HOST: {}\r\n".format(ntrip_server) +\
        "Ntrip-Version: Ntrip/2.0\r\n" +\
        "User-Agent: NTRIP ntrip_ros\r\n" +\
        "Connection: close\r\n" +\
        "Authorization: Basic {}\r\n\r\n".format(pwd)

    ntrip = None
    try:
        # Open the socket for NTRIP
        ntrip = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ntrip.connect((ntrip_server, ntrip_port))
        ntrip.send(header)

        # Check the caster's response to the handshake.
        resp = ntrip.recv(ntrip_bufSize)
        if ('200' in resp) and ('OK' in resp):
            rospy.loginfo("Server Connection")
        else:
            rospy.logerr("NTRIP: NOT WORKING")
            ntrip_stop = True

        # Receive the RTCM data and republish each chunk until shutdown.
        rtcm_msg = Message()
        while (not ntrip_stop) and (not rospy.is_shutdown()):
            buf = ntrip.recv(ntrip_bufSize)
            rtcm_msg.message = buf
            rtcm_msg.header.seq += 1
            rtcm_msg.header.stamp = rospy.get_rostime()
            rtcm_pub.publish(rtcm_msg)
    except rospy.ROSInterruptException:
        pass
    finally:
        # Always release the socket, whatever path we exit through.
        if ntrip is not None:
            ntrip.close()
# Entry point: start the NTRIP client when executed as a script.
if __name__ == '__main__':
    rtcm_run()
| 30.352113 | 73 | 0.618561 |
eb8e8c76a1963aee891e75fbe3d82cad5d566f58 | 2,095 | py | Python | tests/functional/test_fish.py | frankhli843/thedarn | 9e00f854c248156fba820f39b2834e8273583984 | [
"MIT"
] | null | null | null | tests/functional/test_fish.py | frankhli843/thedarn | 9e00f854c248156fba820f39b2834e8273583984 | [
"MIT"
] | null | null | null | tests/functional/test_fish.py | frankhli843/thedarn | 9e00f854c248156fba820f39b2834e8273583984 | [
"MIT"
] | null | null | null | import pytest
from tests.functional.plots import with_confirmation, without_confirmation, \
refuse_with_confirmation, select_command_with_arrows
# (image tag, Dockerfile contents, shell command) triples; the proc fixture is
# parameterised over these so every test runs against both Python 3 and
# Python 2 fish containers.
containers = (('thedarn/python3-fish',
               u'''FROM python:3
# Use jessie-backports since it has the fish package. See here for details:
# https://github.com/tianon/docker-brew-debian/blob/88ae21052affd8a14553bb969f9d41c464032122/jessie/backports/Dockerfile
RUN awk '$1 ~ "^deb" { $3 = $3 "-backports"; print; exit }' /etc/apt/sources.list > /etc/apt/sources.list.d/backports.list
RUN apt-get update
RUN apt-get install -yy fish''',
               u'fish'),
              ('thedarn/python2-fish',
               u'''FROM python:2
# Use jessie-backports since it has the fish package. See here for details:
# https://github.com/tianon/docker-brew-debian/blob/88ae21052affd8a14553bb969f9d41c464032122/jessie/backports/Dockerfile
RUN awk '$1 ~ "^deb" { $3 = $3 "-backports"; print; exit }' /etc/apt/sources.list > /etc/apt/sources.list.d/backports.list
RUN apt-get update
RUN apt-get install -yy fish''',
               u'fish'))


@pytest.fixture(params=containers)
def proc(request, spawnu, TIMEOUT):
    """Spawn a container, install the package under test from /src, register
    the fish alias, and drop into a fish shell ready for interaction."""
    proc = spawnu(*request.param)
    proc.sendline(u"pip install /src")
    assert proc.expect([TIMEOUT, u'Successfully installed'])
    proc.sendline(u'thedarn --alias > ~/.config/fish/config.fish')
    proc.sendline(u'fish')
    return proc


@pytest.mark.functional
def test_with_confirmation(proc, TIMEOUT):
    """Corrected command is applied after the user confirms."""
    with_confirmation(proc, TIMEOUT)


@pytest.mark.functional
def test_select_command_with_arrows(proc, TIMEOUT):
    """Arrow keys cycle through the candidate corrections."""
    select_command_with_arrows(proc, TIMEOUT)


@pytest.mark.functional
def test_refuse_with_confirmation(proc, TIMEOUT):
    """Nothing is run when the user rejects the suggested correction."""
    refuse_with_confirmation(proc, TIMEOUT)


@pytest.mark.functional
def test_without_confirmation(proc, TIMEOUT):
    """Correction is applied immediately when confirmation is disabled."""
    without_confirmation(proc, TIMEOUT)

# TODO: ensure that history changes.
2c5bfe50b80142ed8bf87e9160ef83dc10ddb68f | 3,902 | py | Python | feature_engine/imputation/missing_indicator.py | ttungl/feature_engine | cf2ff55c5b4eeb4353021b12d57a7c2870a7febb | [
"BSD-3-Clause"
] | 1 | 2020-10-22T21:49:38.000Z | 2020-10-22T21:49:38.000Z | feature_engine/imputation/missing_indicator.py | ttungl/feature_engine | cf2ff55c5b4eeb4353021b12d57a7c2870a7febb | [
"BSD-3-Clause"
] | 3 | 2020-10-27T16:27:49.000Z | 2020-11-13T13:22:08.000Z | feature_engine/imputation/missing_indicator.py | SunnyxBd/feature_engine | e40457ce8b4baa1e146976bf1af1bbdf6eae1305 | [
"BSD-3-Clause"
] | 1 | 2020-11-16T21:58:05.000Z | 2020-11-16T21:58:05.000Z | # Authors: Soledad Galli <solegalli@protonmail.com>
# License: BSD 3 clause
import numpy as np
from feature_engine.dataframe_checks import _is_dataframe
from feature_engine.imputation.base_imputer import BaseImputer
from feature_engine.variable_manipulation import _define_variables
class AddMissingIndicator(BaseImputer):
    """Append binary "was missing" indicator columns to a dataframe.

    For each selected variable a new column named ``<variable>_na`` is added,
    holding 1 where the original value is missing and 0 otherwise. Works for
    both numerical and categorical variables.

    Parameters
    ----------
    missing_only : bool, default=True
        If True, indicators are created only for the variables that showed
        missing values during ``fit``; if False, for every selected variable.

    variables : list, default=None
        The variables to add indicators for. If None, all variables in the
        training set are considered.
    """

    def __init__(self, missing_only=True, variables=None):
        if not isinstance(missing_only, bool):
            raise ValueError("missing_only takes values True or False")

        self.variables = _define_variables(variables)
        self.missing_only = missing_only

    def fit(self, X, y=None):
        """Determine which variables will receive a missing indicator.

        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The training input samples.

        y : None
            Not used; accepted for pipeline compatibility.

        Attributes
        ----------
        variables_ : list
            The variables for which indicators will be created.
        """
        # check input dataframe
        X = _is_dataframe(X)

        if self.missing_only:
            # Restrict to variables that actually show missing data in X.
            pool = self.variables if self.variables else X.columns
            self.variables_ = [
                var for var in pool if X[var].isnull().sum() > 0
            ]
        else:
            # Indicators for every selected (or every available) variable.
            if self.variables:
                self.variables_ = self.variables
            else:
                self.variables_ = [var for var in X.columns]

        self.input_shape_ = X.shape

        return self

    def transform(self, X):
        """Add the binary missing indicator columns.

        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The dataframe to be transformed.

        Returns
        -------
        X_transformed : pandas dataframe of shape = [n_samples, n_features]
            The dataframe with one additional ``<variable>_na`` binary column
            per selected variable.
        """
        X = self._check_transform_input_and_state(X)

        X = X.copy()
        for var_name in self.variables_:
            X[var_name + "_na"] = np.where(X[var_name].isnull(), 1, 0)

        return X
| 31.983607 | 85 | 0.633521 |
0e779ec3c203a6ae2c314fba25409bb89c0ccbfb | 7,019 | py | Python | app/controllers/journal.py | ukayaj620/rejournal | daf80036c3aabc55ce7ba145241a50bf52af1c75 | [
"MIT"
] | null | null | null | app/controllers/journal.py | ukayaj620/rejournal | daf80036c3aabc55ce7ba145241a50bf52af1c75 | [
"MIT"
] | null | null | null | app/controllers/journal.py | ukayaj620/rejournal | daf80036c3aabc55ce7ba145241a50bf52af1c75 | [
"MIT"
] | null | null | null | from flask import redirect, url_for, flash, send_file
from flask_login import current_user
import os
from app.models.journal import Journal
from app.models.journal_log import JournalLog
from app.models.author import Author
from app.models.status import Status
from app.models.reviewer import Reviewer
from app.utils.mailer import send_review_notification, send_custom_mail, send_acceptance_notification, send_publication_notification
from app.utils.file import save_doc, delete_doc
from app.config import Config
class JournalController:
    """Coordinates the journal submission, review, and publication workflow.

    Wraps the Journal/Author/Status/JournalLog/Reviewer models, manages the
    uploaded document files, and sends notification e-mails. Methods are
    invoked from Flask view functions and receive request form data directly.
    """

    def __init__(self):
        # Model instances are used for their .query attributes (ORM access)
        # and their create/update/delete helpers.
        self.journal = Journal()
        self.author = Author()
        self.status = Status()
        self.journal_log = JournalLog()
        self.reviewer = Reviewer()

    def fetch_by_id(self, journal_id):
        """Return the journal with the given id, or None if absent."""
        return self.journal.query.filter_by(id=journal_id).first()

    def fetch_all(self, sort_by_time=False):
        """Return all journals owned by the current user.

        NOTE(review): the ``sort_by_time`` parameter is currently ignored.
        """
        return self.journal.query.filter_by(user_id=current_user.id).all()

    def fetch_publication(self, year=None):
        """Return published journals, newest first.

        Returns a plain list when ``year`` is given, otherwise a tuple of
        (journals, per-journal publication years) — note the asymmetric
        return type.
        """
        status_id = self.status.query.filter_by(name='Published').first().id
        journals = self.journal.query.join(JournalLog).filter(
            JournalLog.status_id == status_id
        ).order_by(JournalLog.timestamp.desc()).all()
        # One entry per journal (may contain duplicate years).
        publication_year = [journal.journal_log.timestamp.year for journal in journals]
        if year is not None:
            return [journal for journal in journals if journal.journal_log.timestamp.year == year]
        return (journals, publication_year)

    def fetch_by_reviewer(self):
        """Return journals assigned to the current user as reviewer that are
        still in an active review state (not Submitted or Published)."""
        reviewer = self.reviewer.query.filter_by(user_id=current_user.id).first()
        journals = self.journal.query.join(JournalLog).filter(JournalLog.reviewer_id == reviewer.id).all()
        return [journal for journal in journals if journal.journal_log.status.name not in ['Submitted', 'Published']]

    def fetch_by_status(self, status):
        """Return all journals whose log carries the named status."""
        status_id = self.status.query.filter_by(name=status).first().id
        return self.journal.query.join(JournalLog).filter_by(status_id=status_id).all()

    def fetch_statuses(self):
        """Return every possible journal status."""
        return self.status.query.all()

    def create(self, request, doc):
        """Store a new journal submission: save the document, create the
        journal record, its 'Submitted' log entry, and its authors."""
        doc_path = save_doc(doc)
        # save_doc returns False for a disallowed file type.
        if doc_path is False:
            flash('Wrong file type. Allowed file type is .pdf', 'warning')
            return redirect(url_for('home.journal_create'))
        journal = self.journal.create(
            title=request['title'],
            abstract=request['abstract'],
            journal_path=doc_path,
            user_id=current_user.id,
            topic_id=request['topic'],
        )
        # New submissions always start in the 'Submitted' state.
        self.journal_log.create(
            journal_id=journal.id,
            status_id=self.status.query.filter_by(name='Submitted').first().id
        )
        # Parallel form arrays: names[i]/emails[i]/institutions[i] describe
        # author i.
        authors_name = request.getlist('names[]')
        authors_email = request.getlist('emails[]')
        authors_institution = request.getlist('institutions[]')
        n_author = len(authors_name)
        for index in range(0, n_author):
            self.author.create(
                name=authors_name[index],
                email=authors_email[index],
                institution=authors_institution[index],
                journal_id=journal.id
            )
        return redirect(url_for('home.journal_view_detail', id=journal.id))

    def update(self, request, doc):
        """Update a journal and reconcile its author list.

        Existing authors present in the submitted ids are updated; form rows
        beyond them are created; previously stored authors missing from the
        submitted ids are deleted. A rejected journal is moved back to
        'Submitted'.
        """
        # A new document is optional; save_doc returns False on bad type.
        # NOTE(review): this message mentions .doc/.docx while create()
        # allows only .pdf — confirm which set is intended.
        doc_path = save_doc(doc) if doc else None
        if doc_path is False:
            flash('Wrong file type. Allowed file type is .pdf, .doc, and .docx', 'warning')
            return redirect(url_for('home.journal_create'))
        journal = self.fetch_by_id(journal_id=request['id'])
        # Remove the old file only when a replacement was uploaded.
        delete_doc(journal.journal_path) if doc_path is not None else None
        self.journal.update(
            journal_id=request['id'],
            title=request['title'],
            abstract=request['abstract'],
            journal_path=doc_path,
            user_id=current_user.id,
            topic_id=request['topic'],
        )
        journal_log = self.journal_log.query.filter_by(journal_id=request['id']).first()
        # Resubmitting a rejected journal puts it back into the queue.
        if journal_log.status.name == 'Rejected':
            self.journal_log.update(
                journal_id=journal.id,
                status_id=self.status.query.filter_by(name='Submitted').first().id
            )
        old_author_ids = [author.id for author in journal.author]
        updated_author_ids = [int(id) for id in request.getlist('ids[]')]
        authors_name = request.getlist('names[]')
        authors_email = request.getlist('emails[]')
        authors_institution = request.getlist('institutions[]')
        # ids[] covers the first n_old_author form rows; any further rows are
        # brand-new authors.
        n_old_author = len(updated_author_ids)
        n_submitted_author = len(authors_name)
        ids_to_update = list(set(updated_author_ids) & set(old_author_ids))
        ids_to_delete = list(set(old_author_ids) - set(updated_author_ids))
        for index in range(0, n_old_author):
            if updated_author_ids[index] in ids_to_update:
                self.author.update(
                    author_id=updated_author_ids[index],
                    name=authors_name[index],
                    email=authors_email[index],
                    institution=authors_institution[index],
                    journal_id=journal.id
                )
        for index in range(n_old_author, n_submitted_author):
            self.author.create(
                name=authors_name[index],
                email=authors_email[index],
                institution=authors_institution[index],
                journal_id=journal.id
            )
        for id_to_delete in ids_to_delete:
            self.author.delete(author_id=id_to_delete)
        return redirect(url_for('home.journal_view_detail', id=journal.id))

    def delete(self, request):
        """Delete a journal record and its stored document file."""
        journal = self.fetch_by_id(journal_id=request['id'])
        delete_doc(journal.journal_path)
        self.journal.delete(journal_id=request['id'])

    def download(self, filename):
        """Serve an uploaded journal document as a file download."""
        directory = os.path.join('static/docs/uploads', filename)
        return send_file(directory, as_attachment=True)

    def review(self, request):
        """Assign the current user as reviewer, move the journal to
        'In Review', and notify the submitting author."""
        journal = self.fetch_by_id(journal_id=request['id'])
        reviewer = self.reviewer.query.filter_by(user_id=current_user.id).first()
        self.journal_log.update(
            journal_id=request['id'],
            status_id=self.status.query.filter_by(name='In Review').first().id,
            reviewer_id=reviewer.id
        )
        send_review_notification(
            to=journal.user.email,
            title=journal.title
        )

    def reject(self, request):
        """Mark the journal 'Rejected' and e-mail the author the reviewer's
        custom subject/message."""
        journal = self.fetch_by_id(journal_id=request['id'])
        self.journal_log.update(
            journal_id=request['id'],
            status_id=self.status.query.filter_by(name='Rejected').first().id,
        )
        send_custom_mail(
            to=journal.user.email,
            subject=request['subject'],
            content=request['messages']
        )

    def accept(self, request):
        """Mark the journal 'Accepted' and notify the author."""
        journal = self.fetch_by_id(journal_id=request['id'])
        self.journal_log.update(
            journal_id=request['id'],
            status_id=self.status.query.filter_by(name='Accepted').first().id
        )
        send_acceptance_notification(
            to=journal.user.email,
            title=journal.title
        )

    def publish(self, request):
        """Mark the journal 'Published' and notify the author, using the
        publication year as the series identifier."""
        journal = self.fetch_by_id(journal_id=request['id'])
        self.journal_log.update(
            journal_id=request['id'],
            status_id=self.status.query.filter_by(name='Published').first().id
        )
        send_publication_notification(
            to=journal.user.email,
            title=journal.title,
            series=journal.journal_log.timestamp.year
        )
| 32.646512 | 132 | 0.701382 |
e7f96f30eaa47c72278e9a3c55a557be8bc58c1e | 2,738 | py | Python | CV.py | ccfelius/mandelbrotset | 0fdc02749bb6886677d58e179965e7ce00a5ef16 | [
"MIT"
] | 1 | 2020-12-10T17:36:42.000Z | 2020-12-10T17:36:42.000Z | CV.py | ccfelius/mandelbrotset | 0fdc02749bb6886677d58e179965e7ce00a5ef16 | [
"MIT"
] | null | null | null | CV.py | ccfelius/mandelbrotset | 0fdc02749bb6886677d58e179965e7ce00a5ef16 | [
"MIT"
] | 1 | 2021-01-05T13:07:40.000Z | 2021-01-05T13:07:40.000Z | from mandelbrotset.monte import *
import scipy.stats as st
import math
def conf_int(mean, var, n, p=0.95):
    """Format a two-sided normal-approximation confidence interval.

    :param mean: sample mean
    :param var: sample variance
    :param n: number of samples
    :param p: confidence level (default 0.95)
    :return: human-readable interval string, bounds rounded to 4 decimals
    """
    # Two-sided interval: put (1 - p) / 2 probability in each tail.
    upper_tail = (p + 1) / 2
    z_value = st.norm.ppf(upper_tail)
    half_width = (z_value * math.sqrt(var)) / math.sqrt(n)
    lower_bound = mean - half_width
    upper_bound = mean + half_width
    return f"Confidence interval: [{lower_bound:.4f} < X < {upper_bound:.4f}] with p = {p}"
### Control Variates ###
## Simulations

# Mandelbrot sampling window (complex plane bounds) and iteration budget.
z = 0.0j
step = 0.01
xmax= 0.5
xmin= -2.0
ymax= 1.1j
ymin= -1.1j
# amount of samples has to be an int if square-rooted,
# in order for orthogonal sampling to work
samples = 1024
simulations = 50
p_value = 0.95
maxiter = 200

# Reference area estimate used as E[X] for the control-variate correction.
EX = mandelbrot(xmax, xmin, ymax, ymin, maxiter) # gives 1.5139

# Simulations: collect one area estimate per run for each sampling scheme.
rs_samples = []
lhs_samples = []
orth_samples = []
for n in range(1, simulations+1):
    r_sampling = random_sampling(xmax, xmin, ymax, ymin, maxiter, samples)
    lhs_sim = LHS(xmax, xmin, ymax, ymin, maxiter, samples, plot=False)
    orth_sim = orthogonal_sampling(xmax, xmin, ymax, ymin, maxiter, samples, plot=False)
    print(f"Simulation {n}, s={samples}, i={maxiter}, RS: {r_sampling}, LHS: {lhs_sim}, Orthogonal: {orth_sim}")
    rs_samples.append(r_sampling)
    lhs_samples.append(lhs_sim)
    orth_samples.append(orth_sim)

# Plain (uncorrected) estimates: variance and confidence intervals.
print()
print("NORMAL ESTIMATES: ")
print(f"\nEstimated Mandelbrot Area {EX}")
print(f"Simulations: {simulations}, Samples: {samples}\nVariance RS: {np.var(rs_samples)}\nVariance LHS: {np.var(lhs_samples)}\nVariance Orthogonal Sampling: {np.var(orth_samples)}\n ")
print(f"Estimated Mandelbrot Area E[X]: {EX:.4f}")
print(conf_int(np.mean(rs_samples), np.var(rs_samples), simulations, p=0.95))
print(conf_int(np.mean(lhs_samples), np.var(lhs_samples), simulations, p=0.95))
print(conf_int(np.mean(orth_samples), np.var(orth_samples), simulations, p=0.95))

# Control-variate estimates: pair the first half of the runs with the
# second half and correct each pair with the reference value EX.
print()
print("CONTROL VARIATES ESTIMATES: ")
mid = int(simulations/2)
cv_rs_x = rs_samples[:mid]
cv_rs_y = rs_samples[mid:]
cv_lhs_x = lhs_samples[:mid]
cv_lhs_y = lhs_samples[mid:]
cv_orth_x = orth_samples[:mid]
cv_orth_y = orth_samples[mid:]
cv_rs = [a_i - b_i + EX for a_i, b_i in zip(cv_rs_x, cv_rs_y)]
cv_lhs = [a_i - b_i + EX for a_i, b_i in zip(cv_lhs_x, cv_lhs_y)]
cv_orth = [a_i - b_i + EX for a_i, b_i in zip(cv_orth_x, cv_orth_y)]
print(f"\nEstimated Mandelbrot Area {EX}")
print(f"Simulations: {simulations}, Samples: {samples}\nVariance RS: {np.var(cv_rs)}\nVariance LHS: {np.var(cv_lhs)}\nVariance Orthogonal Sampling: {np.var(cv_orth)}\n ")
print(f"Estimated Mandelbrot Area E[X]: {EX:.4f}")
print(conf_int(np.mean(cv_rs), np.var(cv_rs), simulations, p=0.95))
print(conf_int(np.mean(cv_lhs), np.var(cv_lhs), simulations, p=0.95))
print(conf_int(np.mean(cv_orth), np.var(cv_orth), simulations, p=0.95))
48e0b8f9d9ea9a113d8f004c7d93531b653af3e2 | 1,676 | py | Python | models/python/hypothalamus/dynamical/miro_experiments/entropy_percept.py | ABRG-Models/MammalBot | 0b153232b94197c7a65156c1c3451ab2b9f725ae | [
"MIT"
] | null | null | null | models/python/hypothalamus/dynamical/miro_experiments/entropy_percept.py | ABRG-Models/MammalBot | 0b153232b94197c7a65156c1c3451ab2b9f725ae | [
"MIT"
] | null | null | null | models/python/hypothalamus/dynamical/miro_experiments/entropy_percept.py | ABRG-Models/MammalBot | 0b153232b94197c7a65156c1c3451ab2b9f725ae | [
"MIT"
] | null | null | null |
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import entropy
from scipy import ndimage
# imo = cv2.imread('ball1.jpg')
# imo = cv2.imread('room_balls.jpg')
def computeEntropy(I, Q=None):
    """Shannon entropy of the normalised values of *I*, or, when *Q* is
    given, the KL divergence D(I || Q) between the two normalised arrays.

    Both inputs are flattened and scaled to sum to one before comparison.
    """
    p = I.flatten()
    p = p / float(sum(p))
    if Q is None:
        return entropy(p)
    q = Q.flatten()
    q = q / float(sum(q))
    # Replace empty bins with a tiny mass so the divergence stays finite.
    q[q == 0] = 1e-10
    p[p == 0] = 1e-10
    return entropy(p.astype(np.float32), q.astype(np.float32))
def getEntropy(im):
    """Return a per-tile divergence ('surprise') map for a grayscale image.

    The image is split into a 40x40 grid of tiles. Each tile's pixel values
    are compared via computeEntropy against the element-wise mean tile, and
    every pixel of the tile in the output map is filled with that score.

    :param im: 2-D array of 8-bit intensities (divided by 255 internally)
    :return: array of the same shape as *im* with one score per tile
    """
    m, n = im.shape
    num_n = 40  # tiles along the horizontal axis
    num_m = 40  # tiles along the vertical axis
    im = im / 255.0
    sn = n // num_n  # tile width  (remainder columns are never visited)
    sm = m // num_m  # tile height (remainder rows are never visited)
    imr = np.zeros(im.shape)

    # Reference tile: the element-wise average over all tiles.
    I = np.zeros((sm, sn))
    for i in range(num_m):
        for j in range(num_n):
            lo = sm * i
            lf = min(sm * (i + 1), m)
            ro = sn * j
            rf = min(sn * (j + 1), n)
            I += im[lo:lf, ro:rf]
    I /= num_n * num_m

    # Score every tile against the reference and paint the score back.
    for i in range(num_m):
        for j in range(num_n):
            lo = sm * i
            lf = min(sm * (i + 1), m)
            ro = sn * j
            rf = min(sn * (j + 1), n)
            h = computeEntropy(im[lo:lf, ro:rf], I)
            imr[lo:lf, ro:rf] = h
    return imr
# plotEntropy( im )
# plt.show()
| 22.052632 | 73 | 0.470167 |
33b8149e457ebceec0434136ac555e58a0b82fec | 12,290 | py | Python | tests/test_rewind.py | wmendes-ionos/patroni | 0057f9018bc0b8f78cd5c53d8a6ae7a06141e24d | [
"MIT"
] | null | null | null | tests/test_rewind.py | wmendes-ionos/patroni | 0057f9018bc0b8f78cd5c53d8a6ae7a06141e24d | [
"MIT"
] | null | null | null | tests/test_rewind.py | wmendes-ionos/patroni | 0057f9018bc0b8f78cd5c53d8a6ae7a06141e24d | [
"MIT"
] | null | null | null | from mock import Mock, PropertyMock, patch, mock_open
from patroni.postgresql import Postgresql
from patroni.postgresql.cancellable import CancellableSubprocess
from patroni.postgresql.rewind import Rewind
from six.moves import builtins
from . import BaseTestPostgresql, MockCursor, psycopg_connect
class MockThread(object):
    """Thread stand-in that invokes its target synchronously on start().

    Used to patch ``Thread`` so background work runs inline and tests
    stay deterministic.
    """

    def __init__(self, target, args):
        self._run = target
        self._run_args = args

    def start(self):
        # No real thread: execute the target on the calling thread.
        self._run(*self._run_args)
def mock_cancellable_call(*args, **kwargs):
    """Simulate a failed pg_rewind invocation: fills the optional
    ``communicate`` dict with an error on stderr and returns exit code 1."""
    channel = kwargs.pop('communicate', None)
    if isinstance(channel, dict):
        error = (b'pg_rewind: error: could not open file ' +
                 b'"data/postgresql0/pg_xlog/000000010000000000000003": No such file')
        channel.update(stdout=b'', stderr=error)
    return 1
def mock_cancellable_call0(*args, **kwargs):
    """Simulate a successful subprocess call: empty output, exit code 0."""
    channel = kwargs.pop('communicate', None)
    if isinstance(channel, dict):
        channel.update(stdout=b'', stderr=b'')
    return 0
def mock_cancellable_call1(*args, **kwargs):
    """Simulate a failing subprocess call with clean output: exit code 1."""
    channel = kwargs.pop('communicate', None)
    if isinstance(channel, dict):
        channel.update(stdout=b'', stderr=b'')
    return 1
def mock_single_user_mode(self, communicate, options):
    """Fake Postgresql.single_user_mode: record canned stdout/stderr in the
    *communicate* dict and report a non-zero (failure) exit code."""
    communicate.update(stdout=b'foo', stderr=b'bar')
    return 1
@patch('subprocess.call', Mock(return_value=0))
@patch('patroni.psycopg.connect', psycopg_connect)
class TestRewind(BaseTestPostgresql):
    """Unit tests for patroni.postgresql.rewind.Rewind.

    All external effects (subprocess calls, libpq connections, the
    filesystem) are mocked; each test drives Rewind through a specific
    success/failure path.
    """

    def setUp(self):
        super(TestRewind, self).setUp()
        self.r = Rewind(self.p)

    def test_can_rewind(self):
        # can_rewind requires wal_log_hints (or checksums), a working
        # pg_rewind binary, and use_pg_rewind enabled in the config.
        with patch.object(Postgresql, 'controldata', Mock(return_value={'wal_log_hints setting': 'on'})):
            self.assertTrue(self.r.can_rewind)
        with patch('subprocess.call', Mock(return_value=1)):
            self.assertFalse(self.r.can_rewind)
        with patch('subprocess.call', side_effect=OSError):
            self.assertFalse(self.r.can_rewind)
        self.p.config._config['use_pg_rewind'] = False
        self.assertFalse(self.r.can_rewind)

    def test_pg_rewind(self):
        r = {'user': '', 'host': '', 'port': '', 'database': '', 'password': ''}
        with patch.object(Postgresql, 'major_version', PropertyMock(return_value=130000)),\
                patch.object(CancellableSubprocess, 'call', Mock(return_value=None)):
            with patch('subprocess.check_output', Mock(return_value=b'boo')):
                self.assertFalse(self.r.pg_rewind(r))
            with patch('subprocess.check_output', Mock(side_effect=Exception)):
                self.assertFalse(self.r.pg_rewind(r))
        with patch.object(Postgresql, 'major_version', PropertyMock(return_value=120000)),\
                patch('subprocess.check_output', Mock(return_value=b'foo %f %p %r %% % %')):
            with patch.object(CancellableSubprocess, 'call', mock_cancellable_call):
                self.assertFalse(self.r.pg_rewind(r))
            with patch.object(CancellableSubprocess, 'call', mock_cancellable_call0):
                self.assertTrue(self.r.pg_rewind(r))
            with patch.object(CancellableSubprocess, 'call', mock_cancellable_call1):
                self.assertFalse(self.r.pg_rewind(r))

    @patch.object(Rewind, 'can_rewind', PropertyMock(return_value=True))
    def test__get_local_timeline_lsn(self):
        self.r.trigger_check_diverged_lsn()
        with patch.object(Postgresql, 'controldata',
                          Mock(return_value={'Database cluster state': 'shut down in recovery',
                                             'Minimum recovery ending location': '0/0',
                                             "Min recovery ending loc's timeline": '0',
                                             'Latest checkpoint location': '0/'})):
            self.r.rewind_or_reinitialize_needed_and_possible(self.leader)
        with patch.object(Postgresql, 'is_running', Mock(return_value=True)):
            with patch.object(MockCursor, 'fetchone', Mock(side_effect=[(0, 0, 1, 1, 0, 0, 0, 0, 0, None), Exception])):
                self.r.rewind_or_reinitialize_needed_and_possible(self.leader)

    @patch.object(CancellableSubprocess, 'call', mock_cancellable_call)
    @patch.object(Postgresql, 'checkpoint', side_effect=['', '1'],)
    @patch.object(Postgresql, 'stop', Mock(return_value=False))
    @patch.object(Postgresql, 'start', Mock())
    def test_execute(self, mock_checkpoint):
        self.r.execute(self.leader)
        with patch.object(Postgresql, 'major_version', PropertyMock(return_value=130000)):
            self.r.execute(self.leader)
        with patch.object(MockCursor, 'fetchone', Mock(side_effect=Exception)):
            self.r.execute(self.leader)
        with patch.object(Rewind, 'pg_rewind', Mock(return_value=False)):
            mock_checkpoint.side_effect = ['1', '', '', '']
            self.r.execute(self.leader)
            self.r.execute(self.leader)
            with patch.object(Rewind, 'check_leader_is_not_in_recovery', Mock(return_value=False)):
                self.r.execute(self.leader)
            self.p.config._config['remove_data_directory_on_rewind_failure'] = False
            self.r.trigger_check_diverged_lsn()
            self.r.execute(self.leader)
        self.leader.member.data.update(version='1.5.7', checkpoint_after_promote=False, role='master')
        self.assertIsNone(self.r.execute(self.leader))
        del self.leader.member.data['checkpoint_after_promote']
        with patch.object(Rewind, 'check_leader_is_not_in_recovery', Mock(return_value=False)):
            self.assertIsNone(self.r.execute(self.leader))
        with patch.object(Postgresql, 'is_running', Mock(return_value=True)):
            self.r.execute(self.leader)

    @patch('patroni.postgresql.rewind.logger.info')
    def test__log_master_history(self, mock_logger):
        # Long histories are elided: first three lines, '...', last line.
        history = [[n, n, ''] for n in range(1, 10)]
        self.r._log_master_history(history, 1)
        expected = '\n'.join(['{0}\t0/{0}\t'.format(n) for n in range(1, 4)] + ['...', '9\t0/9\t'])
        self.assertEqual(mock_logger.call_args[0][1], expected)

    @patch.object(Postgresql, 'start', Mock())
    @patch.object(Rewind, 'can_rewind', PropertyMock(return_value=True))
    @patch.object(Rewind, '_get_local_timeline_lsn')
    @patch.object(Rewind, 'check_leader_is_not_in_recovery')
    def test__check_timeline_and_lsn(self, mock_check_leader_is_not_in_recovery, mock_get_local_timeline_lsn):
        mock_get_local_timeline_lsn.return_value = (True, 2, 67197377)
        mock_check_leader_is_not_in_recovery.return_value = False
        self.r.trigger_check_diverged_lsn()
        self.assertFalse(self.r.rewind_or_reinitialize_needed_and_possible(self.leader))
        self.leader = self.leader.member
        self.assertFalse(self.r.rewind_or_reinitialize_needed_and_possible(self.leader))
        mock_check_leader_is_not_in_recovery.return_value = True
        self.assertFalse(self.r.rewind_or_reinitialize_needed_and_possible(self.leader))
        self.r.trigger_check_diverged_lsn()
        with patch.object(MockCursor, 'fetchone', Mock(side_effect=[('', 3, '0/0'), ('', b'4\t0/40159C0\tn\n')])):
            self.assertTrue(self.r.rewind_or_reinitialize_needed_and_possible(self.leader))
        self.r.reset_state()
        self.r.trigger_check_diverged_lsn()
        with patch('patroni.psycopg.connect', Mock(side_effect=Exception)):
            self.assertFalse(self.r.rewind_or_reinitialize_needed_and_possible(self.leader))
        self.r.trigger_check_diverged_lsn()
        with patch.object(MockCursor, 'fetchone', Mock(side_effect=[('', 3, '0/0'), ('', b'1\t0/40159C0\tn\n')])):
            self.assertTrue(self.r.rewind_or_reinitialize_needed_and_possible(self.leader))
        self.r.reset_state()
        self.r.trigger_check_diverged_lsn()
        with patch.object(MockCursor, 'fetchone', Mock(return_value=('', 1, '0/0'))):
            with patch.object(Rewind, '_get_local_timeline_lsn', Mock(return_value=(True, 1, '0/0'))):
                self.assertFalse(self.r.rewind_or_reinitialize_needed_and_possible(self.leader))
            self.r.trigger_check_diverged_lsn()
            self.assertTrue(self.r.rewind_or_reinitialize_needed_and_possible(self.leader))
        self.r.reset_state()
        self.r.trigger_check_diverged_lsn()
        mock_get_local_timeline_lsn.return_value = (False, 2, 67296664)
        self.assertTrue(self.r.rewind_or_reinitialize_needed_and_possible(self.leader))
        with patch('subprocess.Popen') as mock_popen:
            mock_popen.return_value.communicate.return_value = (
                b'0, lsn: 0/040159C1, prev 0/\n',
                b'pg_waldump: fatal: error in WAL record at 0/40159C1: invalid record length at /: wanted 24, got 0\n'
            )
            self.r.reset_state()
            self.r.trigger_check_diverged_lsn()
            mock_get_local_timeline_lsn.return_value = (False, 2, 67197377)
            self.assertTrue(self.r.rewind_or_reinitialize_needed_and_possible(self.leader))
            self.r.reset_state()
            self.r.trigger_check_diverged_lsn()
            mock_popen.side_effect = Exception
            self.assertTrue(self.r.rewind_or_reinitialize_needed_and_possible(self.leader))

    @patch.object(MockCursor, 'fetchone', Mock(side_effect=[(True,), Exception]))
    def test_check_leader_is_not_in_recovery(self):
        self.r.check_leader_is_not_in_recovery({})
        self.r.check_leader_is_not_in_recovery({})

    def test_read_postmaster_opts(self):
        m = mock_open(read_data='/usr/lib/postgres/9.6/bin/postgres "-D" "data/postgresql0" \
"--listen_addresses=127.0.0.1" "--port=5432" "--hot_standby=on" "--wal_level=hot_standby" \
"--wal_log_hints=on" "--max_wal_senders=5" "--max_replication_slots=5"\n')
        with patch.object(builtins, 'open', m):
            data = self.r.read_postmaster_opts()
            self.assertEqual(data['wal_level'], 'hot_standby')
            self.assertEqual(int(data['max_replication_slots']), 5)
            self.assertEqual(data.get('D'), None)
            m.side_effect = IOError
            data = self.r.read_postmaster_opts()
            self.assertEqual(data, dict())

    @patch('psutil.Popen')
    def test_single_user_mode(self, subprocess_popen_mock):
        subprocess_popen_mock.return_value.wait.return_value = 0
        subprocess_popen_mock.return_value.communicate.return_value = ('', '')
        self.assertEqual(self.r.single_user_mode({'input': 'CHECKPOINT'}, {'archive_mode': 'on'}), 0)

    @patch('os.listdir', Mock(side_effect=[OSError, ['a', 'b']]))
    @patch('os.unlink', Mock(side_effect=OSError))
    @patch('os.remove', Mock())
    @patch('os.path.islink', Mock(side_effect=[True, False]))
    @patch('os.path.isfile', Mock(return_value=True))
    def test_cleanup_archive_status(self):
        # First call hits the OSError paths, second the happy path.
        self.r.cleanup_archive_status()
        self.r.cleanup_archive_status()

    @patch('os.unlink', Mock())
    @patch('os.listdir', Mock(return_value=[]))
    @patch('os.path.isfile', Mock(return_value=True))
    @patch.object(Rewind, 'read_postmaster_opts', Mock(return_value={}))
    @patch.object(Rewind, 'single_user_mode', mock_single_user_mode)
    def test_ensure_clean_shutdown(self):
        self.assertIsNone(self.r.ensure_clean_shutdown())

    @patch('patroni.postgresql.rewind.Thread', MockThread)
    @patch.object(Postgresql, 'controldata')
    @patch.object(Postgresql, 'checkpoint')
    @patch.object(Postgresql, 'get_master_timeline')
    def test_ensure_checkpoint_after_promote(self, mock_get_master_timeline, mock_checkpoint, mock_controldata):
        mock_controldata.return_value = {"Latest checkpoint's TimeLineID": 1}
        mock_get_master_timeline.return_value = 1
        self.r.ensure_checkpoint_after_promote(Mock())
        self.r.reset_state()
        mock_get_master_timeline.return_value = 2
        mock_checkpoint.return_value = 0
        self.r.ensure_checkpoint_after_promote(Mock())
        self.r.ensure_checkpoint_after_promote(Mock())
        self.r.reset_state()
        mock_controldata.side_effect = TypeError
        mock_checkpoint.side_effect = Exception
        self.r.ensure_checkpoint_after_promote(Mock())
90b39b407d8b1d7cc90e14397e100ca9f34500ad | 2,602 | py | Python | starlingx-dashboard/starlingx-dashboard/starlingx_dashboard/dashboards/admin/inventory/kubernetes_labels/tables.py | starlingx/gui | f6546289180f0ed97b357ed1ad426084f1e64588 | [
"Apache-2.0"
] | 1 | 2020-02-07T19:01:54.000Z | 2020-02-07T19:01:54.000Z | starlingx-dashboard/starlingx-dashboard/starlingx_dashboard/dashboards/admin/inventory/kubernetes_labels/tables.py | starlingx/gui | f6546289180f0ed97b357ed1ad426084f1e64588 | [
"Apache-2.0"
] | null | null | null | starlingx-dashboard/starlingx-dashboard/starlingx_dashboard/dashboards/admin/inventory/kubernetes_labels/tables.py | starlingx/gui | f6546289180f0ed97b357ed1ad426084f1e64588 | [
"Apache-2.0"
] | 1 | 2021-06-17T17:17:08.000Z | 2021-06-17T17:17:08.000Z | #
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (c) 2019 Wind River Systems, Inc.
# Copyright (C) 2019 Intel Corporation
#
import logging
from django.urls import reverse # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from starlingx_dashboard import api as stx_api
LOG = logging.getLogger(__name__)
def host_locked(host=None):
    """Tell whether *host* is in the 'locked' administrative state.

    Returns False when no host is supplied.
    """
    return bool(host) and host._administrative == 'locked'
class AssignKubeLabel(tables.LinkAction):
    """Table action that opens the 'assign Kubernetes label' modal form."""
    name = "assignKubelabel"
    verbose_name = _("Assign Kube Label")
    url = "horizon:admin:inventory:assignlabel"
    classes = ("ajax-modal", "btn-create")

    def get_link_url(self, datum=None):
        # Build the assign-label URL for the host this table belongs to.
        target_host = self.table.kwargs['host_id']
        return reverse(self.url, args=(target_host,))

    def allowed(self, request, datum):
        # Labels may only be assigned while the host is administratively locked.
        return host_locked(self.table.kwargs['host'])
class RemoveLabel(tables.DeleteAction):
    """Batch action that removes Kubernetes labels from a locked host."""

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            "Delete Label",
            "Delete Labels",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            "Deleted Label",
            "Deleted Labels",
            count
        )

    def allowed(self, request, datum):
        # Deleting labels is only permitted while the host is locked.
        return host_locked(self.table.kwargs['host'])

    def delete(self, request, label_id):
        host_id = self.table.kwargs['host_id']
        try:
            stx_api.sysinv.host_label_remove(request, label_id)
        except Exception:
            failure = _('Failed to delete host %(hid)s label %(lid)s') % {
                'hid': host_id, 'lid': label_id}
            LOG.error(failure)
            # Redirect back to the host detail page on failure.
            exceptions.handle(request, failure,
                              redirect=reverse('horizon:admin:inventory:detail',
                                               args=(host_id,)))
class LabelTable(tables.DataTable):
    """Horizon table listing the Kubernetes labels assigned to a host."""
    # Declaration order defines the column order in the rendered table.
    uuid = tables.Column('uuid',
                         verbose_name=_('UUID'))
    label_key = tables.Column('label_key',
                              verbose_name=_('Label Key'))
    label_value = tables.Column('label_value',
                                verbose_name=_('Label Value'))

    def get_object_id(self, datum):
        # The label's UUID uniquely identifies a table row.
        return str(datum.uuid)

    class Meta(object):
        name = "labels"
        verbose_name = _("Label")
        multi_select = False
        row_actions = (RemoveLabel, )
        table_actions = (AssignKubeLabel, )
9a9c9ffe0ae1946cbcd5be70ca3201a1f9c899c2 | 972 | py | Python | setup.py | ilemhadri/lassoNet | adf0aaf7131a57d8427ae40db7a19e608d6809a4 | [
"MIT"
] | 1 | 2020-05-28T02:33:00.000Z | 2020-05-28T02:33:00.000Z | setup.py | ilemhadri/lassoNet | adf0aaf7131a57d8427ae40db7a19e608d6809a4 | [
"MIT"
] | null | null | null | setup.py | ilemhadri/lassoNet | adf0aaf7131a57d8427ae40db7a19e608d6809a4 | [
"MIT"
] | null | null | null | from setuptools import setup
from pathlib import Path
def read(fname):
    """Return the text of *fname*, resolved relative to this file.

    Uses Path.read_text() so the file handle is closed deterministically
    (the previous open().read() left the handle unclosed).
    """
    return (Path(__file__).parent / fname).read_text()
# Packaging metadata for the lassonet distribution.
setup(
    name="lassonet",
    version="0.0.9",
    author="Louis Abraham, Ismael Lemhadri",
    author_email="louis.abraham@yahoo.fr, lemhadri@stanford.edu",
    license="MIT",
    description="Reference implementation of LassoNet",
    # The PyPI long description is taken verbatim from the README.
    long_description=read("README.md"),
    long_description_content_type="text/markdown",
    url="https://github.com/ilemhadri/lassonet",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Operating System :: OS Independent",
    ],
    packages=["lassonet"],
    install_requires=[
        "torch",
        "scikit-learn",
        "matplotlib",
        "sortedcontainers",
        "tqdm",
    ],
    tests_require=["pytest"],
    python_requires=">=3.6.5",
)
| 27 | 69 | 0.634774 |
e1972ea524bab133dfcf1e008bb9cbf28bab75dd | 1,114 | py | Python | website/settings/production.py | LKKTGB/lkk-website | d9cd2f5a11f2b4316ea4b242c5e09981207abdfb | [
"MIT"
] | null | null | null | website/settings/production.py | LKKTGB/lkk-website | d9cd2f5a11f2b4316ea4b242c5e09981207abdfb | [
"MIT"
] | 5 | 2020-04-26T09:03:33.000Z | 2022-02-02T13:00:39.000Z | website/settings/production.py | LKKTGB/lkk-website | d9cd2f5a11f2b4316ea4b242c5e09981207abdfb | [
"MIT"
] | null | null | null | import os
import dj_database_url
from website.settings.base import *
# Production overrides on top of website.settings.base.
SECRET_KEY = os.environ["DJANGO_SECRET_KEY"]
DEBUG = False
GA_TRACKING_ID = 'UA-114678735-1'  # Google Analytics property for production

# security: force cookies and OAuth redirects onto HTTPS only
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SOCIAL_AUTH_REDIRECT_IS_HTTPS = True

ALLOWED_HOSTS = [
    'lkkpomia.tgb.org.tw',
    'lkk-website-production.herokuapp.com',
    'lkkpomia-production.azurewebsites.net',
]

# WhiteNoise: serve static files directly from the app process
MIDDLEWARE.extend([
    'whitenoise.middleware.WhiteNoiseMiddleware',
])
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

# Database settings come from the DATABASE_URL environment variable.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)

STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)

# Log to stdout (console) so the platform captures application logs.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'level': os.getenv('DJANGO_LOG_LEVEL', 'ERROR'),
        },
    },
}
| 21.018868 | 79 | 0.679533 |
6d5c3886ee0e28fec541324f4dcead8b2e037653 | 5,081 | py | Python | hparams.py | sebakeaaen/autovc | aecc37901528f65209dafe30ac98094ccc6ea475 | [
"MIT"
] | 2 | 2022-02-24T22:27:10.000Z | 2022-03-10T22:03:45.000Z | hparams.py | sebakeaaen/autovc | aecc37901528f65209dafe30ac98094ccc6ea475 | [
"MIT"
] | null | null | null | hparams.py | sebakeaaen/autovc | aecc37901528f65209dafe30ac98094ccc6ea475 | [
"MIT"
] | null | null | null | # NOTE: If you want full control for model architecture. please take a look
# at the code and change whatever you want. Some hyper parameters are hardcoded.
class Map(dict):
    """dict subclass whose items are also reachable as attributes.

    Example:
    m = Map({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])

    Missing attributes resolve to None (via dict.get) instead of raising.

    Credits to epool:
    https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary
    """
    def __init__(self, *args, **kwargs):
        super(Map, self).__init__(*args, **kwargs)
        for arg in args:
            if isinstance(arg, dict):
                for k, v in arg.items():
                    self[k] = v

        if kwargs:
            # Bug fix: dict.iteritems() does not exist on Python 3 — any
            # keyword argument used to raise AttributeError here.
            for k, v in kwargs.items():
                self[k] = v

    def __getattr__(self, attr):
        # Attribute access falls back to dict lookup; unknown keys -> None.
        return self.get(attr)

    def __setattr__(self, key, value):
        self.__setitem__(key, value)

    def __setitem__(self, key, value):
        # Keep the instance __dict__ in sync so attribute access stays fast.
        super(Map, self).__setitem__(key, value)
        self.__dict__.update({key: value})

    def __delattr__(self, item):
        self.__delitem__(item)

    def __delitem__(self, key):
        super(Map, self).__delitem__(key)
        del self.__dict__[key]
# Default hyperparameters for the WaveNet vocoder.
hparams = Map({
    'name': "wavenet_vocoder",

    # Convenient model builder
    'builder': "wavenet",

    # Input type:
    # 1. raw [-1, 1]
    # 2. mulaw [-1, 1]
    # 3. mulaw-quantize [0, mu]
    # If input_type is raw or mulaw, network assumes scalar input and
    # discretized mixture of logistic distributions output, otherwise one-hot
    # input and softmax output are assumed.
    # **NOTE**: if you change one of the two parameters below, you need to
    # re-run preprocessing before training.
    'input_type': "raw",
    'quantize_channels': 65536,  # 65536 or 256

    # Audio:
    'sample_rate': 16000,
    # this is only valid when mulaw is True
    'silence_threshold': 2,
    'num_mels': 80,
    'fmin': 125,
    'fmax': 7600,
    'fft_size': 1024,
    # shift can be specified by either hop_size or frame_shift_ms
    'hop_size': 256,
    'frame_shift_ms': None,
    'min_level_db': -100,
    'ref_level_db': 20,
    # whether to rescale waveform or not.
    # Let x be an input waveform; the rescaled waveform y is given by:
    # y = x / np.abs(x).max() * rescaling_max
    'rescaling': True,
    'rescaling_max': 0.999,
    # mel-spectrogram is normalized to [0, 1] for each utterance and clipping may
    # happen depending on min_level_db and ref_level_db, causing clipping noise.
    # If False, an assertion is added to ensure no clipping happens.
    'allow_clipping_in_normalization': True,

    # Mixture of logistic distributions:
    'log_scale_min': float(-32.23619130191664),

    # Model:
    # This should equal `quantize_channels` if mu-law quantize is enabled,
    # otherwise num_mixture * 3 (pi, mean, log_scale)
    'out_channels': 10 * 3,
    'layers': 24,
    'stacks': 4,
    'residual_channels': 512,
    'gate_channels': 512,  # split into 2 groups internally for gated activation
    'skip_out_channels': 256,
    'dropout': 1 - 0.95,
    'kernel_size': 3,
    # If True, apply weight normalization as same as DeepVoice3
    'weight_normalization': True,
    # Use legacy code or not. Default is True since we already provided a model
    # based on the legacy code that can generate high-quality audio.
    # Ref: https://github.com/r9y9/wavenet_vocoder/pull/73
    'legacy': True,

    # Local conditioning (set negative value to disable)
    'cin_channels': 80,
    # If True, use transposed convolutions to upsample conditional features,
    # otherwise repeat features to adjust time resolution
    'upsample_conditional_features': True,
    # should np.prod(upsample_scales) == hop_size
    'upsample_scales': [4, 4, 4, 4],
    # Freq axis kernel size for upsampling network
    'freq_axis_kernel_size': 3,

    # Global conditioning (set negative value to disable)
    # currently limited to speaker embedding;
    # this should only be enabled for multi-speaker datasets
    'gin_channels': -1,  # i.e., speaker embedding dim
    'n_speakers': -1,

    # Data loader
    'pin_memory': True,
    'num_workers': 2,

    # train/test
    # test size can be specified as a portion or as num samples
    'test_size': 0.0441,  # 50 for CMU ARCTIC single speaker
    'test_num_samples': None,
    'random_state': 1234,

    # Loss

    # Training:
    'batch_size': 2,
    'adam_beta1': 0.9,
    'adam_beta2': 0.999,
    'adam_eps': 1e-8,
    'amsgrad': False,
    'initial_learning_rate': 1e-3,
    # see lrschedule.py for available lr_schedule
    'lr_schedule': "noam_learning_rate_decay",
    'lr_schedule_kwargs': {},  # {"anneal_rate": 0.5, "anneal_interval": 50000},
    'nepochs': 2000,
    'weight_decay': 0.0,
    'clip_thresh': -1,
    # max time steps can either be specified as sec or steps
    # if both are None, then full audio samples are used in a batch
    'max_time_sec': None,
    'max_time_steps': 8000,
    # Hold moving averaged parameters and use them for evaluation
    'exponential_moving_average': True,
    # averaged = decay * averaged + (1 - decay) * x
    'ema_decay': 0.9999,

    # Save
    # per-step intervals
    'checkpoint_interval': 10000,
    'train_eval_interval': 10000,
    # per-epoch interval
    'test_eval_epoch_interval': 5,
    'save_optimizer_state': True,

    # Eval:
})
def hparams_debug_string():
    """Return a human-readable, alphabetically sorted dump of all hyperparameters.

    Bug fix: the previous implementation did ``values = hparams.values()``
    and then ``values[name]`` — on Python 3 dict views are not subscriptable
    (and ``sorted(values)`` would compare heterogeneous values), so this
    raised TypeError. Iterate over the keys and index the mapping instead.
    """
    hp = [' %s: %s' % (name, hparams[name]) for name in sorted(hparams)]
    return 'Hyperparameters:\n' + '\n'.join(hp)
a6416b561c30ee72079d70f5f1dabcdb070e8cc8 | 1,284 | py | Python | nicos_mlz/toftof/devices/datasinks/__init__.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_mlz/toftof/devices/datasinks/__init__.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_mlz/toftof/devices/datasinks/__init__.py | ISISComputingGroup/nicos | 94cb4d172815919481f8c6ee686f21ebb76f2068 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Jens Krüger <jens.krueger@frm2.tum.de>
#
# *****************************************************************************
from nicos_mlz.toftof.devices.datasinks.legacy import TofImageSink
from nicos_mlz.toftof.devices.datasinks.live import ToftofLiveViewSink
from nicos_mlz.toftof.devices.datasinks.nexussink import NexusSink
| 45.857143 | 79 | 0.67757 |
b1365cd56d1faa2bab51a7cf5d3586bc53a20499 | 338 | py | Python | django/solution/untitled/iris/admin.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | 1 | 2019-01-02T15:04:08.000Z | 2019-01-02T15:04:08.000Z | django/solution/untitled/iris/admin.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | null | null | null | django/solution/untitled/iris/admin.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Iris
@admin.register(Iris)
class IrisAdmin(admin.ModelAdmin):
    """Django admin configuration for Iris measurement records."""
    list_display = ['date_added', 'species']
    list_display_links = ['species']
    search_fields = ['^species']  # '^' restricts search to a prefix match
    list_filter = ['species', 'date_added']
    # Render the species choice as vertical radio buttons instead of a select.
    radio_fields = {
        'species': admin.VERTICAL,
    }
| 24.142857 | 44 | 0.674556 |
76faae602507e8b1fff21ab92ab079fd4feceea5 | 2,340 | py | Python | migrations/versions/2ce405cef0af_.py | leduy8/gotit-final-project | 02f2cabbaef0f9f0542aa3efc9f835774beec948 | [
"MIT"
] | null | null | null | migrations/versions/2ce405cef0af_.py | leduy8/gotit-final-project | 02f2cabbaef0f9f0542aa3efc9f835774beec948 | [
"MIT"
] | null | null | null | migrations/versions/2ce405cef0af_.py | leduy8/gotit-final-project | 02f2cabbaef0f9f0542aa3efc9f835774beec948 | [
"MIT"
] | null | null | null | """empty message
Revision ID: 2ce405cef0af
Revises:
Create Date: 2022-03-16 15:19:59.781471
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2ce405cef0af'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('updated', sa.DateTime(), nullable=False),
sa.Column('email', sa.String(length=254), nullable=False),
sa.Column('password_hash', sa.String(length=64), nullable=False),
sa.Column('password_salt', sa.String(length=12), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_table('category',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('updated', sa.DateTime(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('item',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('updated', sa.DateTime(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('description', sa.String(length=200), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['category_id'], ['category.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_item_name'), 'item', ['name'], unique=False)
# ### end Alembic commands ###
def downgrade():
    """Revert the initial schema: drop tables children-first (item, category,
    then user) so foreign-key constraints do not block the drops."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_item_name'), table_name='item')
    op.drop_table('item')
    op.drop_table('category')
    op.drop_index(op.f('ix_user_email'), table_name='user')
    op.drop_table('user')
    # ### end Alembic commands ###
| 36 | 74 | 0.665812 |
63354a3c7e20470c272155516a2973e439b26b8f | 1,491 | py | Python | src/pymortests/algorithms.py | fameyer/pymorWin | b449a38754fddb719d554f1aacf9280a585f1168 | [
"Unlicense"
] | null | null | null | src/pymortests/algorithms.py | fameyer/pymorWin | b449a38754fddb719d554f1aacf9280a585f1168 | [
"Unlicense"
] | null | null | null | src/pymortests/algorithms.py | fameyer/pymorWin | b449a38754fddb719d554f1aacf9280a585f1168 | [
"Unlicense"
] | null | null | null | # This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
import numpy as np
import pytest
from pymor.la.numpyvectorarray import NumpyVectorArray
from pymortests.base import runmodule, MonomOperator
from pymor.algorithms.newton import newton, NewtonError
import pymor.algorithms.basisextension as bxt
from pymor.tools.floatcmp import float_cmp
def _newton(order):
    """Run pyMOR's Newton solver on a monomial operator of the given order.

    Solves x**order = 0 starting from the initial guess 1.0 and returns
    whatever `newton` returns (solution array and solver data).
    """
    operator = MonomOperator(order)
    zero_rhs = NumpyVectorArray([0.0])
    start = NumpyVectorArray([1.0])
    return newton(operator, zero_rhs, initial_guess=start)
@pytest.mark.parametrize("order", range(1, 8))
def test_newton(order):
    # The only root of x**order (order >= 1) is 0, so the Newton iteration
    # started at 1.0 must converge to 0 for every tested order.
    U, _ = _newton(order)
    assert float_cmp(U.data, 0.0)
def test_newton_fail():
    # Order 0 yields a constant operator whose derivative vanishes, so the
    # Newton iteration cannot make progress and must raise NewtonError.
    with pytest.raises(NewtonError):
        _ = _newton(0)
@pytest.fixture(params=('pod_basis_extension', 'gram_schmidt_basis_extension', 'trivial_basis_extension'))
def extension_alg(request):
    # Parametrized fixture: yields each basis-extension algorithm from
    # pymor.algorithms.basisextension in turn, looked up by name.
    return getattr(bxt, request.param)
def test_ext(extension_alg):
    # Grow a basis one canonical unit vector at a time and check that every
    # extension algorithm reproduces the identity rows exactly (unit vectors
    # are already orthonormal, so no algorithm should alter them).
    size = 5
    ident = np.identity(size)
    current = ident[0]
    for i in range(1, size):
        c = NumpyVectorArray(current)
        n, _ = extension_alg(c, NumpyVectorArray(ident[i]))
        assert np.allclose(n.data, ident[0:i+1])
        current = ident[0:i+1]
if __name__ == "__main__":
    # Allow running this test module directly, outside a pytest invocation.
    runmodule(filename=__file__)
| 28.673077 | 106 | 0.726358 |
ab68421753501228e37854b9540d2c19ca20d33a | 7,674 | py | Python | data_helpers.py | nguyentungcuong/SA-master | 358b30aabac8982e1e8369d401ac951a85640ecf | [
"Apache-2.0"
] | null | null | null | data_helpers.py | nguyentungcuong/SA-master | 358b30aabac8982e1e8369d401ac951a85640ecf | [
"Apache-2.0"
] | null | null | null | data_helpers.py | nguyentungcuong/SA-master | 358b30aabac8982e1e8369d401ac951a85640ecf | [
"Apache-2.0"
] | null | null | null | import numpy as np
import re
from pyvi import ViTokenizer
import pandas as pd
import string
import itertools
from collections import Counter
# Abbreviation dictionary: tab-separated file with an "abbreviation" column
# and a "convert" column holding the expanded form.  Loaded once at import
# time; both are pandas Series in matching order.
filename = './data1/dict/dict_abbreviation.csv'
data = pd.read_csv(filename, sep="\t", encoding='utf-8')
list_abbreviation = data['abbreviation']
list_converts = data['convert']
# Stopword list: tab-separated file with a single "stopwords" column
# (pandas Series), also loaded once at import time.
filename1='./data1/dict/stopwords.csv'
data1=pd.read_csv(filename1,sep="\t",encoding='utf-8')
list_stopwords=data1['stopwords']
# Data preprocessing helpers
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py

    Lower-cases the text, separates common English contractions ("'s",
    "n't", ...) into their own tokens, pads punctuation with spaces and
    collapses runs of whitespace.
    """
    # NOTE: the parameter is named `string` for compatibility with the
    # upstream implementation; it shadows the stdlib module inside this scope.
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    # Bug fix: the replacements used to be " \( ", " \) " and " \? ".  In a
    # re.sub replacement template a backslash before a non-letter character
    # is passed through literally, so the cleaned text contained literal
    # backslash tokens ("\(", "\)", "\?").  Only the *pattern* needs the
    # escape; the replacement is plain text.
    string = re.sub(r"\(", " ( ", string)
    string = re.sub(r"\)", " ) ", string)
    string = re.sub(r"\?", " ? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()
# Reads the raw dataset file and splits it into one string per line.
def readdata(path):
    """Read a raw dataset file and return its non-blank lines as a list.

    The file is read as UTF-8 and split on line boundaries; lines that are
    empty or contain only whitespace are discarded.

    (An earlier variant also split each line into a label and a sentence on
    the first ':'; that logic lives in load_data_and_labels now.)
    """
    with open(path, 'r', encoding="UTF-8") as f:
        rawdata = f.read().splitlines()
    # Filter with a comprehension instead of calling list.remove() while
    # iterating the same list -- the old code skipped the element following
    # each removal, so consecutive blank lines slipped through.
    return [line for line in rawdata if line.strip()]
def clean_data(comment):
    """Replace every http/https URL in *comment* with the token ' link_spam '.

    The pattern matches a URL that is preceded by whitespace and followed by
    whitespace, one at the very start of the string, or one at the very end.
    """
    return re.sub(
        r'\shttps?:\/\/[^\s]*\s+|^https?:\/\/[^\s]*\s+|https?:\/\/[^\s]*$',
        ' link_spam ',
        comment,
    )
def convert_Abbreviation(comment):
    """Expand every known abbreviation in *comment* to its full form.

    Whitespace runs are collapsed first; then each (abbreviation, expansion)
    pair from the module-level dictionary is applied, matching the
    abbreviation as a whole word at the start, middle or end of the text.
    """
    comment = re.sub('\s+', " ", comment)
    for abbr, full_form in zip(list_abbreviation, list_converts):
        pattern = '(\s' + abbr + '\s)|(^' + abbr + '\s)|(\s' + abbr + '$)'
        replacement = ' ' + str(full_form) + ' '
        comment = re.sub(pattern, replacement, comment)
    return comment
def remove_Stopword(comment):
    """Drop stopwords, single-character tokens and pure numbers from *comment*.

    Returns the remaining tokens re-joined with single spaces.

    Bug fix: `word not in list_stopwords` tested membership against the
    pandas Series *index* (0..n-1), not its values, so no stopword was ever
    removed.  Converting the Series to a set tests the actual words (and
    gives O(1) lookups).
    """
    stopwords = set(list_stopwords)  # iterating a Series yields its values
    kept = [
        word
        for word in comment.split()
        if not word.isnumeric() and len(word) > 1 and word not in stopwords
    ]
    return ' '.join(kept)
def tokenize(comment):
    """Word-segment the Vietnamese comment using pyvi's ViTokenizer."""
    text_token = ViTokenizer.tokenize(comment)
    return text_token
def normalize_Text(comment):
    """Normalise a raw Vietnamese comment.

    Steps (order matters): lower-case, replace money amounts with the
    placeholder token ' monney ', strip punctuation, drop mixed digit/letter
    specification tokens, and collapse repeated characters.
    """
    # Round-trip through bytes; a no-op for valid str input (presumably kept
    # from a Python 2 port -- TODO confirm it is still needed).
    comment = comment.encode().decode()
    comment = comment.lower()
    # Replace money amounts (number + currency suffix) with ' monney '.
    moneytag = [u'k', u'đ', u'ngàn', u'nghìn', u'usd', u'tr', u'củ', u'triệu', u'yên']
    for money in moneytag:
        comment = re.sub('(^\d*([,.]?\d+)+\s*' + money + ')|(' + '\s\d*([,.]?\d+)+\s*' + money + ')', ' monney ',
                         comment)
    # Same for '$' written after or before the number.
    comment = re.sub('(^\d+\s*\$)|(\s\d+\s*\$)', ' monney ', comment)
    comment = re.sub('(^\$\d+\s*)|(\s\$\d+\s*\$)', ' monney ', comment)
    # Strip punctuation.  Known drawback: structured tokens such as "km/h"
    # or "V-NAND" lose their internal separators.
    listpunctuation = string.punctuation
    for i in listpunctuation:
        comment = comment.replace(i, ' ')
    # Drop digit/letter specification tokens (e.g. "128gb", "i7").
    comment = re.sub('^(\d+[a-z]+)([a-z]*\d*)*\s|\s\d+[a-z]+([a-z]*\d*)*\s|\s(\d+[a-z]+)([a-z]*\d*)*$', ' ', comment)
    comment = re.sub('^([a-z]+\d+)([a-z]*\d*)*\s|\s[a-z]+\d+([a-z]*\d*)*\s|\s([a-z]+\d+)([a-z]*\d*)*$', ' ', comment)
    # Second pass: catches spec tokens that become adjacent to whitespace
    # only after the first pass removed their neighbours.
    comment = re.sub('^(\d+[a-z]+)([a-z]*\d*)*\s|\s\d+[a-z]+([a-z]*\d*)*\s|\s(\d+[a-z]+)([a-z]*\d*)*$', ' ', comment)
    comment = re.sub('^([a-z]+\d+)([a-z]*\d*)*\s|\s[a-z]+\d+([a-z]*\d*)*\s|\s([a-z]+\d+)([a-z]*\d*)*$', ' ', comment)
    # Collapse runs of a repeated non-digit character (e.g. "hayyyy" -> "hay").
    comment = re.sub(r'(\D)\1+', r'\1', comment)
    # The block below is disabled legacy code for restoring diacritics to
    # unaccented text; kept for reference.
    # #them dau cho nhung cau khong dau
    # words = comment.split()
    # for word in words:
    #     try:
    #         comment.encode('utf-8')
    #         word = accent.accent_comment(word)
    #         print word
    #     except UnicodeError:
    #         word = word.decode('utf-8')
    #     words_normal.append(word)
    # # them dau tach thanh tung cau
    # sents = re.split("([.?!])?[\n]+|[.?!] ", comment)
    # listpunctuation = string.punctuation
    # sents_normal = []
    # # them dau theo cau
    # for sent in sents:
    #     if sent != None:
    #         for i in listpunctuation:
    #             sent = sent.replace(i, " ")
    #         sent = accent.accent_comment(sent.decode('utf-8'))
    #         sents_normal.append(sent.lower())
    return comment
def predata(path):
    """Read the dataset at *path* and run the full preprocessing pipeline.

    Each raw comment goes through: URL removal -> text normalisation ->
    abbreviation expansion -> word segmentation -> stopword removal.
    Returns the list of preprocessed comments.
    """
    # Comprehension replaces the old append loop (which also carried an
    # unused counter variable `i`).
    return [
        remove_Stopword(tokenize(convert_Abbreviation(normalize_Text(clean_data(comment)))))
        for comment in readdata(path)
    ]
def load_data_and_labels(positive_data_file, negative_data_file):
    """Load and preprocess the polarity data files.

    Each file is run through the `predata` pipeline; the preprocessed
    positive comments are followed by the negative ones, and a matching
    one-hot label matrix is built ([0, 1] = positive, [1, 0] = negative).

    Returns [x_text, y] with x_text a list of strings and y a numpy array.
    """
    positives = predata(positive_data_file)
    negatives = predata(negative_data_file)
    x_text = positives + negatives
    # One one-hot row per comment, concatenated in the same order as x_text.
    y = np.concatenate([[[0, 1]] * len(positives), [[1, 0]] * len(negatives)], 0)
    return [x_text, y]
def batch_iter(data, batch_size, num_epochs, shuffle):
    """Yield successive mini-batches over *data* for *num_epochs* epochs.

    When *shuffle* is true the element order is re-randomised at the start
    of every epoch; otherwise batches come out in the original order.  The
    last batch of an epoch may be shorter than *batch_size*.
    """
    data = np.array(data)
    data_size = len(data)
    batches_per_epoch = (data_size - 1) // batch_size + 1
    for _ in range(num_epochs):
        if shuffle:
            # Fresh random permutation each epoch.
            order = np.random.permutation(np.arange(data_size))
            epoch_data = data[order]
        else:
            epoch_data = data
        for batch_num in range(batches_per_epoch):
            start = batch_num * batch_size
            end = min(start + batch_size, data_size)
            yield epoch_data[start:end]
| 38.562814 | 117 | 0.598124 |
f5302191f1eef39d61232e01099890896a91df6c | 2,702 | py | Python | bin/scrape_software_versions.py | SciLifeLab/NGI-MethylSeq | 3d3f291c975669fc6dd0ce96e3dfbc6622002a1b | [
"MIT"
] | 17 | 2016-06-08T21:15:30.000Z | 2019-01-09T09:09:20.000Z | bin/scrape_software_versions.py | SciLifeLab/NGI-MethylSeq | 3d3f291c975669fc6dd0ce96e3dfbc6622002a1b | [
"MIT"
] | 10 | 2016-10-17T15:16:40.000Z | 2018-02-27T12:55:53.000Z | bin/scrape_software_versions.py | SciLifeLab/NGI-MethylSeq | 3d3f291c975669fc6dd0ce96e3dfbc6622002a1b | [
"MIT"
] | 10 | 2016-06-08T14:51:05.000Z | 2018-02-21T12:54:36.000Z | #!/usr/bin/env python
from __future__ import print_function
from collections import OrderedDict
import re
# Map "tool name" -> [version file written by the pipeline, regex whose first
# group captures the version string inside that file].
regexes = {
    'NGI-MethylSeq': ['v_ngi_methylseq.txt', r"(\S+)"],
    'Nextflow': ['v_nextflow.txt', r"(\S+)"],
    'Bismark genomePrep': ['v_bismark_genome_preparation.txt', r"Bismark Genome Preparation Version: v(\S+)"],
    'FastQC': ['v_fastqc.txt', r"FastQC v(\S+)"],
    'Cutadapt': ['v_cutadapt.txt', r"(\S+)"],
    'Trim Galore!': ['v_trim_galore.txt', r"version (\S+)"],
    'Bismark': ['v_bismark.txt', r"Bismark Version: v(\S+)"],
    'Bismark Deduplication': ['v_deduplicate_bismark.txt', r"Deduplicator Version: v(\S+)"],
    'Bismark methXtract': ['v_bismark_methylation_extractor.txt', r"Bismark Extractor Version: v(\S+)"],
    'Bismark Report': ['v_bismark2report.txt', r"bismark2report version: v(\S+)"],
    'Bismark Summary': ['v_bismark2summary.txt', r"bismark2summary version: (\S+)"],
    'Samtools': ['v_samtools.txt', r"samtools (\S+)"],
    'Qualimap': ['v_qualimap.txt', r"QualiMap v.(\S+)"],
    'MultiQC': ['v_multiqc.txt', r"multiqc, version (\S+)"],
}
# Pre-fill every tool with a grey "N/A" placeholder.  The keys are repeated
# here (instead of iterating `regexes`) presumably to guarantee the report
# order on Python 2, where plain dicts are unordered -- this script still
# supports Python 2 (`from __future__ import print_function`).
results = OrderedDict()
results['NGI-MethylSeq'] = '<span style="color:#999999;\">N/A</span>'
results['Nextflow'] = '<span style="color:#999999;\">N/A</span>'
results['Bismark genomePrep'] = '<span style="color:#999999;\">N/A</span>'
results['FastQC'] = '<span style="color:#999999;\">N/A</span>'
results['Cutadapt'] = '<span style="color:#999999;\">N/A</span>'
results['Trim Galore!'] = '<span style="color:#999999;\">N/A</span>'
results['Bismark'] = '<span style="color:#999999;\">N/A</span>'
results['Bismark Deduplication'] = '<span style="color:#999999;\">N/A</span>'
results['Bismark methXtract'] = '<span style="color:#999999;\">N/A</span>'
results['Bismark Report'] = '<span style="color:#999999;\">N/A</span>'
results['Bismark Summary'] = '<span style="color:#999999;\">N/A</span>'
results['Samtools'] = '<span style="color:#999999;\">N/A</span>'
results['Qualimap'] = '<span style="color:#999999;\">N/A</span>'
results['MultiQC'] = '<span style="color:#999999;\">N/A</span>'
# Search each file using its regex
for k, v in regexes.items():
    # NOTE(review): this raises if a version file is missing from the working
    # directory -- the pipeline is expected to have written all of them.
    with open(v[0]) as x:
        versions = x.read()
        match = re.search(v[1], versions)
        if match:
            results[k] = "v{}".format(match.group(1))
# Dump to YAML
# Emit a MultiQC custom-content section: YAML header plus an HTML <dl> body.
print ('''
id: 'software_versions'
section_name: 'NGI-MethylSeq Software Versions'
section_href: 'https://github.com/SciLifeLab/NGI-MethylSeq'
plot_type: 'html'
description: 'are collected at run time from the software output.'
data: |
    <dl class="dl-horizontal">
''')
for k,v in results.items():
    print("    <dt>{}</dt><dd>{}</dd>".format(k,v))
print ("    </dl>")
| 45.79661 | 110 | 0.639896 |
7aacb48d0c4216efabf29a8aae0abf6c64a43741 | 9,401 | py | Python | plugins/modules/oci_network_load_balancer_health_facts.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_network_load_balancer_health_facts.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_network_load_balancer_health_facts.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_network_load_balancer_health_facts
short_description: Fetches details about one or multiple NetworkLoadBalancerHealth resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple NetworkLoadBalancerHealth resources in Oracle Cloud Infrastructure
- Lists the summary health statuses for all network load balancers in the specified compartment.
- If I(network_load_balancer_id) is specified, the details of a single NetworkLoadBalancerHealth will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
network_load_balancer_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the network load balancer to update.
- Required to get a specific network_load_balancer_health.
type: str
aliases: ["id"]
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment containing the network load balancers to
list.
- Required to list multiple network_load_balancer_healths.
type: str
sort_order:
description:
- The sort order to use, either 'asc' (ascending) or 'desc' (descending).
type: str
choices:
- "ASC"
- "DESC"
sort_by:
description:
- The field to sort by. Only one sort order can be provided. The default order for timeCreated is descending.
The default order for displayName is ascending. If no value is specified, then timeCreated is the default.
type: str
choices:
- "timeCreated"
- "displayName"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List network_load_balancer_healths
oci_network_load_balancer_health_facts:
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
- name: Get a specific network_load_balancer_health
oci_network_load_balancer_health_facts:
network_load_balancer_id: "ocid1.networkloadbalancer.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
network_load_balancer_healths:
description:
- List of NetworkLoadBalancerHealth resources
returned: on success
type: complex
contains:
status:
description:
- The overall health status of the network load balancer.
- "* **OK:** All backend sets associated with the network load balancer return a status of `OK`."
- "* **WARNING:** At least one of the backend sets associated with the network load balancer returns a status of `WARNING`,
no backend sets return a status of `CRITICAL`, and the network load balancer life cycle state is `ACTIVE`."
- "* **CRITICAL:** One or more of the backend sets associated with the network load balancer return a status of `CRITICAL`."
- "* **UNKNOWN:** If any one of the following conditions is true:"
- " * The network load balancer life cycle state is not `ACTIVE`."
- " * No backend sets are defined for the network load balancer."
- " * More than half of the backend sets associated with the network load balancer return a status of `UNKNOWN`, none of the backend
sets return a status of `WARNING` or `CRITICAL`, and the network load balancer life cycle state is `ACTIVE`."
- " * The system could not retrieve metrics for any reason."
returned: on success
type: str
sample: OK
warning_state_backend_set_names:
description:
- A list of backend sets that are currently in the `WARNING` health state. The list identifies each backend set by the
user-friendly name you assigned when you created the backend set.
- "Example: `example_backend_set3`"
returned: on success
type: list
sample: []
critical_state_backend_set_names:
description:
- A list of backend sets that are currently in the `CRITICAL` health state. The list identifies each backend set by the
user-friendly name you assigned when you created the backend set.
- "Example: `example_backend_set`"
returned: on success
type: list
sample: []
unknown_state_backend_set_names:
description:
- A list of backend sets that are currently in the `UNKNOWN` health state. The list identifies each backend set by the
user-friendly name you assigned when you created the backend set.
- "Example: `example_backend_set2`"
returned: on success
type: list
sample: []
total_backend_set_count:
description:
- The total number of backend sets associated with this network load balancer.
- "Example: `4`"
returned: on success
type: int
sample: 4
network_load_balancer_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the network load balancer with which the health status
is associated.
returned: on success
type: str
sample: "ocid1.networkloadbalancer.oc1..xxxxxxEXAMPLExxxxxx"
sample: [{
"status": "OK",
"warning_state_backend_set_names": [],
"critical_state_backend_set_names": [],
"unknown_state_backend_set_names": [],
"total_backend_set_count": 4,
"network_load_balancer_id": "ocid1.networkloadbalancer.oc1..xxxxxxEXAMPLExxxxxx"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.network_load_balancer import NetworkLoadBalancerClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class NetworkLoadBalancerHealthFactsHelperGen(OCIResourceFactsHelperBase):
    """Supported operations: get and list.

    Facts helper for network load balancer health resources: `get` fetches
    the health of one load balancer, `list` enumerates health summaries for
    every load balancer in a compartment.
    """

    def get_required_params_for_get(self):
        # A single health resource is addressed by the load balancer OCID.
        return ["network_load_balancer_id"]

    def get_required_params_for_list(self):
        # Listing is scoped by compartment.
        return ["compartment_id"]

    def get_resource(self):
        nlb_id = self.module.params.get("network_load_balancer_id")
        return oci_common_utils.call_with_backoff(
            self.client.get_network_load_balancer_health,
            network_load_balancer_id=nlb_id,
        )

    def list_resources(self):
        # Forward only the optional filters the user actually supplied.
        optional_kwargs = {
            name: self.module.params[name]
            for name in ("sort_order", "sort_by")
            if self.module.params.get(name) is not None
        }
        return oci_common_utils.list_all_resources(
            self.client.list_network_load_balancer_healths,
            compartment_id=self.module.params.get("compartment_id"),
            **optional_kwargs
        )
# Look up an optional user-supplied customisation class; presumably
# get_custom_class returns a pass-through base when none exists -- see
# oci_resource_utils for the exact resolution rules.
NetworkLoadBalancerHealthFactsHelperCustom = get_custom_class(
    "NetworkLoadBalancerHealthFactsHelperCustom"
)
class ResourceFactsHelper(
    NetworkLoadBalancerHealthFactsHelperCustom, NetworkLoadBalancerHealthFactsHelperGen
):
    """Combines customisations with the generated helper (custom class first in the MRO)."""
    pass
def main():
    """Ansible module entry point.

    Builds the argument spec, then either gets one network load balancer's
    health or lists health summaries for a compartment, depending on which
    parameters were supplied.
    """
    module_args = oci_common_utils.get_common_arg_spec()
    module_args.update(
        dict(
            network_load_balancer_id=dict(aliases=["id"], type="str"),
            compartment_id=dict(type="str"),
            sort_order=dict(type="str", choices=["ASC", "DESC"]),
            sort_by=dict(type="str", choices=["timeCreated", "displayName"]),
        )
    )
    module = AnsibleModule(argument_spec=module_args)
    # Fail early if the top-of-file `oci` import did not succeed.
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    resource_facts_helper = ResourceFactsHelper(
        module=module,
        resource_type="network_load_balancer_health",
        service_client_class=NetworkLoadBalancerClient,
        namespace="network_load_balancer",
    )
    result = []
    # is_get()/is_list() dispatch on the required params declared by the
    # helper's get_required_params_for_* methods.
    if resource_facts_helper.is_get():
        result = [resource_facts_helper.get()]
    elif resource_facts_helper.is_list():
        result = resource_facts_helper.list()
    else:
        resource_facts_helper.fail()
    module.exit_json(network_load_balancer_healths=result)
if __name__ == "__main__":
    main()
| 38.847107 | 158 | 0.662695 |
b4dfcb0c10539009df20bae254e31476a5ddd653 | 1,543 | py | Python | tf_quant_finance/experimental/pricing_platform/framework/core/daycount_conventions.py | slowy07/tf-quant-finance | 0976f720fb58a2d7bfd863640c12a2425cd2f94f | [
"Apache-2.0"
] | 3,138 | 2019-07-24T21:43:17.000Z | 2022-03-30T12:11:09.000Z | tf_quant_finance/experimental/pricing_platform/framework/core/daycount_conventions.py | Aarif1430/tf-quant-finance | 9372eb1ddf2b48cb1a3d4283bc67a10647ddc7a6 | [
"Apache-2.0"
] | 63 | 2019-09-07T19:16:03.000Z | 2022-03-29T19:29:40.000Z | tf_quant_finance/experimental/pricing_platform/framework/core/daycount_conventions.py | Aarif1430/tf-quant-finance | 9372eb1ddf2b48cb1a3d4283bc67a10647ddc7a6 | [
"Apache-2.0"
] | 423 | 2019-07-26T21:28:05.000Z | 2022-03-26T13:07:44.000Z | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supported day count conventions."""
import enum
from tf_quant_finance.experimental.pricing_platform.instrument_protos import daycount_conventions_pb2
# Build the enum dynamically from the proto definition so the Python enum
# stays in sync with the DayCountConvention proto; each member maps a proto
# enum name to itself (name -> name).
DayCountConventions = enum.Enum(
    "DayCountConventions",
    zip(daycount_conventions_pb2.DayCountConvention.keys(),
        daycount_conventions_pb2.DayCountConvention.keys()))
DayCountConventions.__doc__ = "Supported day count conventions."
# Make repr() and calling a member return the underlying string value.
DayCountConventions.__repr__ = lambda self: self.value
DayCountConventions.__call__ = lambda self: self.value
# Typing can't resolve the type of DayCountConventions (it is created
# dynamically), so we use the Method Resolution Order to infer the type.
DayCountConventionsProtoType = DayCountConventions.mro()[0]
def from_proto_value(value: int) -> DayCountConventionsProtoType:
    """Creates DayCountConventions from a proto field value."""
    proto_name = daycount_conventions_pb2.DayCountConvention.Name(value)
    return DayCountConventions(proto_name)
__all__ = ["DayCountConventions", "from_proto_value"]
| 35.883721 | 101 | 0.787427 |
552a07540b035138bb25aad96d486c3079c44d62 | 9,565 | py | Python | flarestack/analyses/tde/compare_spectral_indices_individual.py | robertdstein/flarestack | 2ce7e67da336514f6f38f06126a1fbd82131e441 | [
"MIT"
] | null | null | null | flarestack/analyses/tde/compare_spectral_indices_individual.py | robertdstein/flarestack | 2ce7e67da336514f6f38f06126a1fbd82131e441 | [
"MIT"
] | 25 | 2019-11-14T15:46:24.000Z | 2020-11-27T11:14:22.000Z | flarestack/analyses/tde/compare_spectral_indices_individual.py | robertdstein/flarestack | 2ce7e67da336514f6f38f06126a1fbd82131e441 | [
"MIT"
] | 2 | 2020-01-06T19:39:27.000Z | 2020-07-16T20:32:29.000Z | from __future__ import print_function
from builtins import str
import numpy as np
import os
import pickle as Pickle
from flarestack.core.results import ResultsHandler
from flarestack.data.icecube.gfu.gfu_v002_p01 import txs_sample_v1
from flarestack.shared import plot_output_dir, flux_to_k, analysis_dir, catalogue_dir
from flarestack.icecube_utils.reference_sensitivity import reference_sensitivity
from flarestack.cluster import run_desy_cluster as rd
import matplotlib.pyplot as plt
from flarestack.utils.custom_dataset import custom_dataset
analyses = dict()
# Initialise Injectors/LLHs
# Shared
llh_energy = {
"Name": "Power Law",
"Gamma": 2.0,
}
llh_time = {"Name": "FixedEndBox"}
# Standard Time Integration
standard_inj_time = {"Name": "Box", "Pre-Window": 0, "Post-Window": 100}
standard_inj_kwargs = {
"Injection Time PDF": standard_inj_time,
"Injection Energy PDF": llh_energy,
"Poisson Smear?": True,
}
standard_llh = {
"LLH Energy PDF": llh_energy,
"LLH Time PDF": llh_time,
"Fit Gamma?": True,
"Fit Negative n_s?": True,
"Fit Weights?": False,
}
standard_positive_llh = {
"LLH Energy PDF": llh_energy,
"LLH Time PDF": llh_time,
"Fit Gamma?": True,
"Fit Negative n_s?": False,
"Fit Weights?": False,
}
# Murase model with One day Injection
murase_flare_llh = {
"LLH Energy PDF": llh_energy,
"LLH Time PDF": llh_time,
"Fit Gamma?": True,
"Fit Negative n_s?": False,
"Flare Search?": True,
}
inj_time_murase = {"Name": "Box", "Pre-Window": 0, "Post-Window": 2.3}
murase_flare_inj_kwargs = {
"Injection Time PDF": inj_time_murase,
"Injection Energy PDF": llh_energy,
"Poisson Smear?": True,
}
# Winter Model with 10 day Injection
winter_energy_pdf = {"Name": "Power Law", "Gamma": 2.0}
winter_flare_llh = {
"LLH Energy PDF": winter_energy_pdf,
"LLH Time PDF": llh_time,
"Fit Gamma?": True,
"Fit Negative n_s?": False,
"Flare Search?": True,
}
winter_flare_inj_time = {"Name": "Box", "Pre-Window": 0, "Post-Window": 10}
winter_flare_injection_time = {
"Injection Time PDF": winter_flare_inj_time,
"Injection Energy PDF": winter_energy_pdf,
"Poisson Smear?": True,
}
# gammas = [1.8, 1.9, 2.0, 2.1, 2.3, 2.5, 2.7, 2.9]
gammas = [1.8, 2.0]
# gammas = [2.0, 2.3]
# gammas = [1.99, 2.0, 2.02]
# gammas = [2.5, 2.7, 2.9]
name_root = "analyses/tde/compare_spectral_indices_individual/"
cat_res = dict()
cats = [
"Swift J1644+57",
# "Swift J2058+05",
# "ASASSN-14li",
# "XMMSL1 J0740-85"
# "ASASSN-15lh",
]
# For every TDE catalogue x analysis configuration x spectral index: build a
# minimisation-handler dictionary, pickle it under the analysis directory and
# submit it as a cluster job.
for j, cat in enumerate(cats):
    name = name_root + cat.replace(" ", "") + "/"
    cat_path = catalogue_dir + "TDEs/individual_TDEs/" + cat + "_catalogue.npy"
    catalogue = np.load(cat_path)
    src_res = dict()
    # lengths = [0.5 * max_window]
    # Three active configurations; the Murase 2-day flare is disabled but its
    # label/f_name entries remain in the index-matched lists below.
    for i, [inj_kwargs, llh_kwargs] in enumerate(
        [
            [standard_inj_kwargs, standard_llh],
            [standard_inj_kwargs, standard_positive_llh],
            [winter_flare_injection_time, winter_flare_llh],
            # [murase_flare_inj_kwargs, murase_flare_llh]
        ]
    ):
        label = [
            "Time-Integrated (Negative n_s)",
            "Time-Integrated",
            "10 Day Flare",
            "2 Day Flare",
        ][i]
        f_name = ["negative_n_s", "positive_n_s", "flare_winter", "flare_murase"][i]
        flare_name = name + f_name + "/"
        res = dict()
        for gamma in gammas:
            full_name = flare_name + str(gamma) + "/"
            # Flux scale guess: ~50x the reference sensitivity at the source
            # declination for this spectral index.
            scale = flux_to_k(
                reference_sensitivity(np.sin(catalogue["dec"]), gamma=gamma) * 50
            )
            # Flare analyses (i > 1) need a wider scan range.
            if i > 1:
                scale *= 10 ** (i - 1)
            # Copy the injection dict (and its nested energy PDF) so each job
            # carries its own gamma without mutating the shared templates.
            inj = dict(inj_kwargs)
            inj["Injection Energy PDF"] = dict(inj["Injection Energy PDF"])
            inj["Injection Energy PDF"]["Gamma"] = gamma
            if "E Min" in list(inj["Injection Energy PDF"].keys()):
                scale *= 10
            mh_dict = {
                "name": full_name,
                "datasets": custom_dataset(
                    txs_sample_v1, catalogue, llh_kwargs["LLH Time PDF"]
                ),
                "catalogue": cat_path,
                "inj kwargs": inj,
                "llh kwargs": llh_kwargs,
                "scale": scale,
                "n_trials": 5,
                "n_steps": 10,
            }
            # print scale
            analysis_path = analysis_dir + full_name
            # The directory may already exist from a previous run.
            try:
                os.makedirs(analysis_path)
            except OSError:
                pass
            pkl_file = analysis_path + "dict.pkl"
            with open(pkl_file, "wb") as f:
                Pickle.dump(mh_dict, f)
            rd.submit_to_cluster(pkl_file, n_jobs=100)
            #
            # mh = MinimisationHandler(mh_dict)
            # mh.iterate_run(mh_dict["scale"], mh_dict["n_steps"], n_trials=10)
            # mh.clear()
            res[gamma] = mh_dict
        src_res[label] = res
    cat_res[cat] = src_res
# Block until all submitted cluster jobs have finished before analysing.
rd.wait_for_cluster()
# Collect sensitivities / discovery potentials for every finished job and
# plot fluence (left axis) and isotropic-equivalent energy (right axis)
# against the spectral index, one figure per catalogue and quantity.
for (cat, src_res) in cat_res.items():
    name = name_root + cat.replace(" ", "") + "/"
    # One sub-list per analysis configuration, index-matched with `labels`.
    sens = [[] for _ in src_res]
    fracs = [[] for _ in src_res]
    disc_pots = [[] for _ in src_res]
    sens_e = [[] for _ in src_res]
    disc_e = [[] for _ in src_res]
    labels = []
    for i, (f_type, res) in enumerate(sorted(src_res.items())):
        for (gamma, rh_dict) in sorted(res.items()):
            try:
                rh = ResultsHandler(rh_dict)
                inj = rh_dict["inj kwargs"]["Injection Time PDF"]
                if inj["Name"] == "Box":
                    injection_length = float(inj["Pre-Window"]) + float(
                        inj["Post-Window"]
                    )
                else:
                    raise Exception("Unrecognised Time PDF calculation")
                # Injection window in seconds (length is given in days).
                inj_time = injection_length * 60 * 60 * 24
                astro_sens, astro_disc = rh.astro_values(
                    rh_dict["inj kwargs"]["Injection Energy PDF"]
                )
                key = "Total Fluence (GeV cm^{-2} s^{-1})"
                e_key = "Mean Luminosity (erg/s)"
                # Convert per-second quantities to totals over the window.
                sens[i].append(astro_sens[key] * inj_time)
                disc_pots[i].append(astro_disc[key] * inj_time)
                sens_e[i].append(astro_sens[e_key] * inj_time)
                disc_e[i].append(astro_disc[e_key] * inj_time)
                fracs[i].append(gamma)
            # Missing pickled results (e.g. jobs that failed) are skipped.
            except OSError:
                pass
        labels.append(f_type)
    # j = 0: sensitivity, j = 1: discovery potential.
    for j, [fluence, energy] in enumerate([[sens, sens_e], [disc_pots, disc_e]]):
        plt.figure()
        ax1 = plt.subplot(111)
        ax2 = ax1.twinx()
        cols = ["r", "g", "b", "orange"]
        linestyle = ["-", "--"][j]
        print(fracs, fluence, labels, cols, energy)
        for l, f in enumerate(fracs):
            try:
                ax1.plot(
                    f, fluence[l], label=labels[l], linestyle=linestyle, color=cols[l]
                )
                ax2.plot(f, energy[l], linestyle=linestyle, color=cols[l])
            # Configurations with no collected points cannot be plotted.
            except ValueError:
                pass
        y_label = [
            r"Total Fluence [GeV cm$^{-2}$]",
            r"Mean Isotropic-Equivalent $E_{\nu}$ (erg)",
        ]
        ax2.grid(True, which="both")
        ax1.set_ylabel(r"Total Fluence [GeV cm$^{-2}$]", fontsize=12)
        ax2.set_ylabel(r"Mean Isotropic-Equivalent $E_{\nu}$ (erg)")
        ax1.set_xlabel(r"Gamma")
        ax1.set_yscale("log")
        ax2.set_yscale("log")
        # Pad both log axes slightly beyond the extreme collected values.
        for k, ax in enumerate([ax1, ax2]):
            y = [fluence, energy][k]
            ax.set_ylim(
                0.95 * min([min(x) for x in y if len(x) > 0]),
                1.1 * max([max(x) for x in y if len(x) > 0]),
            )
        plt.title(["Sensitivity", "Discovery Potential"][j] + " for " + cat)
        ax1.legend(loc="upper left", fancybox=True, framealpha=1.0)
        plt.tight_layout()
        plt.savefig(
            plot_output_dir(name)
            + "/spectral_index_"
            + ["sens", "disc"][j]
            + "_"
            + cat
            + ".pdf"
        )
        plt.close()
# for j, s in enumerate([sens, sens_e]):
#
# d = [disc_pots, disc_e][j]
#
# for k, y in enumerate([s, d]):
#
# plt.figure()
# ax1 = plt.subplot(111)
#
# cols = ["b", "orange", "green"]
# linestyle = ["-", "--"][k]
#
# for i, f in enumerate(fracs):
# plt.plot(f, y[i], label=labels[i], linestyle=linestyle,
# color=cols[i])
#
# label = ["", "energy"][j]
#
# y_label = [r"Total Fluence [GeV cm$^{-2}$]",
# r"Mean Isotropic-Equivalent $E_{\nu}$ (erg)"]
#
# ax1.grid(True, which='both')
# ax1.set_ylabel(y_label[j], fontsize=12)
# ax1.set_xlabel(r"Gamma")
# ax1.set_yscale("log")
# ax1.set_ylim(0.95 * min([min(x) for x in y]),
# 1.1 * max([max(x) for x in y]))
#
# print y
#
# plt.title("Time-Integrated Emission")
#
# ax1.legend(loc='upper right', fancybox=True, framealpha=1.)
# plt.tight_layout()
#
# print label, k
#
# plt.savefig(plot_output_dir(name) + "/spectral_index_" + label +
# "_" + ["sens", "disc"][k] + ".pdf")
# plt.close()
| 27.485632 | 86 | 0.528594 |
2113518661a9d8ea22b2a3e85fff1aa796007c16 | 9,089 | py | Python | applications/DamApplication/python_scripts/check_and_prepare_model_process_dam_thermal.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 778 | 2017-01-27T16:29:17.000Z | 2022-03-30T03:01:51.000Z | applications/DamApplication/python_scripts/check_and_prepare_model_process_dam_thermal.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 6,634 | 2017-01-15T22:56:13.000Z | 2022-03-31T15:03:36.000Z | applications/DamApplication/python_scripts/check_and_prepare_model_process_dam_thermal.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 224 | 2017-02-07T14:12:49.000Z | 2022-03-06T23:09:34.000Z | import KratosMultiphysics
def Factory(settings, Model):
    """Kratos process-factory entry point.

    settings -- KratosMultiphysics.Parameters wrapping the process settings,
        expected to contain a "Parameters" sub-block.
    Model -- the Kratos Model passed to the constructed process.
    """
    # BUG FIX: `Parameters` was referenced unqualified, which raises a
    # NameError at call time; the class lives in the KratosMultiphysics module.
    if not isinstance(settings, KratosMultiphysics.Parameters):
        raise Exception("expected input shall be a Parameters object, encapsulating a json string")
    return CheckAndPrepareModelProcessDamThermal(Model, settings["Parameters"])
## All the processes python should be derived from "Process"
class CheckAndPrepareModelProcessDamThermal(KratosMultiphysics.Process):
    """Prepare the thermal and mechanical computing model parts.

    Both computing model parts are created on the main model part. Nodes and
    elements are copied from the domain sub model parts, conditions from the
    loads (processes) sub model parts, and the configured sub-sub model parts
    are rebuilt inside each computing model part.
    """

    def __init__(self, main_model_part, Parameters):
        """Store the main model part and the sub model part name lists.

        main_model_part -- root ModelPart of the analysis.
        Parameters -- KratosMultiphysics.Parameters with the sub model part
            name lists for the thermal and mechanical problems.
        """
        KratosMultiphysics.Process.__init__(self)
        self.main_model_part = main_model_part
        self.thermal_model_part_name = Parameters["thermal_model_part_name"].GetString()
        self.thermal_domain_sub_model_part_list = Parameters["thermal_domain_sub_model_part_list"]
        self.thermal_loads_sub_model_part_list = Parameters["thermal_loads_sub_model_part_list"]
        self.thermal_domain_sub_sub_model_part_list = Parameters["thermal_domain_sub_sub_model_part_list"]
        self.thermal_loads_sub_sub_model_part_list = Parameters["thermal_loads_sub_sub_model_part_list"]
        self.mechanical_model_part_name = Parameters["mechanical_model_part_name"].GetString()
        self.mechanical_domain_sub_model_part_list = Parameters["mechanical_domain_sub_model_part_list"]
        self.mechanical_loads_sub_model_part_list = Parameters["mechanical_loads_sub_model_part_list"]
        self.body_domain_sub_model_part_list = Parameters["body_domain_sub_model_part_list"]
        self.body_domain_sub_sub_model_part_list = Parameters["body_domain_sub_sub_model_part_list"]
        self.loads_sub_model_part_list = Parameters["loads_sub_model_part_list"]
        self.loads_sub_sub_model_part_list = Parameters["loads_sub_sub_model_part_list"]

    @staticmethod
    def _unique_ids(entities):
        """Return the de-duplicated Ids of an iterable of nodes/elements/conditions."""
        return list({entity.Id for entity in entities})

    def _get_sub_model_parts(self, name_list):
        """Return the sub model parts of the main model part named in *name_list*."""
        return [self.main_model_part.GetSubModelPart(name_list[i].GetString())
                for i in range(name_list.size())]

    def _create_computing_model_part(self, name):
        """Create sub model part *name*, share ProcessInfo/Properties and activate it."""
        self.main_model_part.CreateSubModelPart(name)
        computing_model_part = self.main_model_part.GetSubModelPart(name)
        computing_model_part.ProcessInfo = self.main_model_part.ProcessInfo
        computing_model_part.Properties = self.main_model_part.Properties
        computing_model_part.Set(KratosMultiphysics.ACTIVE)
        return computing_model_part

    def Execute(self):
        """Build the thermal computing model part, then the mechanical one."""
        self._prepare_thermal_model_part()
        self._prepare_mechanical_model_part()

    def _prepare_thermal_model_part(self):
        """Construct the thermal computing model part from the configured lists."""
        thermal_parts = self._get_sub_model_parts(self.thermal_domain_sub_model_part_list)
        thermal_model_part = self._create_computing_model_part(self.thermal_model_part_name)

        print("Adding Nodes to Thermal Model Part")
        thermal_model_part.AddNodes(self._unique_ids(
            node for part in thermal_parts for node in part.Nodes))

        print("Adding Elements to Thermal Model Part")
        thermal_model_part.AddElements(self._unique_ids(
            elem for part in thermal_parts for elem in part.Elements))

        print("Adding Thermal Conditions to Thermal Model Part")
        thermal_conditions = self._get_sub_model_parts(self.thermal_loads_sub_model_part_list)
        thermal_model_part.AddConditions(self._unique_ids(
            cond for part in thermal_conditions for cond in part.Conditions))

        # Sub sub model parts (construction process)
        print("Adding Thermal Sub Sub Model Parts")
        for i in range(self.thermal_domain_sub_model_part_list.size()):
            sub_part = self.main_model_part.GetSubModelPart(self.thermal_domain_sub_model_part_list[i].GetString())
            thermal_model_part.CreateSubModelPart(self.thermal_domain_sub_sub_model_part_list[i].GetString())
            sub_sub_part = thermal_model_part.GetSubModelPart(self.thermal_domain_sub_sub_model_part_list[i].GetString())
            sub_sub_part.AddElements(self._unique_ids(sub_part.Elements))
            sub_sub_part.AddNodes(self._unique_ids(sub_part.Nodes))
        for i in range(self.thermal_loads_sub_model_part_list.size()):
            load_part = self.main_model_part.GetSubModelPart(self.thermal_loads_sub_model_part_list[i].GetString())
            thermal_model_part.CreateSubModelPart(self.thermal_loads_sub_sub_model_part_list[i].GetString())
            load_sub_sub_part = thermal_model_part.GetSubModelPart(self.thermal_loads_sub_sub_model_part_list[i].GetString())
            load_sub_sub_part.AddNodes(self._unique_ids(load_part.Nodes))
            load_sub_sub_part.AddConditions(self._unique_ids(load_part.Conditions))
        print(thermal_model_part)

    def _prepare_mechanical_model_part(self):
        """Construct the mechanical computing model part from the configured lists."""
        mechanical_parts = self._get_sub_model_parts(self.mechanical_domain_sub_model_part_list)
        mechanical_model_part = self._create_computing_model_part(self.mechanical_model_part_name)

        print("Adding Nodes to Mechanical Model Part")
        mechanical_model_part.AddNodes(self._unique_ids(
            node for part in mechanical_parts for node in part.Nodes))

        print("Adding Elements to Mechanical Model Part")
        mechanical_model_part.AddElements(self._unique_ids(
            elem for part in mechanical_parts for elem in part.Elements))

        print("Adding Conditions to Mechanical Model Part")
        mechanical_conditions = self._get_sub_model_parts(self.mechanical_loads_sub_model_part_list)
        mechanical_model_part.AddConditions(self._unique_ids(
            cond for part in mechanical_conditions for cond in part.Conditions))

        print("Adding Mechanical Sub Sub Model Parts")
        # Body - Joints
        for i in range(self.body_domain_sub_model_part_list.size()):
            body_part = self.main_model_part.GetSubModelPart(self.body_domain_sub_model_part_list[i].GetString())
            mechanical_model_part.CreateSubModelPart(self.body_domain_sub_sub_model_part_list[i].GetString())
            body_sub_sub_part = mechanical_model_part.GetSubModelPart(self.body_domain_sub_sub_model_part_list[i].GetString())
            body_sub_sub_part.AddNodes(self._unique_ids(body_part.Nodes))
            body_sub_sub_part.AddElements(self._unique_ids(body_part.Elements))
        # Arc-length
        for i in range(self.loads_sub_model_part_list.size()):
            load_part = self.main_model_part.GetSubModelPart(self.loads_sub_model_part_list[i].GetString())
            mechanical_model_part.CreateSubModelPart(self.loads_sub_sub_model_part_list[i].GetString())
            load_sub_sub_part = mechanical_model_part.GetSubModelPart(self.loads_sub_sub_model_part_list[i].GetString())
            load_sub_sub_part.AddNodes(self._unique_ids(load_part.Nodes))
            # BUG FIX: the original reused the node-id set here without
            # resetting it, so node Ids were also passed to AddConditions.
            # Collect the condition Ids separately (as the thermal loads
            # loop above already does).
            load_sub_sub_part.AddConditions(self._unique_ids(load_part.Conditions))
        print(mechanical_model_part)
| 58.262821 | 139 | 0.721972 |
4b25dc100f6763420e0b693116c1c07b46bb92de | 8,199 | py | Python | activefolders/monitor.py | cybera/activefolders | 424adeb44df3f4fb187ecbc13334188a8d30a062 | [
"Apache-2.0"
] | null | null | null | activefolders/monitor.py | cybera/activefolders | 424adeb44df3f4fb187ecbc13334188a8d30a062 | [
"Apache-2.0"
] | null | null | null | activefolders/monitor.py | cybera/activefolders | 424adeb44df3f4fb187ecbc13334188a8d30a062 | [
"Apache-2.0"
] | null | null | null | from threading import Thread
from time import sleep
from activefolders.utils import logging
import peewee
import activefolders.conf as conf
import activefolders.db as db
import activefolders.controllers.folders as folders
import activefolders.controllers.transfers as transfers
import activefolders.controllers.exports as exports
import activefolders.transports.gridftp_simple as gridftp
import activefolders.utils as utils
import activefolders.requests as requests
# Module-level logger named after this module.
LOG = logging.getLogger(__name__)
class TransportMonitor(Thread):
    """Background thread driving folder transfers, exports and results
    retrieval.

    Each pass polls the database, activates queued work (one active item per
    folder/target), starts a transport thread per active item and reaps
    finished transports.
    """

    # One third of the configured update interval: the run loop sleeps
    # between each of the three update passes.
    SLEEP_TIME = conf.settings.getint('dtnd', 'update_interval') / 3
    # How many unchanged polls before results are considered complete.
    RESULTS_RETRIES = conf.settings.getint('dtnd', 'results_retries')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # db row id -> running transport thread
        self._transfers = {}
        self._exports = {}
        # folder uuid -> {destination name -> running results transport}
        self._results = {}

    def run(self):
        """Poll forever, interleaving the three update passes with sleeps."""
        while True:
            self._update_transfers()
            sleep(self.SLEEP_TIME)
            self._update_exports()
            sleep(self.SLEEP_TIME)
            self._update_results()
            sleep(self.SLEEP_TIME)

    def _update_transfers(self):
        """Activate queued transfers, start transports, reap finished ones."""
        # Activate each inactive transfer unless another transfer for the
        # same folder/dtn pair is already active.
        inactive_transfers = db.Transfer.select().where(db.Transfer.active==False)
        for transfer in inactive_transfers:
            try:
                db.Transfer.get(db.Transfer.folder==transfer.folder,
                        db.Transfer.dtn==transfer.dtn,
                        db.Transfer.active==True)
            except peewee.DoesNotExist:
                transfer.active = True
                transfer.save()

        # Start a transport thread for every active transfer without one.
        active_transfers = db.Transfer.select().where(db.Transfer.active==True)
        for transfer in active_transfers:
            transport = self._transfers.get(transfer.id)
            if transport is None:
                transport = gridftp.DtnTransport(transfer)
                self._transfers[transfer.id] = transport
                transport.start()

        # Reap finished transports; the transfer row is removed whether the
        # transport succeeded or failed (failures are logged).
        for transfer_id, transport in list(self._transfers.items()):
            transfer = db.Transfer.get(db.Transfer.id==transfer_id)
            if transport.is_alive():
                continue
            elif not transport.success:
                LOG.error("Transfer {} failed with error: {}".format(transfer_id, transport.exception))
            del self._transfers[transfer_id]
            transfer.delete_instance()

    def _update_exports(self):
        """Same life cycle as _update_transfers, but for exports."""
        inactive_exports = db.Export.select().where(db.Export.active==False)
        for export in inactive_exports:
            try:
                # BUG FIX: this lookup previously filtered on
                # db.Transfer.active (copy-paste from _update_transfers),
                # so it never matched an active Export row.
                db.Export.get(db.Export.folder_destination==export.folder_destination,
                        db.Export.active==True)
            except peewee.DoesNotExist:
                export.active = True
                export.save()

        active_exports = db.Export.select().where(db.Export.active==True)
        for export in active_exports:
            transport = self._exports.get(export.id)
            if transport is None:
                transport_module = utils.get_transport_module(export.folder_destination.destination)
                transport = transport_module.DestinationTransport(export)
                self._exports[export.id] = transport
                transport.start()

        for export_id, transport in list(self._exports.items()):
            export = db.Export.get(db.Export.id==export_id)
            if transport.is_alive():
                continue
            elif not transport.success:
                LOG.error("Export {} failed with error: {}".format(export_id, transport.exception))
            del self._exports[export_id]
            export.delete_instance()

    def _update_results(self):
        """Poll destinations reachable from this DTN for result files."""
        this_dtn = conf.settings['dtnd']['name']
        reachable_destinations = [ dst for dst, dst_conf in conf.destinations.items() if dst != 'DEFAULT' and dst_conf['dtn'] == this_dtn ] # TODO: Avoid default section
        folder_destinations = db.FolderDestination.select().where(
                db.FolderDestination.check_for_results==True,
                db.FolderDestination.results_retrieved==False,
                db.FolderDestination.destination<<reachable_destinations)
        for folder_destination in folder_destinations:
            # Don't check for results if an export exists
            num_exports = db.Export.select().where(db.Export.folder_destination==folder_destination).count()
            if num_exports > 0:
                continue
            if folder_destination.results_folder is None:
                self._create_results_folder(folder_destination)
            uuid = folder_destination.folder.uuid
            if self._results.get(uuid) is None:
                self._results[uuid] = {}
            destination = folder_destination.destination
            transport = self._results[uuid].get(destination)
            if transport is None:
                # No retrieval in flight yet: start one and check it on the
                # next pass.
                transport_module = utils.get_transport_module(destination)
                transport = transport_module.ResultsTransport(folder_destination)
                self._results[uuid][destination] = transport
                transport.start()
                continue
            elif transport.is_alive():
                continue
            elif not transport.success:
                LOG.error("Results retrieval for folder {} from {} failed with error: {}".format(uuid, destination, transport.exception))
            else:
                self._update_results_status(transport, folder_destination)
            del self._results[uuid][destination]

    def _create_results_folder(self, folder_destination):
        """Create and attach a fresh results folder to *folder_destination*."""
        results_folder = folders.add()
        results_folder.results = True
        results_folder.save()
        folder_destination.results_folder = results_folder
        folder_destination.save()

    def _update_results_status(self, transport, folder_destination):
        """Record retrieval progress and trigger result transfers when ready."""
        if transport.new_results:
            # Fresh data arrived: ship it once all expected files (if a list
            # was configured) are present.
            if (folder_destination.result_files is not None and folders.all_results_present(folder_destination)) or folder_destination.result_files is None:
                folder_destination.initial_results = True
                folder_destination.tries_without_changes = 0
                folder_destination.save()
                self._transfer_results(folder_destination)
        elif folder_destination.initial_results:
            # Nothing new since the first batch: after RESULTS_RETRIES quiet
            # polls the results are considered complete.
            folder_destination.tries_without_changes += 1
            folder_destination.save()
            if folder_destination.tries_without_changes >= self.RESULTS_RETRIES:
                folder_destination.results_retrieved = True
                folder_destination.save()

    def _transfer_results(self, folder_destination):
        """Send the results folder home, or on to the configured destination."""
        results_folder = folder_destination.results_folder
        if folder_destination.results_destination is None:
            home_dtn = folder_destination.folder.home_dtn
            transfers.add(results_folder, home_dtn)
        else:
            try:
                db.FolderDestination.get(
                        db.FolderDestination.folder==results_folder)
            except peewee.DoesNotExist:
                db.FolderDestination.create(folder=results_folder,
                        destination=folder_destination.results_destination,
                        credentials=folder_destination.credentials)
            transfers.add_all(results_folder.uuid)
            exports.add_all(results_folder.uuid)
class RequestMonitor(Thread):
    """Background thread that replays queued inter-DTN requests."""

    SLEEP_TIME = conf.settings.getint('dtnd', 'requests_update_interval')

    def run(self):
        """Replay queued requests forever, sleeping between passes."""
        while True:
            self._update_requests()
            sleep(self.SLEEP_TIME)

    def _update_requests(self):
        """Execute every queued request; drop it on success, count failures."""
        all_requests = db.Request.select()
        for r in all_requests:
            if r.dtn not in conf.dtns:
                # BUG FIX: the target DTN is no longer configured, so drop the
                # stale request and skip it. The original fell through and
                # still executed the request -- and on failure re-saved the
                # row it had just deleted.
                r.delete_instance()
                continue
            request = requests.Request(r.dtn, r.command, r.method, r.headers, r.params, r.data, r.expected_responses)
            resp = request.execute()
            if request.success:
                r.delete_instance()
            else:
                if resp is None:
                    # BUG FIX (typo): message previously read "no reponse".
                    LOG.error("Request failed with no response")
                else:
                    LOG.error("Request failed with response: {}".format(resp.text))
                r.failures += 1
                r.save()
| 42.046154 | 169 | 0.634712 |
2ec6d388088c9557160ae19965b6e9218805abd5 | 6,006 | py | Python | mcrouter/test/test_poolstats.py | erikwebb/mcrouter | c2b99aaacf3b6665cb829d19d55a4c468e2e95d0 | [
"MIT"
] | null | null | null | mcrouter/test/test_poolstats.py | erikwebb/mcrouter | c2b99aaacf3b6665cb829d19d55a4c468e2e95d0 | [
"MIT"
] | null | null | null | mcrouter/test/test_poolstats.py | erikwebb/mcrouter | c2b99aaacf3b6665cb829d19d55a4c468e2e95d0 | [
"MIT"
] | null | null | null | # Copyright (c) 2015-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the LICENSE
# file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from mcrouter.test.MCProcess import Mcrouter
from mcrouter.test.McrouterTestCase import McrouterTestCase
from mcrouter.test.mock_servers import SleepServer
import time
import os
class TestPoolStats(McrouterTestCase):
    """End-to-end check of mcrouter's per-pool stats file.

    Sends requests to several pools, waits for the stats file to be
    written, then verifies per-pool request, error, connection and
    duration counters.
    """

    config = './mcrouter/test/test_poolstats.json'
    null_route_config = './mcrouter/test/test_nullroute.json'
    mcrouter_server_extra_args = []
    extra_args = [
        '--pool-stats-config-file=./mcrouter/test/test_poolstats_config.json',
        '--timeouts-until-tko=50',
        '--disable-miss-on-get-errors',
        '--num-proxies=4']
    # Prefixes of the stat names emitted into the stats file.
    stat_prefix = 'libmcrouter.mcrouter.0.'
    pool_prefix = stat_prefix + 'twmemcache.CI.'
    # Requests sent per key pattern in test_poolstats.
    count = 20
    # Duration stats recorded by verify_stat('GR') for the cross-stat
    # comparisons at the end of check_pool_stats.
    # NOTE(review): class-level mutable attribute shared across instances --
    # harmless here since each test run is a fresh process.
    durationMap = {}

    def setUp(self):
        # Three null-routing mcrouter instances act as healthy servers.
        self.mc = []
        for _i in range(3):
            self.mc.append(Mcrouter(self.null_route_config,
                extra_args=self.mcrouter_server_extra_args))
            self.add_server(self.mc[_i])

        # configure SleepServer for the east and wc pools
        for _i in range(3):
            self.mc.append(SleepServer())
            self.add_server(self.mc[_i + 3])

        self.mcrouter = self.add_mcrouter(
            self.config,
            extra_args=self.extra_args)

    def verify_stat(self, line, statname, expected, op):
        """If *line* carries *statname*, assert its value per *op*.

        op -- 'GR' asserts value > expected (and records it in durationMap);
              'EQ' asserts equality; anything else fails the test.
        """
        if self.pool_prefix + statname in line:
            # Stat lines look like "name:value,..." -- take the value field.
            s = line.split(':')[1].split(',')[0]
            if 'GR' in op:
                self.assertGreater(int(s), expected)
                self.durationMap[statname] = int(s)
            elif 'EQ' in op:
                self.assertEqual(int(s), expected)
            else:
                self.assertTrue(False)

    def check_pool_stats(self, stats_dir):
        """Parse the stats file in *stats_dir* and verify each pool's stats.

        Each matching pool stat line bumps verifiedStats; 20 matching lines
        (presumably 5 stats x 4 pools, one stat per line) are expected --
        TODO confirm against the stats file format.
        """
        file_stat = os.path.join(stats_dir, self.stat_prefix + 'stats')
        verifiedStats = 0
        with open(file_stat, 'r') as f:
            for line in f.readlines():
                # Expect all east requests to fail because it
                # is running SleepServer
                if self.pool_prefix + 'east' in line:
                    self.verify_stat(line,
                        'east.requests.sum', self.count, 'EQ')
                    self.verify_stat(line,
                        'east.final_result_error.sum', self.count, 'EQ')
                    self.verify_stat(line,
                        'east.connections', 1, 'EQ')
                    self.verify_stat(line, 'east.duration_us.avg', 0, 'GR')
                    self.verify_stat(line,
                        'east.total_duration_us.avg', 0, 'GR')
                    verifiedStats += 1
                # west receives two key patterns (west and west.1), hence
                # double the requests and two connections.
                if self.pool_prefix + 'west' in line:
                    self.verify_stat(line,
                        'west.requests.sum', 2 * self.count, 'EQ')
                    self.verify_stat(line,
                        'west.connections', 2, 'EQ')
                    self.verify_stat(line,
                        'west.final_result_error.sum', 0, 'EQ')
                    self.verify_stat(line, 'west.duration_us.avg', 0, 'GR')
                    self.verify_stat(line,
                        'west.total_duration_us.avg', 0, 'GR')
                    verifiedStats += 1
                if self.pool_prefix + 'north' in line:
                    self.verify_stat(line,
                        'north.requests.sum', self.count, 'EQ')
                    self.verify_stat(line,
                        'north.connections', 1, 'EQ')
                    self.verify_stat(line,
                        'north.final_result_error.sum', 0, 'EQ')
                    self.verify_stat(line, 'north.duration_us.avg', 0, 'GR')
                    self.verify_stat(line,
                        'north.total_duration_us.avg', 0, 'EQ')
                    verifiedStats += 1
                if self.pool_prefix + 'south' in line:
                    self.verify_stat(line,
                        'south.requests.sum', self.count, 'EQ')
                    self.verify_stat(line,
                        'south.connections', 1, 'EQ')
                    self.verify_stat(line,
                        'south.final_result_error.sum', 0, 'EQ')
                    self.verify_stat(line, 'south.duration_us.avg', 0, 'GR')
                    self.verify_stat(line,
                        'south.total_duration_us.avg', 0, 'GR')
                    verifiedStats += 1
        # These checks must be done outside the for loop
        self.assertTrue(self.durationMap['east.total_duration_us.avg']
                        >= self.durationMap['east.duration_us.avg'])
        self.assertTrue(self.durationMap['west.total_duration_us.avg']
                        >= self.durationMap['west.duration_us.avg'])
        self.assertTrue(self.durationMap['south.total_duration_us.avg']
                        >= self.durationMap['south.duration_us.avg'])
        # for 'north' pool total_duration would be 0 and duration
        # would be greater than 0. so, the check is already done.
        self.assertTrue(verifiedStats == 20)

    def test_poolstats(self):
        """Drive count requests at each key pattern, then verify the stats."""
        n = 4 * self.count
        for i in range(0, n):
            m = i % 4
            if m == 0:
                key = 'twmemcache.CI.west:{}:|#|id=123'.format(i)
            elif m == 1:
                key = 'twmemcache.CI.west.1:{}:|#|id=123'.format(i)
            elif m == 2:
                key = 'twmemcache.CI.north:{}:|#|id=123'.format(i)
            else:
                key = 'twmemcache.CI.east:{}:|#|id=123'.format(i)
            self.mcrouter.get(key)
        self.assertTrue(self.mcrouter.stats()['cmd_get_count'] > 0)
        # Give mcrouter time to flush the stats file to disk.
        time.sleep(11)
        self.check_pool_stats(self.mcrouter.stats_dir)
| 42 | 78 | 0.53713 |
795d324db085e8f9d04dd79fa580eb7fb3056617 | 2,098 | py | Python | ludwig/datasets/yelp_review_polarity/__init__.py | hfurkanbozkurt/ludwig | bfcbd52237c73702764e733ede4351e0146394bd | [
"Apache-2.0"
] | 970 | 2020-12-17T15:09:20.000Z | 2022-03-31T22:58:03.000Z | ludwig/datasets/yelp_review_polarity/__init__.py | hfurkanbozkurt/ludwig | bfcbd52237c73702764e733ede4351e0146394bd | [
"Apache-2.0"
] | 503 | 2020-12-16T21:44:40.000Z | 2022-03-31T18:21:52.000Z | ludwig/datasets/yelp_review_polarity/__init__.py | hfurkanbozkurt/ludwig | bfcbd52237c73702764e733ede4351e0146394bd | [
"Apache-2.0"
] | 145 | 2020-12-18T07:38:30.000Z | 2022-03-29T19:05:08.000Z | #! /usr/bin/env python
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import pandas as pd
from ludwig.datasets.base_dataset import BaseDataset, DEFAULT_CACHE_LOCATION
from ludwig.datasets.mixins.download import TarDownloadMixin
from ludwig.datasets.mixins.load import CSVLoadMixin
from ludwig.datasets.mixins.process import MultifileJoinProcessMixin
def load(cache_dir=DEFAULT_CACHE_LOCATION, split=True):
    """Convenience wrapper: build a YelpPolarity dataset and load it."""
    return YelpPolarity(cache_dir=cache_dir).load(split=split)
class YelpPolarity(TarDownloadMixin, MultifileJoinProcessMixin, CSVLoadMixin, BaseDataset):
    """The Yelp Polarity dataset.

    Details:
        1,569,264 samples from the Yelp Dataset Challenge 2015. \
        This subset has 280,000 training samples and 19,000 test samples \
        in each polarity.

    Dataset source:
        Character-level Convolutional Networks for Text Classification
        Xiang Zhang et al., 2015
    """

    def __init__(self, cache_dir=DEFAULT_CACHE_LOCATION):
        super().__init__(dataset_name="yelp_polarity", cache_dir=cache_dir)

    def process_downloaded_dataset(self):
        # Join the raw per-split files first (the files carry no header row).
        super().process_downloaded_dataset(header=None)
        # Name the joined columns and rewrite the processed CSV in place.
        csv_path = os.path.join(self.processed_dataset_path, self.csv_filename)
        df = pd.read_csv(csv_path)
        df.columns = ["label", "text", "split"]
        df.to_csv(csv_path, index=False)
| 41.137255 | 102 | 0.71592 |
a2079f19fb6929abf3d2612ea84b8e6dbb490116 | 2,082 | py | Python | manager/get_operator_target_group_status.py | wja30/cortex_0.31 | 522ec6226526dee6b4f8c3ed67bdf2b913d25de3 | [
"Apache-2.0"
] | 8,260 | 2019-01-31T00:56:06.000Z | 2022-03-30T11:39:21.000Z | manager/get_operator_target_group_status.py | wja30/cortex_0.31 | 522ec6226526dee6b4f8c3ed67bdf2b913d25de3 | [
"Apache-2.0"
] | 1,196 | 2019-02-19T07:47:54.000Z | 2022-03-31T16:06:30.000Z | manager/get_operator_target_group_status.py | wja30/cortex_0.31 | 522ec6226526dee6b4f8c3ed67bdf2b913d25de3 | [
"Apache-2.0"
] | 664 | 2019-01-31T04:52:26.000Z | 2022-03-22T02:39:00.000Z | # Copyright 2021 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import os
import json
from helpers import get_operator_load_balancer
def get_operator_target_group_status():
    """Return "healthy" if the operator's HTTPS target group has a healthy
    target, otherwise a JSON dump of the target health descriptions."""
    cluster_name = os.environ["CORTEX_CLUSTER_NAME"]
    region = os.environ["CORTEX_REGION"]
    elbv2 = boto3.client("elbv2", region_name=region)

    load_balancer = get_operator_load_balancer(cluster_name, elbv2)
    target_group_arn = get_load_balancer_https_target_group_arn(
        load_balancer["LoadBalancerArn"], elbv2
    )
    return get_target_health(target_group_arn, elbv2)
def get_load_balancer_https_target_group_arn(load_balancer_arn, client_elbv2):
    """Return the target group ARN behind the load balancer's HTTPS (443)
    listener.

    Raises Exception if no listener on port 443 is found.
    """
    pages = client_elbv2.get_paginator("describe_listeners").paginate(
        LoadBalancerArn=load_balancer_arn
    )
    for page in pages:
        for listener in page["Listeners"]:
            if listener["Port"] != 443:
                continue
            return listener["DefaultActions"][0]["TargetGroupArn"]
    raise Exception(
        f"unable to find https target group for operator load balancer ({load_balancer_arn})"
    )
def get_target_health(target_group_arn, client_elbv2):
    """Return "healthy" if any target in the group is healthy; otherwise a
    JSON string with every target's health description."""
    descriptions = client_elbv2.describe_target_health(
        TargetGroupArn=target_group_arn
    )["TargetHealthDescriptions"]
    if any(d["TargetHealth"]["State"] == "healthy" for d in descriptions):
        return "healthy"
    return json.dumps(descriptions)
if __name__ == "__main__":
print(get_operator_target_group_status(), end="")
| 37.178571 | 97 | 0.761768 |
0f525616bc9ffc7558d400a00b265928c8665531 | 777 | py | Python | specklepy/tests/test_zernike.py | felixbosco/specklepy | 18f1d542f04cbe31fec8675791bf8350a09441c6 | [
"MIT"
] | null | null | null | specklepy/tests/test_zernike.py | felixbosco/specklepy | 18f1d542f04cbe31fec8675791bf8350a09441c6 | [
"MIT"
] | null | null | null | specklepy/tests/test_zernike.py | felixbosco/specklepy | 18f1d542f04cbe31fec8675791bf8350a09441c6 | [
"MIT"
] | 2 | 2020-12-07T14:21:13.000Z | 2021-02-26T13:52:52.000Z | import unittest
from specklepy.utils.zernike import Zernike
from specklepy.plotting.utils import imshow
class TestZernike(unittest.TestCase):
    """Visual smoke tests for the Zernike polynomial utilities.

    These tests only exercise the code paths and display the results with
    imshow; they make no numerical assertions.
    """

    def setUp(self):
        # Edge length (in pixels) of the generated maps.
        self.size = 256

    def test_init(self):
        # Display the polar coordinate grids the polynomials are evaluated on.
        z = Zernike()
        imshow(z.init_rho(self.size), title='Radius')
        imshow(z.init_phi(self.size), title='Azimuth')

    def test_init_from_vector(self):
        z = Zernike()
        # Only the last coefficient is non-zero, so a single polynomial term
        # is rendered. NOTE(review): ordering convention of the coefficient
        # vector is defined by Zernike.__call__ -- confirm there.
        coeffs = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
        # coeffs = np.random.rand((10))
        out = z(coeffs, size=128)
        imshow(out, title='Zernike polynomial {}'.format(coeffs))

    def test_init_from_keyword(self):
        z = Zernike()
        imshow(z.defocus(-1, 256), title='defocus')
if __name__ == "__main__":
unittest.main()
| 25.9 | 67 | 0.6139 |
6545a6cd729de8c77af499d8b753c71b26dc4e80 | 45 | py | Python | stummtaube/commands.py | gaetjen/stummtaube | 760466f361256cf1dbe3bf24995034e2e5385a9b | [
"MIT"
] | 2 | 2021-02-11T20:17:12.000Z | 2021-02-15T14:00:52.000Z | stummtaube/commands.py | gaetjen/stummtaube | 760466f361256cf1dbe3bf24995034e2e5385a9b | [
"MIT"
] | null | null | null | stummtaube/commands.py | gaetjen/stummtaube | 760466f361256cf1dbe3bf24995034e2e5385a9b | [
"MIT"
] | null | null | null | START = '!start'
# Chat command keywords recognised by the bot (START is defined just above).
JOIN = '!join'
END = '!end'  # normalised to single quotes for consistency with the other commands
| 11.25 | 16 | 0.533333 |
e2742f3d8f9ec37a70d94a3376a987d9e059a069 | 2,477 | py | Python | albertpointscompanion/api/migrations/0002_auto_20211107_0752.py | monocuro-oss/albert-points-companion | 49df51d8ef56f451627ea2a905d2ac97bb16008f | [
"MIT"
] | null | null | null | albertpointscompanion/api/migrations/0002_auto_20211107_0752.py | monocuro-oss/albert-points-companion | 49df51d8ef56f451627ea2a905d2ac97bb16008f | [
"MIT"
] | null | null | null | albertpointscompanion/api/migrations/0002_auto_20211107_0752.py | monocuro-oss/albert-points-companion | 49df51d8ef56f451627ea2a905d2ac97bb16008f | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-11-07 07:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration.

    Creates the HelperGroup, ItemCategory, Item, HelperTeam and Helper
    models and alters CommandCategory.name to a 50-character CharField.
    """

    dependencies = [
        ('api', '0001_initial'),
    ]

    operations = [
        # Group of helper teams (HelperTeam below references it).
        migrations.CreateModel(
            name='HelperGroup',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('description', models.TextField(blank=True)),
            ],
        ),
        # Category that each Item belongs to.
        migrations.CreateModel(
            name='ItemCategory',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('description', models.TextField(blank=True)),
            ],
        ),
        migrations.AlterField(
            model_name='commandcategory',
            name='name',
            field=models.CharField(max_length=50),
        ),
        # Item with a required category; deleting the category cascades.
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField()),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.itemcategory')),
            ],
        ),
        # Team inside a HelperGroup; deleting the group cascades.
        migrations.CreateModel(
            name='HelperTeam',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('description', models.TextField(blank=True)),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.helpergroup')),
            ],
        ),
        # Helper belonging to a HelperTeam; deleting the team cascades.
        migrations.CreateModel(
            name='Helper',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField()),
                ('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.helperteam')),
            ],
        ),
    ]
| 39.31746 | 117 | 0.559144 |
feeb8752e67748ea514b0d6d2ad6710b7c8a2a21 | 18,798 | py | Python | train.py | bhaprayan/keras-yolo3 | 9ad5ff761f3baf9042a17327c0149591ce1307c8 | [
"MIT"
] | null | null | null | train.py | bhaprayan/keras-yolo3 | 9ad5ff761f3baf9042a17327c0149591ce1307c8 | [
"MIT"
] | null | null | null | train.py | bhaprayan/keras-yolo3 | 9ad5ff761f3baf9042a17327c0149591ce1307c8 | [
"MIT"
] | null | null | null | """
Retrain the YOLO model for your own dataset.
"""
import numpy as np
import keras.backend as K
import tensorflow as tf
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
def _main():
    """Two-stage YOLOv3 training: frozen-body warm-up, then full fine-tune.

    NOTE(review): create_model / create_tiny_model / data_generator_wrapper
    are not visible in this chunk -- presumably defined further down the
    file, as in the upstream keras-yolo3 train.py; confirm.
    """
    annotation_path = 'train_nuro.txt'
    log_dir = 'logs/001/'
    classes_path = 'model_data/classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416,416) # multiple of 32, hw

    # Six anchors means tiny-YOLO; nine means full YOLOv3.
    is_tiny_version = len(anchors)==6 # default setting
    if is_tiny_version:
        model = create_tiny_model(input_shape, anchors, num_classes,
            freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
    else:
        model = create_model(input_shape, anchors, num_classes,
            freeze_body=2, weights_path='model_data/darknet53_weights.h5') # make sure you know what you freeze

    # Keras callbacks: TensorBoard logging, periodic best-only checkpoints,
    # LR decay on plateau and early stopping (the last two are only used in
    # the fine-tuning stage below).
    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)

    # NOTE(review): val_split = 0.99 leaves only 1% of the annotations for
    # training -- confirm this is intentional and not a typo for 0.1.
    val_split = 0.99
    with open(annotation_path) as f:
        lines = f.readlines()
    # Fixed seed so the train/val split is reproducible across runs.
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val

    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
    if True:
        model.compile(optimizer=Adam(lr=1e-3), loss={
            # use custom yolo_loss Lambda layer.
            'yolo_loss': lambda y_true, y_pred: y_pred})

        batch_size = 32
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
                steps_per_epoch=max(1, num_train//batch_size),
                validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
                validation_steps=max(1, num_val//batch_size),
                epochs=50,
                initial_epoch=0,
                callbacks=[logging, checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if True:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
        print('Unfreeze all of the layers.')

        batch_size = 32 # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
            steps_per_epoch=max(1, num_train//batch_size),
            validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
            validation_steps=max(1, num_val//batch_size),
            epochs=100,
            initial_epoch=50,
            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_final.h5')

    # Further training if needed.
def get_classes(classes_path):
    """Load class names from *classes_path*, one name per line."""
    with open(classes_path) as handle:
        raw_lines = handle.readlines()
    return [name.strip() for name in raw_lines]
def get_anchors(anchors_path):
    """Load anchor sizes from a comma-separated file as an (N, 2) float array."""
    with open(anchors_path) as handle:
        first_line = handle.readline()
    values = [float(v) for v in first_line.split(',')]
    return np.array(values).reshape(-1, 2)
def tf_print(op, tensors, message=None):
    """Wrap *op* so that evaluating it also prints each tensor in *tensors*."""
    def _echo(value):
        # Side effect only: write the message, then pass the value through.
        sys.stdout.write(message + " %s\n" % value)
        return value

    print_ops = []
    for tensor in tensors:
        print_ops.append(tf.py_func(_echo, [tensor], tensor.dtype))
    # Make the print ops run whenever `op` is evaluated.
    with tf.control_dependencies(print_ops):
        op = tf.identity(op)
    return op
# --- Named wrappers around yolo_loss() -------------------------------------
# Keras Lambda layers are easier to (de)serialize with plain named module
# functions, so each entry of the yolo_loss() result dict gets its own
# top-level accessor instead of an anonymous lambda.

def model_loss_lambda(*args, **kwargs):
    """Scalar training loss from the yolo_loss dictionary."""
    loss_dict = yolo_loss(*args, **kwargs)
    return loss_dict['loss']


def model_grid_loss_lambda(*args, **kwargs):
    """Scalar training loss from the per-grid yolo_loss dictionary."""
    return yolo_loss(*args, **kwargs)['loss']


def model_grid_loss_xy_0_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['xy_loss_grid_0']


def model_grid_loss_xy_1_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['xy_loss_grid_1']


def model_grid_loss_xy_2_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['xy_loss_grid_2']


def model_grid_loss_wh_0_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['wh_loss_grid_0']


def model_grid_loss_wh_1_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['wh_loss_grid_1']


def model_grid_loss_wh_2_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['wh_loss_grid_2']


def model_grid_loss_class_0_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['class_loss_grid_0']


def model_grid_loss_class_1_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['class_loss_grid_1']


def model_grid_loss_class_2_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['class_loss_grid_2']


def model_grid_loss_confidence_0_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['confidence_loss_grid_0']


def model_grid_loss_confidence_1_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['confidence_loss_grid_1']


def model_grid_loss_confidence_2_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['confidence_loss_grid_2']


def model_output_0_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['yolo_output_0']


def model_output_1_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['yolo_output_1']


def model_output_2_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['yolo_output_2']


def model_object_mask_0_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['object_mask_0']


def model_object_mask_1_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['object_mask_1']


def model_object_mask_2_lambda(*args, **kwargs):
    return yolo_loss(*args, **kwargs)['object_mask_2']
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
            weights_path='model_data/yolo_weights.h5', grid_loss=False):
    '''Create the YOLOv3 training model.

    Args:
        input_shape: (height, width) of the network input; both must be
            divisible by 32.
        anchors: array of anchor boxes; 3 anchors per output grid.
        num_classes: number of object classes.
        load_pretrained: load weights from *weights_path* (by name).
        freeze_body: 1 freezes the darknet53 body (first 185 layers),
            2 freezes everything except the 3 output layers, other values
            freeze nothing.
        weights_path: path of the pretrained weight file.
        grid_loss: use the per-grid loss wrapper instead of the plain one.

    Returns:
        A Keras Model whose single output is the scalar yolo loss.
    '''
    K.clear_session()  # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)

    # One y_true input per output grid; strides are 32/16/8.
    y_true = [Input(shape=(h//{0: 32, 1: 16, 2: 8}[l], w//{0: 32, 1: 16, 2: 8}[l],
        num_anchors//3, num_classes+5)) for l in range(3)]

    model_body = yolo_body(image_input, num_anchors//3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (185, len(model_body.layers)-3)[freeze_body-1]
            for i in range(num):
                model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))

    # The original duplicated the whole Lambda/Model construction in both
    # branches; they only differed in which loss wrapper was used.
    loss_fn = model_grid_loss_lambda if grid_loss else model_loss_lambda
    model_loss = Lambda(loss_fn, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
        [*model_body.output, *y_true])
    return Model([model_body.input, *y_true], model_loss)
def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
            weights_path='model_data/tiny_yolo_weights.h5'):
    '''Create the training model for Tiny YOLOv3 (two output grids).'''
    K.clear_session()  # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)

    # One y_true input per output grid; strides are 32 and 16.
    strides = {0: 32, 1: 16}
    y_true = []
    for layer_idx in range(2):
        y_true.append(Input(shape=(h // strides[layer_idx], w // strides[layer_idx],
                                   num_anchors // 2, num_classes + 5)))

    model_body = tiny_yolo_body(image_input, num_anchors // 2, num_classes)
    print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze the darknet body or freeze all but 2 output layers.
            num = (20, len(model_body.layers) - 2)[freeze_body - 1]
            for i in range(num):
                model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))

    model_loss = Lambda(model_loss_lambda, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
        [*model_body.output, *y_true])
    return Model([model_body.input, *y_true], model_loss)
def create_locloss_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
            weights_path='model_data/yolo_weights.h5', grid_loss=False):
    '''Create a training model that also exposes per-grid loss components.

    With ``grid_loss=True`` the returned Model has 19 outputs, in this order:
    xy/wh/class/confidence losses for grids 0..2 (12 tensors), the total
    loss, the three raw yolo outputs, and the three object masks.  With
    ``grid_loss=False`` it behaves like :func:`create_model`.
    '''
    K.clear_session()  # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)

    # One y_true input per output grid; strides are 32/16/8.
    y_true = [Input(shape=(h//{0: 32, 1: 16, 2: 8}[l], w//{0: 32, 1: 16, 2: 8}[l],
        num_anchors//3, num_classes+5)) for l in range(3)]

    model_body = yolo_body(image_input, num_anchors//3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (185, len(model_body.layers)-3)[freeze_body-1]
            for i in range(num):
                model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))

    # All 19 diagnostic layers shared the same arguments and inputs; the
    # original spelled each Lambda out by hand (~60 duplicated lines).
    lambda_arguments = {'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5}
    lambda_inputs = [*model_body.output, *y_true]

    def _wrap(fn, name, shape=(3,)):
        # Build one diagnostic Lambda layer over model outputs + targets.
        return Lambda(fn, output_shape=shape, name=name,
                      arguments=lambda_arguments)(lambda_inputs)

    if grid_loss:
        model_loss_total = _wrap(model_grid_loss_lambda, 'yolo_loss', shape=(1,))
        xy_fns = [model_grid_loss_xy_0_lambda, model_grid_loss_xy_1_lambda,
                  model_grid_loss_xy_2_lambda]
        wh_fns = [model_grid_loss_wh_0_lambda, model_grid_loss_wh_1_lambda,
                  model_grid_loss_wh_2_lambda]
        class_fns = [model_grid_loss_class_0_lambda, model_grid_loss_class_1_lambda,
                     model_grid_loss_class_2_lambda]
        confidence_fns = [model_grid_loss_confidence_0_lambda,
                          model_grid_loss_confidence_1_lambda,
                          model_grid_loss_confidence_2_lambda]
        output_fns = [model_output_0_lambda, model_output_1_lambda, model_output_2_lambda]
        mask_fns = [model_object_mask_0_lambda, model_object_mask_1_lambda,
                    model_object_mask_2_lambda]

        # Output order matches the original hand-written Model(...) call.
        outputs = []
        for g in range(3):
            outputs += [
                _wrap(xy_fns[g], 'yolo_loss_xy_{}'.format(g)),
                _wrap(wh_fns[g], 'yolo_loss_wh_{}'.format(g)),
                _wrap(class_fns[g], 'yolo_loss_class_{}'.format(g)),
                _wrap(confidence_fns[g], 'yolo_loss_confidence_{}'.format(g)),
            ]
        outputs.append(model_loss_total)
        outputs += [_wrap(output_fns[g], 'yolo_output_{}'.format(g)) for g in range(3)]
        outputs += [_wrap(mask_fns[g], 'yolo_obj_mask_{}'.format(g)) for g in range(3)]
        model = Model([model_body.input, *y_true], outputs)
    else:
        model = Model([model_body.input, *y_true],
                      _wrap(model_loss_lambda, 'yolo_loss', shape=(1,)))
    return model
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
    '''Infinite batch generator for fit_generator.

    Yields ``([image_data, *y_true], dummy_targets)`` tuples; the dummy zero
    targets exist because the real loss is computed inside the model graph
    by the ``yolo_loss`` Lambda layer.

    Fixes over the original: the per-batch debug ``print`` (which spammed
    stdout on every step) and the dead ``if i == 0: pass`` placeholder were
    removed.
    '''
    n = len(annotation_lines)
    i = 0
    while True:
        image_data = []
        box_data = []
        batch_data = []
        for _ in range(batch_size):
            # NOTE: shuffling at the start of each pass is intentionally
            # disabled (was commented out upstream) so batch composition
            # stays deterministic.
            image, box = get_random_data(annotation_lines[i], input_shape, random=False)
            image_data.append(image)
            box_data.append(box)
            batch_data.append(annotation_lines[i])
            i = (i + 1) % n
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes, batch_data)
        yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
    """Validate arguments, then delegate to data_generator (None when unusable)."""
    if len(annotation_lines) == 0 or batch_size <= 0:
        return None
    return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
# Entry point: run training only when executed as a script, not on import.
if __name__ == '__main__':
    _main()
| 45.84878 | 137 | 0.675551 |
cf2bdc75780c0bff9555234afa4cf1672646eff6 | 5,838 | py | Python | files/bin/ivr.py | torao/rpi-recorder | 978c0ec635196caad6b80e4b11c0bfe3c41cb8aa | [
"MIT"
] | null | null | null | files/bin/ivr.py | torao/rpi-recorder | 978c0ec635196caad6b80e4b11c0bfe3c41cb8aa | [
"MIT"
] | null | null | null | files/bin/ivr.py | torao/rpi-recorder | 978c0ec635196caad6b80e4b11c0bfe3c41cb8aa | [
"MIT"
] | null | null | null | import datetime
import fcntl
import os
import re
import subprocess
import sys
import time
# Default on-screen caption (telop) text used when no other text is set.
DEFAULT_TELOP = "iVR 1.0"
def file_extension(file):
    """Return the extension (including the leading dot) of *file*'s base name."""
    base_name = os.path.basename(file)
    return os.path.splitext(base_name)[1]
# Suffixes for binary (1024-based) magnitudes, in ascending order.
AUXILIARY_UNITS = ["", "k", "M", "G", "T", "P"]


class TermException(Exception):
    """Raised when a SIGTERM/SIGINT is detected."""


def term_handler(signum, frame):
    """Signal handler that converts SIGTERM/SIGINT into a TermException.

    Install with: signal.signal(signal.SIGTERM, ivr.term_handler)
    """
    raise TermException("")
# Format an integer with a binary-magnitude auxiliary unit (k/M/G/T/P).
def with_aux_unit(num):
    """Return *num* as a string with a kMGTP suffix (1024-based)."""
    unit_index = 0
    # Divide by 1024 until the value is small enough or units run out.
    while num > 1024 and unit_index + 1 < len(AUXILIARY_UNITS):
        num /= 1024
        unit_index += 1
    unit = AUXILIARY_UNITS[unit_index]
    if unit == "":
        return "{:,d}".format(num)
    return "{:,.1f}{}".format(num, unit)
# Parse a number string that may carry a kMGTP auxiliary unit.
# A ValueError propagates (via float()) when the string cannot be parsed.
def without_aux_unit(num):
    """Return the numeric value of *num*, expanding any kMGTP suffix."""
    if len(num) == 0 or num[-1].isdigit():
        return float(num)
    suffix = num[-1].upper()
    multi = 1024
    for unit in AUXILIARY_UNITS[1:]:
        if suffix == unit.upper():
            num = num[:-1]
            break
        multi *= 1024
    return float(num.replace(",", "")) * multi
# footage-<6-digit sequence>-<YYYYMMDDHH>.<ext>; groups: seq, Y, M, D, H
FOOTAGE_FILE_PATTERN = r"footage-(\d{6})-(\d{4})(\d{2})(\d{2})(\d{2})\.[a-zA-Z0-9]+"
# tracklog-<YYYYMMDD>.gpx; groups: Y, M, D
TRACKLOG_FILE_PATTERN = r"tracklog-(\d{4})(\d{2})(\d{2})\.gpx"
# ivr-<YYYYMMDD>.log; groups: Y, M, D
IVRLOG_FILE_PATTERN = r"ivr-(\d{4})(\d{2})(\d{2})\.log"
# Generate a footage file name from the specified date and sequence number.
def footage_file_name(date, sequence, extension):
    """Build 'footage-<seq%1e6:06d>-<YYYYMMDDHH>.<ext>' for *date*/*sequence*."""
    return "footage-{:06d}-{}.{}".format(
        sequence % 1000000, date.strftime("%Y%m%d%H"), extension)
# Generate a track-log file name from the specified date and sequence number.
def tracklog_file_name(date, sequence):
    """Build 'tracklog-<YYYYMMDD>[.<seq>].gpx'; sequence 0 omits the suffix."""
    suffix = ".%d" % sequence if sequence != 0 else ""
    return "tracklog-%s%s.gpx" % (date.strftime("%Y%m%d"), suffix)
# Perform an atomic update to the specified file.
def write(file, text):
    """Atomically replace *file* with *text*.

    The text is written to a unique temporary file next to the target and
    then moved into place with os.rename(); on POSIX the rename is atomic
    when both paths are on the same filesystem.

    Raises FileNotFoundError when the target directory stays missing
    (e.g. an unmounted drive) for more than ~3 seconds.
    """
    i = 0
    file_not_found_error = 0
    while True:
        seq = "" if i == 0 else ".{}".format(i)
        temp_file = "{}{}.tmp".format(file, seq)
        try:
            # mode="x" guarantees we never clobber a concurrent writer's
            # temp file; a collision raises FileExistsError instead.
            with open(temp_file, mode="x") as f:
                f.write(text)
                f.flush()
        except FileNotFoundError:
            # directory has not been mounted yet?
            if file_not_found_error * 0.25 > 3:
                # BUG FIX: this called ivr.log(...), but this module *is*
                # ivr and never imports itself, so it raised NameError.
                log("ERROR: FileNotFoundError was repeated: {}".format(file))
                raise
            time.sleep(0.25)
            file_not_found_error += 1
            i += 1
        except FileExistsError:
            # Temp name taken by another writer; retry with the next suffix.
            i += 1
        else:
            os.rename(temp_file, file)
            break
# Write the process ID to the PID file.
def save_pid(prog=None, pid=None):
    """Record *pid* (default: this process) in <temp_dir>/<prog>.pid and return the path."""
    program = os.path.basename(sys.argv[0]) if prog is None else prog
    process_id = os.getpid() if pid is None else pid
    pid_file = os.path.join(temp_dir(), "{}.pid".format(program))
    write(pid_file, "{}".format(process_id))
    return pid_file
# Remove the PID file for the given program, if it exists.
def remove_pid(prog=None):
    """Delete <temp_dir>/<prog>.pid when present; *prog* defaults to this script's name."""
    program = os.path.basename(sys.argv[0]) if prog is None else prog
    pid_file = os.path.join(temp_dir(), "{}.pid".format(program))
    if os.path.isfile(pid_file):
        os.remove(pid_file)
    return
# Execute the command and return its standard output. If the execution fails, it returns None.
# Pipes and redirects are not available because this isn't a shell invocation.
def execute(cmd):
    """Run *cmd* (an argument list, no shell) and return stdout decoded as UTF-8."""
    result = subprocess.run(cmd, stdin=subprocess.DEVNULL, capture_output=True)
    if result.returncode == 0:
        return result.stdout.decode("utf-8")
    log(
        "ERROR: failed to execute: {} => {}\n{}".format(
            cmd, result.returncode, result.stderr
        )
    )
    return None
# Notify the user of the specified text.
def beep(speech):
    """Fire-and-forget audio notification: optional chime, then speech synthesis."""
    parts = []
    announce = os.path.join(bin_dir(), "announce.wav")
    if os.path.isfile(announce):
        # Play the chime first when the wav asset is installed.
        parts.append("aplay {}; ".format(announce))
    else:
        # No chime available: prefix the spoken text instead.
        speech = "notice, {}".format(speech)
    if len(speech) != 0:
        parts.append('espeak-ng -p 30 -g 11 "{}"'.format(speech))
    cmd = "".join(parts)
    # Popen (not run): never block the caller on audio playback.
    subprocess.Popen(
        cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True
    )
# Append the specified message to today's log file.
def log(msg):
    """Append *msg* to <data_dir>/ivr-YYYYMMDD.log, safe across processes.

    An exclusive flock around the append keeps concurrent writers from
    interleaving partial lines.
    """
    # Take a single timestamp: the original sampled the clock twice, so
    # the file name and the line prefix could disagree around midnight.
    now = datetime.datetime.now()
    log_file = "ivr-%s.log" % now.strftime("%Y%m%d")
    tm = now.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
    program = os.path.basename(sys.argv[0])
    message = "[{}] {} - {}\n".format(tm, program, msg)
    file = os.path.join(data_dir(), log_file)
    # write log with exclusive lock
    with open(file, mode="a") as f:
        fcntl.flock(f.fileno(), fcntl.LOCK_EX)
        f.write(message)
        fcntl.flock(f.fileno(), fcntl.LOCK_UN)
    return
# Lazily-resolved IVR home directory (parent of the running script's dir).
_home_directory = None


def home_dir():
    """Return (and cache) the absolute IVR home directory."""
    global _home_directory
    if _home_directory is None:
        script = os.path.abspath(sys.argv[0])
        parent = os.path.join(os.path.dirname(script), "..")
        _home_directory = os.path.abspath(parent)
    return _home_directory
def bin_dir():
    """Directory holding IVR executables and assets."""
    return os.path.join(home_dir(), "bin")


def temp_dir():
    """Scratch directory; files here may not survive a reboot."""
    return os.path.join(home_dir(), "tmp")


def data_dir():
    """Directory for persistent data (logs, footage, track logs)."""
    return os.path.join(home_dir(), "data")


def telop_file():
    """Text file whose content is overlaid on the footage."""
    return os.path.join(temp_dir(), "telop.txt")
| 29.484848 | 96 | 0.620932 |
e16d91ef723d58214cf24f204d3b3aad94ede0d0 | 29,611 | py | Python | fairseq/data/iterators.py | Mu-Y/fairseq | 1ed8b8d73f76611ad39675a647449d29b1050179 | [
"MIT"
] | 1 | 2021-11-26T00:42:23.000Z | 2021-11-26T00:42:23.000Z | fairseq/data/iterators.py | Mu-Y/fairseq | 1ed8b8d73f76611ad39675a647449d29b1050179 | [
"MIT"
] | null | null | null | fairseq/data/iterators.py | Mu-Y/fairseq | 1ed8b8d73f76611ad39675a647449d29b1050179 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import math
import operator
import os
import queue
import time
from threading import Thread
import numpy as np
import torch
from fairseq.data import data_utils
# Module-level logger, named after this module's import path.
logger = logging.getLogger(__name__)

# Object used by _background_consumer to signal the source is exhausted
# to the main thread.
_sentinel = object()
class CountingIterator(object):
    """Wrapper around an iterable that maintains the iteration count.

    Args:
        iterable (iterable): iterable to wrap
        start (int): starting iteration count. Note that this doesn't
            actually advance the iterator.
        total (int): override the iterator length returned by ``__len__``.
            This can be used to truncate *iterator*.

    Attributes:
        n (int): number of elements consumed from this iterator
    """

    def __init__(self, iterable, start=None, total=None):
        self._itr = iter(iterable)
        self.n = start or getattr(iterable, "n", 0)
        self.total = total if total is not None else self.n + len(iterable)

    def __len__(self):
        return self.total

    def __iter__(self):
        return self

    def __next__(self):
        if not self.has_next():
            raise StopIteration
        try:
            x = next(self._itr)
        except StopIteration:
            # BUG FIX: the second half of this message was a plain string,
            # so "{self.n}" appeared literally in the error text instead of
            # the actual position.
            raise IndexError(
                f"Iterator expected to have length {self.total}, "
                f"but exhausted at position {self.n}."
            )
        self.n += 1
        return x

    def has_next(self):
        """Whether the iterator has been exhausted."""
        return self.n < self.total

    def skip(self, n):
        """Fast-forward the iterator by skipping n elements."""
        for _ in range(n):
            next(self)
        return self

    def take(self, n):
        """Truncate the iterator to n elements at most."""
        self.total = min(self.total, n)
        # Propagate this change to the underlying iterator
        if hasattr(self._itr, "take"):
            self._itr.take(max(n - self.n, 0))
        return self
class EpochBatchIterating(object):
    """Abstract interface for iterators consumed one epoch at a time."""

    def __len__(self) -> int:
        raise NotImplementedError

    @property
    def next_epoch_idx(self):
        """Index of the epoch that the next next_epoch_itr() call will produce."""
        raise NotImplementedError

    def next_epoch_itr(
        self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True
    ):
        """Return a new iterator over the dataset.

        Args:
            shuffle (bool, optional): shuffle batches before returning the
                iterator (default: True).
            fix_batches_to_gpus (bool, optional): ensure that batches are always
                allocated to the same shards across epochs. Requires
                that :attr:`dataset` supports prefetching (default: False).
            set_dataset_epoch (bool, optional): update the wrapped Dataset with
                the new epoch number (default: True).
        """
        raise NotImplementedError

    def end_of_epoch(self) -> bool:
        """Returns whether the most recent epoch iterator has been exhausted"""
        raise NotImplementedError

    @property
    def iterations_in_epoch(self) -> int:
        """The number of consumed batches in the current epoch."""
        raise NotImplementedError

    def state_dict(self):
        """Returns a dictionary containing a whole state of the iterator."""
        raise NotImplementedError

    def load_state_dict(self, state_dict):
        """Copies the state of the iterator from the given *state_dict*."""
        raise NotImplementedError

    @property
    def first_batch(self):
        # Subclasses that can cheaply materialize the first batch override this.
        return "DUMMY"
class StreamingEpochBatchIterator(EpochBatchIterating):
    """A streaming-style iterator over a :class:`torch.utils.data.IterableDataset`.

    Args:
        dataset (~torch.utils.data.Dataset): dataset from which to load the data
        max_sentences: batch size
        collate_fn (callable): merges a list of samples to form a mini-batch
        num_workers (int, optional): how many subprocesses to use for data
            loading. 0 means the data will be loaded in the main process
            (default: 0).
        epoch (int, optional): the epoch to start the iterator from
            (default: 1).
        buffer_size (int, optional): the number of batches to keep ready in the
            queue. Helps speeding up dataloading. When buffer_size is zero, the
            default torch.utils.data.DataLoader preloading is used.
        timeout (int, optional): if positive, the timeout value for collecting a batch
            from workers. Should always be non-negative (default: ``0``).
    """

    def __init__(
        self,
        dataset,
        max_sentences=1,
        collate_fn=None,
        epoch=1,
        num_workers=0,
        buffer_size=0,
        timeout=0,
    ):
        assert isinstance(dataset, torch.utils.data.IterableDataset)
        self.dataset = dataset
        self.max_sentences = max_sentences
        self.collate_fn = collate_fn
        self.epoch = max(epoch, 1)  # we use 1-based indexing for epochs
        self.num_workers = num_workers
        # This upper limit here is to prevent people from abusing this feature
        # in a shared computing environment.
        self.buffer_size = min(buffer_size, 20)
        self.timeout = timeout

        self._current_epoch_iterator = None

    @property
    def next_epoch_idx(self):
        """Return the epoch index after *next_epoch_itr* is called."""
        if self._current_epoch_iterator is not None and self.end_of_epoch():
            return self.epoch + 1
        else:
            return self.epoch

    def next_epoch_itr(
        self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True
    ):
        # Advance the epoch counter and build a fresh iterator; the dataset
        # is told the new epoch so it can re-seed its own sampling.
        self.epoch = self.next_epoch_idx
        if set_dataset_epoch and hasattr(self.dataset, "set_epoch"):
            self.dataset.set_epoch(self.epoch)
        self._current_epoch_iterator = self._get_iterator_for_epoch(self.epoch, shuffle)
        return self._current_epoch_iterator

    def end_of_epoch(self) -> bool:
        # True once the current epoch's iterator has been fully consumed.
        return not self._current_epoch_iterator.has_next()

    @property
    def iterations_in_epoch(self) -> int:
        # Number of batches consumed so far in the current epoch (0 if none
        # has been started yet).
        if self._current_epoch_iterator is not None:
            return self._current_epoch_iterator.n
        return 0

    def state_dict(self):
        # Only the epoch number is persisted; the in-epoch position is not
        # resumable for streaming datasets.
        return {
            "epoch": self.epoch,
        }

    def load_state_dict(self, state_dict):
        self.epoch = state_dict["epoch"]

    def _get_iterator_for_epoch(self, epoch, shuffle, offset=0):
        # NOTE(review): `shuffle` is accepted but unused here — an
        # IterableDataset controls its own ordering. Confirm intentional.
        if self.num_workers > 0:
            os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning"

        # Create data loader
        worker_init_fn = getattr(self.dataset, "worker_init_fn", None)
        itr = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=self.max_sentences,
            collate_fn=self.collate_fn,
            num_workers=self.num_workers,
            timeout=self.timeout,
            worker_init_fn=worker_init_fn,
            pin_memory=True,
        )

        # Wrap with a BufferedIterator if needed
        if self.buffer_size > 0:
            itr = BufferedIterator(self.buffer_size, itr)

        # Wrap with CountingIterator
        itr = CountingIterator(itr, start=offset)
        return itr
class EpochBatchIterator(EpochBatchIterating):
"""A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.
Compared to :class:`torch.utils.data.DataLoader`, this iterator:
- can be reused across multiple epochs with the :func:`next_epoch_itr`
method (optionally shuffled between epochs)
- can be serialized/deserialized with the :func:`state_dict` and
:func:`load_state_dict` methods
- supports sharding with the *num_shards* and *shard_id* arguments
Args:
dataset (~torch.utils.data.Dataset): dataset from which to load the data
collate_fn (callable): merges a list of samples to form a mini-batch
batch_sampler (~torch.utils.data.Sampler or a callable): an iterator over batches of
indices, or a callable to create such an iterator (~torch.utils.data.Sampler).
A callable batch_sampler will be called for each epoch to enable per epoch dynamic
batch iterators defined by this callable batch_sampler.
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
buffer_size (int, optional): the number of batches to keep ready in the
queue. Helps speeding up dataloading. When buffer_size is zero, the
default torch.utils.data.DataLoader preloading is used.
timeout (int, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative (default: ``0``).
disable_shuffling (bool, optional): force disable shuffling
(default: ``False``).
skip_remainder_batch (bool, optional): if set, discard the last batch in an epoch
for the sake of training stability, as the last batch is usually smaller than
local_batch_size * distributed_word_size (default: ``False``).
grouped_shuffling (bool, optional): enable shuffling batches in groups
of num_shards. Ensures that each GPU receives similar length sequences when
batches are sorted by length.
"""
def __init__(
self,
dataset,
collate_fn,
batch_sampler,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
buffer_size=0,
timeout=0,
disable_shuffling=False,
skip_remainder_batch=False,
grouped_shuffling=False,
):
assert isinstance(dataset, torch.utils.data.Dataset)
self.dataset = dataset
self.collate_fn = collate_fn
self.batch_sampler = batch_sampler
self._frozen_batches = (
tuple(batch_sampler) if not callable(batch_sampler) else None
)
self.seed = seed
self.num_shards = num_shards
self.shard_id = shard_id
self.num_workers = num_workers
# This upper limit here is to prevent people from abusing this feature
# in a shared computing environment.
self.buffer_size = min(buffer_size, 20)
self.timeout = timeout
self.disable_shuffling = disable_shuffling
self.skip_remainder_batch = skip_remainder_batch
self.grouped_shuffling = grouped_shuffling
self.epoch = max(epoch, 1) # we use 1-based indexing for epochs
self.shuffle = not disable_shuffling
self._cur_epoch_itr = None
self._next_epoch_itr = None
self._supports_prefetch = getattr(dataset, "supports_prefetch", False)
@property
def frozen_batches(self):
if self._frozen_batches is None:
self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch))
return self._frozen_batches
@property
def first_batch(self):
if len(self.frozen_batches) == 0:
raise Exception(
"The dataset is empty. This could indicate "
"that all elements in the dataset have been skipped. "
"Try increasing the max number of allowed tokens or using "
"a larger dataset."
)
if getattr(self.dataset, "supports_fetch_outside_dataloader", True):
return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0]])
else:
return "DUMMY"
def __len__(self):
return int(math.ceil(len(self.frozen_batches) / float(self.num_shards)))
@property
def n(self):
return self.iterations_in_epoch
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
if self._next_epoch_itr is not None:
return self.epoch
elif self._cur_epoch_itr is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def next_epoch_itr(
self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True
):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator (default: True).
fix_batches_to_gpus (bool, optional): ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching (default: False).
set_dataset_epoch (bool, optional): update the wrapped Dataset with
the new epoch number (default: True).
"""
if self.disable_shuffling:
shuffle = False
prev_epoch = self.epoch
self.epoch = self.next_epoch_idx
if set_dataset_epoch and hasattr(self.dataset, "set_epoch"):
self.dataset.set_epoch(self.epoch)
if self._next_epoch_itr is not None:
self._cur_epoch_itr = self._next_epoch_itr
self._next_epoch_itr = None
else:
if callable(self.batch_sampler) and prev_epoch != self.epoch:
# reset _frozen_batches to refresh the next epoch
self._frozen_batches = None
self._cur_epoch_itr = self._get_iterator_for_epoch(
self.epoch,
shuffle,
fix_batches_to_gpus=fix_batches_to_gpus,
)
self.shuffle = shuffle
return self._cur_epoch_itr
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
return not self._cur_epoch_itr.has_next()
@property
def iterations_in_epoch(self):
"""The number of consumed batches in the current epoch."""
if self._cur_epoch_itr is not None:
return self._cur_epoch_itr.n
elif self._next_epoch_itr is not None:
return self._next_epoch_itr.n
return 0
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
if self.end_of_epoch():
epoch = self.epoch + 1
iter_in_epoch = 0
else:
epoch = self.epoch
iter_in_epoch = self.iterations_in_epoch
return {
"version": 2,
"epoch": epoch,
"iterations_in_epoch": iter_in_epoch,
"shuffle": self.shuffle,
}
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
self.epoch = state_dict["epoch"]
itr_pos = state_dict.get("iterations_in_epoch", 0)
version = state_dict.get("version", 1)
if itr_pos > 0:
# fast-forward epoch iterator
self._next_epoch_itr = self._get_iterator_for_epoch(
self.epoch,
shuffle=state_dict.get("shuffle", True),
offset=itr_pos,
)
if self._next_epoch_itr is None:
if version == 1:
# legacy behavior: we finished the epoch, increment epoch counter
self.epoch += 1
else:
raise RuntimeError(
"Cannot resume training due to dataloader mismatch, please "
"report this to the fairseq developers. You can relaunch "
"training with `--reset-dataloader` and it should work."
)
else:
self._next_epoch_itr = None
def _get_iterator_for_epoch(
    self, epoch, shuffle, fix_batches_to_gpus=False, offset=0
):
    """Build the (optionally shuffled, sharded, offset) iterator for *epoch*.

    Returns a CountingIterator over this shard's batches, or None when
    *offset* already points past the end of the epoch (nothing to resume).
    """

    def shuffle_batches(batches, seed):
        # Deterministic shuffle: the same seed yields the same order on
        # every worker/shard, so all ranks agree on the permutation.
        with data_utils.numpy_seed(seed):
            if self.grouped_shuffling:
                # Shuffle groups of `num_shards` consecutive batches so that
                # after sharding each rank still sees similarly-sized batches.
                grouped_batches = [
                    batches[(i * self.num_shards) : ((i + 1) * self.num_shards)]
                    for i in range((len(batches) // self.num_shards))
                ]
                np.random.shuffle(grouped_batches)
                batches = list(itertools.chain(*grouped_batches))
            else:
                np.random.shuffle(batches)

        return batches

    if self._supports_prefetch:
        batches = self.frozen_batches

        if shuffle and not fix_batches_to_gpus:
            # Shuffle *before* sharding: batches may move between GPUs
            # from one epoch to the next.
            batches = shuffle_batches(list(batches), self.seed + epoch)

        batches = list(
            ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])
        )
        self.dataset.prefetch([i for s in batches for i in s])

        if shuffle and fix_batches_to_gpus:
            # Shuffle *after* sharding (seed includes shard_id): each GPU
            # keeps the same set of batches every epoch, only their order
            # changes.
            batches = shuffle_batches(batches, self.seed + epoch + self.shard_id)
    else:
        if shuffle:
            batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch)
        else:
            batches = self.frozen_batches
        batches = list(
            ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])
        )

    if offset > 0 and offset >= len(batches):
        # Resuming past the end of this epoch: tell the caller to advance.
        return None

    if self.num_workers > 0:
        # Silence a noisy multiprocessing warning from worker shutdown.
        os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning"

    # Create data loader
    itr = torch.utils.data.DataLoader(
        self.dataset,
        collate_fn=self.collate_fn,
        batch_sampler=batches[offset:],
        num_workers=self.num_workers,
        timeout=self.timeout,
        pin_memory=True,
    )

    # Wrap with a BufferedIterator if needed
    if self.buffer_size > 0:
        itr = BufferedIterator(self.buffer_size, itr)

    # Wrap with CountingIterator
    itr = CountingIterator(itr, start=offset)

    if self.skip_remainder_batch:
        # TODO: Below is a lazy implementation which discard the final batch regardless
        # of whether it is a full batch or not.
        total_num_itrs = len(batches) - 1
        itr.take(total_num_itrs)
        logger.info(f"skip final residual batch, total_num_itrs = {total_num_itrs}")

    return itr
class GroupedIterator(CountingIterator):
    """Wrapper around an iterable that yields fixed-size groups (chunks) of items.

    Args:
        iterable (iterable): iterable to wrap
        chunk_size (int): number of items per group
        skip_remainder_batch (bool, optional): if set, discard the trailing
            partial group in each training epoch, since it is usually smaller
            than local_batch_size * distributed_word_size * chunk_size
            (default: ``False``).

    Attributes:
        n (int): number of elements consumed from this iterator
    """

    def __init__(self, iterable, chunk_size, skip_remainder_batch=False):
        # With skipping enabled only complete chunks count toward the epoch
        # length; otherwise the trailing partial chunk is included too.
        if skip_remainder_batch:
            total_num_itrs = int(math.floor(len(iterable) / float(chunk_size)))
            logger.info(
                f"skip final residual batch, grouped total_num_itrs = {total_num_itrs}"
            )
        else:
            total_num_itrs = int(math.ceil(len(iterable) / float(chunk_size)))
            logger.info(f"grouped total_num_itrs = {total_num_itrs}")

        # If the wrapped iterable was already partially consumed, start the
        # group counter at the corresponding (rounded-up) group index.
        consumed = getattr(iterable, "n", 0)
        super().__init__(
            _chunk_iterator(iterable, chunk_size, skip_remainder_batch),
            start=int(math.ceil(consumed / float(chunk_size))),
            total=total_num_itrs,
        )
        self.chunk_size = chunk_size

        if skip_remainder_batch:
            self.take(total_num_itrs)
            # TODO: [Hack] Here the grouped iterator modifies the base iterator size so that
            # training can move into the next epoch once the grouped iterator is exhausted.
            # Double-check this implementation in case unexpected behavior occurs.
            iterable.take(total_num_itrs * chunk_size)
def _chunk_iterator(itr, chunk_size, skip_remainder_batch=False):
chunk = []
for x in itr:
chunk.append(x)
if len(chunk) == chunk_size:
yield chunk
chunk = []
if not skip_remainder_batch and len(chunk) > 0:
yield chunk
class ShardedIterator(CountingIterator):
    """A sharded wrapper around an iterable, padded to length.

    Shard *shard_id* receives every *num_shards*-th element; shorter shards
    are padded with *fill_value* so that all shards have equal length.

    Args:
        iterable (iterable): iterable to wrap
        num_shards (int): number of shards to split the iterable into
        shard_id (int): which shard to iterate over
        fill_value (Any, optional): padding value when the iterable doesn't
            evenly divide *num_shards* (default: None).

    Attributes:
        n (int): number of elements consumed from this iterator
    """

    def __init__(
        self, iterable, num_shards, shard_id, fill_value=None, skip_remainder_batch=None
    ):
        """
        Args:
            skip_remainder_batch: ignored"""
        if not (0 <= shard_id < num_shards):
            raise ValueError("shard_id must be between 0 and num_shards")

        # Ceiling division: every shard is padded up to this length.
        sharded_len = int(math.ceil(len(iterable) / float(num_shards)))

        # Pick out this shard's elements, then pad with fill_value by zipping
        # against a range of the target length.
        own_items = itertools.islice(iterable, shard_id, len(iterable), num_shards)
        padded_pairs = itertools.zip_longest(
            range(sharded_len), own_items, fillvalue=fill_value
        )
        itr = map(operator.itemgetter(1), padded_pairs)

        super().__init__(
            itr,
            start=int(math.ceil(getattr(iterable, "n", 0) / float(num_shards))),
            total=sharded_len,
        )
class BackgroundConsumer(Thread):
    """Thread that drains *source* into *queue* for a consumer to read.

    Each item from the source iterable is pushed onto the queue; after at
    most *max_len* items (when given) or after exhaustion, the module-level
    ``_sentinel`` is enqueued. If iteration fails, the exception object
    itself is enqueued so the consumer side can re-raise it.
    """

    def __init__(self, queue, source, max_len, cuda_device):
        Thread.__init__(self)

        self._queue = queue
        self._source = source
        self._max_len = max_len
        self.count = 0
        self.cuda_device = cuda_device

    def run(self):
        # set_device to avoid creation of GPU0 context when using pin_memory
        if self.cuda_device is not None:
            torch.cuda.set_device(self.cuda_device)

        try:
            for item in self._source:
                self._queue.put(item)
                self.count += 1
                # Stop if we reached the maximum length
                if self._max_len is not None and self.count >= self._max_len:
                    break

            # Signal the consumer we are done.
            self._queue.put(_sentinel)
        except Exception as exc:
            # Forward the failure to the consumer instead of dying silently.
            self._queue.put(exc)
class BufferedIterator(object):
    """Iterator that prefetches items from *iterable* on a background thread.

    A bounded queue of *size* items decouples data loading from consumption.
    Producer-side exceptions are re-raised in ``__next__``, and exhaustion is
    signalled through the module-level ``_sentinel``.
    """

    def __init__(self, size, iterable):
        self._queue = queue.Queue(size)
        self._iterable = iterable
        self._consumer = None

        self.start_time = time.time()
        self.warning_time = None

        self.total = len(iterable)

    def _create_consumer(self):
        # Started lazily from __next__ so the thread belongs to whichever
        # process/thread actually consumes the iterator.
        self._consumer = BackgroundConsumer(
            self._queue,
            self._iterable,
            self.total,
            torch.cuda.current_device() if torch.cuda.is_available() else None,
        )
        self._consumer.daemon = True
        self._consumer.start()

    def __iter__(self):
        return self

    def __len__(self):
        return self.total

    def take(self, n):
        self.total = min(self.total, n)
        # Propagate this change to the underlying iterator
        if hasattr(self._iterable, "take"):
            self._iterable.take(n)
        return self

    def __next__(self):
        if self._consumer is None:
            self._create_consumer()

        # Warn (at most once per 15 minutes, and only after the first 5) when
        # the buffer runs nearly empty: likely a data loading bottleneck.
        nearly_empty = self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2))
        if nearly_empty and time.time() - self.start_time > 5 * 60:
            last_warned = self.warning_time
            if last_warned is None or time.time() - last_warned > 15 * 60:
                logger.debug(
                    "Data loading buffer is empty or nearly empty. This may "
                    "indicate a data loading bottleneck, and increasing the "
                    "number of workers (--num-workers) may help."
                )
                self.warning_time = time.time()

        # Get next example
        item = self._queue.get(True)
        if isinstance(item, Exception):
            raise item
        if item is _sentinel:
            raise StopIteration()
        return item
class GroupedEpochBatchIterator(EpochBatchIterator):
    """Grouped version of EpochBatchIterator.

    It takes several batch samplers, one per (sub-)dataset. Each epoch, every
    dataset-wise sampler is shuffled individually with a different random
    seed; the sub-samplers are then concatenated and mixed with a
    deterministic permutation of fixed-size groups. It acts like
    EpochBatchIterator but additionally makes sure that

    1) each mini-batch contains data from only one dataset at a time, and
    2) different workers fetch the data in the same order, so at any step
       they all draw from the same dataset.

    mult_rate is used for the update_freq > 1 case, where we want to make
    sure update_freq consecutive mini-batches come from the same source.
    """

    def __init__(
        self,
        dataset,
        collate_fn,
        batch_samplers,
        seed=1,
        num_shards=1,
        shard_id=0,
        num_workers=0,
        epoch=0,
        mult_rate=1,
        buffer_size=0,
    ):
        super().__init__(
            dataset,
            collate_fn,
            batch_samplers,
            seed,
            num_shards,
            shard_id,
            num_workers,
            epoch,
            buffer_size,
        )
        # level 0: sub-samplers 1: batch_idx 2: batches
        self._frozen_batches = tuple([tuple(sub_batch) for sub_batch in batch_samplers])
        # Batches are permuted in groups of step_size so that mult_rate
        # consecutive updates on each of num_shards shards share one source.
        self.step_size = mult_rate * num_shards
        # Per-sampler lengths, truncated down to a multiple of step_size.
        self.lengths = [
            (len(x) // self.step_size) * self.step_size for x in self.frozen_batches
        ]

    def __len__(self):
        # Total number of batches across all sub-samplers (after truncation).
        return sum(self.lengths)

    @property
    def first_batch(self):
        """A collated first batch (or the string "DUMMY" when the dataset
        cannot be fetched outside the dataloader)."""
        if len(self.frozen_batches) == 0:
            raise Exception(
                "The dataset is empty. This could indicate "
                "that all elements in the dataset have been skipped. "
                "Try increasing the max number of allowed tokens or using "
                "a larger dataset."
            )

        if self.dataset.supports_fetch_outside_dataloader:
            return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0][0]])
        else:
            return "DUMMY"

    def _get_iterator_for_epoch(
        self, epoch, shuffle, fix_batches_to_gpus=False, offset=0
    ):
        """Build the epoch iterator; see EpochBatchIterator for the contract."""

        def shuffle_batches(batches, seed):
            # Deterministic per-seed shuffle, identical across workers.
            with data_utils.numpy_seed(seed):
                np.random.shuffle(batches)
            return batches

        def return_full_batches(batch_sets, seed, shuffle):
            # Shuffle each sub-sampler independently, truncate each one to a
            # multiple of step_size, concatenate, then permute step_size-sized
            # groups so consecutive updates stay within a single dataset.
            if shuffle:
                batch_sets = [shuffle_batches(list(x), seed) for x in batch_sets]

            batch_sets = [
                batch_sets[i][: self.lengths[i]] for i in range(len(batch_sets))
            ]
            batches = list(itertools.chain.from_iterable(batch_sets))

            if shuffle:
                with data_utils.numpy_seed(seed):
                    idx = np.random.permutation(len(batches) // self.step_size)
                    # Sanity check: truncation above guarantees divisibility.
                    if len(idx) * self.step_size != len(batches):
                        raise ValueError(
                            "ERROR: %d %d %d %d"
                            % (len(idx), self.step_size, len(batches), self.shard_id),
                            ":".join(["%d" % x for x in self.lengths]),
                        )
                    mini_shards = [
                        batches[i * self.step_size : (i + 1) * self.step_size]
                        for i in idx
                    ]
                    batches = list(itertools.chain.from_iterable(mini_shards))

            return batches

        if self._supports_prefetch:
            raise NotImplementedError("To be implemented")
        else:
            batches = return_full_batches(
                self.frozen_batches, self.seed + epoch, shuffle
            )
            batches = list(
                ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])
            )

        if offset > 0 and offset >= len(batches):
            # Resuming past the end of this epoch: tell the caller to advance.
            return None

        if self.num_workers > 0:
            # Silence a noisy multiprocessing warning from worker shutdown.
            os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning"

        itr = torch.utils.data.DataLoader(
            self.dataset,
            collate_fn=self.collate_fn,
            batch_sampler=batches[offset:],
            num_workers=self.num_workers,
        )
        if self.buffer_size > 0:
            itr = BufferedIterator(self.buffer_size, itr)

        return CountingIterator(itr, start=offset)
| 36.110976 | 94 | 0.606666 |
fd3ba94a9fe9ee148c489f019403ff7edf6f7b22 | 2,296 | py | Python | greenberry/consensus/block_rewards.py | greenberrycoin/gbch-blockchain | d99843d720c6e7bd7baaf8bb4639a46dbb56caed | [
"Apache-2.0"
] | null | null | null | greenberry/consensus/block_rewards.py | greenberrycoin/gbch-blockchain | d99843d720c6e7bd7baaf8bb4639a46dbb56caed | [
"Apache-2.0"
] | null | null | null | greenberry/consensus/block_rewards.py | greenberrycoin/gbch-blockchain | d99843d720c6e7bd7baaf8bb4639a46dbb56caed | [
"Apache-2.0"
] | null | null | null | from greenberry.util.ints import uint32, uint64
# 1 GreenBerry coin = 1,000,000,000,000 = 1 trillion mojo.
_mojo_per_greenberry = 1000000000000
_blocks_per_year = 1681920 # 32 * 6 * 24 * 365
def calculate_pool_reward(height: uint32) -> uint64:
    """
    Returns the pool reward at a certain block height. The pool earns 7/8 of the reward in each block. If the farmer
    is solo farming, they act as the pool, and therefore earn the entire block reward.
    These halving events will not be hit at the exact times
    (3 years, etc), due to fluctuations in difficulty. They will likely come early, if the network space and VDF
    rates increase continuously.
    """
    pool_share = 7 / 8

    # Genesis block carries the large pre-farm payout.
    if height == 0:
        return uint64(int(pool_share * 300000 * _mojo_per_greenberry))

    # Halving schedule: (upper bound in years, whole-block reward in coins).
    for years, block_reward in ((3, 2), (6, 1), (9, 0.5), (12, 0.25)):
        if height < years * _blocks_per_year:
            return uint64(int(pool_share * block_reward * _mojo_per_greenberry))

    # Final tier: constant tail emission after year 12.
    return uint64(int(pool_share * 0.125 * _mojo_per_greenberry))
def calculate_base_farmer_reward(height: uint32) -> uint64:
    """
    Returns the base farmer reward at a certain block height.
    The base fee reward is 1/8 of total block reward

    Returns the coinbase reward at a certain block height. These halving events will not be hit at the exact times
    (3 years, etc), due to fluctuations in difficulty. They will likely come early, if the network space and VDF
    rates increase continuously.
    """
    farmer_share = 1 / 8

    # Genesis block carries the large pre-farm payout.
    if height == 0:
        return uint64(int(farmer_share * 300000 * _mojo_per_greenberry))

    # Halving schedule: (upper bound in years, whole-block reward in coins).
    for years, block_reward in ((3, 2), (6, 1), (9, 0.5), (12, 0.25)):
        if height < years * _blocks_per_year:
            return uint64(int(farmer_share * block_reward * _mojo_per_greenberry))

    # Final tier: constant tail emission after year 12.
    return uint64(int(farmer_share * 0.125 * _mojo_per_greenberry))
| 44.153846 | 116 | 0.674652 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.