hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f70f24eb6dbeb6161cf2786c9b1c55687bbbf46f | 3,707 | py | Python | lux_ai/lux_gym/multi_subtask.py | mrzhuzhe/Kaggle_Lux_AI_2021 | f00954d328467ce24f5535d3878d305c6d1f3acb | [
"MIT"
] | 44 | 2021-12-13T22:49:26.000Z | 2022-03-22T03:31:34.000Z | lux_ai/lux_gym/multi_subtask.py | L16H7/Kaggle_Lux_AI_2021 | 08b795e71e78c768d28c648290a15d58ca718776 | [
"MIT"
] | 1 | 2022-03-10T04:05:04.000Z | 2022-03-10T04:05:04.000Z | lux_ai/lux_gym/multi_subtask.py | L16H7/Kaggle_Lux_AI_2021 | 08b795e71e78c768d28c648290a15d58ca718776 | [
"MIT"
] | 10 | 2021-12-13T21:23:57.000Z | 2022-02-03T09:50:27.000Z | from abc import ABC, abstractmethod
import numpy as np
import random
from typing import Callable, Dict, Optional, Tuple, Sequence
from .reward_spaces import Subtask
from ..lux.game import Game
class SubtaskSampler(ABC):
    """Abstract strategy for choosing which subtask to train on next.

    Concrete samplers receive the final rewards of the episode that just
    finished (or ``None`` before the first episode) and return a freshly
    constructed Subtask instance.
    """

    def __init__(self, subtask_constructors: Sequence[Callable[..., Subtask]]):
        # The pool of zero/few-arg callables that build Subtask instances.
        self.subtask_constructors = subtask_constructors

    @abstractmethod
    def sample(self, final_rewards: Optional[Tuple[float, float]]) -> Subtask:
        pass

    def get_info(self) -> Dict[str, np.ndarray]:
        """Optional logging payload; base implementation logs nothing."""
        return {}
class RandomSampler(SubtaskSampler):
    """Samples subtasks uniformly at random; reward feedback is ignored."""

    def sample(self, final_rewards: Optional[Tuple[float, float]]) -> Subtask:
        # Uniform draw over the constructor pool; final_rewards is unused.
        chosen = random.choice(self.subtask_constructors)
        return chosen()
class DifficultySampler(SubtaskSampler):
    """Adaptively favours subtasks on which the agent currently scores poorly.

    Keeps a running mean of the final rewards observed per subtask and samples
    the next subtask with probability proportional to the remaining headroom
    below the reward space's maximum.
    """

    def __init__(self, subtask_constructors: Sequence[Callable[..., Subtask]]):
        super().__init__(subtask_constructors)
        n_subtasks = len(self.subtask_constructors)
        # -1 until sample() has been called at least once.
        self.active_subtask_idx = -1
        self.summed_rewards = np.zeros(n_subtasks)
        self.n_trials = np.zeros(n_subtasks)

    def sample(self, final_rewards: Optional[Tuple[float, float]]) -> Subtask:
        """Record the finished episode's outcome (if any) and draw a new subtask."""
        if final_rewards is not None:
            idx = self.active_subtask_idx
            self.n_trials[idx] += 1
            self.summed_rewards[idx] += np.mean(final_rewards)
        self.active_subtask_idx = np.random.choice(len(self.subtask_constructors), p=self.weights)
        return self.subtask_constructors[self.active_subtask_idx]()

    @property
    def weights(self) -> np.ndarray:
        """Sampling probabilities: headroom below reward_max, normalized to sum to 1.

        NOTE(review): if an empirical mean reward ever exceeded reward_max the
        corresponding weight would go negative and np.random.choice would
        reject it — confirm the reward spec bounds make that impossible.
        """
        headroom = Subtask.get_reward_spec().reward_max - self.summed_rewards / np.maximum(self.n_trials, 1)
        return headroom / headroom.sum()

    def get_info(self) -> Dict[str, np.ndarray]:
        """One difficulty-weight logging entry per subtask constructor."""
        info = {}
        for i, subtask in enumerate(self.subtask_constructors):
            info[f"LOGGING_{subtask.__name__}_subtask_difficulty"] = self.weights[i]
        return info
class MultiSubtask(Subtask):
    """A reward space that rotates through multiple subtasks.

    A sampler picks the active subtask at construction time and again whenever
    an episode ends. Reward computation is delegated to the active subtask,
    and per-subtask rewards are exposed for logging via get_info().
    """

    def __init__(
            self,
            subtask_constructors: Sequence[Callable[..., Subtask]] = (),
            subtask_sampler_constructor: Callable[..., SubtaskSampler] = RandomSampler,
            **kwargs
    ):
        super().__init__(**kwargs)
        self.subtask_constructors = subtask_constructors
        self.subtask_sampler = subtask_sampler_constructor(self.subtask_constructors)
        self.active_subtask = self.subtask_sampler.sample(None)
        # One logging slot per subtask; NaN pairs mark "not active this step".
        self.info = {}
        for subtask in self.subtask_constructors:
            self.info[f"LOGGING_{subtask.__name__}_subtask_reward"] = np.array([float("nan"), float("nan")])

    def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
        """Delegate to the active subtask, log its reward, and resample when done."""
        reward, done = self.active_subtask.compute_rewards_and_done(game_state, done)
        for subtask in self.subtask_constructors:
            reward_key = f"LOGGING_{subtask.__name__}_subtask_reward"
            if isinstance(self.active_subtask, subtask):
                self.info[reward_key] = np.array(reward)
            else:
                self.info[reward_key] = np.array([float("nan"), float("nan")])
        if done:
            # Feed the final reward back so adaptive samplers can update stats.
            self.active_subtask = self.subtask_sampler.sample(reward)
        return reward, done

    def completed_task(self, game_state: Game) -> np.ndarray:
        # Completion is tracked by the active subtask, not by this wrapper.
        raise NotImplementedError

    def get_info(self) -> Dict[str, np.ndarray]:
        """Merge the per-subtask reward log with the sampler's own info."""
        return dict(**self.info, **self.subtask_sampler.get_info())

    def get_subtask_encoding(self, subtask_encoding_dict: dict) -> int:
        """Integer encoding of the currently active subtask."""
        return self.active_subtask.get_subtask_encoding(subtask_encoding_dict)
| 40.736264 | 107 | 0.688427 | from abc import ABC, abstractmethod
import numpy as np
import random
from typing import Callable, Dict, Optional, Tuple, Sequence
from .reward_spaces import Subtask
from ..lux.game import Game
class SubtaskSampler(ABC):
def __init__(self, subtask_constructors: Sequence[Callable[..., Subtask]]):
self.subtask_constructors = subtask_constructors
@abstractmethod
def sample(self, final_rewards: Optional[Tuple[float, float]]) -> Subtask:
pass
def get_info(self) -> Dict[str, np.ndarray]:
return {}
class RandomSampler(SubtaskSampler):
def sample(self, final_rewards: Optional[Tuple[float, float]]) -> Subtask:
return self.subtask_constructors[random.randrange(len(self.subtask_constructors))]()
class DifficultySampler(SubtaskSampler):
def __init__(self, subtask_constructors: Sequence[Callable[..., Subtask]]):
super(DifficultySampler, self).__init__(subtask_constructors)
self.active_subtask_idx = -1
self.summed_rewards = np.zeros(len(self.subtask_constructors))
self.n_trials = np.zeros(len(self.subtask_constructors))
def sample(self, final_rewards: Optional[Tuple[float, float]]) -> Subtask:
if final_rewards is not None:
self.n_trials[self.active_subtask_idx] += 1
self.summed_rewards[self.active_subtask_idx] += np.mean(final_rewards)
self.active_subtask_idx = np.random.choice(len(self.subtask_constructors), p=self.weights)
return self.subtask_constructors[self.active_subtask_idx]()
@property
def weights(self) -> np.ndarray:
weights = Subtask.get_reward_spec().reward_max - self.summed_rewards / np.maximum(self.n_trials, 1)
return weights / weights.sum()
def get_info(self) -> Dict[str, np.ndarray]:
return {
f"LOGGING_{subtask.__name__}_subtask_difficulty": self.weights[i]
for i, subtask in enumerate(self.subtask_constructors)
}
class MultiSubtask(Subtask):
def __init__(
self,
subtask_constructors: Sequence[Callable[..., Subtask]] = (),
subtask_sampler_constructor: Callable[..., SubtaskSampler] = RandomSampler,
**kwargs
):
super(MultiSubtask, self).__init__(**kwargs)
self.subtask_constructors = subtask_constructors
self.subtask_sampler = subtask_sampler_constructor(self.subtask_constructors)
self.active_subtask = self.subtask_sampler.sample(None)
self.info = {
f"LOGGING_{subtask.__name__}_subtask_reward": np.array([float("nan"), float("nan")])
for subtask in self.subtask_constructors
}
def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
reward, done = self.active_subtask.compute_rewards_and_done(game_state, done)
for subtask in self.subtask_constructors:
reward_key = f"LOGGING_{subtask.__name__}_subtask_reward"
if isinstance(self.active_subtask, subtask):
self.info[reward_key] = np.array(reward)
else:
self.info[reward_key] = np.array([float("nan"), float("nan")])
if done:
self.active_subtask = self.subtask_sampler.sample(reward)
return reward, done
def completed_task(self, game_state: Game) -> np.ndarray:
raise NotImplementedError
def get_info(self) -> Dict[str, np.ndarray]:
return dict(**self.info, **self.subtask_sampler.get_info())
def get_subtask_encoding(self, subtask_encoding_dict: dict) -> int:
return self.active_subtask.get_subtask_encoding(subtask_encoding_dict)
| true | true |
f70f256cd59a3311b34f386e6564bde3e61f621a | 41,982 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_10_01/aio/operations/_public_ip_addresses_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_10_01/aio/operations/_public_ip_addresses_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_10_01/aio/operations/_public_ip_addresses_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PublicIPAddressesOperations:
"""PublicIPAddressesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Store the pipeline client and the (de)serializers handed over by the
        # generated service client; no requests are issued until one of the
        # operation methods is called.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        public_ip_address_name: str,
        **kwargs
    ) -> None:
        # Issues the raw DELETE request once; begin_delete wraps this
        # coroutine in an LRO poller that drives the operation to completion.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers (DELETE sends no body and expects none back)
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202 = deletion accepted/in progress; 204 = resource already gone.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        public_ip_address_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Deletes the specified public IP address.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param public_ip_address_name: The name of the subnet.
        :type public_ip_address_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when starting fresh; a continuation
        # token means we are rehydrating an already-running operation.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                public_ip_address_name=public_ip_address_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # DELETE has no response body; expose only the raw response via cls.
            if cls:
                return cls(pipeline_response, None, {})

        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'}  # type: ignore
async def get(
self,
resource_group_name: str,
public_ip_address_name: str,
expand: Optional[str] = None,
**kwargs
) -> "models.PublicIPAddress":
"""Gets the specified public IP address in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_address_name: The name of the subnet.
:type public_ip_address_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_10_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        public_ip_address_name: str,
        parameters: "models.PublicIPAddress",
        **kwargs
    ) -> "models.PublicIPAddress":
        # Issues the raw PUT request once; begin_create_or_update wraps this
        # coroutine in an LRO poller that drives the operation to completion.
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPAddress"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PublicIPAddress')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing resource, 201 = created new; both carry the
        # resource body, so deserialize identically in either case.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('PublicIPAddress', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('PublicIPAddress', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        public_ip_address_name: str,
        parameters: "models.PublicIPAddress",
        **kwargs
    ) -> AsyncLROPoller["models.PublicIPAddress"]:
        """Creates or updates a static or dynamic public IP address.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
        :type public_ip_address_name: str
        :param parameters: Parameters supplied to the create or update public IP address operation.
        :type parameters: ~azure.mgmt.network.v2017_10_01.models.PublicIPAddress
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PublicIPAddress or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_10_01.models.PublicIPAddress]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPAddress"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when starting fresh; a continuation token
        # means we are rehydrating an already-running operation.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                public_ip_address_name=public_ip_address_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final polled response into the resource model.
            deserialized = self._deserialize('PublicIPAddress', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'}  # type: ignore
    async def _update_tags_initial(
        self,
        resource_group_name: str,
        public_ip_address_name: str,
        parameters: "models.TagsObject",
        **kwargs
    ) -> "models.PublicIPAddress":
        # Issues the raw PATCH (tags-only update) once; begin_update_tags
        # wraps this coroutine in an LRO poller.
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPAddress"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"

        # Construct URL
        url = self._update_tags_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        # Body carries only the TagsObject; other properties are untouched.
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PublicIPAddress', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'}  # type: ignore
    async def begin_update_tags(
        self,
        resource_group_name: str,
        public_ip_address_name: str,
        parameters: "models.TagsObject",
        **kwargs
    ) -> AsyncLROPoller["models.PublicIPAddress"]:
        """Updates public IP address tags.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
        :type public_ip_address_name: str
        :param parameters: Parameters supplied to update public IP address tags.
        :type parameters: ~azure.mgmt.network.v2017_10_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PublicIPAddress or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_10_01.models.PublicIPAddress]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPAddress"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PATCH when starting fresh; a continuation
        # token means we are rehydrating an already-running operation.
        if cont_token is None:
            raw_result = await self._update_tags_initial(
                resource_group_name=resource_group_name,
                public_ip_address_name=public_ip_address_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final polled response into the resource model.
            deserialized = self._deserialize('PublicIPAddress', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'}  # type: ignore
    def list_all(
        self,
        **kwargs
    ) -> AsyncIterable["models.PublicIPAddressListResult"]:
        """Gets all the public IP addresses in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.PublicIPAddressListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPAddressListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Builds the GET request for either the first page (templated URL)
            # or a continuation page (opaque next_link from the service).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds all query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Returns (next page link or None, async iterable of page items).
            deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses'}  # type: ignore
    def list(
        self,
        resource_group_name: str,
        **kwargs
    ) -> AsyncIterable["models.PublicIPAddressListResult"]:
        """Gets all public IP addresses in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.PublicIPAddressListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPAddressListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Builds the GET request for either the first page (templated URL)
            # or a continuation page (opaque next_link from the service).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds all query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Returns (next page link or None, async iterable of page items).
            deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses'}  # type: ignore
def list_virtual_machine_scale_set_public_ip_addresses(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
**kwargs
) -> AsyncIterable["models.PublicIPAddressListResult"]:
"""Gets information about all public IP addresses on a virtual machine scale set level.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_public_ip_addresses.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/publicipaddresses'} # type: ignore
def list_virtual_machine_scale_set_vm_public_ip_addresses(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
ip_configuration_name: str,
**kwargs
) -> AsyncIterable["models.PublicIPAddressListResult"]:
"""Gets information about all public IP addresses in a virtual machine IP configuration in a
virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The network interface name.
:type network_interface_name: str
:param ip_configuration_name: The IP configuration name.
:type ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_vm_public_ip_addresses.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_vm_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses'} # type: ignore
async def get_virtual_machine_scale_set_public_ip_address(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
ip_configuration_name: str,
public_ip_address_name: str,
expand: Optional[str] = None,
**kwargs
) -> "models.PublicIPAddress":
"""Get the specified public IP address in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the IP configuration.
:type ip_configuration_name: str
:param public_ip_address_name: The name of the public IP Address.
:type public_ip_address_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_10_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json, text/json"
# Construct URL
url = self.get_virtual_machine_scale_set_public_ip_address.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_public_ip_address.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses/{publicIpAddressName}'} # type: ignore
| 50.949029 | 395 | 0.673027 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
# Signature of the optional ``cls`` response hook accepted by every operation:
# it receives the raw pipeline response, the deserialized body, and the
# response headers, and its return value replaces the default result.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PublicIPAddressesOperations:
models = models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the pipeline client, configuration, and (de)serializers shared by all operations.

        :param client: Client for issuing pipeline requests.
        :param config: Service client configuration (subscription id, polling interval, ...).
        :param serializer: Serializer used to build URLs, query strings, headers, and bodies.
        :param deserializer: Deserializer used to turn responses into model objects.
        """
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        public_ip_address_name: str,
        **kwargs
    ) -> None:
        """Issue the initial DELETE request of the delete long-running operation.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
        :type public_ip_address_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"

        # Construct URL from the metadata template.
        url = self._delete_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct query string and headers.
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202/204 are all acceptable first responses for an ARM delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        public_ip_address_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Deletes the specified public IP address.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
        :type public_ip_address_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a polling object for a
         personal polling strategy.
        :keyword int polling_interval: Default waiting time between two polls for LRO operations
         if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response).
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: fire the initial DELETE; cls=lambda keeps the raw pipeline response.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                public_ip_address_name=public_ip_address_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial call; don't forward them to polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete has no body; only invoke the optional response hook.
            if cls:
                return cls(pipeline_response, None, {})

        if polling is True: polling_method = AsyncARMPolling(lro_delay,  **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        public_ip_address_name: str,
        expand: Optional[str] = None,
        **kwargs
    ) -> "models.PublicIPAddress":
        """Gets the specified public IP address in a specified resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
        :type public_ip_address_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :return: PublicIPAddress, or the result of cls(response).
        :rtype: ~azure.mgmt.network.v2017_10_01.models.PublicIPAddress
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPAddress"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        accept = "application/json, text/json"

        # Construct URL from the metadata template.
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct query string ($expand only when requested) and headers.
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PublicIPAddress', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        public_ip_address_name: str,
        parameters: "models.PublicIPAddress",
        **kwargs
    ) -> "models.PublicIPAddress":
        """Issue the initial PUT request of the create-or-update long-running operation.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
        :type public_ip_address_name: str
        :param parameters: Parameters supplied to the create or update public IP address operation.
        :type parameters: ~azure.mgmt.network.v2017_10_01.models.PublicIPAddress
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :return: PublicIPAddress, or the result of cls(response).
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPAddress"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"

        # Construct URL from the metadata template.
        url = self._create_or_update_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct query string, headers, and the serialized request body.
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PublicIPAddress')
        body_content_kwargs['content'] = body_content

        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing resource, 201 = created new resource.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('PublicIPAddress', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('PublicIPAddress', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        public_ip_address_name: str,
        parameters: "models.PublicIPAddress",
        **kwargs
    ) -> AsyncLROPoller["models.PublicIPAddress"]:
        """Creates or updates a static or dynamic public IP address.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
        :type public_ip_address_name: str
        :param parameters: Parameters supplied to the create or update public IP address operation.
        :type parameters: ~azure.mgmt.network.v2017_10_01.models.PublicIPAddress
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a polling object for a
         personal polling strategy.
        :keyword int polling_interval: Default waiting time between two polls for LRO operations
         if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PublicIPAddress or the result of cls(response).
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_10_01.models.PublicIPAddress]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPAddress"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: fire the initial PUT; cls=lambda keeps the raw pipeline response.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                public_ip_address_name=public_ip_address_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial call; don't forward them to polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response of the LRO into the model type.
            deserialized = self._deserialize('PublicIPAddress', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = AsyncARMPolling(lro_delay,  **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'}  # type: ignore
    async def _update_tags_initial(
        self,
        resource_group_name: str,
        public_ip_address_name: str,
        parameters: "models.TagsObject",
        **kwargs
    ) -> "models.PublicIPAddress":
        """Issue the initial PATCH request of the update-tags long-running operation.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
        :type public_ip_address_name: str
        :param parameters: Parameters supplied to update public IP address tags.
        :type parameters: ~azure.mgmt.network.v2017_10_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :return: PublicIPAddress, or the result of cls(response).
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPAddress"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"

        # Construct URL from the metadata template.
        url = self._update_tags_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct query string, headers, and the serialized request body.
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content

        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PublicIPAddress', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'}  # type: ignore
    async def begin_update_tags(
        self,
        resource_group_name: str,
        public_ip_address_name: str,
        parameters: "models.TagsObject",
        **kwargs
    ) -> AsyncLROPoller["models.PublicIPAddress"]:
        """Updates public IP address tags.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
        :type public_ip_address_name: str
        :param parameters: Parameters supplied to update public IP address tags.
        :type parameters: ~azure.mgmt.network.v2017_10_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a polling object for a
         personal polling strategy.
        :keyword int polling_interval: Default waiting time between two polls for LRO operations
         if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PublicIPAddress or the result of cls(response).
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_10_01.models.PublicIPAddress]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPAddress"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: fire the initial PATCH; cls=lambda keeps the raw pipeline response.
            raw_result = await self._update_tags_initial(
                resource_group_name=resource_group_name,
                public_ip_address_name=public_ip_address_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial call; don't forward them to polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response of the LRO into the model type.
            deserialized = self._deserialize('PublicIPAddress', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = AsyncARMPolling(lro_delay,  **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'}  # type: ignore
    def list_all(
        self,
        **kwargs
    ) -> AsyncIterable["models.PublicIPAddressListResult"]:
        """Gets all the public IP addresses in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response.
        :return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response).
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.PublicIPAddressListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPAddressListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Build the GET for the first page (templated URL) or a follow-up page (next_link).
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list_all.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Continuation links are already complete URLs; no extra query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Return (next page link or None, async iterable of this page's items).
            deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses'}  # type: ignore
    def list(
        self,
        resource_group_name: str,
        **kwargs
    ) -> AsyncIterable["models.PublicIPAddressListResult"]:
        """Gets all public IP addresses in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response).
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.PublicIPAddressListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPAddressListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Build the GET for the first page (templated URL) or a follow-up page (next_link).
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Continuation links are already complete URLs; no extra query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Return (next page link or None, async iterable of this page's items).
            deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses'}  # type: ignore
    def list_virtual_machine_scale_set_public_ip_addresses(
        self,
        resource_group_name: str,
        virtual_machine_scale_set_name: str,
        **kwargs
    ) -> AsyncIterable["models.PublicIPAddressListResult"]:
        """Gets information about all public IP addresses on a virtual machine scale set level.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_machine_scale_set_name: The name of the virtual machine scale set.
        :type virtual_machine_scale_set_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response).
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.PublicIPAddressListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPAddressListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Scale-set resources live under Microsoft.Compute and use that provider's api-version.
        api_version = "2017-03-30"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Build the GET for the first page (templated URL) or a follow-up page (next_link).
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list_virtual_machine_scale_set_public_ip_addresses.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Continuation links are already complete URLs; no extra query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Return (next page link or None, async iterable of this page's items).
            deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_virtual_machine_scale_set_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/publicipaddresses'}  # type: ignore
    def list_virtual_machine_scale_set_vm_public_ip_addresses(
        self,
        resource_group_name: str,
        virtual_machine_scale_set_name: str,
        virtualmachine_index: str,
        network_interface_name: str,
        ip_configuration_name: str,
        **kwargs
    ) -> AsyncIterable["models.PublicIPAddressListResult"]:
        """Gets information about all public IP addresses in a virtual machine IP configuration in a
        virtual machine scale set.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_machine_scale_set_name: The name of the virtual machine scale set.
        :type virtual_machine_scale_set_name: str
        :param virtualmachine_index: The virtual machine index.
        :type virtualmachine_index: str
        :param network_interface_name: The network interface name.
        :type network_interface_name: str
        :param ip_configuration_name: The IP configuration name.
        :type ip_configuration_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response).
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.PublicIPAddressListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPAddressListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Scale-set resources live under Microsoft.Compute and use that provider's api-version.
        api_version = "2017-03-30"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Build the GET for the first page (templated URL) or a follow-up page (next_link).
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list_virtual_machine_scale_set_vm_public_ip_addresses.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
                    'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
                    'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
                    'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Continuation links are already complete URLs; no extra query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Return (next page link or None, async iterable of this page's items).
            deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_virtual_machine_scale_set_vm_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses'}  # type: ignore
async def get_virtual_machine_scale_set_public_ip_address(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
ip_configuration_name: str,
public_ip_address_name: str,
expand: Optional[str] = None,
**kwargs
) -> "models.PublicIPAddress":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json, text/json"
url = self.get_virtual_machine_scale_set_public_ip_address.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_public_ip_address.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses/{publicIpAddressName}'}
| true | true |
f70f25c06081abbe813159ebbb0ac18fae2c2425 | 23,673 | py | Python | grr/server/grr_response_server/gui/selenium_tests/hunt_create_test.py | acidburn0zzz/grr | 44e1a5b1630e8101610faaaebe15b19b5ad30cb1 | [
"Apache-2.0"
] | 1 | 2019-08-28T23:48:20.000Z | 2019-08-28T23:48:20.000Z | grr/server/grr_response_server/gui/selenium_tests/hunt_create_test.py | AjitNair2/grr | 2a2ea891b3927775872904cdd402a18e7bb3d143 | [
"Apache-2.0"
] | 2 | 2022-01-15T03:18:12.000Z | 2022-02-13T22:02:43.000Z | grr/server/grr_response_server/gui/selenium_tests/hunt_create_test.py | acidburn0zzz/grr | 44e1a5b1630e8101610faaaebe15b19b5ad30cb1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Test of "New Hunt" wizard."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from selenium.webdriver.common import keys
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import data_store
from grr_response_server import foreman
from grr_response_server import foreman_rules
from grr_response_server import hunt as lib_hunt
from grr_response_server.flows.general import file_finder
from grr_response_server.flows.general import transfer
from grr_response_server.gui import gui_test_lib
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
from grr_response_server.rdfvalues import output_plugin as rdf_output_plugin
from grr.test_lib import test_lib
class TestNewHuntWizard(gui_test_lib.GRRSeleniumHuntTest):
"""Test the "new hunt wizard" GUI."""
@staticmethod
def FindForemanRules(hunt_urn, token):
rules = data_store.REL_DB.ReadAllForemanRules()
return [rule for rule in rules if rule.hunt_id == hunt_urn.Basename()]
def testNewHuntWizard(self):
# Open up and click on View Hunts.
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.WaitUntil(self.IsElementPresent, "css=a[grrtarget=hunts]")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsElementPresent, "css=button[name=NewHunt]")
# Open up "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
# Click on Filesystem item in flows list
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
self.Click("css=#_Filesystem > i.jstree-icon")
# Click on the FileFinder item in Filesystem flows list
self.Click("link=File Finder")
# Wait for flow configuration form to be rendered (just wait for first
# input field).
self.WaitUntil(self.IsElementPresent,
"css=grr-new-hunt-wizard-form label:contains('Paths')")
# Change "path" and "pathtype" values
self.Type(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-repeated-field:has(label:contains('Paths')) "
"input", "/tmp")
self.Select(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Pathtype')) "
"select", "TSK")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
# Click on "Back" button and check that all the values in the form
# remain intact.
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsElementPresent,
"css=grr-new-hunt-wizard-form label:contains('Paths')")
self.assertEqual(
"/tmp",
self.GetValue(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-repeated-field:has(label:contains('Paths')) input"))
self.assertEqual(
"TSK",
self.GetSelectedLabel(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Pathtype')) select"
))
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
# Configure the hunt to use dummy output plugin.
self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
self.Select("css=grr-new-hunt-wizard-form select", "DummyOutputPlugin")
self.Type(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Filepath Regex')) "
"input", "some regex")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Empty set of rules should be valid.
self.WaitUntil(self.IsElementPresent, "css=button.Next:not([disabled])")
# A note informs what an empty set of rules means.
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('No rules specified!')")
# Alternative match mode that matches a client if
# any of the rules evaluates to true can be selected.
self.Select(
"css=grr-configure-rules-page "
"label:contains('Match mode') ~ * select", "Match any")
# The note depends on the match mode.
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('No rules specified!')")
# Create 3 foreman rules. Note that "Add" button adds rules
# to the beginning of a list. So we always use :nth(0) selector.
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Select("css=grr-configure-rules-page div.well:nth(0) select", "Regex")
rule = foreman_rules.ForemanRegexClientRule
label = rule.ForemanStringField.SYSTEM.description
self.Select(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Field') ~ * select", label)
self.Type(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Attribute regex') ~ * input", "Linux")
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Select("css=grr-configure-rules-page div.well:nth(0) select",
"Integer")
rule = foreman_rules.ForemanIntegerClientRule
label = rule.ForemanIntegerField.CLIENT_CLOCK.description
self.Select(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Field') ~ * select", label)
self.Select(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Operator') ~ * select", "GREATER_THAN")
self.Type(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Value') ~ * input", "1336650631137737")
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Click("css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Os darwin') ~ * input[type=checkbox]")
# Click on "Back" button
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
# Click on "Next" button again and check that all the values that
# we've just entered remain intact.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Review')")
# Check that the arguments summary is present.
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Paths')")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('/tmp')")
# Check that output plugins are shown.
self.assertTrue(
self.IsElementPresent(
"css=grr-wizard-form:contains('DummyOutputPlugin')"))
self.assertTrue(
self.IsElementPresent("css=grr-wizard-form:contains('some regex')"))
# Check that there's no deprecated rules summary.
self.assertFalse(
self.IsElementPresent("css=grr-wizard-form:contains('Regex rules')"))
self.assertFalse(
self.IsElementPresent("css=grr-wizard-form:contains('Integer rules')"))
# Check that rules summary is present.
self.assertTrue(
self.IsElementPresent(
"css=grr-wizard-form:contains('Client rule set')"))
# Click on "Run" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Created Hunt')")
# Close the window and check that the hunt was created.
self.Click("css=button.Next")
# Select newly created hunt.
self.Click("css=grr-hunts-list td:contains('gui_user')")
# Check that correct details are displayed in hunt details tab.
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-inspector:contains('GenericHunt')")
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-inspector:contains('Flow Arguments')")
self.assertTrue(
self.IsElementPresent("css=grr-hunt-inspector:contains('Paths')"))
self.assertTrue(
self.IsElementPresent("css=grr-hunt-inspector:contains('/tmp')"))
self.assertTrue(
self.IsElementPresent(
"css=grr-hunt-inspector:contains('DummyOutputPlugin')"))
self.assertTrue(
self.IsElementPresent("css=grr-hunt-inspector:contains('some regex')"))
# Check that there's no deprecated rules summary.
self.assertFalse(
self.IsElementPresent("css=grr-hunt-inspector:contains('Regex rules')"))
self.assertFalse(
self.IsElementPresent(
"css=grr-hunt-inspector:contains('Integer rules')"))
# Check that rules summary is present.
self.assertTrue(
self.IsElementPresent(
"css=grr-hunt-inspector:contains('Client Rule Set')"))
# Check that the hunt object was actually created
hunts_list = sorted(
data_store.REL_DB.ReadHuntObjects(offset=0, count=10),
key=lambda x: x.create_time)
self.assertLen(hunts_list, 1)
# Check that the hunt was created with a correct flow
hunt = hunts_list[0]
self.assertEqual(hunt.args.standard.flow_name,
file_finder.FileFinder.__name__)
self.assertEqual(hunt.args.standard.flow_args.paths[0], "/tmp")
self.assertEqual(hunt.args.standard.flow_args.pathtype,
rdf_paths.PathSpec.PathType.TSK)
# self.assertEqual(hunt.args.flow_args.ignore_errors, True)
self.assertEqual(hunt.output_plugins[0].plugin_name, "DummyOutputPlugin")
# Check that hunt was not started
self.assertEqual(hunt.hunt_state, hunt.HuntState.PAUSED)
lib_hunt.StartHunt(hunt.hunt_id)
hunt_rules = self.FindForemanRules(
rdfvalue.RDFURN("hunts").Add(hunt.hunt_id), token=self.token)
# Check that the hunt was created with correct rules
self.assertLen(hunt_rules, 1)
lifetime = hunt_rules[0].GetLifetime()
lifetime -= rdfvalue.DurationSeconds("2w")
self.assertLessEqual(lifetime, rdfvalue.DurationSeconds("1s"))
r = hunt_rules[0].client_rule_set
self.assertEqual(r.match_mode,
foreman_rules.ForemanClientRuleSet.MatchMode.MATCH_ANY)
self.assertLen(r.rules, 3)
self.assertEqual(r.rules[0].rule_type,
foreman_rules.ForemanClientRule.Type.OS)
self.assertEqual(r.rules[0].os.os_windows, False)
self.assertEqual(r.rules[0].os.os_linux, False)
self.assertEqual(r.rules[0].os.os_darwin, True)
self.assertEqual(r.rules[1].rule_type,
foreman_rules.ForemanClientRule.Type.INTEGER)
self.assertEqual(r.rules[1].integer.field, "CLIENT_CLOCK")
self.assertEqual(
r.rules[1].integer.operator,
foreman_rules.ForemanIntegerClientRule.Operator.GREATER_THAN)
self.assertEqual(r.rules[1].integer.value, 1336650631137737)
self.assertEqual(r.rules[2].rule_type,
foreman_rules.ForemanClientRule.Type.REGEX)
self.assertEqual(r.rules[2].regex.field, "SYSTEM")
self.assertEqual(r.rules[2].regex.attribute_regex, "Linux")
def testWizardStepCounterIsShownCorrectly(self):
# Open up and click on View Hunts.
self.Open("/#/hunts")
# Open up "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
# Click on the FileFinder item in Filesystem flows list
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
self.Click("css=#_Filesystem > i.jstree-icon")
self.Click("link=File Finder")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Step 1 out of 6')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Step 2 out of 6')")
def testLiteralExpressionIsProcessedCorrectly(self):
"""Literals are raw bytes. Testing that raw bytes are processed right."""
# Open up and click on View Hunts.
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
# Open up "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
# Click on Filesystem item in flows list
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
self.Click("css=#_Filesystem > i.jstree-icon")
# Click on the FileFinder item in Filesystem flows list
self.Click("link=File Finder")
self.Click("css=label:contains('Conditions') ~ * button")
self.Select("css=label:contains('Condition type') ~ * select",
"Contents literal match")
self.Type("css=label:contains('Literal') ~ * input", "foo\\x0d\\xc8bar")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Review')")
# Check that the arguments summary is present.
self.WaitUntil(
self.IsElementPresent,
"css=grr-wizard-form:contains('%s')" % file_finder.FileFinder.__name__)
self.WaitUntil(self.IsTextPresent, b"foo\\x0d\\xc8bar")
# Click on "Run" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Created Hunt')")
# Close the window and check that the hunt was created.
self.Click("css=button.Next")
# Check that the hunt object was actually created
hunts_list = sorted(
data_store.REL_DB.ReadHuntObjects(offset=0, count=10),
key=lambda x: x.create_time)
self.assertLen(hunts_list, 1)
# Check that the hunt was created with a correct literal value.
hunt = hunts_list[0]
self.assertEqual(hunt.args.standard.flow_name,
file_finder.FileFinder.__name__)
self.assertEqual(
hunt.args.standard.flow_args.conditions[0].contents_literal_match
.literal, b"foo\x0d\xc8bar")
def testOutputPluginsListEmptyWhenNoDefaultOutputPluginSet(self):
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > i.jstree-icon")
self.Click("link=ListProcesses")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# There should be no dummy output plugin visible.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
self.WaitUntilNot(self.IsElementPresent,
"css=grr-wizard-form:contains('Dummy do do')")
def testDefaultOutputPluginIsCorrectlyAddedToThePluginsList(self):
with test_lib.ConfigOverrider(
{"AdminUI.new_hunt_wizard.default_output_plugin": "DummyOutputPlugin"}):
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > i.jstree-icon")
self.Click("link=ListProcesses")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# Dummy output plugin should be added by default.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('DummyOutputPlugin')")
def testLabelsHuntRuleDisplaysAvailableLabels(self):
client_id = self.SetupClient(0)
self.AddClientLabel(client_id, u"owner1", u"foo")
self.AddClientLabel(client_id, u"owner2", u"bar")
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > i.jstree-icon")
self.Click("link=ListProcesses")
# Click 'Next' to go to hunt parameters page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
# Click 'Next' to go to output plugins page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
# Click 'Next' to go to hunt rules page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
# Select 'Clients With Label' rule.
self.Select("css=grr-new-hunt-wizard-form div.well select", "Label")
# Check that there's an option present for labels 'bar' (this option
# should be selected) and for label 'foo'.
self.WaitUntil(
self.IsElementPresent, "css=grr-new-hunt-wizard-form div.well "
".form-group:has(label:contains('Label')) "
"select option:selected[label=bar]")
self.WaitUntil(
self.IsElementPresent, "css=grr-new-hunt-wizard-form div.well "
".form-group:has(label:contains('Label')) "
"select option:not(:selected)[label=foo]")
def testLabelsHuntRuleMatchesCorrectClients(self):
client_ids = self.SetupClients(10)
self.AddClientLabel(client_ids[1], u"owner1", u"foo")
self.AddClientLabel(client_ids[1], u"owner2", u"bar")
self.AddClientLabel(client_ids[7], u"GRR", u"bar")
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > i.jstree-icon")
self.Click("link=ListProcesses")
# Click 'Next' to go to the output plugins page, hunt parameters page
# and then to hunt rules page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Select 'Clients With Label' rule.
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Select("css=grr-new-hunt-wizard-form div.well select", "Label")
self.Select(
"css=grr-new-hunt-wizard-form div.well .form-group "
".form-group:has(label:contains('Label')):nth-last-of-type(1) "
"select", "foo")
self.Click("css=grr-new-hunt-wizard-form div.well .form-group "
".form-group:has(label:contains('Add label')) button")
self.Select(
"css=grr-new-hunt-wizard-form div.well .form-group "
".form-group:has(label:contains('Label')):nth-last-of-type(1) "
"select", "bar")
self.Select(
"css=grr-new-hunt-wizard-form div.well .form-group "
".form-group:has(label:contains('Match mode')) select", "Match any")
# Click 'Next' to go to hunt overview page. Then click 'Next' to go to
# submit the hunt and wait until it's created.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Created Hunt')")
hunts_list = sorted(
data_store.REL_DB.ReadHuntObjects(offset=0, count=10),
key=lambda x: x.create_time)
hunt = hunts_list[0]
lib_hunt.StartHunt(hunt.hunt_id)
foreman_obj = foreman.Foreman()
for client_id in client_ids:
tasks_assigned = foreman_obj.AssignTasksToClient(client_id)
if client_id in [client_ids[1], client_ids[7]]:
self.assertTrue(tasks_assigned)
else:
self.assertFalse(tasks_assigned)
def CreateSampleHunt(self, description, token=None):
self.StartHunt(
description=description,
flow_runner_args=rdf_flow_runner.FlowRunnerArgs(
flow_name=transfer.GetFile.__name__),
flow_args=transfer.GetFileArgs(
pathspec=rdf_paths.PathSpec(
path="/tmp/evil.txt",
pathtype=rdf_paths.PathSpec.PathType.TSK,
)),
client_rule_set=self._CreateForemanClientRuleSet(),
output_plugins=[
rdf_output_plugin.OutputPluginDescriptor(
plugin_name="DummyOutputPlugin",
plugin_args=gui_test_lib.DummyOutputPlugin.args_type(
filename_regex="blah!", fetch_binaries=True))
],
client_rate=60,
paused=True,
token=token)
def testPathAutocomplete(self):
# Open Hunts
self.Open("/#/hunts")
# Open "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
# Click on Filesystem item in flows list
self.Click("css=#_Filesystem > i.jstree-icon")
# Click on the FileFinder item in Filesystem flows list
self.Click("link=File Finder")
input_selector = "css=grr-form-glob-expression input[uib-typeahead]"
# Change "path"
self.Type(input_selector, "/foo/%%path")
self.WaitUntil(self.IsElementPresent,
"css=[uib-typeahead-popup]:contains('%%environ_path%%')")
self.GetElement(input_selector).send_keys(keys.Keys.ENTER)
self.WaitUntilEqual("/foo/%%environ_path%%", self.GetValue,
input_selector + ":text")
if __name__ == "__main__":
app.run(test_lib.main)
| 39.719799 | 80 | 0.669032 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from selenium.webdriver.common import keys
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import data_store
from grr_response_server import foreman
from grr_response_server import foreman_rules
from grr_response_server import hunt as lib_hunt
from grr_response_server.flows.general import file_finder
from grr_response_server.flows.general import transfer
from grr_response_server.gui import gui_test_lib
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
from grr_response_server.rdfvalues import output_plugin as rdf_output_plugin
from grr.test_lib import test_lib
class TestNewHuntWizard(gui_test_lib.GRRSeleniumHuntTest):
@staticmethod
def FindForemanRules(hunt_urn, token):
rules = data_store.REL_DB.ReadAllForemanRules()
return [rule for rule in rules if rule.hunt_id == hunt_urn.Basename()]
def testNewHuntWizard(self):
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.WaitUntil(self.IsElementPresent, "css=a[grrtarget=hunts]")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsElementPresent, "css=button[name=NewHunt]")
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
self.Click("css=#_Filesystem > i.jstree-icon")
self.Click("link=File Finder")
self.WaitUntil(self.IsElementPresent,
"css=grr-new-hunt-wizard-form label:contains('Paths')")
self.Type(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-repeated-field:has(label:contains('Paths')) "
"input", "/tmp")
self.Select(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Pathtype')) "
"select", "TSK")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsElementPresent,
"css=grr-new-hunt-wizard-form label:contains('Paths')")
self.assertEqual(
"/tmp",
self.GetValue(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-repeated-field:has(label:contains('Paths')) input"))
self.assertEqual(
"TSK",
self.GetSelectedLabel(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Pathtype')) select"
))
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
self.Select("css=grr-new-hunt-wizard-form select", "DummyOutputPlugin")
self.Type(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Filepath Regex')) "
"input", "some regex")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
self.WaitUntil(self.IsElementPresent, "css=button.Next:not([disabled])")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('No rules specified!')")
self.Select(
"css=grr-configure-rules-page "
"label:contains('Match mode') ~ * select", "Match any")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('No rules specified!')")
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Select("css=grr-configure-rules-page div.well:nth(0) select", "Regex")
rule = foreman_rules.ForemanRegexClientRule
label = rule.ForemanStringField.SYSTEM.description
self.Select(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Field') ~ * select", label)
self.Type(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Attribute regex') ~ * input", "Linux")
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Select("css=grr-configure-rules-page div.well:nth(0) select",
"Integer")
rule = foreman_rules.ForemanIntegerClientRule
label = rule.ForemanIntegerField.CLIENT_CLOCK.description
self.Select(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Field') ~ * select", label)
self.Select(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Operator') ~ * select", "GREATER_THAN")
self.Type(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Value') ~ * input", "1336650631137737")
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Click("css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Os darwin') ~ * input[type=checkbox]")
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Review')")
# Check that the arguments summary is present.
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Paths')")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('/tmp')")
# Check that output plugins are shown.
self.assertTrue(
self.IsElementPresent(
"css=grr-wizard-form:contains('DummyOutputPlugin')"))
self.assertTrue(
self.IsElementPresent("css=grr-wizard-form:contains('some regex')"))
# Check that there's no deprecated rules summary.
self.assertFalse(
self.IsElementPresent("css=grr-wizard-form:contains('Regex rules')"))
self.assertFalse(
self.IsElementPresent("css=grr-wizard-form:contains('Integer rules')"))
self.assertTrue(
self.IsElementPresent(
"css=grr-wizard-form:contains('Client rule set')"))
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Created Hunt')")
self.Click("css=button.Next")
self.Click("css=grr-hunts-list td:contains('gui_user')")
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-inspector:contains('GenericHunt')")
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-inspector:contains('Flow Arguments')")
self.assertTrue(
self.IsElementPresent("css=grr-hunt-inspector:contains('Paths')"))
self.assertTrue(
self.IsElementPresent("css=grr-hunt-inspector:contains('/tmp')"))
self.assertTrue(
self.IsElementPresent(
"css=grr-hunt-inspector:contains('DummyOutputPlugin')"))
self.assertTrue(
self.IsElementPresent("css=grr-hunt-inspector:contains('some regex')"))
self.assertFalse(
self.IsElementPresent("css=grr-hunt-inspector:contains('Regex rules')"))
self.assertFalse(
self.IsElementPresent(
"css=grr-hunt-inspector:contains('Integer rules')"))
# Check that rules summary is present.
self.assertTrue(
self.IsElementPresent(
"css=grr-hunt-inspector:contains('Client Rule Set')"))
# Check that the hunt object was actually created
hunts_list = sorted(
data_store.REL_DB.ReadHuntObjects(offset=0, count=10),
key=lambda x: x.create_time)
self.assertLen(hunts_list, 1)
# Check that the hunt was created with a correct flow
hunt = hunts_list[0]
self.assertEqual(hunt.args.standard.flow_name,
file_finder.FileFinder.__name__)
self.assertEqual(hunt.args.standard.flow_args.paths[0], "/tmp")
self.assertEqual(hunt.args.standard.flow_args.pathtype,
rdf_paths.PathSpec.PathType.TSK)
# self.assertEqual(hunt.args.flow_args.ignore_errors, True)
self.assertEqual(hunt.output_plugins[0].plugin_name, "DummyOutputPlugin")
# Check that hunt was not started
self.assertEqual(hunt.hunt_state, hunt.HuntState.PAUSED)
lib_hunt.StartHunt(hunt.hunt_id)
hunt_rules = self.FindForemanRules(
rdfvalue.RDFURN("hunts").Add(hunt.hunt_id), token=self.token)
# Check that the hunt was created with correct rules
self.assertLen(hunt_rules, 1)
lifetime = hunt_rules[0].GetLifetime()
lifetime -= rdfvalue.DurationSeconds("2w")
self.assertLessEqual(lifetime, rdfvalue.DurationSeconds("1s"))
r = hunt_rules[0].client_rule_set
self.assertEqual(r.match_mode,
foreman_rules.ForemanClientRuleSet.MatchMode.MATCH_ANY)
self.assertLen(r.rules, 3)
self.assertEqual(r.rules[0].rule_type,
foreman_rules.ForemanClientRule.Type.OS)
self.assertEqual(r.rules[0].os.os_windows, False)
self.assertEqual(r.rules[0].os.os_linux, False)
self.assertEqual(r.rules[0].os.os_darwin, True)
self.assertEqual(r.rules[1].rule_type,
foreman_rules.ForemanClientRule.Type.INTEGER)
self.assertEqual(r.rules[1].integer.field, "CLIENT_CLOCK")
self.assertEqual(
r.rules[1].integer.operator,
foreman_rules.ForemanIntegerClientRule.Operator.GREATER_THAN)
self.assertEqual(r.rules[1].integer.value, 1336650631137737)
self.assertEqual(r.rules[2].rule_type,
foreman_rules.ForemanClientRule.Type.REGEX)
self.assertEqual(r.rules[2].regex.field, "SYSTEM")
self.assertEqual(r.rules[2].regex.attribute_regex, "Linux")
  def testWizardStepCounterIsShownCorrectly(self):
    """Checks that the 'Step X out of 6' counter advances with the wizard."""
    # Open up and click on View Hunts.
    self.Open("/#/hunts")
    # Open up "New Hunt" wizard
    self.Click("css=button[name=NewHunt]")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('What to run?')")
    # Click on the FileFinder item in Filesystem flows list
    self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
    self.Click("css=#_Filesystem > i.jstree-icon")
    self.Click("link=File Finder")
    # Selecting a flow should land us on step 1.
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Step 1 out of 6')")
    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    # Advancing one page should increment the counter to step 2.
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Step 2 out of 6')")
  def testLiteralExpressionIsProcessedCorrectly(self):
    """Checks that a literal-match condition survives the wizard round trip.

    The escaped literal typed into the form ("foo\\x0d\\xc8bar") must be
    unescaped and stored as raw bytes in the created hunt's flow args.
    """
    # Open up and click on View Hunts.
    self.Open("/")
    self.Click("css=a[grrtarget=hunts]")
    # Open up "New Hunt" wizard
    self.Click("css=button[name=NewHunt]")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('What to run?')")
    # Click on Filesystem item in flows list
    self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
    self.Click("css=#_Filesystem > i.jstree-icon")
    # Click on the FileFinder item in Filesystem flows list
    self.Click("link=File Finder")
    # Add a "Contents literal match" condition with an escaped literal.
    self.Click("css=label:contains('Conditions') ~ * button")
    self.Select("css=label:contains('Condition type') ~ * select",
                "Contents literal match")
    self.Type("css=label:contains('Literal') ~ * input", "foo\\x0d\\xc8bar")
    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Hunt parameters')")
    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('How to process results')")
    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Where to run?')")
    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Review')")
    # Check that the arguments summary is present.
    self.WaitUntil(
        self.IsElementPresent,
        "css=grr-wizard-form:contains('%s')" % file_finder.FileFinder.__name__)
    # The review page shows the literal in its escaped form.
    self.WaitUntil(self.IsTextPresent, b"foo\\x0d\\xc8bar")
    # Click on "Run" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Created Hunt')")
    # Close the window and check that the hunt was created.
    self.Click("css=button.Next")
    # Check that the hunt object was actually created
    hunts_list = sorted(
        data_store.REL_DB.ReadHuntObjects(offset=0, count=10),
        key=lambda x: x.create_time)
    self.assertLen(hunts_list, 1)
    # Check that the hunt was created with a correct literal value.
    hunt = hunts_list[0]
    self.assertEqual(hunt.args.standard.flow_name,
                     file_finder.FileFinder.__name__)
    # Stored value must be the *unescaped* byte string.
    self.assertEqual(
        hunt.args.standard.flow_args.conditions[0].contents_literal_match
        .literal, b"foo\x0d\xc8bar")
  def testOutputPluginsListEmptyWhenNoDefaultOutputPluginSet(self):
    """Checks no output plugin is pre-selected without a configured default."""
    self.Open("/#main=ManageHunts")
    self.Click("css=button[name=NewHunt]")
    # Select "List Processes" flow.
    self.Click("css=#_Processes > i.jstree-icon")
    self.Click("link=ListProcesses")
    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Hunt parameters')")
    # There should be no dummy output plugin visible.
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('How to process results')")
    self.WaitUntilNot(self.IsElementPresent,
                      "css=grr-wizard-form:contains('Dummy do do')")
  def testDefaultOutputPluginIsCorrectlyAddedToThePluginsList(self):
    """Checks the configured default output plugin is pre-added in the wizard."""
    # Override config so that DummyOutputPlugin is the wizard default.
    with test_lib.ConfigOverrider(
        {"AdminUI.new_hunt_wizard.default_output_plugin": "DummyOutputPlugin"}):
      self.Open("/#main=ManageHunts")
      self.Click("css=button[name=NewHunt]")
      # Select "List Processes" flow.
      self.Click("css=#_Processes > i.jstree-icon")
      self.Click("link=ListProcesses")
      # Click on "Next" button
      self.Click("css=grr-new-hunt-wizard-form button.Next")
      self.WaitUntil(self.IsElementPresent,
                     "css=grr-wizard-form:contains('Hunt parameters')")
      # Dummy output plugin should be added by default.
      self.Click("css=grr-new-hunt-wizard-form button.Next")
      self.WaitUntil(self.IsElementPresent,
                     "css=grr-wizard-form:contains('How to process results')")
      self.WaitUntil(self.IsElementPresent,
                     "css=grr-wizard-form:contains('DummyOutputPlugin')")
  def testLabelsHuntRuleDisplaysAvailableLabels(self):
    """Checks the 'Clients With Label' rule lists labels known to the system."""
    client_id = self.SetupClient(0)
    # Two labels from different owners; both should be selectable.
    self.AddClientLabel(client_id, u"owner1", u"foo")
    self.AddClientLabel(client_id, u"owner2", u"bar")
    self.Open("/#main=ManageHunts")
    self.Click("css=button[name=NewHunt]")
    # Select "List Processes" flow.
    self.Click("css=#_Processes > i.jstree-icon")
    self.Click("link=ListProcesses")
    # Click 'Next' to go to hunt parameters page.
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    # Click 'Next' to go to output plugins page.
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    # Click 'Next' to go to hunt rules page.
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
    # Select 'Clients With Label' rule.
    self.Select("css=grr-new-hunt-wizard-form div.well select", "Label")
    # Check that there's an option present for label 'bar' (this option
    # should be selected, being first alphabetically) and one for 'foo'.
    self.WaitUntil(
        self.IsElementPresent, "css=grr-new-hunt-wizard-form div.well "
        ".form-group:has(label:contains('Label')) "
        "select option:selected[label=bar]")
    self.WaitUntil(
        self.IsElementPresent, "css=grr-new-hunt-wizard-form div.well "
        ".form-group:has(label:contains('Label')) "
        "select option:not(:selected)[label=foo]")
  def testLabelsHuntRuleMatchesCorrectClients(self):
    """Checks a label-based foreman rule tasks only clients with the labels.

    Builds a hunt matching labels 'foo' OR 'bar' and verifies the foreman
    assigns tasks only to the two labelled clients out of ten.
    """
    client_ids = self.SetupClients(10)
    # Client 1 carries both labels, client 7 only 'bar'; others none.
    self.AddClientLabel(client_ids[1], u"owner1", u"foo")
    self.AddClientLabel(client_ids[1], u"owner2", u"bar")
    self.AddClientLabel(client_ids[7], u"GRR", u"bar")
    self.Open("/#main=ManageHunts")
    self.Click("css=button[name=NewHunt]")
    # Select "List Processes" flow and walk to the rules page.
    self.Click("css=#_Processes > i.jstree-icon")
    self.Click("link=ListProcesses")
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Hunt parameters')")
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('How to process results')")
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Where to run?')")
    # Add a Label rule matching 'foo'...
    self.Click("css=grr-configure-rules-page button[name=Add]")
    self.Select("css=grr-new-hunt-wizard-form div.well select", "Label")
    self.Select(
        "css=grr-new-hunt-wizard-form div.well .form-group "
        ".form-group:has(label:contains('Label')):nth-last-of-type(1) "
        "select", "foo")
    # ...then add 'bar' and set the match mode to "Match any" (OR).
    self.Click("css=grr-new-hunt-wizard-form div.well .form-group "
               ".form-group:has(label:contains('Add label')) button")
    self.Select(
        "css=grr-new-hunt-wizard-form div.well .form-group "
        ".form-group:has(label:contains('Label')):nth-last-of-type(1) "
        "select", "bar")
    self.Select(
        "css=grr-new-hunt-wizard-form div.well .form-group "
        ".form-group:has(label:contains('Match mode')) select", "Match any")
    # Finish the wizard and create the hunt.
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Created Hunt')")
    hunts_list = sorted(
        data_store.REL_DB.ReadHuntObjects(offset=0, count=10),
        key=lambda x: x.create_time)
    hunt = hunts_list[0]
    lib_hunt.StartHunt(hunt.hunt_id)
    # Only the labelled clients (1 and 7) should get tasks from the foreman.
    foreman_obj = foreman.Foreman()
    for client_id in client_ids:
      tasks_assigned = foreman_obj.AssignTasksToClient(client_id)
      if client_id in [client_ids[1], client_ids[7]]:
        self.assertTrue(tasks_assigned)
      else:
        self.assertFalse(tasks_assigned)
  def CreateSampleHunt(self, description, token=None):
    """Creates a paused GetFile hunt used as a fixture by copy/edit tests.

    The hunt fetches /tmp/evil.txt over TSK, attaches a DummyOutputPlugin
    and uses the standard foreman client rule set of this test class.

    Args:
      description: Description string for the new hunt.
      token: Optional access token used when creating the hunt.
    """
    self.StartHunt(
        description=description,
        flow_runner_args=rdf_flow_runner.FlowRunnerArgs(
            flow_name=transfer.GetFile.__name__),
        flow_args=transfer.GetFileArgs(
            pathspec=rdf_paths.PathSpec(
                path="/tmp/evil.txt",
                pathtype=rdf_paths.PathSpec.PathType.TSK,
            )),
        client_rule_set=self._CreateForemanClientRuleSet(),
        output_plugins=[
            rdf_output_plugin.OutputPluginDescriptor(
                plugin_name="DummyOutputPlugin",
                plugin_args=gui_test_lib.DummyOutputPlugin.args_type(
                    filename_regex="blah!", fetch_binaries=True))
        ],
        client_rate=60,
        # Created paused so tests can inspect/copy it before it runs.
        paused=True,
        token=token)
  def testPathAutocomplete(self):
    """Checks %%knowledgebase%% autocompletion in the glob-expression input."""
    # Open Hunts
    self.Open("/#/hunts")
    # Open "New Hunt" wizard
    self.Click("css=button[name=NewHunt]")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('What to run?')")
    # Click on Filesystem item in flows list
    self.Click("css=#_Filesystem > i.jstree-icon")
    # Click on the FileFinder item in Filesystem flows list
    self.Click("link=File Finder")
    input_selector = "css=grr-form-glob-expression input[uib-typeahead]"
    # Change "path": typing a %% prefix should pop up typeahead suggestions.
    self.Type(input_selector, "/foo/%%path")
    self.WaitUntil(self.IsElementPresent,
                   "css=[uib-typeahead-popup]:contains('%%environ_path%%')")
    # Accepting the suggestion with ENTER completes the expression in place.
    self.GetElement(input_selector).send_keys(keys.Keys.ENTER)
    self.WaitUntilEqual("/foo/%%environ_path%%", self.GetValue,
                        input_selector + ":text")
# Run the GRR test entry point when executed as a script.
if __name__ == "__main__":
  app.run(test_lib.main)
| true | true |
f70f26a0d985579cf884d6fe98e9714972271079 | 9,486 | py | Python | gcloud/datastore/batch.py | scrapinghub/gcloud-python | 1ec6d636ebf2c4d618aca6b2485fbbfa5f0fde29 | [
"Apache-2.0"
] | null | null | null | gcloud/datastore/batch.py | scrapinghub/gcloud-python | 1ec6d636ebf2c4d618aca6b2485fbbfa5f0fde29 | [
"Apache-2.0"
] | null | null | null | gcloud/datastore/batch.py | scrapinghub/gcloud-python | 1ec6d636ebf2c4d618aca6b2485fbbfa5f0fde29 | [
"Apache-2.0"
] | 2 | 2017-07-30T16:18:23.000Z | 2020-10-14T11:24:18.000Z | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with a batch of updates / deletes.
Batches provide the ability to execute multiple operations
in a single request to the Cloud Datastore API.
See
https://cloud.google.com/datastore/docs/concepts/entities#Datastore_Batch_operations
"""
from gcloud.datastore import helpers
from gcloud.datastore.key import _dataset_ids_equal
from gcloud.datastore._generated import datastore_pb2 as _datastore_pb2
class Batch(object):
    """Accumulates datastore mutations and ships them in one commit.

    A batch records :meth:`put` and :meth:`delete` calls as protobuf
    mutations and submits them all in a single request to the Cloud
    Datastore API, either via an explicit :meth:`commit` call::

        >>> from gcloud.datastore.batch import Batch
        >>> batch = Batch()
        >>> batch.put(entity1)
        >>> batch.put(entity2)
        >>> batch.delete(key3)
        >>> batch.commit()

    or implicitly when used as a context manager -- the commit happens on
    a clean exit and is skipped if the block raises::

        >>> with Batch() as batch:
        ...     batch.put(entity1)
        ...     batch.delete(key3)

        >>> with Batch() as batch:
        ...     do_some_work(batch)
        ...     raise Exception()  # nothing is committed

    :type client: :class:`gcloud.datastore.client.Client`
    :param client: The client used to connect to datastore.
    """

    _id = None  # "protected" attribute, always None for non-transactions

    def __init__(self, client):
        """Bind *client* and start with an empty commit request."""
        self._partial_key_entities = []
        self._commit_request = _datastore_pb2.CommitRequest()
        self._client = client

    def current(self):
        """Return the topmost batch / transaction, or None."""
        return self._client.current_batch

    @property
    def dataset_id(self):
        """Dataset ID the batch operates on (delegated to the client).

        :rtype: :class:`str`
        """
        return self._client.dataset_id

    @property
    def namespace(self):
        """Namespace the batch operates in (delegated to the client).

        :rtype: :class:`str`
        """
        return self._client.namespace

    @property
    def connection(self):
        """Connection used to submit the commit (delegated to the client).

        :rtype: :class:`gcloud.datastore.connection.Connection`
        """
        return self._client.connection

    def _add_partial_key_entity_pb(self):
        """Append an ``insert_auto_id`` mutation and return its entity pb.

        :rtype: :class:`gcloud.datastore._generated.entity_pb2.Entity`
        """
        return self.mutations.insert_auto_id.add()

    def _add_complete_key_entity_pb(self):
        """Append an ``upsert`` mutation and return its entity pb.

        ``upsert`` (rather than ``insert`` or ``update``) avoids races on
        whether the entity already exists in the datastore.

        :rtype: :class:`gcloud.datastore._generated.entity_pb2.Entity`
        """
        return self.mutations.upsert.add()

    def _add_delete_key_pb(self):
        """Append a ``delete`` mutation and return its key pb.

        :rtype: :class:`gcloud.datastore._generated.entity_pb2.Key`
        """
        return self.mutations.delete.add()

    @property
    def mutations(self):
        """The Mutation protobuf accumulated so far.

        Every :meth:`put` / :meth:`delete` call adds to this message; it is
        sent to the server in a single commit request.

        :rtype: :class:`gcloud.datastore._generated.datastore_pb2.Mutation`
        """
        return self._commit_request.mutation

    def put(self, entity):
        """Record *entity* to be saved when the batch commits.

        Properties currently on the entity fully replace the stored ones.
        Entities with a partial key are sent as ``insert_auto_id`` and have
        their key completed with the server-assigned ID after commit.

        :type entity: :class:`gcloud.datastore.entity.Entity`
        :param entity: the entity to be saved.

        :raises: ValueError if entity has no key assigned, or if the key's
                 ``dataset_id`` does not match ours.
        """
        if entity.key is None:
            raise ValueError("Entity must have a key")
        if not _dataset_ids_equal(self.dataset_id, entity.key.dataset_id):
            raise ValueError("Key must be from same dataset as batch")

        if entity.key.is_partial:
            target_pb = self._add_partial_key_entity_pb()
            # Remember the entity so its key can be completed after commit.
            self._partial_key_entities.append(entity)
        else:
            target_pb = self._add_complete_key_entity_pb()
        _assign_entity_to_pb(target_pb, entity)

    def delete(self, key):
        """Record *key* to be deleted when the batch commits.

        :type key: :class:`gcloud.datastore.key.Key`
        :param key: the key to be deleted.

        :raises: ValueError if key is not complete, or if the key's
                 ``dataset_id`` does not match ours.
        """
        if key.is_partial:
            raise ValueError("Key must be complete")
        if not _dataset_ids_equal(self.dataset_id, key.dataset_id):
            raise ValueError("Key must be from same dataset as batch")

        prepared_pb = helpers._prepare_key_for_request(key.to_protobuf())
        self._add_delete_key_pb().CopyFrom(prepared_pb)

    def begin(self):
        """No-op; overridden by :class:`gcloud.datastore.transaction.Transaction`."""

    def commit(self):
        """Send the accumulated mutations to the server.

        Called automatically on clean context-manager exit; may also be
        called explicitly.  On success, partial keys recorded via
        :meth:`put` are completed with the server-assigned IDs.
        """
        _, completed_keys = self.connection.commit(
            self.dataset_id, self._commit_request, self._id)
        # On success the back-end returns completed keys matching
        # ``_partial_key_entities`` in both length and order.
        for entity, key_pb in zip(self._partial_key_entities, completed_keys):
            assigned_id = key_pb.path_element[-1].id
            entity.key = entity.key.completed_key(assigned_id)

    def rollback(self):
        """No-op; overridden by :class:`gcloud.datastore.transaction.Transaction`."""
        pass

    def __enter__(self):
        self._client._push_batch(self)
        self.begin()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type is None:
                self.commit()
            else:
                self.rollback()
        finally:
            self._client._pop_batch()
def _assign_entity_to_pb(entity_pb, entity):
    """Serialize *entity* and copy the result into *entity_pb*.

    Helper for ``Batch.put``: converts the entity to its protobuf form,
    normalizes its key for the request, and overwrites the mutation's
    entity message in place.

    :type entity_pb: :class:`gcloud.datastore._generated.entity_pb2.Entity`
    :param entity_pb: The entity owned by a mutation.

    :type entity: :class:`gcloud.datastore.entity.Entity`
    :param entity: The entity being updated within the batch / transaction.
    """
    serialized = helpers.entity_to_protobuf(entity)
    serialized.key.CopyFrom(
        helpers._prepare_key_for_request(serialized.key))
    entity_pb.CopyFrom(serialized)
| 35.931818 | 84 | 0.650538 |
from gcloud.datastore import helpers
from gcloud.datastore.key import _dataset_ids_equal
from gcloud.datastore._generated import datastore_pb2 as _datastore_pb2
class Batch(object):
    """Collects datastore mutations and sends them in a single commit request."""
    # Always None for plain batches; transaction subclasses carry a real ID.
    _id = None
    def __init__(self, client):
        """Bind *client* and start with an empty commit request."""
        self._client = client
        self._commit_request = _datastore_pb2.CommitRequest()
        self._partial_key_entities = []
    def current(self):
        """Return the topmost batch / transaction, or None."""
        return self._client.current_batch
    @property
    def dataset_id(self):
        """Dataset ID the batch operates on (delegated to the client)."""
        return self._client.dataset_id
    @property
    def namespace(self):
        """Namespace the batch operates in (delegated to the client)."""
        return self._client.namespace
    @property
    def connection(self):
        """Connection used to submit the commit (delegated to the client)."""
        return self._client.connection
    def _add_partial_key_entity_pb(self):
        """Append an ``insert_auto_id`` mutation; return its entity pb."""
        return self.mutations.insert_auto_id.add()
    def _add_complete_key_entity_pb(self):
        """Append an ``upsert`` mutation (race-free for existing/new); return its entity pb."""
        return self.mutations.upsert.add()
    def _add_delete_key_pb(self):
        """Append a ``delete`` mutation; return its key pb."""
        return self.mutations.delete.add()
    @property
    def mutations(self):
        """The Mutation protobuf accumulated so far, sent on commit."""
        return self._commit_request.mutation
    def put(self, entity):
        """Record *entity* to be saved on commit; raises ValueError on bad key."""
        if entity.key is None:
            raise ValueError("Entity must have a key")
        if not _dataset_ids_equal(self.dataset_id, entity.key.dataset_id):
            raise ValueError("Key must be from same dataset as batch")
        if entity.key.is_partial:
            entity_pb = self._add_partial_key_entity_pb()
            # Remember the entity so its key can be completed after commit.
            self._partial_key_entities.append(entity)
        else:
            entity_pb = self._add_complete_key_entity_pb()
        _assign_entity_to_pb(entity_pb, entity)
    def delete(self, key):
        """Record *key* to be deleted on commit; raises ValueError on bad key."""
        if key.is_partial:
            raise ValueError("Key must be complete")
        if not _dataset_ids_equal(self.dataset_id, key.dataset_id):
            raise ValueError("Key must be from same dataset as batch")
        key_pb = helpers._prepare_key_for_request(key.to_protobuf())
        self._add_delete_key_pb().CopyFrom(key_pb)
    def begin(self):
        """No-op; overridden by Transaction. (Docstring restores the body
        that comment-stripping removed, leaving this def empty.)"""
    def commit(self):
        """Send accumulated mutations; complete partial keys with server IDs."""
        _, updated_keys = self.connection.commit(
            self.dataset_id, self._commit_request, self._id)
        # Server returns completed keys matching _partial_key_entities in order.
        for new_key_pb, entity in zip(updated_keys,
                                      self._partial_key_entities):
            new_id = new_key_pb.path_element[-1].id
            entity.key = entity.key.completed_key(new_id)
    def rollback(self):
        """No-op; overridden by Transaction."""
        pass
    def __enter__(self):
        # Push onto the client's batch stack so nested code can find us.
        self._client._push_batch(self)
        self.begin()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Commit on clean exit, roll back on exception; always pop the stack.
        try:
            if exc_type is None:
                self.commit()
            else:
                self.rollback()
        finally:
            self._client._pop_batch()
def _assign_entity_to_pb(entity_pb, entity):
    """Copy *entity* into *entity_pb*, normalizing its key for the request."""
    bare_entity_pb = helpers.entity_to_protobuf(entity)
    key_pb = helpers._prepare_key_for_request(bare_entity_pb.key)
    bare_entity_pb.key.CopyFrom(key_pb)
    entity_pb.CopyFrom(bare_entity_pb)
| true | true |
f70f27ed025a93603507b3a8a8661a661342efc9 | 5,888 | py | Python | interkamen_career/modules/mechanics_economic.py | Acetonen/Interkamen_career | 75cc0a5832b7c1e303967cc337bb001e3383eb9e | [
"MIT"
] | null | null | null | interkamen_career/modules/mechanics_economic.py | Acetonen/Interkamen_career | 75cc0a5832b7c1e303967cc337bb001e3383eb9e | [
"MIT"
] | null | null | null | interkamen_career/modules/mechanics_economic.py | Acetonen/Interkamen_career | 75cc0a5832b7c1e303967cc337bb001e3383eb9e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Visualise statistic by machine economic."""
from __future__ import annotations
import pandas as pd
from matplotlib import pyplot as plt
from typing import Dict
from .mechanic_report import MechReports
from .administration.logger_cfg import Logs
from .support_modules.custom_exceptions import MainMenu
from .support_modules.standart_functions import (
BasicFunctionsS
as BasF_S
)
LOGGER = Logs().give_logger(__name__)
class MechEconomic(MechReports):
    """Visualise and maintain monthly machine-economic statistics.

    Stores, per month, the money spent on each machine and renders
    reports such as the maintenance cost of one working day per machine.
    """

    __slots__ = (
        'mech_econ_path',
        'mech_econ_data',
        'mech_econ_file',
    )

    def __init__(self, user):
        """Load mech econom data from disk, or start with an empty frame.

        :param user: active user object; forwarded to the MechReports base
            and used for load/dump of the data file.
        """
        super().__init__(user)
        self.mech_econ_data = {}
        # NOTE: 'mech_ecomomic' is misspelled, but deliberately preserved --
        # existing installations already store their data under this path.
        self.mech_econ_path = (
            super().get_root_path() / 'data' / 'mech_ecomomic'
        )
        if self.mech_econ_path.exists():
            self.mech_econ_file = super().load_data(
                data_path=self.mech_econ_path,
                user=user,
            )
        else:
            self.mech_econ_file = pd.DataFrame(self.mech_econ_data, index=[0])

    def _save_mech_econom(self):
        """Append the freshly entered month to the file, dump and log it."""
        self.mech_econ_file = self.mech_econ_file.append(
            self.mech_econ_data,
            ignore_index=True
        )
        self._dump_mech_econ_data()
        self._log_mech_econ_creation()

    def _dump_mech_econ_data(self):
        """Persist the economic DataFrame to disk."""
        super().dump_data(
            data_path=self.mech_econ_path,
            base_to_dump=self.mech_econ_file,
            user=self.user,
        )

    def _log_mech_econ_creation(self):
        """Emit a warning-level log entry about report creation."""
        report_name = '{}-{}'.format(
            self.mech_econ_data['year'],
            self.mech_econ_data['month'],
        )
        LOGGER.warning(
            f"User '{self.user.login}' create mechanic econom.: {report_name}"
        )

    def _visualise_one_day_cost(self):
        """Plot the cost of one working day for each machine.

        :raises MainMenu: propagated from :meth:`_chose_year` when the
            user aborts year selection.
        """
        year = self._chose_year()
        data_by_year = super().give_dataframe_by_year(year)
        # The worked-shift counter is identical for every machine, so it is
        # hoisted out of the loop (the original re-summed it per machine).
        total_work = sum(data_by_year.work)
        data_for_plot = {
            'mach': [],
            'day_cost': [],
        }
        for mach in super().maint_dict['mach_name']:
            # NOTE(review): costs are summed over *all* years in the file,
            # while the work counter covers only the chosen year -- confirm
            # this mismatch is intended.
            total_cost = sum(self.mech_econ_file[mach])
            day_cost = round(total_cost / total_work, 0)
            data_for_plot['mach'].append(mach)
            data_for_plot['day_cost'].append(day_cost)
        data_for_plot = pd.DataFrame(data_for_plot)
        self._create_one_day_cost_plot(data_for_plot)

    def _input_machines_econ(self, mech_econ_date):
        """Prompt for each machine's monthly cost and offer to save.

        :param mech_econ_date: dict with 'year' and 'month' keys.
        """
        self.mech_econ_data['year'] = mech_econ_date['year']
        self.mech_econ_data['month'] = mech_econ_date['month']
        super().clear_screen()
        print("Введите сумму для каждой техники:")
        for mach in super().maint_dict['mach_name']:
            self.mech_econ_data[mach] = float(input(f"{mach}: "))
        save = input(
            "\nДанные введены."
            "\n[s] - сохранить данные: "
        )
        if save.lower() == 's':
            self._save_mech_econom()
            print("Данные сохранены.")
        else:
            print("Вы отменили сохранение.")
        input("\n[ENTER] - выйти.")

    def _visualise_statistic(self, year):
        """Print the raw economic table for *year*, sorted by month."""
        mech_econ_year = self.mech_econ_file.year == year
        data_by_year = (
            self.mech_econ_file[mech_econ_year]
            .sort_values(by=['month'])
        )
        super().print_all_dataframe(data_by_year)
        input("\n[ENTER] - выйти.")

    def _chose_year(self):
        """Let the user pick a year present in the data.

        :returns: the chosen year.
        :raises MainMenu: when the user declines to choose.
        """
        print("[ENTER] - выход"
              "\nВыберете год:")
        year = super().choise_from_list(
            sorted(set(self.mech_econ_file.year)),
            none_option=True
        )
        if year:
            return year
        else:
            raise MainMenu

    @BasF_S.set_plotter_parametrs
    def _create_one_day_cost_plot(self, dataframe):
        """Render a bar chart of per-day maintenance cost per machine.

        :param dataframe: DataFrame with 'mach' and 'day_cost' columns.
        """
        figure = plt.figure()
        x_cost = list(range(len(super().maint_dict['mach_name'])))
        axle = figure.add_subplot(111)
        axle.bar(
            x_cost, dataframe.day_cost, 0.3, alpha=0.4, color='r',
            label='Коэффициент', tick_label=dataframe.mach
        )
        # Rotate machine names so long labels do not overlap.
        axle.tick_params(labelrotation=90)
        axle.set_title(
            "Коэффициент целесообразности содержания техники руб/час. ",
            fontsize="x-large")
        axle.set_ylabel('руб.')
        axle.legend()
        axle.grid(
            True, linestyle='--', which='major',
            color='grey', alpha=.25, axis='y'
        )
        figure.tight_layout()
        plt.show()

    def create_mech_econom(self):
        """Create the economic record for a month not yet in the file."""
        mech_econ_date = self.input_date()
        check = super().check_date_in_dataframe(
            self.mech_econ_file,
            mech_econ_date
        )
        if check:
            print("Данные за этот месяц уже внесены.")
            input("\n[ENTER] - выйти.")
        else:
            self._input_machines_econ(mech_econ_date)

    def show_econ_statistic(self, stat_variants: Dict):
        """Show a menu of economic reports and run the chosen one.

        :param stat_variants: kept for interface compatibility; the mapping
            is rebuilt locally, so the passed-in value is ignored.
        """
        stat_variants = {
            'Целесообразность затрат на содержание техники.':
            self._visualise_one_day_cost,
        }
        print("[ENTER] - выйти."
              "\nВыберете вид отчета:")
        stat = super().choise_from_list(stat_variants, none_option=True)
        if stat:
            stat_variants[stat]()
| 32.351648 | 78 | 0.589504 |
from __future__ import annotations
import pandas as pd
from matplotlib import pyplot as plt
from typing import Dict
from .mechanic_report import MechReports
from .administration.logger_cfg import Logs
from .support_modules.custom_exceptions import MainMenu
from .support_modules.standart_functions import (
BasicFunctionsS
as BasF_S
)
LOGGER = Logs().give_logger(__name__)
class MechEconomic(MechReports):
    """Visualise statistic by machine economic."""
    __slots__ = (
        'mech_econ_path',
        'mech_econ_data',
        'mech_econ_file',
    )
    def __init__(self, user):
        """Load mech econom data from disk, or start with an empty frame."""
        super().__init__(user)
        self.mech_econ_data = {}
        # NOTE: 'mech_ecomomic' misspelling preserved -- existing data lives here.
        self.mech_econ_path = (
            super().get_root_path() / 'data' / 'mech_ecomomic'
        )
        if self.mech_econ_path.exists():
            self.mech_econ_file = super().load_data(
                data_path=self.mech_econ_path,
                user=user,
            )
        else:
            self.mech_econ_file = pd.DataFrame(self.mech_econ_data, index=[0])
    def _save_mech_econom(self):
        """Append the entered month to the file, dump it and log creation."""
        self.mech_econ_file = self.mech_econ_file.append(
            self.mech_econ_data,
            ignore_index=True
        )
        self._dump_mech_econ_data()
        self._log_mech_econ_creation()
    def _dump_mech_econ_data(self):
        """Persist the economic DataFrame to disk."""
        super().dump_data(
            data_path=self.mech_econ_path,
            base_to_dump=self.mech_econ_file,
            user=self.user,
        )
    def _log_mech_econ_creation(self):
        """Emit a warning-level log entry about report creation."""
        report_name = '{}-{}'.format(
            self.mech_econ_data['year'],
            self.mech_econ_data['month'],
        )
        LOGGER.warning(
            f"User '{self.user.login}' create mechanic econom.: {report_name}"
        )
    def _visualise_one_day_cost(self):
        """Plot the cost of one working day for each machine."""
        year = self._chose_year()
        data_by_year = super().give_dataframe_by_year(year)
        data_for_plot = {
            'mach': [],
            'day_cost': [],
        }
        for mach in super().maint_dict['mach_name']:
            # NOTE(review): costs sum over all years while work covers only
            # the chosen year -- confirm this is intended.
            totall_cost = sum(self.mech_econ_file[mach])
            total_work = sum(data_by_year.work)
            number_of_wdays = total_work
            day_cost = round(totall_cost/number_of_wdays, 0)
            data_for_plot['mach'].append(mach)
            data_for_plot['day_cost'].append(day_cost)
        data_for_plot = pd.DataFrame(data_for_plot)
        self._create_one_day_cost_plot(data_for_plot)
    def _input_machines_econ(self, mech_econ_date):
        """Prompt for each machine's monthly cost and offer to save."""
        self.mech_econ_data['year'] = mech_econ_date['year']
        self.mech_econ_data['month'] = mech_econ_date['month']
        super().clear_screen()
        print("Введите сумму для каждой техники:")
        for mach in super().maint_dict['mach_name']:
            self.mech_econ_data[mach] = float(input(f"{mach}: "))
        save = input(
            "\nДанные введены."
            "\n[s] - сохранить данные: "
        )
        if save.lower() == 's':
            self._save_mech_econom()
            print("Данные сохранены.")
        else:
            print("Вы отменили сохранение.")
        input("\n[ENTER] - выйти.")
    def _visualise_statistic(self, year):
        """Print the raw economic table for *year*, sorted by month."""
        mech_econ_year = self.mech_econ_file.year == year
        data_by_year = (
            self.mech_econ_file[mech_econ_year]
            .sort_values(by=['month'])
        )
        super().print_all_dataframe(data_by_year)
        input("\n[ENTER] - выйти.")
    def _chose_year(self):
        """Let the user pick a year from the data; raise MainMenu on exit."""
        print("[ENTER] - выход"
              "\nВыберете год:")
        year = super().choise_from_list(
            sorted(set(self.mech_econ_file.year)),
            none_option=True
        )
        if year:
            return year
        else:
            raise MainMenu
    @BasF_S.set_plotter_parametrs
    def _create_one_day_cost_plot(self, dataframe):
        """Render a bar chart of per-day maintenance cost per machine."""
        figure = plt.figure()
        x_cost = list(range(len(super().maint_dict['mach_name'])))
        axle = figure.add_subplot(111)
        axle.bar(
            x_cost, dataframe.day_cost, 0.3, alpha=0.4, color='r',
            label='Коэффициент', tick_label=dataframe.mach
        )
        axle.tick_params(labelrotation=90)
        axle.set_title(
            "Коэффициент целесообразности содержания техники руб/час. ",
            fontsize="x-large")
        axle.set_ylabel('руб.')
        axle.legend()
        axle.grid(
            True, linestyle='--', which='major',
            color='grey', alpha=.25, axis='y'
        )
        figure.tight_layout()
        plt.show()
    def create_mech_econom(self):
        """Create the economic record for a month not yet in the file."""
        mech_econ_date = self.input_date()
        check = super().check_date_in_dataframe(
            self.mech_econ_file,
            mech_econ_date
        )
        if check:
            print("Данные за этот месяц уже внесены.")
            input("\n[ENTER] - выйти.")
        else:
            self._input_machines_econ(mech_econ_date)
    def show_econ_statistic(self, stat_variants: Dict):
        """Show a menu of economic reports and run the chosen one.

        The *stat_variants* argument is immediately replaced by a locally
        built mapping, so the passed-in value is effectively ignored.
        """
        stat_variants = {
            'Целесообразность затрат на содержание техники.':
            self._visualise_one_day_cost,
        }
        print("[ENTER] - выйти."
              "\nВыберете вид отчета:")
        stat = super().choise_from_list(stat_variants, none_option=True)
        if stat:
            stat_variants[stat]()
| true | true |
f70f2913154b4f910fa56969655595ca01f7413a | 3,464 | py | Python | multiagent/scenarios/simple_speaker_listener.py | robbycostales/multiagent-particle-envs | 22a00b18e13b629a206a8ffc8d8319d06dd5d7b0 | [
"MIT"
] | null | null | null | multiagent/scenarios/simple_speaker_listener.py | robbycostales/multiagent-particle-envs | 22a00b18e13b629a206a8ffc8d8319d06dd5d7b0 | [
"MIT"
] | null | null | null | multiagent/scenarios/simple_speaker_listener.py | robbycostales/multiagent-particle-envs | 22a00b18e13b629a206a8ffc8d8319d06dd5d7b0 | [
"MIT"
] | null | null | null | import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
    """Cooperative speaker/listener particle scenario.

    Agent 0 (the speaker) is immobile and can only communicate; agent 1
    (the listener) can move but cannot speak.  The speaker must tell the
    listener which of the three landmarks to reach; both share a reward
    equal to the negative squared distance of the listener to the goal.
    """

    def make_world(self, dim_c=3):
        """Build the world with one speaker, one listener and 3 landmarks.

        :param dim_c: dimensionality of the communication channel.
        :returns: the initialized ``World`` instance.
        """
        world = World()
        # set any world properties first
        world.dim_c = dim_c
        num_landmarks = 3
        # add agents
        world.agents = [Agent() for i in range(2)]
        for i, agent in enumerate(world.agents):
            agent.name = 'agent %d' % i
            agent.collide = False
            agent.size = 0.075
        # speaker: communicates but never moves
        world.agents[0].movable = False
        # listener: moves but never communicates
        world.agents[1].silent = True
        # add landmarks
        world.landmarks = [Landmark() for i in range(num_landmarks)]
        for i, landmark in enumerate(world.landmarks):
            landmark.name = 'landmark %d' % i
            landmark.collide = False
            landmark.movable = False
            landmark.size = 0.04
        # make initial conditions
        self.reset_world(world)
        return world

    def reset_world(self, world):
        """Randomize the goal landmark, colors and initial states."""
        # assign goals to agents
        for agent in world.agents:
            agent.goal_a = None
            agent.goal_b = None
        # want listener to go to the goal landmark
        world.agents[0].goal_a = world.agents[1]
        world.agents[0].goal_b = np.random.choice(world.landmarks)
        # random properties for agents
        for i, agent in enumerate(world.agents):
            agent.color = np.array([0.25, 0.25, 0.25])
        # random properties for landmarks
        world.landmarks[0].color = np.array([0.65, 0.15, 0.15])
        world.landmarks[1].color = np.array([0.15, 0.65, 0.15])
        world.landmarks[2].color = np.array([0.15, 0.15, 0.65])
        # special colors for goals: brighten the goal landmark's color
        world.agents[0].goal_a.color = world.agents[0].goal_b.color + np.array(
            [0.45, 0.45, 0.45])
        # set random initial states
        for agent in world.agents:
            agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
        for i, landmark in enumerate(world.landmarks):
            landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)

    def benchmark_data(self, agent, world):
        """Return data for benchmarking purposes (here: the shared reward).

        BUG FIX: the original returned ``reward(agent, reward)``, which
        referenced an undefined global name and passed the function itself
        in place of the world, raising NameError when called.
        """
        return self.reward(agent, world)

    def reward(self, agent, world):
        """Shared reward: negative squared listener-to-goal distance."""
        a = world.agents[0]
        dist2 = np.sum(np.square(a.goal_a.state.p_pos - a.goal_b.state.p_pos))
        return -dist2

    def observation(self, agent, world):
        """Role-dependent observation.

        Speaker: goal landmark color only.  Listener: own velocity,
        landmark positions relative to itself, and other agents' messages.
        """
        # goal color
        goal_color = np.zeros(world.dim_color)
        if agent.goal_b is not None:
            goal_color = agent.goal_b.color
        # get positions of all entities in this agent's reference frame
        entity_pos = []
        for entity in world.landmarks:
            entity_pos.append(entity.state.p_pos - agent.state.p_pos)
        # communication of all other agents
        comm = []
        for other in world.agents:
            if other is agent or (other.state.c is None):
                continue
            comm.append(other.state.c)
        # speaker
        if not agent.movable:
            return np.concatenate([goal_color])
        # listener
        if agent.silent:
            return np.concatenate([agent.state.p_vel] + entity_pos + comm)
| 37.652174 | 98 | 0.601905 | import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
def make_world(self, dim_c=3):
world = World()
world.dim_c = dim_c
num_landmarks = 3
world.agents = [Agent() for i in range(2)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = False
agent.size = 0.075
world.agents[0].movable = False
world.agents[1].silent = True
world.landmarks = [Landmark() for i in range(num_landmarks)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
landmark.collide = False
landmark.movable = False
landmark.size = 0.04
self.reset_world(world)
return world
def reset_world(self, world):
for agent in world.agents:
agent.goal_a = None
agent.goal_b = None
world.agents[0].goal_a = world.agents[1]
world.agents[0].goal_b = np.random.choice(world.landmarks)
for i, agent in enumerate(world.agents):
agent.color = np.array([0.25,0.25,0.25])
world.landmarks[0].color = np.array([0.65,0.15,0.15])
world.landmarks[1].color = np.array([0.15,0.65,0.15])
world.landmarks[2].color = np.array([0.15,0.15,0.65])
world.agents[0].goal_a.color = world.agents[0].goal_b.color + np.array([0.45, 0.45, 0.45])
for agent in world.agents:
agent.state.p_pos = np.random.uniform(-1,+1, world.dim_p)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
for i, landmark in enumerate(world.landmarks):
landmark.state.p_pos = np.random.uniform(-1,+1, world.dim_p)
landmark.state.p_vel = np.zeros(world.dim_p)
def benchmark_data(self, agent, world):
return reward(agent, reward)
def reward(self, agent, world):
a = world.agents[0]
dist2 = np.sum(np.square(a.goal_a.state.p_pos - a.goal_b.state.p_pos))
return -dist2
def observation(self, agent, world):
goal_color = np.zeros(world.dim_color)
if agent.goal_b is not None:
goal_color = agent.goal_b.color
entity_pos = []
for entity in world.landmarks:
entity_pos.append(entity.state.p_pos - agent.state.p_pos)
# communication of all other agents
comm = []
for other in world.agents:
if other is agent or (other.state.c is None): continue
comm.append(other.state.c)
# speaker
if not agent.movable:
return np.concatenate([goal_color])
# listener
if agent.silent:
return np.concatenate([agent.state.p_vel] + entity_pos + comm)
| true | true |
f70f295307fb04fa6988031adb26ba2c95760a41 | 14,052 | py | Python | nicos_mlz/sans1/gui/monitorwidgets.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_mlz/sans1/gui/monitorwidgets.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_mlz/sans1/gui/monitorwidgets.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Enrico Faulhaber <enrico.faulhaber@frm2.tum.de>
#
# *****************************************************************************
"""Special widgets for the SANS1 statusmonitor."""
from nicos.core.status import BUSY, DISABLED, ERROR, NOTREACHED, OK, UNKNOWN, \
WARN
from nicos.guisupport.qt import QBrush, QColor, QLineF, QPainter, QPen, \
QRectF, QSize, Qt, QTextOption, QWidget
from nicos.guisupport.widget import NicosWidget, PropDef
_magenta = QBrush(QColor('#A12F86'))
_yellow = QBrush(QColor('yellow'))
_white = QBrush(QColor('white'))
_grey = QBrush(QColor('lightgrey'))
_black = QBrush(QColor('black'))
_blue = QBrush(QColor('blue'))
_red = QBrush(QColor('red'))
_olive = QBrush(QColor('olive'))
_orange = QBrush(QColor('#ffa500'))
statusbrush = {
BUSY: _yellow,
WARN: _orange,
ERROR: _red,
NOTREACHED: _red,
DISABLED: _white,
OK: _white,
UNKNOWN: _olive,
}
class Tube2(NicosWidget, QWidget):
"""Sans1Tube with two detectors..."""
designer_description = 'SANS-1 tube with two detectors'
def __init__(self, parent, designMode=False):
# det1pos, det1shift, det1tilt, det2pos
self._curval = [0, 0, 0, 0]
self._curstr = ['', '', '', '']
self._curstatus = [OK, OK, OK, OK]
QWidget.__init__(self, parent)
NicosWidget.__init__(self)
devices = PropDef('devices', 'QStringList', [], 'position, shift and '
'tilt of det1, position of det2')
height = PropDef('height', int, 10, 'Widget height in characters')
width = PropDef('width', int, 30, 'Widget width in characters')
name = PropDef('name', str, '', 'Display name')
posscale = PropDef('posscale', float, 20000, 'Length of the tube')
color = PropDef('color', 'QColor', _magenta.color(), 'Color of the tube')
def sizeHint(self):
return QSize(round(self.props['width'] * self._scale) + 10,
round(self.props['height'] * self._scale) +
round(self.props['name'] and self._scale * 2.5 or 0) + 40)
def registerKeys(self):
for dev in self.props['devices']:
self.registerDevice(str(dev))
def on_devValueChange(self, dev, value, strvalue, unitvalue, expired):
try:
idx = self.props['devices'].index(dev)
except ValueError:
return
self._curval[idx] = value
self._curstr[idx] = unitvalue
self.update()
def on_devStatusChange(self, dev, code, status, expired):
try:
idx = self.props['devices'].index(dev)
except ValueError:
return
self._curstatus[idx] = code
self.update()
def paintEvent(self, event):
painter = QPainter(self)
painter.setBrush(QBrush(self.color))
painter.setRenderHint(QPainter.Antialiasing)
fontscale = float(self._scale)
h = self.props['height'] * fontscale
w = self.props['width'] * fontscale
posscale = (w - 120) / self.props['posscale']
if self.props['name']:
painter.setFont(self.font())
painter.drawText(QRectF(5, 0, w, fontscale * 2.5),
self.props['name'], QTextOption(Qt.AlignCenter))
yoff = fontscale * 2.5
else:
yoff = 0
painter.setPen(self.color)
painter.drawEllipse(QRectF(5, 5 + yoff, 50, h))
painter.drawRect(QRectF(30, 5 + yoff, w - 50, h))
painter.setPen(QColor('black'))
painter.drawArc(QRectF(5, 5 + yoff, 50, h), 1440, 2880)
painter.drawLine(QLineF(30, 5 + yoff, w - 25, 5 + yoff))
painter.drawLine(QLineF(30, 5 + yoff + h, w - 25, 5 + yoff + h))
painter.drawEllipse(QRectF(w - 45, 5 + yoff, 50, h))
# draw Detector 1
minx = 0
pos_val = self._curval[0]
if pos_val is not None:
pos_status = self._curstatus[0]
pos_str = self._curstr[0]
shift_val = self._curval[1]
shift_status = self._curstatus[1]
shift_str = self._curstr[1]
if shift_val > 0:
shift_str += ' ↓'
elif shift_val < 0:
shift_str += ' ↑'
# Not used at the moment, prepared for later use
tilt_val = self._curval[2]
tilt_status = self._curstatus[2]
tilt_str = self._curstr[2]
if tilt_str.endswith('deg'):
tilt_str = tilt_str[:-3] + '°'
stat = max(pos_status, shift_status, tilt_status)
painter.setBrush(statusbrush[stat])
# tf = QTransform()
# tf.rotate(tilt_val)
painter.resetTransform()
painter.translate(60 + pos_val * posscale + fontscale / 2.,
15 + yoff + shift_val * posscale + (h - 20) / 2.)
painter.rotate(-tilt_val)
painter.drawRect(QRectF(-fontscale / 2., - (h - 20) / 2., fontscale,
h - 20)) # XXX tilt ???
painter.resetTransform()
painter.setFont(self.valueFont)
painter.drawText(QRectF(60 + pos_val * posscale - 10.5 * fontscale,
-5 + yoff + h - fontscale, # + (shift_val - 4) * posscale,
9.5 * fontscale, 2 * fontscale),
tilt_str, QTextOption(Qt.AlignRight))
painter.drawText(QRectF(60 + pos_val * posscale - 6.5 * fontscale,
yoff + fontscale, # + (shift_val - 4) * posscale,
9.5 * fontscale, 2 * fontscale),
shift_str, QTextOption(Qt.AlignLeft))
minx = max(minx, 60 + pos_val * posscale + 5 - 4 * fontscale)
painter.drawText(QRectF(minx, h + 10 + yoff, 8 * fontscale, 30),
pos_str, QTextOption(Qt.AlignCenter))
minx = minx + 8 * fontscale
# # draw Detector 2
# pos_val = self._curval[3]
# if pos_val is not None:
# pos_status = self._curstatus[3]
# pos_str = self._curstr[3]
#
# painter.setBrush(statusbrush[pos_status])
# painter.drawRect(60 + pos_val * posscale, 15 + yoff,
# fontscale, h - 20 - 5 * posscale)
# painter.setFont(self.valueFont)
# minx = max(minx, 65 + pos_val * posscale - 4 * fontscale)
# painter.drawText(minx, h + 10 + yoff,
# 8 * fontscale, 30, Qt.AlignCenter, pos_str)
# minx = minx + 8 * fontscale
class BeamOption(NicosWidget, QWidget):
designer_description = 'SANS-1 beam option'
def __init__(self, parent, designMode=False):
self._curstr = ''
self._curstatus = OK
self._fixed = ''
QWidget.__init__(self, parent)
NicosWidget.__init__(self)
dev = PropDef('dev', str, '', 'NICOS device name')
height = PropDef('height', int, 4, 'Widget height in characters')
width = PropDef('width', int, 10, 'Widget width in characters')
name = PropDef('name', str, '', 'Display name')
def sizeHint(self):
return QSize(round(self.props['width'] * self._scale),
round(self.props['height'] * self._scale) +
round(self.props['name'] and self._scale * 2.5 or 0))
def registerKeys(self):
self.registerDevice(self.props['dev'])
def on_devValueChange(self, dev, value, strvalue, unitvalue, expired):
self._curstr = unitvalue
self.update()
def on_devMetaChange(self, dev, fmtstr, unit, fixed):
self._fixed = fixed
self.update()
def on_devStatusChange(self, dev, code, status, expired):
self._curstatus = code
self.update()
def paintEvent(self, event):
painter = QPainter(self)
painter.setBrush(_magenta)
painter.setRenderHint(QPainter.Antialiasing)
w = self.props['width'] * self._scale
h = self.props['height'] * self._scale
if self.props['name']:
painter.setFont(self.font())
painter.drawText(QRectF(0, 0, w, self._scale * 2.5),
self.props['name'], QTextOption(Qt.AlignCenter))
yoff = self._scale * 2.5
else:
yoff = 0
painter.setBrush(statusbrush[self._curstatus])
painter.drawRect(QRectF(2, 2 + yoff, w - 4, h - 4))
painter.setFont(self.valueFont)
painter.drawText(QRectF(2, 2 + yoff, w - 4, h - 4),
self._curstr, QTextOption(Qt.AlignCenter))
class CollimatorTable(NicosWidget, QWidget):
"""Displays a list of 'beam options' as a vertical stack.
Options are displayed as vertical stack of named elements drawn on top
of a centered blue line ('the beam').
If the device value is in 'options', the correspondig element is drawn
on top of 'the beam' by moving the whole stack vertically.
If the device value is in 'disabled_options', the whole
stack of options is vertically shifted 'out of beam'.
Other values are ignored as they are considered temporary
(while moving an option).
If the device state happens to be in error, the name label is
displayed in red to indicate the error.
"""
designer_description = 'SANS-1 collimator table'
def __init__(self, parent, designMode=False):
self._curstr = ''
self._curstatus = OK
self._fixed = ''
self.shift = -1
QWidget.__init__(self, parent)
NicosWidget.__init__(self)
dev = PropDef('dev', str, '', 'NICOS device name of a switcher')
options = PropDef('options', 'QStringList', [], 'list of valid switcher-'
'values to display in top-down order (first element '
'will be displayed on top location)')
disabled_options = PropDef('disabled_options', 'QStringList', [],
'list of valid switcher values for which '
'all options are display out-of-beam')
height = PropDef('height', int, 4, 'Widget height in characters')
width = PropDef('width', int, 10, 'Widget width in characters')
name = PropDef('name', str, '', 'Display name')
def registerKeys(self):
self.registerDevice(self.props['dev'])
def sizeHint(self):
return QSize(round(self._scale * self.props['width']),
round(self._scale * 2.5 * self.props['height']) +
round(self.props['name'] and 2.5 * self._scale or 0))
def on_devValueChange(self, dev, value, strvalue, unitvalue, expired):
self._curstr = strvalue
self.update()
def on_devMetaChange(self, dev, fmtstr, unit, fixed):
self._fixed = fixed
self.update()
def on_devStatusChange(self, dev, code, status, expired):
self._curstatus = code
self.update()
def paintEvent(self, event):
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing)
h = self._scale * 2.5 * self.props['height']
w = self._scale * self.props['width']
# cache pen
pen = painter.pen()
if self.props['name']:
painter.setFont(self.font())
if self._curstatus != OK:
painter.fillRect(QRectF(0, 0, w, self._scale * 2.5),
statusbrush[self._curstatus])
if self._fixed:
painter.setPen(QPen(_blue.color()))
else:
painter.setPen(QPen(_black.color()))
painter.drawText(QRectF(0, 0, w, self._scale * 2.5),
self.props['name'], QTextOption(Qt.AlignCenter))
painter.setPen(pen)
yoff = self._scale * 2.5
else:
yoff = 0
painter.setPen(QPen(_blue.color()))
y = h * 0.5 + yoff
painter.drawLine(QLineF(0, y, w, y))
painter.drawLine(QLineF(0, y+1, w, y+1))
painter.drawLine(QLineF(0, y+2, w, y+2))
# reset pen
painter.setPen(pen)
painter.setBrush(statusbrush[self._curstatus])
if self._curstr in self.props['options']:
self.shift = self.props['options'].index(self._curstr)
if self._curstr in self.props['disabled_options']:
self.shift = len(self.props['options'])
painter.setFont(self.valueFont)
h0 = max(2 * self._scale, 2 * self._scale + 4)
painter.setClipRect(QRectF(0, yoff, w, h))
for i, t in enumerate(self.props['options']):
y = h * 0.5 + yoff + h0 * (self.shift - i - 0.45)
b = statusbrush[self._curstatus]
if t == self._curstr:
painter.setBrush(b)
else:
painter.setBrush(_grey if b == statusbrush[OK] else b)
painter.drawRect(QRectF(5, y + 2, w - 10, h0 - 4))
painter.drawText(QRectF(5, y + 2, w - 10, h0 - 4),
t, QTextOption(Qt.AlignCenter))
| 38.604396 | 95 | 0.570951 |
from nicos.core.status import BUSY, DISABLED, ERROR, NOTREACHED, OK, UNKNOWN, \
WARN
from nicos.guisupport.qt import QBrush, QColor, QLineF, QPainter, QPen, \
QRectF, QSize, Qt, QTextOption, QWidget
from nicos.guisupport.widget import NicosWidget, PropDef
_magenta = QBrush(QColor('#A12F86'))
_yellow = QBrush(QColor('yellow'))
_white = QBrush(QColor('white'))
_grey = QBrush(QColor('lightgrey'))
_black = QBrush(QColor('black'))
_blue = QBrush(QColor('blue'))
_red = QBrush(QColor('red'))
_olive = QBrush(QColor('olive'))
_orange = QBrush(QColor('#ffa500'))
statusbrush = {
BUSY: _yellow,
WARN: _orange,
ERROR: _red,
NOTREACHED: _red,
DISABLED: _white,
OK: _white,
UNKNOWN: _olive,
}
class Tube2(NicosWidget, QWidget):
designer_description = 'SANS-1 tube with two detectors'
def __init__(self, parent, designMode=False):
self._curval = [0, 0, 0, 0]
self._curstr = ['', '', '', '']
self._curstatus = [OK, OK, OK, OK]
QWidget.__init__(self, parent)
NicosWidget.__init__(self)
devices = PropDef('devices', 'QStringList', [], 'position, shift and '
'tilt of det1, position of det2')
height = PropDef('height', int, 10, 'Widget height in characters')
width = PropDef('width', int, 30, 'Widget width in characters')
name = PropDef('name', str, '', 'Display name')
posscale = PropDef('posscale', float, 20000, 'Length of the tube')
color = PropDef('color', 'QColor', _magenta.color(), 'Color of the tube')
def sizeHint(self):
return QSize(round(self.props['width'] * self._scale) + 10,
round(self.props['height'] * self._scale) +
round(self.props['name'] and self._scale * 2.5 or 0) + 40)
def registerKeys(self):
for dev in self.props['devices']:
self.registerDevice(str(dev))
def on_devValueChange(self, dev, value, strvalue, unitvalue, expired):
try:
idx = self.props['devices'].index(dev)
except ValueError:
return
self._curval[idx] = value
self._curstr[idx] = unitvalue
self.update()
def on_devStatusChange(self, dev, code, status, expired):
try:
idx = self.props['devices'].index(dev)
except ValueError:
return
self._curstatus[idx] = code
self.update()
def paintEvent(self, event):
painter = QPainter(self)
painter.setBrush(QBrush(self.color))
painter.setRenderHint(QPainter.Antialiasing)
fontscale = float(self._scale)
h = self.props['height'] * fontscale
w = self.props['width'] * fontscale
posscale = (w - 120) / self.props['posscale']
if self.props['name']:
painter.setFont(self.font())
painter.drawText(QRectF(5, 0, w, fontscale * 2.5),
self.props['name'], QTextOption(Qt.AlignCenter))
yoff = fontscale * 2.5
else:
yoff = 0
painter.setPen(self.color)
painter.drawEllipse(QRectF(5, 5 + yoff, 50, h))
painter.drawRect(QRectF(30, 5 + yoff, w - 50, h))
painter.setPen(QColor('black'))
painter.drawArc(QRectF(5, 5 + yoff, 50, h), 1440, 2880)
painter.drawLine(QLineF(30, 5 + yoff, w - 25, 5 + yoff))
painter.drawLine(QLineF(30, 5 + yoff + h, w - 25, 5 + yoff + h))
painter.drawEllipse(QRectF(w - 45, 5 + yoff, 50, h))
minx = 0
pos_val = self._curval[0]
if pos_val is not None:
pos_status = self._curstatus[0]
pos_str = self._curstr[0]
shift_val = self._curval[1]
shift_status = self._curstatus[1]
shift_str = self._curstr[1]
if shift_val > 0:
shift_str += ' ↓'
elif shift_val < 0:
shift_str += ' ↑'
tilt_val = self._curval[2]
tilt_status = self._curstatus[2]
tilt_str = self._curstr[2]
if tilt_str.endswith('deg'):
tilt_str = tilt_str[:-3] + '°'
stat = max(pos_status, shift_status, tilt_status)
painter.setBrush(statusbrush[stat])
painter.resetTransform()
painter.translate(60 + pos_val * posscale + fontscale / 2.,
15 + yoff + shift_val * posscale + (h - 20) / 2.)
painter.rotate(-tilt_val)
painter.drawRect(QRectF(-fontscale / 2., - (h - 20) / 2., fontscale,
h - 20))
painter.resetTransform()
painter.setFont(self.valueFont)
painter.drawText(QRectF(60 + pos_val * posscale - 10.5 * fontscale,
-5 + yoff + h - fontscale,
9.5 * fontscale, 2 * fontscale),
tilt_str, QTextOption(Qt.AlignRight))
painter.drawText(QRectF(60 + pos_val * posscale - 6.5 * fontscale,
yoff + fontscale,
9.5 * fontscale, 2 * fontscale),
shift_str, QTextOption(Qt.AlignLeft))
minx = max(minx, 60 + pos_val * posscale + 5 - 4 * fontscale)
painter.drawText(QRectF(minx, h + 10 + yoff, 8 * fontscale, 30),
pos_str, QTextOption(Qt.AlignCenter))
minx = minx + 8 * fontscale
lass BeamOption(NicosWidget, QWidget):
designer_description = 'SANS-1 beam option'
def __init__(self, parent, designMode=False):
self._curstr = ''
self._curstatus = OK
self._fixed = ''
QWidget.__init__(self, parent)
NicosWidget.__init__(self)
dev = PropDef('dev', str, '', 'NICOS device name')
height = PropDef('height', int, 4, 'Widget height in characters')
width = PropDef('width', int, 10, 'Widget width in characters')
name = PropDef('name', str, '', 'Display name')
def sizeHint(self):
return QSize(round(self.props['width'] * self._scale),
round(self.props['height'] * self._scale) +
round(self.props['name'] and self._scale * 2.5 or 0))
def registerKeys(self):
self.registerDevice(self.props['dev'])
def on_devValueChange(self, dev, value, strvalue, unitvalue, expired):
self._curstr = unitvalue
self.update()
def on_devMetaChange(self, dev, fmtstr, unit, fixed):
self._fixed = fixed
self.update()
def on_devStatusChange(self, dev, code, status, expired):
self._curstatus = code
self.update()
def paintEvent(self, event):
painter = QPainter(self)
painter.setBrush(_magenta)
painter.setRenderHint(QPainter.Antialiasing)
w = self.props['width'] * self._scale
h = self.props['height'] * self._scale
if self.props['name']:
painter.setFont(self.font())
painter.drawText(QRectF(0, 0, w, self._scale * 2.5),
self.props['name'], QTextOption(Qt.AlignCenter))
yoff = self._scale * 2.5
else:
yoff = 0
painter.setBrush(statusbrush[self._curstatus])
painter.drawRect(QRectF(2, 2 + yoff, w - 4, h - 4))
painter.setFont(self.valueFont)
painter.drawText(QRectF(2, 2 + yoff, w - 4, h - 4),
self._curstr, QTextOption(Qt.AlignCenter))
class CollimatorTable(NicosWidget, QWidget):
designer_description = 'SANS-1 collimator table'
def __init__(self, parent, designMode=False):
self._curstr = ''
self._curstatus = OK
self._fixed = ''
self.shift = -1
QWidget.__init__(self, parent)
NicosWidget.__init__(self)
dev = PropDef('dev', str, '', 'NICOS device name of a switcher')
options = PropDef('options', 'QStringList', [], 'list of valid switcher-'
'values to display in top-down order (first element '
'will be displayed on top location)')
disabled_options = PropDef('disabled_options', 'QStringList', [],
'list of valid switcher values for which '
'all options are display out-of-beam')
height = PropDef('height', int, 4, 'Widget height in characters')
width = PropDef('width', int, 10, 'Widget width in characters')
name = PropDef('name', str, '', 'Display name')
def registerKeys(self):
self.registerDevice(self.props['dev'])
def sizeHint(self):
return QSize(round(self._scale * self.props['width']),
round(self._scale * 2.5 * self.props['height']) +
round(self.props['name'] and 2.5 * self._scale or 0))
def on_devValueChange(self, dev, value, strvalue, unitvalue, expired):
self._curstr = strvalue
self.update()
def on_devMetaChange(self, dev, fmtstr, unit, fixed):
self._fixed = fixed
self.update()
def on_devStatusChange(self, dev, code, status, expired):
self._curstatus = code
self.update()
def paintEvent(self, event):
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing)
h = self._scale * 2.5 * self.props['height']
w = self._scale * self.props['width']
pen = painter.pen()
if self.props['name']:
painter.setFont(self.font())
if self._curstatus != OK:
painter.fillRect(QRectF(0, 0, w, self._scale * 2.5),
statusbrush[self._curstatus])
if self._fixed:
painter.setPen(QPen(_blue.color()))
else:
painter.setPen(QPen(_black.color()))
painter.drawText(QRectF(0, 0, w, self._scale * 2.5),
self.props['name'], QTextOption(Qt.AlignCenter))
painter.setPen(pen)
yoff = self._scale * 2.5
else:
yoff = 0
painter.setPen(QPen(_blue.color()))
y = h * 0.5 + yoff
painter.drawLine(QLineF(0, y, w, y))
painter.drawLine(QLineF(0, y+1, w, y+1))
painter.drawLine(QLineF(0, y+2, w, y+2))
painter.setPen(pen)
painter.setBrush(statusbrush[self._curstatus])
if self._curstr in self.props['options']:
self.shift = self.props['options'].index(self._curstr)
if self._curstr in self.props['disabled_options']:
self.shift = len(self.props['options'])
painter.setFont(self.valueFont)
h0 = max(2 * self._scale, 2 * self._scale + 4)
painter.setClipRect(QRectF(0, yoff, w, h))
for i, t in enumerate(self.props['options']):
y = h * 0.5 + yoff + h0 * (self.shift - i - 0.45)
b = statusbrush[self._curstatus]
if t == self._curstr:
painter.setBrush(b)
else:
painter.setBrush(_grey if b == statusbrush[OK] else b)
painter.drawRect(QRectF(5, y + 2, w - 10, h0 - 4))
painter.drawText(QRectF(5, y + 2, w - 10, h0 - 4),
t, QTextOption(Qt.AlignCenter))
| true | true |
f70f29ce05e826594240a5a93ef2677dfb4bec9b | 5,738 | py | Python | cloudmarker/manager.py | nishitm/cloudmarker | 98f9d228abe3cf01fb4a6d631d31aadec47e759a | [
"MIT"
] | null | null | null | cloudmarker/manager.py | nishitm/cloudmarker | 98f9d228abe3cf01fb4a6d631d31aadec47e759a | [
"MIT"
] | null | null | null | cloudmarker/manager.py | nishitm/cloudmarker | 98f9d228abe3cf01fb4a6d631d31aadec47e759a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Manager of worker subprocesses.
This module invokes the worker subprocesses that perform the cloud
security monitoring tasks. Each worker subprocess wraps around a cloud,
store, event, or alert plugin and executes the plugin in a separate
subprocess.
"""
import logging.config
import multiprocessing as mp
import time
import schedule
from cloudmarker import util, workers
# Define module-level logger.
_log = logging.getLogger(__name__)
def main():
"""Run the framework based on the schedule."""
args = util.parse_cli()
config = util.load_config(args.config)
logging.config.dictConfig(config['logger'])
# Run the audits according to the schedule set in the configuration if the
# 'force' flag is not set in the command line.
if args.force:
_log.info('Starting job now')
job(config)
else:
_log.info('Scheduled to run job everyday at %s', config['schedule'])
schedule.every().day.at(config['schedule']).do(job, config)
while True:
schedule.run_pending()
time.sleep(60)
def job(config):
"""Run the audits.
Arguments:
config (dict): Configuration dictionary.
"""
# Create an audit object for each audit configured to be run.
audits = []
for audit_name in config['run']:
audits.append(Audit(audit_name, config))
# Start all audits.
for audit in audits:
audit.start()
# Wait for all audits to terminate.
for audit in audits:
audit.join()
class Audit:
"""Audit manager.
This class encapsulates a set of worker subprocesses and worker
input queues for a single audit configuration.
"""
def __init__(self, audit_name, config):
"""Create an instance of :class:`Audit` from configuration.
A single audit definition (from a list of audit definitions
under the ``audits`` key in the configuration) is instantiated.
Each audit definition contains lists of cloud plugins, store
plugins, event plugins, and alert plugins. These plugins are
instantiated and multiprocessing queues are set up to take
records from one plugin and feed them to another plugin as per
the audit workflow.
Arguments:
audit_name (str): Key name for an audit configuration. This
key is looked for in ``config['audits']``.
config (dict): Configuration dictionary. This is the
entire configuration dictionary that contains
top-level keys named ``clouds``, ``stores``, ``events``,
``alerts``, ``audits``, ``run``, etc.
"""
audit_config = config['audits'][audit_name]
# We keep all workers in these lists.
self._cloud_workers = []
self._store_workers = []
self._event_workers = []
self._alert_workers = []
# We keep all queues in these lists.
self._store_queues = []
self._event_queues = []
self._alert_queues = []
# Create alert workers and queues.
for name in audit_config['alerts']:
input_queue = mp.Queue()
args = (
audit_name + '-' + name,
util.load_plugin(config['alerts'][name]),
input_queue,
)
worker = mp.Process(target=workers.store_worker, args=args)
self._alert_workers.append(worker)
self._alert_queues.append(input_queue)
# Create event_workers workers and queues.
for name in audit_config['events']:
input_queue = mp.Queue()
args = (
audit_name + '-' + name,
util.load_plugin(config['events'][name]),
input_queue,
self._alert_queues,
)
worker = mp.Process(target=workers.event_worker, args=args)
self._event_workers.append(worker)
self._event_queues.append(input_queue)
# Create store workers and queues.
for name in audit_config['stores']:
input_queue = mp.Queue()
args = (
audit_name + '-' + name,
util.load_plugin(config['stores'][name]),
input_queue,
)
worker = mp.Process(target=workers.store_worker, args=args)
self._store_workers.append(worker)
self._store_queues.append(input_queue)
# Create cloud workers.
for name in audit_config['clouds']:
args = (
audit_name + '-' + name,
util.load_plugin(config['clouds'][name]),
self._store_queues + self._event_queues
)
worker = mp.Process(target=workers.cloud_worker, args=args)
self._cloud_workers.append(worker)
def start(self):
"""Start audit by starting all workers."""
for w in (self._cloud_workers + self._store_workers +
self._event_workers + self._alert_workers):
w.start()
def join(self):
"""Wait until all workers terminate."""
# Wait for cloud workers to terminate.
for w in self._cloud_workers:
w.join()
# Stop store workers and event workers.
for q in self._store_queues + self._event_queues:
q.put(None)
# Wait for store workers and event_workers workers to terminate.
for w in self._store_workers + self._event_workers:
w.join()
# Stop alert workers.
for q in self._alert_queues:
q.put(None)
# Wait for alert workers to terminate.
for w in self._alert_workers:
w.join()
| 32.235955 | 78 | 0.600732 |
import logging.config
import multiprocessing as mp
import time
import schedule
from cloudmarker import util, workers
_log = logging.getLogger(__name__)
def main():
args = util.parse_cli()
config = util.load_config(args.config)
logging.config.dictConfig(config['logger'])
if args.force:
_log.info('Starting job now')
job(config)
else:
_log.info('Scheduled to run job everyday at %s', config['schedule'])
schedule.every().day.at(config['schedule']).do(job, config)
while True:
schedule.run_pending()
time.sleep(60)
def job(config):
audits = []
for audit_name in config['run']:
audits.append(Audit(audit_name, config))
for audit in audits:
audit.start()
for audit in audits:
audit.join()
class Audit:
def __init__(self, audit_name, config):
audit_config = config['audits'][audit_name]
self._cloud_workers = []
self._store_workers = []
self._event_workers = []
self._alert_workers = []
self._store_queues = []
self._event_queues = []
self._alert_queues = []
for name in audit_config['alerts']:
input_queue = mp.Queue()
args = (
audit_name + '-' + name,
util.load_plugin(config['alerts'][name]),
input_queue,
)
worker = mp.Process(target=workers.store_worker, args=args)
self._alert_workers.append(worker)
self._alert_queues.append(input_queue)
for name in audit_config['events']:
input_queue = mp.Queue()
args = (
audit_name + '-' + name,
util.load_plugin(config['events'][name]),
input_queue,
self._alert_queues,
)
worker = mp.Process(target=workers.event_worker, args=args)
self._event_workers.append(worker)
self._event_queues.append(input_queue)
for name in audit_config['stores']:
input_queue = mp.Queue()
args = (
audit_name + '-' + name,
util.load_plugin(config['stores'][name]),
input_queue,
)
worker = mp.Process(target=workers.store_worker, args=args)
self._store_workers.append(worker)
self._store_queues.append(input_queue)
for name in audit_config['clouds']:
args = (
audit_name + '-' + name,
util.load_plugin(config['clouds'][name]),
self._store_queues + self._event_queues
)
worker = mp.Process(target=workers.cloud_worker, args=args)
self._cloud_workers.append(worker)
def start(self):
for w in (self._cloud_workers + self._store_workers +
self._event_workers + self._alert_workers):
w.start()
def join(self):
for w in self._cloud_workers:
w.join()
for q in self._store_queues + self._event_queues:
q.put(None)
for w in self._store_workers + self._event_workers:
w.join()
for q in self._alert_queues:
q.put(None)
for w in self._alert_workers:
w.join()
| true | true |
f70f2a201509a04db04816c2ba900f5ce7cb680b | 447 | py | Python | tests/utils/test_retriever.py | duketemon/pyuplift | 33daa0768ff333387cb8223ebfaedaffa57de335 | [
"MIT"
] | 26 | 2019-02-24T07:41:59.000Z | 2022-01-03T05:07:26.000Z | tests/utils/test_retriever.py | duketemon/pyuplift | 33daa0768ff333387cb8223ebfaedaffa57de335 | [
"MIT"
] | 8 | 2019-03-17T07:57:16.000Z | 2019-08-02T19:55:49.000Z | tests/utils/test_retriever.py | duketemon/pyuplift | 33daa0768ff333387cb8223ebfaedaffa57de335 | [
"MIT"
] | 4 | 2019-07-17T12:36:37.000Z | 2020-07-16T11:36:35.000Z | import os
import shutil
import pytest
from pyuplift.utils import retrieve_from_gz
data_home = os.path.join(os.sep.join(__file__.split(os.sep)[:-1]), 'data')
def test_retrieve_from_gz():
output_path = os.path.join(data_home, 'test.test')
archive_path = output_path + '.gz'
retrieve_from_gz(archive_path, output_path)
with open(output_path, 'r') as f:
text = f.read()
os.remove(output_path)
assert text == 'good'
| 24.833333 | 74 | 0.700224 | import os
import shutil
import pytest
from pyuplift.utils import retrieve_from_gz
data_home = os.path.join(os.sep.join(__file__.split(os.sep)[:-1]), 'data')
def test_retrieve_from_gz():
output_path = os.path.join(data_home, 'test.test')
archive_path = output_path + '.gz'
retrieve_from_gz(archive_path, output_path)
with open(output_path, 'r') as f:
text = f.read()
os.remove(output_path)
assert text == 'good'
| true | true |
f70f2adaa945474149079545ea89047b44947487 | 10,636 | py | Python | carla_python_api_recorder.py | t27/carla-scenic-data-collector | 3f38fa0e23a9f0ed85726292c5703c8505330870 | [
"MIT"
] | 1 | 2022-03-30T07:30:51.000Z | 2022-03-30T07:30:51.000Z | carla_python_api_recorder.py | t27/carla-scenic-data-collector | 3f38fa0e23a9f0ed85726292c5703c8505330870 | [
"MIT"
] | 1 | 2021-03-15T03:48:28.000Z | 2021-03-15T03:48:28.000Z | carla_python_api_recorder.py | t27/carla-scenic-data-collector | 3f38fa0e23a9f0ed85726292c5703c8505330870 | [
"MIT"
] | 5 | 2021-03-14T22:19:53.000Z | 2021-11-11T15:28:05.000Z | # Recorder that records agent states as dataframes and also stores a carla recording, in synchronous mode
#!/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import glob
import os
import sys
import pandas as pd
from tqdm import tqdm
import math
CARLA_VERSION = "0.9.11"
try:
# sys.path.append("./libs/carla-0.9.9-py3.7-linux-x86_64.egg")
if CARLA_VERSION == "0.9.9":
sys.path.append("./libs/carla-0.9.9-py3.7-linux-x86_64.egg")
elif CARLA_VERSION == "0.9.11":
sys.path.append("./libs/carla-0.9.11-py3.7-linux-x86_64.egg")
except IndexError:
pass
import carla
import argparse
import random
import time
import logging
import click
import pathlib
import spawn
current_dir = pathlib.Path(__file__).parent.absolute()
SEED = 27
random.seed(SEED)
def get_metadata(actor, frame_id):
    """Flatten one carla actor's state into a tuple of scalars for a frame.

    Parameters
    ----------
    actor : carla.Actor-like
        Object exposing ``type_id``, ``id``, ``get_transform()``,
        ``get_location()``, ``get_velocity()``, ``get_acceleration()`` and
        ``get_angular_velocity()``; optionally ``bounding_box`` and (for
        traffic lights) ``state``.
    frame_id : int
        Simulation frame index recorded alongside the sample.

    Returns
    -------
    tuple
        25 fields: frame id, actor id/type, position, rotation, velocity,
        acceleration, angular velocity, 3D-bbox offset and extent (``None``
        when the actor has no bounding box), and the traffic-light state
        name (``None`` for non-traffic-light actors).
    """
    type_id = actor.type_id

    def splitCarlaVec(vect):
        # Unpack a carla.Vector3D-like object into plain floats.
        return vect.x, vect.y, vect.z

    id = actor.id
    tf = actor.get_transform()
    roll, pitch, yaw = tf.rotation.roll, tf.rotation.pitch, tf.rotation.yaw
    loc = actor.get_location()
    pos_x, pos_y, pos_z = splitCarlaVec(loc)
    try:
        bbox3d = actor.bounding_box
        bbox3d_offset_x, bbox3d_offset_y, bbox3d_offset_z = splitCarlaVec(
            bbox3d.location
        )
        bbox3d_extent_x, bbox3d_extent_y, bbox3d_extent_z = splitCarlaVec(bbox3d.extent)
    except Exception:
        # Was a bare ``except:`` (also swallowed KeyboardInterrupt/SystemExit);
        # some actors simply have no bounding box.
        bbox3d_offset_x, bbox3d_offset_y, bbox3d_offset_z = None, None, None
        bbox3d_extent_x, bbox3d_extent_y, bbox3d_extent_z = None, None, None

    velocity_x, velocity_y, velocity_z = splitCarlaVec(actor.get_velocity())
    acc_x, acc_y, acc_z = splitCarlaVec(actor.get_acceleration())
    angular_vel_x, angular_vel_y, angular_vel_z = splitCarlaVec(
        actor.get_angular_velocity()
    )
    try:
        # Only carla.TrafficLight exposes ``state``; Actor has no getattr
        # support, hence the try/except instead of getattr().
        traffic_light_state = actor.state.name
    except Exception:  # narrowed from a bare ``except:``
        traffic_light_state = None
    return (
        frame_id,
        id,
        type_id,
        pos_x,
        pos_y,
        pos_z,
        roll,
        pitch,
        yaw,
        velocity_x,
        velocity_y,
        velocity_z,
        acc_x,
        acc_y,
        acc_z,
        angular_vel_x,
        angular_vel_y,
        angular_vel_z,
        bbox3d_offset_x,
        bbox3d_offset_y,
        bbox3d_offset_z,
        bbox3d_extent_x,
        bbox3d_extent_y,
        bbox3d_extent_z,
        traffic_light_state,
    )
# Module-level flag read by the recording loop in run(); set by the sensor
# callback below when a vehicle-vehicle collision is observed.
global_collision = False


def collision_detect_callback(event):
    """Collision-sensor callback: flag vehicle-vehicle collisions.

    Only the colliding actor's ``type_id`` is inspected.  An earlier revision
    also computed the impulse magnitude here, but the value was never used,
    so that dead computation has been removed.
    """
    actor_we_collide_against = event.other_actor
    if "vehicle." in actor_we_collide_against.type_id:
        global global_collision
        global_collision = True
def attach_collision_sensor(actor, world):
    """Spawn a collision sensor attached to ``actor`` and start listening.

    The sensor reports through :func:`collision_detect_callback`; the caller
    owns the returned sensor and must destroy it when done.
    """
    blueprint = world.get_blueprint_library().find("sensor.other.collision")
    sensor = world.spawn_actor(
        blueprint,
        carla.Transform(),
        attach_to=actor,
    )
    sensor.listen(collision_detect_callback)
    return sensor
def run(
    client,
    round_name,
    recording_dir,
    speed_violation_prob=60,
    tl_violation_prob=70,
    perc_speed_diff=-30,
    num_vehicles=25,
    SESSION_DURATION=60,
):
    """Record one synchronous-mode CARLA traffic session.

    Spawns ``num_vehicles`` autopilot vehicles, attaches a collision sensor
    to each, steps the world at 5 FPS for ``SESSION_DURATION`` seconds (or
    5 extra frames after the first vehicle-vehicle collision), and writes
    both a per-frame CSV (``<round_name>.csv``) and a native CARLA recording
    into ``recording_dir``.

    Parameters
    ----------
    client : carla.Client
        Connected client whose world is recorded.
    round_name : str
        Base name for the CSV and CARLA recording files.
    recording_dir : str
        Output directory (created if missing).
    speed_violation_prob : int
        Percent chance per vehicle/frame of applying ``perc_speed_diff``.
    tl_violation_prob : int
        Percent of red lights each vehicle ignores.
    perc_speed_diff : int
        Traffic-manager speed difference (negative == faster).
    num_vehicles : int
        Number of vehicles to spawn.
    SESSION_DURATION : int
        Nominal session length in seconds.
    """
    safe = True  # avoid spawning vehicles whose geometry is not ideal for carla
    actor_list = []
    sensors = []
    # Bug fix: pre-bind the names the ``finally`` block uses.  Previously an
    # exception raised before their assignment (e.g. server unreachable) was
    # masked by an UnboundLocalError from the cleanup code.
    synchronous_master = False
    vehicles_list = []
    logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
    try:
        FPS = 5
        DELTA_T = 1 / FPS
        world = client.get_world()
        blueprints = world.get_blueprint_library().filter("vehicle.*")
        traffic_manager = client.get_trafficmanager()
        traffic_manager.set_global_distance_to_leading_vehicle(2.0)
        if CARLA_VERSION == "0.9.11":
            # Deterministic TM only exists from 0.9.11 onwards.
            print("Using deterministic Traffic Manager")
            traffic_manager.set_random_device_seed(SEED)
        settings = client.get_world().get_settings()
        if not settings.synchronous_mode:
            # We own the tick: switch world + TM to synchronous stepping.
            traffic_manager.set_synchronous_mode(True)
            synchronous_master = True
            settings.synchronous_mode = True
            settings.fixed_delta_seconds = DELTA_T
            client.get_world().apply_settings(settings)
        else:
            synchronous_master = False
        recording_dir_path = pathlib.Path(recording_dir)
        recording_dir_path.mkdir(exist_ok=True)
        session_recording = str(recording_dir_path / f"{round_name}.csv")
        carla_session_recording = str(
            recording_dir_path.absolute() / f"{round_name}_carla_recording"
        )
        print("Recording on file: %s" % client.start_recorder(carla_session_recording))
        vehicles_list, walkers_list, all_actors = spawn.spawn(
            client, world, num_vehicles, 0, safe
        )
        world.tick()
        print("spawned %d vehicles, press Ctrl+C to exit." % len(actor_list))
        # fmt: off
        df_columns = [
            "frame_id", "id", "type_id", "pos_x", "pos_y", "pos_z", "roll", "pitch", "yaw",
            "velocity_x", "velocity_y", "velocity_z", "acc_x", "acc_y", "acc_z",
            "angular_vel_x", "angular_vel_y", "angular_vel_z",
            "bbox3d_offset_x", "bbox3d_offset_y", "bbox3d_offset_z",
            "bbox3d_extent_x", "bbox3d_extent_y", "bbox3d_extent_z", "traffic_light_color",
        ]
        # fmt: on
        global global_collision
        global_collision = False
        actors = world.get_actors()
        for actor in actors:
            if "vehicle." in actor.type_id:
                sensors.append(attach_collision_sensor(actor, world))
        # Static agents (signs etc.) are sampled once, at frame 0 only.
        non_vehicles = [
            x
            for x in actors
            if ("vehicle" not in x.type_id and "traffic_light" not in x.type_id)
        ]
        frame_id = 0
        df_arr = []
        non_vehicle_arr = [get_metadata(actor, frame_id) for actor in non_vehicles]
        df_arr += non_vehicle_arr
        pbar = tqdm(total=FPS * SESSION_DURATION)
        max_frames = FPS * SESSION_DURATION
        collision_detected_once = False
        while frame_id < max_frames:
            if global_collision and not collision_detected_once:
                # First collision: record 5 more frames, then stop early.
                print("detected collision, exiting!")
                collision_detected_once = True
                max_frames = frame_id + 5
            actors = world.get_actors()
            for actor in actors:
                if "vehicle." in actor.type_id:
                    tm_port = traffic_manager.get_port()
                    actor.set_autopilot(True, tm_port)
                    traffic_manager.ignore_lights_percentage(actor, tl_violation_prob)
                    traffic_manager.distance_to_leading_vehicle(actor, 3)
                    if random.random() * 100 < speed_violation_prob:
                        traffic_manager.vehicle_percentage_speed_difference(
                            actor, perc_speed_diff
                        )
            vehicles_and_lights = [
                x
                for x in actors
                if "vehicle" in x.type_id or "traffic_light" in x.type_id
            ]
            metadata_arr = [
                get_metadata(actor, frame_id) for actor in vehicles_and_lights
            ]
            df_arr += metadata_arr
            frame_id += 1
            pbar.update(1)
            world.tick()
        df = pd.DataFrame(df_arr, columns=df_columns)
        pbar.close()
        print(f"Saving CSV({len(df.frame_id.unique())} frames)")
        df.to_csv(session_recording, index=False)
        world.tick()
    finally:
        if synchronous_master:
            # Restore asynchronous mode so the server is usable afterwards.
            settings = world.get_settings()
            settings.synchronous_mode = False
            settings.fixed_delta_seconds = None
            world.apply_settings(settings)
        print("\ndestroying %d actors" % (len(sensors) + len(vehicles_list)))
        for s in sensors:
            s.destroy()
        client.apply_batch_sync([carla.command.DestroyActor(x) for x in vehicles_list])
        print("Stop recording")
        client.stop_recorder()
@click.command()
@click.option(
    "-s",
    "--scenario_type",
    type=click.Choice(["tl_sl", "nominal"], case_sensitive=False),
    required=True,
)
@click.option("-n", "--num_rounds", default=100)
@click.option("--test", is_flag=True)
def main(scenario_type, num_rounds, test):
    """CLI entry point: record ``num_rounds`` sessions of one scenario.

    ``tl_sl`` injects traffic-light and speed-limit violations; ``nominal``
    records compliant driving.  ``--test`` reseeds the RNG and writes to a
    separate ``test_*`` output directory so test data differs from training
    data.
    """
    if test:
        random.seed(72)
    if scenario_type.lower() == "tl_sl":
        SPEED_VIOLATION_PROB = 60
        TL_VIOLATION_PROB = 70
        PERC_SPEED_DIFF = -30
        SCENARIO_NAME = "tl_sl"
    elif scenario_type.lower() == "nominal":
        SPEED_VIOLATION_PROB = 0
        TL_VIOLATION_PROB = 0
        PERC_SPEED_DIFF = 0
        SCENARIO_NAME = "nominal"
    NUM_ROUNDS = num_rounds
    print(f"Recording {SCENARIO_NAME} data")
    try:
        host = "127.0.0.1"  # IP of the host server
        port = 2000  # TCP port the CARLA server listens on
        client = carla.Client(host, port)
        if test:
            scenario_dir = f"test_{SCENARIO_NAME}_recordings"
        else:
            scenario_dir = f"{SCENARIO_NAME}_recordings"
        for i in range(NUM_ROUNDS):
            run(
                client,
                f"{scenario_type}_round_{i}",
                scenario_dir,
                SPEED_VIOLATION_PROB,
                TL_VIOLATION_PROB,
                PERC_SPEED_DIFF,
            )
    except KeyboardInterrupt:
        # Allow a clean Ctrl+C between rounds.
        pass
    finally:
        print("\ndone.")


if __name__ == "__main__":
    main()
| 32.036145 | 105 | 0.615269 |
import glob
import os
import sys
import pandas as pd
from tqdm import tqdm
import math
CARLA_VERSION = "0.9.11"
try:
if CARLA_VERSION == "0.9.9":
sys.path.append("./libs/carla-0.9.9-py3.7-linux-x86_64.egg")
elif CARLA_VERSION == "0.9.11":
sys.path.append("./libs/carla-0.9.11-py3.7-linux-x86_64.egg")
except IndexError:
pass
import carla
import argparse
import random
import time
import logging
import click
import pathlib
import spawn
current_dir = pathlib.Path(__file__).parent.absolute()
SEED = 27
random.seed(SEED)
def get_metadata(actor, frame_id):
type_id = actor.type_id
def splitCarlaVec(vect):
return vect.x, vect.y, vect.z
id = actor.id
tf = actor.get_transform()
roll, pitch, yaw = tf.rotation.roll, tf.rotation.pitch, tf.rotation.yaw
loc = actor.get_location()
pos_x, pos_y, pos_z = splitCarlaVec(loc)
try:
bbox3d = actor.bounding_box
bbox3d_offset_x, bbox3d_offset_y, bbox3d_offset_z = splitCarlaVec(
bbox3d.location
)
bbox3d_extent_x, bbox3d_extent_y, bbox3d_extent_z = splitCarlaVec(bbox3d.extent)
except:
bbox3d_offset_x, bbox3d_offset_y, bbox3d_offset_z = None, None, None
bbox3d_extent_x, bbox3d_extent_y, bbox3d_extent_z = None, None, None
velocity_x, velocity_y, velocity_z = splitCarlaVec(actor.get_velocity())
acc_x, acc_y, acc_z = splitCarlaVec(actor.get_acceleration())
angular_vel_x, angular_vel_y, angular_vel_z = splitCarlaVec(
actor.get_angular_velocity()
)
try:
traffic_light_state = actor.state.name
except:
traffic_light_state = None
return (
frame_id,
id,
type_id,
pos_x,
pos_y,
pos_z,
roll,
pitch,
yaw,
velocity_x,
velocity_y,
velocity_z,
acc_x,
acc_y,
acc_z,
angular_vel_x,
angular_vel_y,
angular_vel_z,
bbox3d_offset_x,
bbox3d_offset_y,
bbox3d_offset_z,
bbox3d_extent_x,
bbox3d_extent_y,
bbox3d_extent_z,
traffic_light_state,
)
global_collision = False
def collision_detect_callback(event):
actor_we_collide_against = event.other_actor
impulse = event.normal_impulse
intensity = math.sqrt(impulse.x ** 2 + impulse.y ** 2 + impulse.z ** 2)
if "vehicle." in actor_we_collide_against.type_id:
global global_collision
global_collision = True
def attach_collision_sensor(actor, world):
blueprint_library = world.get_blueprint_library()
collision_sensor = world.spawn_actor(
blueprint_library.find("sensor.other.collision"),
carla.Transform(),
attach_to=actor,
)
collision_sensor.listen(lambda event: collision_detect_callback(event))
return collision_sensor
def run(
client,
round_name,
recording_dir,
speed_violation_prob=60,
tl_violation_prob=70,
perc_speed_diff=-30,
num_vehicles=25,
SESSION_DURATION=60,
):
safe = True # avoid spawning vehicles whose geometry is not ideal for carla
actor_list = []
sensors = []
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
try:
FPS = 5
DELTA_T = 1 / FPS
world = client.get_world()
blueprints = world.get_blueprint_library().filter("vehicle.*")
traffic_manager = client.get_trafficmanager()
traffic_manager.set_global_distance_to_leading_vehicle(2.0)
if CARLA_VERSION == "0.9.11":
print("Using deterministic Traffic Manager")
traffic_manager.set_random_device_seed(SEED)
settings = client.get_world().get_settings()
if not settings.synchronous_mode:
traffic_manager.set_synchronous_mode(True)
synchronous_master = True
settings.synchronous_mode = True
settings.fixed_delta_seconds = DELTA_T
client.get_world().apply_settings(settings)
else:
synchronous_master = False
recording_dir_path = pathlib.Path(recording_dir)
recording_dir_path.mkdir(exist_ok=True)
session_recording = str(recording_dir_path / f"{round_name}.csv")
carla_session_recording = str(
recording_dir_path.absolute() / f"{round_name}_carla_recording"
)
print("Recording on file: %s" % client.start_recorder(carla_session_recording))
vehicles_list, walkers_list, all_actors = spawn.spawn(
client, world, num_vehicles, 0, safe
)
world.tick()
print("spawned %d vehicles, press Ctrl+C to exit." % len(actor_list))
# fmt: off
df_columns = [
"frame_id", "id", "type_id", "pos_x", "pos_y", "pos_z", "roll", "pitch", "yaw",
"velocity_x", "velocity_y", "velocity_z", "acc_x", "acc_y", "acc_z",
"angular_vel_x", "angular_vel_y", "angular_vel_z",
"bbox3d_offset_x", "bbox3d_offset_y", "bbox3d_offset_z",
"bbox3d_extent_x", "bbox3d_extent_y", "bbox3d_extent_z", "traffic_light_color",
]
# fmt: on
# get all non vehicle agents
global global_collision
global_collision = False
actors = world.get_actors()
for actor in actors:
if "vehicle." in actor.type_id:
sensors.append(attach_collision_sensor(actor, world))
non_vehicles = [
x
for x in actors
if ("vehicle" not in x.type_id and "traffic_light" not in x.type_id)
] # signs, traffic lights etc
frame_id = 0
df_arr = []
non_vehicle_arr = [get_metadata(actor, frame_id) for actor in non_vehicles]
df_arr += non_vehicle_arr
pbar = tqdm(total=FPS * SESSION_DURATION)
max_frames = FPS * SESSION_DURATION
collision_detected_once = False
while frame_id < max_frames:
if global_collision and not collision_detected_once:
# Todo, if detected, start a countdown of N frames and break only after N iterations
print("detected collision, exiting!")
collision_detected_once = True
max_frames = frame_id + 5
# continue
actors = world.get_actors()
for actor in actors:
if "vehicle." in actor.type_id:
# print(actor.type_id)
tm_port = traffic_manager.get_port()
actor.set_autopilot(True, tm_port)
traffic_manager.ignore_lights_percentage(actor, tl_violation_prob)
traffic_manager.distance_to_leading_vehicle(actor, 3)
if random.random() * 100 < speed_violation_prob:
traffic_manager.vehicle_percentage_speed_difference(
actor, perc_speed_diff
)
vehicles_and_lights = [
x
for x in actors
if "vehicle" in x.type_id or "traffic_light" in x.type_id
]
metadata_arr = [
get_metadata(actor, frame_id) for actor in vehicles_and_lights
]
df_arr += metadata_arr
frame_id += 1
pbar.update(1)
world.tick()
df = pd.DataFrame(df_arr, columns=df_columns)
pbar.close()
print(f"Saving CSV({len(df.frame_id.unique())} frames)")
# df.to_parquet(f"session_data.parquet")
df.to_csv(session_recording, index=False)
world.tick()
# if args.recorder_time > 0:
# time.sleep(args.recorder_time)
# else:
# while True:
# world.wait_for_tick()
# # time.sleep(0.1)
finally:
if synchronous_master:
settings = world.get_settings()
settings.synchronous_mode = False
settings.fixed_delta_seconds = None
world.apply_settings(settings)
print("\ndestroying %d actors" % (len(sensors) + len(vehicles_list)))
# all_agents = sensors + vehicles_list
for s in sensors:
s.destroy()
client.apply_batch_sync([carla.command.DestroyActor(x) for x in vehicles_list])
print("Stop recording")
client.stop_recorder()
@click.command()
@click.option(
"-s",
"--scenario_type",
type=click.Choice(["tl_sl", "nominal"], case_sensitive=False),
required=True,
)
@click.option("-n", "--num_rounds", default=100)
@click.option("--test", is_flag=True)
def main(scenario_type, num_rounds, test):
# print(scenario_type, test, num_rounds)
if test:
random.seed(72)
if scenario_type.lower() == "tl_sl":
SPEED_VIOLATION_PROB = 60
TL_VIOLATION_PROB = 70
PERC_SPEED_DIFF = -30
SCENARIO_NAME = "tl_sl"
# NUM_ROUNDS = 100
elif scenario_type.lower() == "nominal":
SPEED_VIOLATION_PROB = 0
TL_VIOLATION_PROB = 0
PERC_SPEED_DIFF = 0
SCENARIO_NAME = "nominal"
# NUM_ROUNDS = 200
NUM_ROUNDS = num_rounds
print(f"Recording {SCENARIO_NAME} data")
try:
host = "127.0.0.1" # IP of the host server (default: 127.0.0.1)
port = 2000 # TCP port to listen to (default: 2000)",
client = carla.Client(host, port)
if test:
scenario_dir = f"test_{SCENARIO_NAME}_recordings"
else:
scenario_dir = f"{SCENARIO_NAME}_recordings"
round_names = []
for i in range(NUM_ROUNDS):
run(
client,
f"{scenario_type}_round_{i}",
scenario_dir,
SPEED_VIOLATION_PROB,
TL_VIOLATION_PROB,
PERC_SPEED_DIFF,
)
round_names.append(f"{scenario_type}_round_{i}")
# client.reload_world()
except KeyboardInterrupt:
pass
finally:
print("\ndone.")
if __name__ == "__main__":
main()
| true | true |
f70f2c01e48108b120549634668375635de05a9a | 1,236 | py | Python | Project Files/app/models/Lead.py | s3llsw0rd/demo_ajax_db_search | 00a7e470f094f2941733abc529994d0be575c749 | [
"Unlicense"
] | null | null | null | Project Files/app/models/Lead.py | s3llsw0rd/demo_ajax_db_search | 00a7e470f094f2941733abc529994d0be575c749 | [
"Unlicense"
] | null | null | null | Project Files/app/models/Lead.py | s3llsw0rd/demo_ajax_db_search | 00a7e470f094f2941733abc529994d0be575c749 | [
"Unlicense"
] | null | null | null | from system.core.model import Model
from flask import jsonify
class Lead(Model):
    """Data-access model for the ``leads`` table with filtered, paged search."""

    def __init__(self):
        super(Lead, self).__init__()

    def get_leads(self, name, early, late, page, sort, order):
        """Return a JSON payload of leads matching the given filters.

        Parameters
        ----------
        name : str
            Substring matched against "first_name last_name" ('' disables).
        early, late : str
            Lower / upper bound on ``registered_datetime`` ('' disables).
        page : str or int
            1-based page number; 10 rows per page.
        sort : str
            Column to order by; validated against a whitelist ('' disables).
        order : str
            'ASC' or 'DESC'; anything else leaves the engine default order.
        """
        query = 'SELECT * FROM leads'
        data = {}
        prev = False
        if name != '':
            # NOTE(review): the quoting around :name ('"%":name"%"') looks
            # suspicious — verify against query_db's parameter binding.
            query += ' WHERE CONCAT(first_name, " ", last_name) LIKE "%":name"%"'
            prev = True
            data['name'] = name
        if early != '':
            query += ' AND' if prev else ' WHERE'
            query += ' registered_datetime > :start'
            prev = True
            data['start'] = early
        if late != '':
            query += ' AND' if prev else ' WHERE'
            query += ' registered_datetime < :stop'
            data['stop'] = late
        if sort != '':
            # Bug fix: check_sort used to rebind its own local and discard
            # the result, so unvalidated input was concatenated straight into
            # the SQL (ORDER BY injection).  Use the sanitized value instead.
            sort = self.check_sort(sort)
            query += ' ORDER BY ' + sort
        if order != '':
            if order == 'DESC':
                query += ' DESC'
            elif order == 'ASC':
                query += ' ASC'
        # First query counts/collects all matching rows for the pager, the
        # second fetches just the requested 10-row page.
        pages = self.db.query_db(query, data)
        query += ' LIMIT :offset, 10'
        data['offset'] = int(page) * 10 - 10
        results = self.db.query_db(query, data)
        return jsonify({'people': results, 'pages': pages})

    def check_sort(self, sort):
        """Return ``sort`` if it is a known column, else a safe default."""
        legal_vals = ['id', 'first_name', 'last_name', 'registered_datetime', 'email']
        if sort not in legal_vals:
            sort = 'registered_datetime'
        return sort
from flask import jsonify
class Lead(Model):
def __init__(self):
super(Lead, self).__init__()
def get_leads(self, name, early, late, page, sort, order):
query = 'SELECT * FROM leads'
data = {}
prev = False
if name != '':
query += ' WHERE CONCAT(first_name, " ", last_name) LIKE "%":name"%"'
prev = True
data['name'] = name
if early != '':
if prev:
query += ' AND'
else: query += ' WHERE'
query += ' registered_datetime > :start'
prev = True
data['start'] = early
if late != '':
if prev:
query += ' AND'
else: query += ' WHERE'
query += ' registered_datetime < :stop'
data['stop'] = late
if sort != '':
self.check_sort(sort)
query += ' ORDER BY ' + sort
if order != '':
if order == 'DESC':
query += ' DESC'
elif order == 'ASC':
query += ' ASC'
pages = self.db.query_db(query, data)
query += ' LIMIT :offset, 10'
data['offset'] = int(page)*10-10
results = self.db.query_db(query, data)
return jsonify({'people': results, 'pages': pages})
def check_sort(self, sort):
legal_vals = ['id','first_name','last_name','registered_datetime','email']
if not sort in legal_vals:
sort = 'registered_datetime' | true | true |
f70f2c2710810b2f835f58ec1ad1d0d901f82c75 | 3,566 | py | Python | harmony/harmony_checker/views.py | ecxia/harmony-checker | 78b198fa091bd8175a8ad8f233053b34654c3a09 | [
"MIT"
] | null | null | null | harmony/harmony_checker/views.py | ecxia/harmony-checker | 78b198fa091bd8175a8ad8f233053b34654c3a09 | [
"MIT"
] | null | null | null | harmony/harmony_checker/views.py | ecxia/harmony-checker | 78b198fa091bd8175a8ad8f233053b34654c3a09 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.contrib.auth import get_user, views as auth_views
from django.contrib.auth.decorators import login_required
from django.core.files import File
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.utils import timezone
from .forms import ScoreForm
from .models import Score, Result
from . import voiceleading
from music21 import converter
import os.path
# Create your views here.
def index(request):
    """Upload form plus the harmony-checking pipeline.

    GET renders an empty upload form.  POST saves the uploaded score, runs
    every selected musical test over the parsed stream, stores one Result
    per test, writes an annotated "checked" MusicXML copy, and redirects to
    the results page for the new score.
    """
    user = get_user(request)
    if request.method == 'POST':
        score_form = ScoreForm(request.POST, request.FILES)
        new_score = score_form.save()
        if user.is_authenticated:
            # Associate the upload with the account; anonymous uploads keep
            # the model's default user value.
            new_score.user = user
        new_score.score_display_name = os.path.basename(new_score.score.name)
        new_score.save()
        fname = str.format('{0}/{1}', settings.MEDIA_ROOT, new_score.score.url)
        stream = converter.parse(fname)
        # Vertical offset for annotations so successive tests stack their
        # markings instead of overlapping.
        end_height = 1
        for musical_test in new_score.musical_tests.all():
            # Each test is a function in the voiceleading module; it returns
            # the list of failure locations (empty list == test passed).
            musical_test_failures = getattr(voiceleading, musical_test.func)(
                stream,
                chordified_stream=stream.chordify(),
            )
            r = Result(score=new_score,musical_test=musical_test)
            r.passed = (len(musical_test_failures) == 0)
            r.save()
            stream, end_height = voiceleading.annotate_stream(musical_test_failures, stream, end_height)
        # fname is assumed to end in ".xml"; strip it and add "_checked".
        output_path = os.path.join("{}_checked.xml".format(fname[:-4]))
        stream.write(
            "musicxml", output_path
        )
        with open(output_path) as fp:
            contents = File(fp)
            new_score.checked_score.save(output_path, contents)
        new_score.checked_score_display_name = f"{new_score.score_display_name[:-4]}_checked.xml"
        new_score.save()
        return HttpResponseRedirect(
            reverse('harmony_checker:checked', args=(new_score.id,))
        )
    else:
        score_form = ScoreForm()
    return render(
        request,
        'harmony_checker/index.html',
        {'score_form': score_form, 'user': user, 'title': "Check Harmony"}
    )
def checked(request, score_id):
    """Render the results page for a previously checked score (404 if absent)."""
    user = get_user(request)
    context = {
        'score': get_object_or_404(Score, pk=score_id),
        'results': Result.objects.filter(score=score_id),
        'user': user,
        'title': 'Results',
    }
    return render(request, 'harmony_checker/checked.html', context)
def checked_score(request, score_id):
    """Serve the annotated (checked) MusicXML file as a download."""
    record = get_object_or_404(Score, pk=score_id)
    response = HttpResponse(record.checked_score, content_type='application/xml')
    response['Content-Disposition'] = (
        f"attachment; filename={record.checked_score_display_name}"
    )
    return response
def score(request, score_id):
    """Serve the originally uploaded MusicXML file as a download."""
    record = get_object_or_404(Score, pk=score_id)
    response = HttpResponse(record.score, content_type='application/xml')
    response['Content-Disposition'] = f"attachment; filename={record.score_display_name}"
    return response
@login_required
def profile(request):
    """Show the logged-in user's uploads, newest first."""
    current_user = get_user(request)
    uploads = Score.objects.filter(user=current_user).order_by('-upload_date')
    return render(
        request,
        'harmony_checker/profile.html',
        {
            'user': current_user,
            'scores': uploads,
            'title': "User Profile",
        },
    )
from django.contrib.auth import get_user, views as auth_views
from django.contrib.auth.decorators import login_required
from django.core.files import File
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.utils import timezone
from .forms import ScoreForm
from .models import Score, Result
from . import voiceleading
from music21 import converter
import os.path
def index(request):
user = get_user(request)
if request.method == 'POST':
score_form = ScoreForm(request.POST, request.FILES)
new_score = score_form.save()
if user.is_authenticated:
new_score.user = user
new_score.score_display_name = os.path.basename(new_score.score.name)
new_score.save()
fname = str.format('{0}/{1}', settings.MEDIA_ROOT, new_score.score.url)
stream = converter.parse(fname)
end_height = 1
for musical_test in new_score.musical_tests.all():
musical_test_failures = getattr(voiceleading, musical_test.func)(
stream,
chordified_stream=stream.chordify(),
)
r = Result(score=new_score,musical_test=musical_test)
r.passed = (len(musical_test_failures) == 0)
r.save()
stream, end_height = voiceleading.annotate_stream(musical_test_failures, stream, end_height)
output_path = os.path.join("{}_checked.xml".format(fname[:-4]))
stream.write(
"musicxml", output_path
)
with open(output_path) as fp:
contents = File(fp)
new_score.checked_score.save(output_path, contents)
new_score.checked_score_display_name = f"{new_score.score_display_name[:-4]}_checked.xml"
new_score.save()
return HttpResponseRedirect(
reverse('harmony_checker:checked', args=(new_score.id,))
)
else:
score_form = ScoreForm()
return render(
request,
'harmony_checker/index.html',
{'score_form': score_form, 'user': user, 'title': "Check Harmony"}
)
def checked(request, score_id):
user = get_user(request)
score = get_object_or_404(Score, pk=score_id)
results = Result.objects.filter(score=score_id)
return render(
request,
'harmony_checker/checked.html',
{
'score': score,
'results': results,
'user': user,
'title': 'Results'
}
)
def checked_score(request, score_id):
score = get_object_or_404(Score, pk=score_id)
response = HttpResponse(score.checked_score, content_type='application/xml')
response['Content-Disposition'] = f"attachment; filename={score.checked_score_display_name}"
return response
def score(request, score_id):
score = get_object_or_404(Score, pk=score_id)
response = HttpResponse(score.score, content_type='application/xml')
response['Content-Disposition'] = f"attachment; filename={score.score_display_name}"
return response
@login_required
def profile(request):
user = get_user(request)
scores = Score.objects.filter(user=user).order_by('-upload_date')
return render(
request,
'harmony_checker/profile.html',
{
'user': user,
'scores': scores,
'title': "User Profile"
}
) | true | true |
f70f2cf6ecbeb0ea1040a6f84e5170d04a4bfaa5 | 736 | py | Python | test/test_to_json.py | testinggg-art/Cyberbrain | e38c74c174e23aa386d005b03f09b30aa1b3a0ae | [
"MIT"
] | null | null | null | test/test_to_json.py | testinggg-art/Cyberbrain | e38c74c174e23aa386d005b03f09b30aa1b3a0ae | [
"MIT"
] | null | null | null | test/test_to_json.py | testinggg-art/Cyberbrain | e38c74c174e23aa386d005b03f09b30aa1b3a0ae | [
"MIT"
] | null | null | null | import re
def test_repr(tracer, rpc_stub):
class A:
pass
tracer.start()
match = re.match("foo", "foobar")
a = A()
tracer.stop()
from utils import return_GetFrame
frame_proto = return_GetFrame(rpc_stub, "test_repr")
binding_match_event = frame_proto.events[0]
assert (
binding_match_event.binding.repr
== "<re.Match object; span=(0, 3), match='foo'>"
)
assert (
binding_match_event.binding.value
== '{"repr": "<re.Match object; span=(0, 3), match=\'foo\'>"}'
)
binding_a_event = frame_proto.events[2]
assert binding_a_event.binding.repr == "<test_to_json.test_repr.<locals>.A object>"
assert binding_a_event.binding.value == "{}"
| 24.533333 | 87 | 0.626359 | import re
def test_repr(tracer, rpc_stub):
class A:
pass
tracer.start()
match = re.match("foo", "foobar")
a = A()
tracer.stop()
from utils import return_GetFrame
frame_proto = return_GetFrame(rpc_stub, "test_repr")
binding_match_event = frame_proto.events[0]
assert (
binding_match_event.binding.repr
== "<re.Match object; span=(0, 3), match='foo'>"
)
assert (
binding_match_event.binding.value
== '{"repr": "<re.Match object; span=(0, 3), match=\'foo\'>"}'
)
binding_a_event = frame_proto.events[2]
assert binding_a_event.binding.repr == "<test_to_json.test_repr.<locals>.A object>"
assert binding_a_event.binding.value == "{}"
| true | true |
f70f2d49a80dc28577e28b248c5d806981f90827 | 50,619 | py | Python | spectrochempy/core/dataset/nddataset.py | dcambie/spectrochempy | e376082d66be7a4c528b7d83be076d77534e39bd | [
"CECILL-B"
] | null | null | null | spectrochempy/core/dataset/nddataset.py | dcambie/spectrochempy | e376082d66be7a4c528b7d83be076d77534e39bd | [
"CECILL-B"
] | null | null | null | spectrochempy/core/dataset/nddataset.py | dcambie/spectrochempy | e376082d66be7a4c528b7d83be076d77534e39bd | [
"CECILL-B"
] | null | null | null | # -*- coding: utf-8 -*-
#
# ======================================================================================================================
# Copyright (©) 2015-2019 LCS
# Laboratoire Catalyse et Spectrochimie, Caen, France.
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT
# See full LICENSE agreement in the root directory
# ======================================================================================================================
"""
This module implements the |NDDataset| class.
"""
__all__ = ['NDDataset']
import textwrap
import warnings
import sys
import numpy as np
from traitlets import HasTraits, Instance, Bool, Float, validate, default, Dict, Union
from traittypes import Array
from spectrochempy.core.project.baseproject import AbstractProject
from spectrochempy.core.dataset.ndarray import NDArray, DEFAULT_DIM_NAME
from spectrochempy.core.dataset.ndcomplex import NDComplexArray
from spectrochempy.core.dataset.coord import Coord, LinearCoord
from spectrochempy.core.dataset.coordset import CoordSet
from spectrochempy.core.dataset.ndmath import NDMath, _set_ufuncs, _set_operators
from spectrochempy.core.dataset.ndio import NDIO
from spectrochempy.core.dataset.ndplot import NDPlot
from spectrochempy.core import error_, warning_
from spectrochempy.utils import (colored_output, SpectroChemPyException, SpectroChemPyWarning, MaskedConstant)
# Optional dependency: xarray support is enabled only when the package is
# importable.  ``xr`` is None otherwise, so callers should test HAS_XARRAY
# before using it.
HAS_XARRAY = False
try:
    import xarray as xr

    HAS_XARRAY = True  # pragma: no cover
except ImportError:
    xr = None  # pragma: no cover
# ======================================================================================================================
# NDDataset class definition
# ======================================================================================================================
class NDDataset(NDIO, NDPlot, NDMath, NDComplexArray):
# coordinates
_coordset = Instance(CoordSet, allow_none=True)
# model data (e.g., for fit)
_modeldata = Array(Float(), allow_none=True)
# some setting for NDDataset
_copy = Bool(False)
_labels_allowed = Bool(False) # no labels for NDDataset
# dataset can be members of a project.
# we use the abstract class to avoid circular imports.
_parent = Instance(AbstractProject, allow_none=True)
# For the GUI interface
# parameters state
_state = Dict()
# processed data (for GUI)
_processeddata = Array(Float(), allow_none=True)
# processed mask (for GUI)
_processedmask = Union((Bool(), Array(Bool()), Instance(MaskedConstant)))
# baseline data (for GUI)
_baselinedata = Array(Float(), allow_none=True)
# reference data (for GUI)
_referencedata = Array(Float(), allow_none=True)
# ------------------------------------------------------------------------------------------------------------------
# initialisation
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
    def __init__(self, data=None, coordset=None, coordunits=None, coordtitles=None, **kwargs):
        """
        The main N-dimensional dataset class used by |scpy|.

        The NDDataset is the main object used by SpectroChemPy. Like numpy ndarrays, NDDataset have the capability
        to be sliced, sorted and subject to mathematical operations. But, in addition, NDDataset may have units,
        can be masked and each dimension can have coordinates also with units. This makes NDDataset aware of unit
        compatibility, e.g., for binary operations such as additions or subtractions or during the application of
        mathematical operations. In addition or in replacement of numerical data for coordinates, NDDataset can
        also have labeled coordinates where labels can be different kinds of objects (strings, datetime,
        numpy nd.ndarray or other NDDatasets, etc.).

        Parameters
        ----------
        data : array of floats
            Data array contained in the object. The data can be a list, a tuple, a |ndarray|, a ndarray-like,
            a |NDArray| or any subclass of |NDArray|. Any size or shape of data is accepted. If not given, an empty
            |NDArray| will be created.
            At the initialisation the provided data will eventually be cast to a numpy-ndarray.
            If a subclass of |NDArray| is passed which already contains some mask, labels, or units, these
            elements will be used to accordingly set those of the created object. If possible, the provided data
            will not be copied for `data` input, but will be passed by reference, so you should make a copy of
            the `data` before passing them if that's the desired behavior or set the `copy` argument to True.
        coordset : An instance of |CoordSet|, optional
            `coords` contains the coordinates for the different dimensions of the `data`. if `coords` is provided,
            it must specify the `coord` and `labels` for all dimensions of the `data`.
            Multiple `coord`'s can be specified in a |CoordSet| instance for each dimension.
        coordunits : list, optional
            A list of units corresponding to the dimensions in the order of the coordset.
        coordtitles : list, optional
            A list of titles corresponding to the dimensions in the order of the coordset.
        **kwargs : dict
            See other parameters.

        Other Parameters
        ----------------
        dtype : str or dtype, optional, default=np.float64
            If specified, the data will be cast to this dtype, else the data will be cast to float64 or complex128.
        dims : list of chars, optional
            If specified the list must have a length equal to the number of data dimensions (ndim) and the chars
            must be taken among x,y,z,u,v,w or t. If not specified, the dimension names are automatically
            attributed in this order.
        name : str, optional
            A user friendly name for this object. If not given, the automatic `id` given at the object creation
            will be used as a name.
        labels : array of objects, optional
            Labels for the `data`. labels can be used only for 1D-datasets.
            The labels array may have an additional dimension, meaning several series of labels for the same data.
            The given array can be a list, a tuple, a |ndarray|, a ndarray-like, a |NDArray| or any subclass of
            |NDArray|.
        mask : array of bool or `NOMASK`, optional
            Mask for the data. The mask array must have the same shape as the data. The given array can be a list,
            a tuple, or a |ndarray|. Each value in the array must be `False` where the data are *valid* and True
            when they are not (like in numpy masked arrays). If `data` is already a :class:`~numpy.ma.MaskedArray`,
            or any array object (such as a |NDArray| or subclass of it), providing a `mask` here will cause the
            mask from the masked array to be ignored.
        units : |Unit| instance or str, optional
            Units of the data. If data is a |Quantity| then `units` is set to the unit of the `data`; if a unit
            is also explicitly provided an error is raised. Handling of units uses the
            `pint <https://pint.readthedocs.org/>`_ package.
        title : str, optional
            The title of the dimension. It will later be used for instance for labelling plots of the data.
            It is optional but recommended to give a title to each ndarray.
        dlabel : str, optional
            Alias of `title`.
        meta : dict-like object, optional
            Additional metadata for this object. Must be dict-like but no
            further restriction is placed on meta.
        author : str, optional
            Name(s) of the author(s) of this dataset. By default, the name of the computer where this dataset is
            created.
        description : str, optional
            An optional description of the nd-dataset. A shorter alias is `desc`.
        history : str, optional
            A string to add to the object history.
        copy : bool, optional
            Perform a copy of the passed object. Default is False.

        See Also
        --------
        Coord : Explicit coordinates object.
        LinearCoord : Implicit coordinates objet.
        CoordSet : Set of coordinates.

        Notes
        -----
        The underlying array in a |NDDataset| object can be accessed through the `data` attribute, which will
        return a conventional |ndarray|.

        Examples
        --------
        Usage by an end-user

        >>> from spectrochempy import *
        >>> x = NDDataset([1, 2, 3])
        >>> print(x.data) # doctest: +NORMALIZE_WHITESPACE
        [       1        2        3]
        """
        super().__init__(data, **kwargs)
        self._parent = None
        # eventually set the coordinates with optional units and title
        if isinstance(coordset, CoordSet):
            self.set_coordset(**coordset)
        else:
            # normalize coordset / coordunits / coordtitles to per-dimension lists
            if coordset is None:
                coordset = [None] * self.ndim
            if coordunits is None:
                coordunits = [None] * self.ndim
            if coordtitles is None:
                coordtitles = [None] * self.ndim
            _coordset = []
            for c, u, t in zip(coordset, coordunits, coordtitles):
                if not isinstance(c, CoordSet):
                    if isinstance(c, LinearCoord):
                        coord = LinearCoord(c)
                    else:
                        coord = Coord(c)
                    if u is not None:
                        coord.units = u
                    if t is not None:
                        coord.title = t
                else:
                    # units/titles cannot be applied to a whole CoordSet;
                    # they are only defined at the individual coordinate level
                    if u: # pragma: no cover
                        warning_('units have been set for a CoordSet, but this will be ignored '
                                 '(units are only defined at the coordinate level')
                    if t: # pragma: no cover
                        warning_('title will be ignored as they are only defined at the coordinates level')
                    coord = c
                _coordset.append(coord)
            if _coordset and set(_coordset) != {Coord()}: # if there are no coordinates, do nothing
                self.set_coordset(*_coordset)
# ------------------------------------------------------------------------------------------------------------------
# special methods
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
def __dir__(self):
# WARNING: be carefull to keep the present order of the three first elements! Needed for save/load operations
return ['dims', 'coordset', 'data', 'name', 'title', 'mask', 'units', 'meta', 'preferences',
'author', 'description', 'history', 'date', 'modified', 'origin', 'roi', 'offset', 'transposed',
'modeldata', 'processeddata', 'baselinedata', 'referencedata', 'state'] + NDIO().__dir__()
# ..................................................................................................................
    def __getitem__(self, items):
        """
        Slice the dataset and, when present, its coordinates accordingly.

        A string key (e.g. ds['wavelength']) is first tried as a coordinate
        selection; any other key is delegated to the parent class slicing.
        """
        saveditems = items
        # coordinate selection to test first
        if isinstance(items, str):
            try:
                return self._coordset[items]
            except Exception:
                # not a coordinate name: fall through to normal slicing below
                pass
        # slicing (return_index gives back the normalized per-dimension index)
        new, items = super().__getitem__(items, return_index=True)
        if new is None:
            return None
        if self._coordset is not None:
            names = self._coordset.names # all names of the current coordinates
            new_coords = [None] * len(names)
            for i, item in enumerate(items):
                # get the corresponding dimension name in the dims list
                name = self.dims[i]
                # get the corresponding index in the coordinate's names list
                # (coordset order may differ from dims order)
                idx = names.index(name)
                if self._coordset[idx].is_empty:
                    new_coords[idx] = Coord(None, name=name)
                elif isinstance(item, slice):
                    # add the slice on the corresponding coordinates on the dim to the new list of coordinates
                    if not isinstance(self._coordset[idx], CoordSet):
                        new_coords[idx] = self._coordset[idx][item]
                    else:
                        # we must slice all internal coordinates
                        newc = []
                        for c in self._coordset[idx]:
                            newc.append(c[item])
                        # we reverse to be sure the order will be kept for
                        # the internal coordinates
                        new_coords[idx] = CoordSet(*newc[::-1], name=name)
                        new_coords[idx]._default = self._coordset[idx]._default # set the same default coord
                        new_coords[idx]._is_same_dim = self._coordset[idx]._is_same_dim
                elif isinstance(item, (np.ndarray, list)):
                    # fancy indexing of the coordinate
                    new_coords[idx] = self._coordset[idx][item]
            new.set_coordset(*new_coords, keepnames=True)
        new.history = f'Slice extracted: ({saveditems})'
        return new
# ..................................................................................................................
    def __getattr__(self, item):
        """
        Dynamic attribute access for dimension/coordinate shortcuts.

        Supports the `ds.x`, `ds.y` syntax (returns the coordinate of that
        dimension) and `ds.x_<attr>` (returns `<attr>` of that coordinate,
        e.g. `ds.x_size`).
        """
        # when the attribute was not found
        if item in ["__numpy_ufunc__", "interface", '_pytestfixturefunction', '__dataclass_fields__',
                    '_ipython_canary_method_should_not_exist_', '_baseclass', '_fill_value', '_ax_lines', '_axcb',
                    'clevels', '__wrapped__', 'coords', '__await__',
                    '__aiter__'] or '_validate' in item or '_changed' in item:
            # raise an error so that traits, ipython operation and more ... will be handled correctly
            raise AttributeError
        # syntax such as ds.x, ds.y, etc...
        # NOTE(review): due to the `or self._coordset`, any attribute reaches
        # this branch when a coordset exists, not only single-letter dims —
        # confirm this is intentional.
        if item[0] in self.dims or self._coordset:
            # look also properties
            attribute = None
            index = 0
            if len(item) > 2 and item[1] == '_':
                # pattern `x_<attribute>`: split dim letter and attribute name
                attribute = item[1:]
                item = item[0]
                index = self.dims.index(item)
            if self._coordset:
                try:
                    c = self._coordset[item]
                    if isinstance(c, str) and c in self.dims:
                        # probably a reference to another coordinate name
                        c = self._coordset[c]
                    if c.name in self.dims or c._parent_dim in self.dims:
                        if attribute is not None:
                            # get the attribute
                            return getattr(c, attribute)
                        else:
                            return c
                    else:
                        raise AttributeError
                except Exception as err:
                    if item in self.dims:
                        return None
                    else:
                        raise err
            elif attribute is not None:
                if attribute == 'size':
                    # we want the size but there is no coords, get it from the data shape
                    return self.shape[index]
                else:
                    raise AttributeError(f'Can not find `{attribute}` when no coordinate is defined')
            return None
        raise AttributeError
    def __setattr__(self, key, value):
        """
        Intercept `ds.x = ...` style assignments to set dimension coordinates;
        everything else is delegated to the parent class.
        """
        if key in DEFAULT_DIM_NAME: # syntax such as ds.x, ds.y, etc...
            # Note the above test is important to avoid errors with traitlets
            # even if it looks redundant with the following
            if key in self.dims:
                if self._coordset is None:
                    # we need to create a coordset first
                    self.set_coordset(dict((self.dims[i], None) for i in range(self.ndim)))
                idx = self._coordset.names.index(key)
                _coordset = self._coordset
                # a list made only of Coord objects becomes a multi-coordinate
                # (CoordSet) attached to this single dimension
                listcoord = False
                if isinstance(value, list):
                    listcoord = all([isinstance(item, Coord) for item in value])
                if listcoord:
                    _coordset[idx] = list(CoordSet(value).to_dict().values())[0]
                    _coordset[idx].name = key
                    _coordset[idx]._is_same_dim = True
                elif isinstance(value, CoordSet):
                    if len(value) > 1:
                        value = CoordSet(value)
                    _coordset[idx] = list(value.to_dict().values())[0]
                    _coordset[idx].name = key
                    _coordset[idx]._is_same_dim = True
                elif isinstance(value, (Coord, LinearCoord)):
                    value.name = key
                    _coordset[idx] = value
                else:
                    # anything else is wrapped into a plain Coord
                    _coordset[idx] = Coord(value, name=key)
                # validate sizes/types before committing the new coordset
                _coordset = self._valid_coordset(_coordset)
                self._coordset.set(_coordset)
            else:
                raise AttributeError(f'Coordinate `{key}` is not used.')
        else:
            super().__setattr__(key, value)
# ..................................................................................................................
def __eq__(self, other, attrs=None):
attrs = self.__dir__()
for attr in (
'filename', 'preferences', 'name', 'description', 'history', 'date', 'modified', 'origin',
'show_datapoints', 'roi', 'offset', 'modeldata', 'processeddata', 'baselinedata', 'referencedata',
'state'):
# these attibutes are not used for comparison (comparison based on data and units!)
try:
attrs.remove(attr)
except ValueError:
pass
return super().__eq__(other, attrs)
# ..................................................................................................................
def __hash__(self):
# all instance of this class has same hash, so they can be compared
return super().__hash__ + hash(self._coordset)
# ------------------------------------------------------------------------------------------------------------------
# Default values
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
    @default('_coordset')
    def _coordset_default(self):
        # no coordinates until explicitly set
        return None

    # ..................................................................................................................
    @default('_modeldata')
    def _modeldata_default(self):
        # no model data until a fit (or similar) produces some
        return None

    # ..................................................................................................................
    @default('_processeddata')
    def _processeddata_default(self):
        # GUI-only slot, empty by default
        return None

    # ..................................................................................................................
    @default('_baselinedata')
    def _baselinedata_default(self):
        # GUI-only slot, empty by default
        return None

    # ..................................................................................................................
    @default('_referencedata')
    def _referencedata_default(self):
        # GUI-only slot, empty by default
        return None
# ------------------------------------------------------------------------------------------------------------------
# GUI options
# ------------------------------------------------------------------------------------------------------------------
# TODO: refactor the spectrochempy preference system to have a common basis
    @property
    def state(self):
        """dict - State of the GUI controller window for this dataset."""
        return self._state

    @state.setter
    def state(self, val):
        self._state = val

    @property
    def processeddata(self):
        """array - Processed data (GUI usage)."""
        return self._processeddata

    @processeddata.setter
    def processeddata(self, val):
        self._processeddata = val

    @property
    def processedmask(self):
        """Mask associated with the processed data (GUI usage)."""
        return self._processedmask

    @processedmask.setter
    def processedmask(self, val):
        self._processedmask = val

    @property
    def baselinedata(self):
        """array - Baseline data (GUI usage)."""
        return self._baselinedata

    @baselinedata.setter
    def baselinedata(self, val):
        self._baselinedata = val

    @property
    def referencedata(self):
        """array - Reference data (GUI usage)."""
        return self._referencedata

    @referencedata.setter
    def referencedata(self, val):
        self._referencedata = val
# ------------------------------------------------------------------------------------------------------------------
# Validators
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
    @validate('_coordset')
    def _coordset_validate(self, proposal):
        # traitlets validator: delegate the actual checks to _valid_coordset
        coords = proposal['value']
        return self._valid_coordset(coords)
    def _valid_coordset(self, coords):
        """
        Validate (and possibly coerce) a coordset against this dataset.

        Non-Coord NDArray entries are wrapped into Coord in place; coordinate
        sizes are checked against the corresponding data dimension.
        Returns the validated coords (or None when coords is None).
        """
        # uses in coords_validate and setattr
        if coords is None:
            return
        for k, coord in enumerate(coords):
            # empty (data-less) coordinates need no validation
            if coord is not None and not isinstance(coord, CoordSet) and coord.data is None:
                continue
            # For coord to be acceptable, we require at least a NDArray, a NDArray subclass or a CoordSet
            if not isinstance(coord, (LinearCoord, Coord, CoordSet)):
                if isinstance(coord, NDArray):
                    # coerce a bare NDArray into a Coord, in place
                    coord = coords[k] = Coord(coord)
                else:
                    raise TypeError('Coordinates must be an instance or a subclass of Coord class or NDArray, or of '
                                    f' CoordSet class, but an instance of {type(coord)} has been passed')
            if self.dims and coord.name in self.dims:
                # check the validity of the given coordinates in terms of size (if it correspond to one of the dims)
                size = coord.size
                if self.implements('NDDataset'):
                    idx = self._get_dims_index(coord.name)[0] # idx in self.dims
                    if size != self._data.shape[idx]:
                        raise ValueError(f'the size of a coordinates array must be None or be equal'
                                         f' to that of the respective `{coord.name}`'
                                         f' data dimension but coordinate size={size} != data shape[{idx}]='
                                         f'{self._data.shape[idx]}')
                else:
                    pass # bypass this checking for any other derived type (should be done in the subclass)
        coords._parent = self
        return coords
# ..................................................................................................................
@property
def _dict_dims(self):
_dict = {}
for index, dim in enumerate(self.dims):
if dim not in _dict:
_dict[dim] = {'size': self.shape[index], 'coord': getattr(self, dim)}
return _dict
# ------------------------------------------------------------------------------------------------------------------
# public methods
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
    def add_coordset(self, *coords, dims=None, **kwargs):
        """
        Add one or a set of coordinates from a dataset.

        Parameters
        ----------
        *coords : iterable
            Coordinates object(s).
        dims : list
            Name of the coordinates.
        **kwargs : dict
            Keywords passed to the coordset.
        """
        if not coords and not kwargs:
            # reset coordinates
            self._coordset = None
            return
        if self._coordset is None:
            # make the whole coordset at once
            self._coordset = CoordSet(*coords, dims=dims, **kwargs)
        else:
            # add one coordinate
            self._coordset._append(*coords, **kwargs)
        if self._coordset:
            # set a notifier to the updated traits of the CoordSet instance
            HasTraits.observe(self._coordset, self._dims_update, '_updated')
            # force it one time after this initialization
            self._coordset._updated = True
# ..................................................................................................................
def coord(self, dim='x'):
"""
Return the coordinates along the given dimension.
Parameters
----------
dim : int or str
A dimension index or name, default index = `x`.
If an integer is provided, it is equivalent to the `axis` parameter for numpy array.
Returns
-------
|Coord|
Coordinates along the given axis.
"""
idx = self._get_dims_index(dim)[0] # should generate an error if the
# dimension name is not recognized
if idx is None:
return None
if self._coordset is None:
return None
# idx is not necessarily the position of the coordinates in the CoordSet
# indeed, transposition may have taken place. So we need to retrieve the coordinates by its name
name = self.dims[idx]
if name in self._coordset.names:
idx = self._coordset.names.index(name)
return self._coordset[idx]
else:
error_(f'could not find this dimenson name: `{name}`')
return None
# ..................................................................................................................
@property
def coordset(self):
"""
|CoordSet| instance.
Contains the coordinates of the various dimensions of the dataset.
It's a readonly property. Use set_coords to change one or more coordinates at once.
"""
if self._coordset and all(c.is_empty for c in self._coordset):
# all coordinates are empty, this is equivalent to None for the coordset
return None
return self._coordset
# ..................................................................................................................
@coordset.setter
def coordset(self, coords):
if isinstance(coords, CoordSet):
self.set_coordset(**coords)
else:
self.set_coordset(coords)
# ..................................................................................................................
@property
def coordnames(self):
"""
List of the |Coord| names.
Read only property.
"""
if self._coordset is not None:
return self._coordset.names
# ..................................................................................................................
@property
def coordtitles(self):
"""
List of the |Coord| titles.
Read only property. Use set_coordtitle to eventually set titles.
"""
if self._coordset is not None:
return self._coordset.titles
# ..................................................................................................................
@property
def coordunits(self):
"""
List of the |Coord| units.
Read only property. Use set_coordunits to eventually set units.
"""
if self._coordset is not None:
return self._coordset.units
# ..................................................................................................................
    @property
    def data(self):
        """
        The ``data`` array.

        If there is no data but labels, then the labels are returned instead of data.
        """
        return super().data

    # ..................................................................................................................
    @data.setter
    def data(self, data):
        # as we can't write super().data = data, we call _set_data
        # see comment in the data.setter of NDArray
        super()._set_data(data)
# ..................................................................................................................
    def delete_coordset(self):
        """
        Delete all coordinate settings.
        """
        # drop the whole coordset; the data and dims themselves are kept
        self._coordset = None
# ..................................................................................................................
def implements(self, name=None):
"""
Check if the current object implements `NDDataset`.
Rather than isinstance(obj, NDDataset) use object.implements('NDDataset').
This is useful to check type without importing the module
Parameters
----------
name : str
Name of the object class. If None, the function returns the class name.
If name is given, it checks if it correspond to the current class name.
Returns
-------
str or bool
If name is given, a bool is returned
If name is None, the classname is returned
Examples
--------
>>> from spectrochempy import NDDataset, Coord
>>> co = Coord([1., 2., 3.])
>>> co.implements('NDDataset')
False
>>> co.implements('Coord')
True
>>> ds = NDDataset([1., 2., 3.])
>>> ds.implements()
'NDDataset'
"""
if name is None:
return 'NDDataset'
else:
return name == 'NDDataset'
# ..................................................................................................................
    @property
    def labels(self):
        # Labels are not valid for NDDataset (_labels_allowed is False):
        # accessing them is always an error.
        raise NotImplementedError # pragma: no cover
# ..................................................................................................................
    @property
    def modeldata(self):
        """
        |ndarray| - models data.

        Data eventually generated by modelling of the data.
        """
        return self._modeldata

    # ..................................................................................................................
    @modeldata.setter
    def modeldata(self, data):
        self._modeldata = data
# ..................................................................................................................
    @property
    def parent(self):
        """
        |Project| instance

        The parent project of the dataset.
        """
        return self._parent

    # ..................................................................................................................
    @parent.setter
    def parent(self, value):
        if self._parent is not None:
            # A parent project already exists for this dataset but the
            # entered values gives a different parent. This is not allowed,
            # as it can produce unpredictable results. We will first remove it
            # from the current project.
            self._parent.remove_dataset(self.name)
        self._parent = value
# ..................................................................................................................
    def set_coordset(self, *args, **kwargs):
        """
        Set one or more coordinates at once.

        Warnings
        --------
        This method replace all existing coordinates.

        See Also
        --------
        add_coords, set_coordtitles, set_coordunits
        """
        # drop any existing coordset, then rebuild it from scratch
        self._coordset = None
        self.add_coordset(*args, dims=self.dims, **kwargs)

    # ..................................................................................................................
    def set_coordtitles(self, *args, **kwargs):
        """
        Set titles of the one or more coordinates.
        """
        self._coordset.set_titles(*args, **kwargs)

    # ..................................................................................................................
    def set_coordunits(self, *args, **kwargs):
        """
        Set units of the one or more coordinates.
        """
        self._coordset.set_units(*args, **kwargs)
# ..................................................................................................................
    def sort(self, **kwargs):
        """
        Returns the dataset sorted along a given dimension.

        (by default, the last dimension [axis=-1]) using the numeric or label values.

        Parameters
        ----------
        dim : str or int, optional, default=-1
            dimension index or name along which to sort.
        pos : int , optional
            If labels are multidimensional - allow to sort on a define
            row of labels : labels[pos]. Experimental : Not yet checked.
        by : str among ['value', 'label'], optional, default=``value``
            Indicate if the sorting is following the order of labels or
            numeric coord values.
        descend : `bool`, optional, default=`False`
            If true the dataset is sorted in a descending direction. Default is False except if coordinates
            are reversed.
        inplace : bool, optional, default=`False`
            Flag to say that the method return a new object (default)
            or not (inplace=True).

        Returns
        -------
        sorted_dataset
        """
        inplace = kwargs.get('inplace', False)
        if not inplace:
            new = self.copy()
        else:
            new = self
        # parameter for selecting the level of labels (default None or 0)
        pos = kwargs.pop('pos', None)
        # parameter to say if selection is done by values or by labels
        by = kwargs.pop('by', 'value')
        # determine which axis is sorted (dims or axis can be passed in kwargs)
        # it will return a tuple with axis and dim
        axis, dim = self.get_axis(**kwargs)
        if axis is None:
            axis, dim = self.get_axis(axis=0)
        # get the corresponding coordinates (remember their order can differ from the order
        # of dimensions in dims, so we cannot just take the coord from the index)
        coord = getattr(self, dim) # get the coordinate using the syntax such as self.x
        descend = kwargs.pop('descend', None)
        if descend is None:
            # when not specified, default is False (except for reversed coordinates)
            descend = coord.reversed
        indexes = []
        for i in range(self.ndim):
            if i == axis:
                if not coord.has_data:
                    # sometimes we have only label for Coord objects.
                    # in this case, we sort labels if they exist!
                    if coord.is_labeled:
                        by = 'label'
                    else:
                        # nothing to do for sorting
                        # return self itself
                        return self
                # sorted index along the sorted axis; also reorder the coord
                args = coord._argsort(by=by, pos=pos, descend=descend)
                setattr(new, dim, coord[args])
                indexes.append(args)
            else:
                # other axes are left untouched
                indexes.append(slice(None))
        new._data = new._data[tuple(indexes)]
        if new.is_masked:
            new._mask = new._mask[tuple(indexes)]
        return new
# ..................................................................................................................
    def squeeze(self, *dims, inplace=False):
        """
        Remove single-dimensional entries from the shape of a NDDataset.

        Parameters
        ----------
        dim : None or int or tuple of ints, optional
            Selects a subset of the single-dimensional entries in the
            shape. If a dimension (dim) is selected with shape entry greater than
            one, an error is raised.
        inplace : bool, optional, default=`False`
            Flag to say that the method return a new object (default)
            or not (inplace=True).

        Returns
        -------
        squeezed
            The input array, but with all or a subset of the
            dimensions of length 1 removed.

        Raises
        ------
        ValueError
            If `dim` is not `None`, and the dimension being squeezed is not
            of length 1.
        """
        # make a copy of the original dims
        old = self.dims[:]
        # squeeze the data and determine which axis must be squeezed
        new, axis = super().squeeze(*dims, inplace=inplace, return_axis=True)
        if axis is not None and new._coordset is not None:
            # if there are coordinates they have to be squeezed as well (remove
            # coordinate for the squeezed axis)
            for i in axis:
                dim = old[i]
                del new._coordset[dim]
        return new
    def expand_dims(self, dim=None):
        """
        Expand the shape of an array.

        Insert a new axis that will appear at the `axis` position in the expanded array shape.

        .. note::
            Not implemented yet — this method currently does nothing and
            returns None (see the TODO below).

        Parameters
        ----------
        dim : int or str
            Position in the expanded axes where the new axis (or axes) is placed.

        Returns
        -------
        result : ndarray
            View of `a` with the number of dimensions increased.

        See Also
        --------
        squeeze : The inverse operation, removing singleton dimensions
        """ # TODO
# ..................................................................................................................
def swapdims(self, dim1, dim2, inplace=False):
"""
Interchange two dimensions of a NDDataset.
Parameters
----------
dim1 : int
First axis.
dim2 : int
Second axis.
inplace : bool, optional, default=`False`
Flag to say that the method return a new object (default)
or not (inplace=True).
Returns
-------
swaped_dataset
See Also
--------
transpose
"""
new = super().swapdims(dim1, dim2, inplace=inplace)
new.history = f'Data swapped between dims {dim1} and {dim2}'
return new
# ..................................................................................................................
    @property
    def T(self):
        """
        Transposed |NDDataset|.

        The same object is returned if `ndim` is less than 2.
        """
        # shorthand for transpose() with default (reversed) dimension order
        return self.transpose()
# ..................................................................................................................
    def take(self, indices, **kwargs):
        """
        Take elements from an array.

        Parameters
        ----------
        indices : array-like
            Indices of the elements to extract along the selected axis.
        **kwargs : dict
            Optional axis selection (`dim`/`axis`/`dims`), resolved by
            `_get_dims_from_args` / `_get_dims_index`.

        Returns
        -------
        |NDDataset|
            A new dataset with the selected elements.
        """
        # handle the various syntax to pass the axis
        dims = self._get_dims_from_args(**kwargs)
        axis = self._get_dims_index(dims)
        axis = axis[0] if axis else None
        if axis is None:
            # just do a fancy indexing
            return self[indices]
        if axis < 0:
            axis = self.ndim + axis
        # build an index placing `indices` at the requested axis:
        # Ellipsis fills the leading dimensions, full slices the trailing ones
        index = tuple([...] + [indices] + [slice(None) for i in range(self.ndim - 1 - axis)])
        new = self[index]
        return new
    def to_array(self):
        """
        Return a numpy masked array (i.e., other NDDataset attributes are lost).

        Examples
        ========

        >>> import spectrochempy as scp
        >>> dataset = scp.read('wodger.spg')
        >>> a = scp.to_array(dataset)

        equivalent to:

        >>> a = np.ma.array(dataset)

        or

        >>> a = dataset.masked_data
        """
        return np.ma.array(self)
# ..................................................................................................................
    def to_xarray(self, **kwargs):
        """
        Convert a NDDataset instance to an `~xarray.DataArray` object
        ( the xarray library must be available ).

        Returns
        -------
        object : a xarray.DataArray object, or None when xarray is not installed.
        """
        # Information about DataArray from the DataArray docstring
        #
        # Attributes
        # ----------
        # dims: tuple
        #     Dimension names associated with this array.
        # values: np.ndarray
        #     Access or modify DataArray values as a numpy array.
        # coords: dict-like
        #     Dictionary of DataArray objects that label values along each dimension.
        # name: str or None
        #     Name of this array.
        # attrs: OrderedDict
        #     Dictionary for holding arbitrary metadata.
        # Init docstring
        #
        # Parameters
        # ----------
        # data: array_like
        #     Values for this array. Must be an ``numpy.ndarray``, ndarray like,
        #     or castable to an ``ndarray``.
        # coords: sequence or dict of array_like objects, optional
        #     Coordinates (tick labels) to use for indexing along each dimension.
        #     If dict-like, should be a mapping from dimension names to the
        #     corresponding coordinates. If sequence-like, should be a sequence
        #     of tuples where the first element is the dimension name and the
        #     second element is the corresponding coordinate array_like object.
        # dims: str or sequence of str, optional
        #     Name(s) of the data dimension(s). Must be either a string (only
        #     for 1D data) or a sequence of strings with length equal to the
        #     number of dimensions. If this argument is omitted, dimension names
        #     are taken from ``coords`` (if possible) and otherwise default to
        #     ``['dim_0', ... 'dim_n']``.
        # name: str or None, optional
        #     Name of this array.
        # attrs: dict_like or None, optional
        #     Attributes to assign to the new instance. By default, an empty
        #     attribute dictionary is initialized.
        # encoding: dict_like or None, optional
        #     Dictionary specifying how to encode this array's data into a
        #     serialized format like netCDF4. Currently used keys (for netCDF)
        #     include '_FillValue', 'scale_factor', 'add_offset', 'dtype',
        #     'units' and 'calendar' (the later two only for datetime arrays).
        #     Unrecognized keys are ignored.
        if not HAS_XARRAY:
            warnings.warn('Xarray is not available! This function can not be used', SpectroChemPyWarning)
            return None
        # NOTE(review): only 1D (x) and 2D (x, y) datasets are handled here —
        # higher-dimensional datasets are not supported by this conversion.
        x, y = self.x, self.y
        tx = x.title
        if y:
            ty = y.title
            da = xr.DataArray(np.array(self.data, dtype=np.float64), coords=[(ty, y.data), (tx, x.data)], )
            da.attrs['units'] = self.units
        else:
            da = xr.DataArray(np.array(self.data, dtype=np.float64), coords=[(tx, x.data)], )
            da.attrs['units'] = self.units
        da.attrs['title'] = self.title
        return da
# ..................................................................................................................
def transpose(self, *dims, inplace=False):
"""
Permute the dimensions of a NDDataset.
Parameters
----------
dims : sequence of dimension indexes or names, optional
By default, reverse the dimensions, otherwise permute the dimensions
according to the values given.
inplace : bool, optional, default=`False`
Flag to say that the method return a new object (default)
or not (inplace=True).
Returns
-------
transposed_array
See Also
--------
swapdims : Interchange two dimensions of a NDDataset.
"""
new = super().transpose(*dims, inplace=inplace)
new.history = f'Data transposed between dims: {dims}' if dims else ''
return new
# ------------------------------------------------------------------------------------------------------------------
# private methods
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
def _cstr(self):
# Display the metadata of the object and partially the data
out = ''
out += ' name: {}\n'.format(self.name)
out += ' author: {}\n'.format(self.author)
out += ' created: {}\n'.format(self._date)
# out += ' modified: {}\n'.format(self._modified) if (self.modified - self.date).seconds > 1 else ''
wrapper1 = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 15, replace_whitespace=True,
width=self._text_width)
pars = self.description.strip().splitlines()
if pars:
out += ' description: '
desc = ''
if pars:
desc += '{}\n'.format(wrapper1.fill(pars[0]))
for par in pars[1:]:
desc += '{}\n'.format(textwrap.indent(par, ' ' * 15))
# the three escaped null characters are here to facilitate
# the generation of html outputs
desc = '\0\0\0{}\0\0\0\n'.format(desc.rstrip())
out += desc
if self._history:
pars = self.history
out += ' history: '
hist = ''
if pars:
hist += '{}\n'.format(wrapper1.fill(pars[0]))
for par in pars[1:]:
hist += '{}\n'.format(textwrap.indent(par, ' ' * 15))
# the three escaped null characters are here to facilitate
# the generation of html outputs
hist = '\0\0\0{}\0\0\0\n'.format(hist.rstrip())
out += hist
out += '{}\n'.format(self._str_value().rstrip())
out += '{}\n'.format(self._str_shape().rstrip()) if self._str_shape() else ''
out += '{}\n'.format(self._str_dims().rstrip())
if not out.endswith('\n'):
out += '\n'
out += '\n'
if not self._html_output:
return colored_output(out.rstrip())
else:
return out.rstrip()
# ..................................................................................................................
def _loc2index(self, loc, dim=-1):
# Return the index of a location (label or coordinates) along the dim
# This can work only if `coords` exists.
if self._coordset is None:
raise SpectroChemPyException('No coords have been defined. Slicing or selection'
' by location ({}) needs coords definition.'.format(loc))
coord = self.coord(dim)
return coord._loc2index(loc)
# ..................................................................................................................
def _str_dims(self):
if self.is_empty:
return ''
if len(self.dims) < 1 or not hasattr(self, "_coordset"):
return ''
if not self._coordset or len(self._coordset) < 1:
return ''
self._coordset._html_output = self._html_output # transfert the html flag if necessary: false by default
txt = self._coordset._cstr()
txt = txt.rstrip() # remove the trailing '\n'
return txt
_repr_dims = _str_dims
# ------------------------------------------------------------------------------------------------------------------
# events
# ------------------------------------------------------------------------------------------------------------------
def _dims_update(self, change=None):
# when notified that a coords names have been updated
_ = self.dims # fire an update
# ..................................................................................................................
# ======================================================================================================================
# module function
# ======================================================================================================================
# make some NDDataset operation accessible from the spectrochempy API
thismodule = sys.modules[__name__]
api_funcs = ['sort', 'copy', 'squeeze', 'swapdims', 'transpose', 'to_array', 'to_xarray', 'take', 'set_complex',
'set_quaternion', 'set_hypercomplex', 'component', 'to', 'to_base_units', 'to_reduced_units', 'ito',
'ito_base_units', 'ito_reduced_units', 'is_units_compatible', 'remove_masks']
# todo: check the fact that some function are defined also in ndmath
for funcname in api_funcs:
setattr(thismodule, funcname, getattr(NDDataset, funcname))
thismodule.__all__.append(funcname)
# load one method from NDIO
load = NDDataset.load
__all__ += ['load']
# ======================================================================================================================
# Set the operators
# ======================================================================================================================
_set_operators(NDDataset, priority=100000)
_set_ufuncs(NDDataset)
| 40.398244 | 120 | 0.481776 |
__all__ = ['NDDataset']
import textwrap
import warnings
import sys
import numpy as np
from traitlets import HasTraits, Instance, Bool, Float, validate, default, Dict, Union
from traittypes import Array
from spectrochempy.core.project.baseproject import AbstractProject
from spectrochempy.core.dataset.ndarray import NDArray, DEFAULT_DIM_NAME
from spectrochempy.core.dataset.ndcomplex import NDComplexArray
from spectrochempy.core.dataset.coord import Coord, LinearCoord
from spectrochempy.core.dataset.coordset import CoordSet
from spectrochempy.core.dataset.ndmath import NDMath, _set_ufuncs, _set_operators
from spectrochempy.core.dataset.ndio import NDIO
from spectrochempy.core.dataset.ndplot import NDPlot
from spectrochempy.core import error_, warning_
from spectrochempy.utils import (colored_output, SpectroChemPyException, SpectroChemPyWarning, MaskedConstant)
HAS_XARRAY = False
try:
import xarray as xr
HAS_XARRAY = True
except ImportError:
xr = None
class NDDataset(NDIO, NDPlot, NDMath, NDComplexArray):
_coordset = Instance(CoordSet, allow_none=True)
_modeldata = Array(Float(), allow_none=True)
_copy = Bool(False)
_labels_allowed = Bool(False)
_parent = Instance(AbstractProject, allow_none=True)
_state = Dict()
_processeddata = Array(Float(), allow_none=True)
_processedmask = Union((Bool(), Array(Bool()), Instance(MaskedConstant)))
_baselinedata = Array(Float(), allow_none=True)
_referencedata = Array(Float(), allow_none=True)
def __init__(self, data=None, coordset=None, coordunits=None, coordtitles=None, **kwargs):
super().__init__(data, **kwargs)
self._parent = None
if isinstance(coordset, CoordSet):
self.set_coordset(**coordset)
else:
if coordset is None:
coordset = [None] * self.ndim
if coordunits is None:
coordunits = [None] * self.ndim
if coordtitles is None:
coordtitles = [None] * self.ndim
_coordset = []
for c, u, t in zip(coordset, coordunits, coordtitles):
if not isinstance(c, CoordSet):
if isinstance(c, LinearCoord):
coord = LinearCoord(c)
else:
coord = Coord(c)
if u is not None:
coord.units = u
if t is not None:
coord.title = t
else:
if u:
warning_('units have been set for a CoordSet, but this will be ignored '
'(units are only defined at the coordinate level')
if t:
warning_('title will be ignored as they are only defined at the coordinates level')
coord = c
_coordset.append(coord)
if _coordset and set(_coordset) != {Coord()}:
self.set_coordset(*_coordset)
def __dir__(self):
return ['dims', 'coordset', 'data', 'name', 'title', 'mask', 'units', 'meta', 'preferences',
'author', 'description', 'history', 'date', 'modified', 'origin', 'roi', 'offset', 'transposed',
'modeldata', 'processeddata', 'baselinedata', 'referencedata', 'state'] + NDIO().__dir__()
def __getitem__(self, items):
saveditems = items
if isinstance(items, str):
try:
return self._coordset[items]
except Exception:
pass
new, items = super().__getitem__(items, return_index=True)
if new is None:
return None
if self._coordset is not None:
names = self._coordset.names
new_coords = [None] * len(names)
for i, item in enumerate(items):
name = self.dims[i]
idx = names.index(name)
if self._coordset[idx].is_empty:
new_coords[idx] = Coord(None, name=name)
elif isinstance(item, slice):
# add the slice on the corresponding coordinates on the dim to the new list of coordinates
if not isinstance(self._coordset[idx], CoordSet):
new_coords[idx] = self._coordset[idx][item]
else:
# we must slice all internal coordinates
newc = []
for c in self._coordset[idx]:
newc.append(c[item])
new_coords[idx] = CoordSet(*newc[::-1], name=name) # we reverse to be sure
# the order will be # kept for internal coordinates
new_coords[idx]._default = self._coordset[idx]._default # set the same default coord
new_coords[idx]._is_same_dim = self._coordset[idx]._is_same_dim
elif isinstance(item, (np.ndarray, list)):
new_coords[idx] = self._coordset[idx][item]
new.set_coordset(*new_coords, keepnames=True)
new.history = f'Slice extracted: ({saveditems})'
return new
# ..................................................................................................................
def __getattr__(self, item):
# when the attribute was not found
if item in ["__numpy_ufunc__", "interface", '_pytestfixturefunction', '__dataclass_fields__',
'_ipython_canary_method_should_not_exist_', '_baseclass', '_fill_value', '_ax_lines', '_axcb',
'clevels', '__wrapped__', 'coords', '__await__',
'__aiter__'] or '_validate' in item or '_changed' in item:
# raise an error so that traits, ipython operation and more ... will be handled correctly
raise AttributeError
# syntax such as ds.x, ds.y, etc...
if item[0] in self.dims or self._coordset:
# look also properties
attribute = None
index = 0
# print(item)
if len(item) > 2 and item[1] == '_':
attribute = item[1:]
item = item[0]
index = self.dims.index(item)
if self._coordset:
try:
c = self._coordset[item]
if isinstance(c, str) and c in self.dims:
# probaly a reference to another coordinate name
c = self._coordset[c]
if c.name in self.dims or c._parent_dim in self.dims:
if attribute is not None:
# get the attribute
return getattr(c, attribute)
else:
return c
else:
raise AttributeError
except Exception as err:
if item in self.dims:
return None
else:
raise err
elif attribute is not None:
if attribute == 'size':
# we want the size but there is no coords, get it from the data shape
return self.shape[index]
else:
raise AttributeError(f'Can not find `{attribute}` when no coordinate is defined')
return None
raise AttributeError
def __setattr__(self, key, value):
if key in DEFAULT_DIM_NAME: # syntax such as ds.x, ds.y, etc...
# Note the above test is important to avoid errors with traitlets
# even if it looks redundant with the folllowing
if key in self.dims:
if self._coordset is None:
# we need to create a coordset first
self.set_coordset(dict((self.dims[i], None) for i in range(self.ndim)))
idx = self._coordset.names.index(key)
_coordset = self._coordset
listcoord = False
if isinstance(value, list):
listcoord = all([isinstance(item, Coord) for item in value])
if listcoord:
_coordset[idx] = list(CoordSet(value).to_dict().values())[0]
_coordset[idx].name = key
_coordset[idx]._is_same_dim = True
elif isinstance(value, CoordSet):
if len(value) > 1:
value = CoordSet(value)
_coordset[idx] = list(value.to_dict().values())[0]
_coordset[idx].name = key
_coordset[idx]._is_same_dim = True
elif isinstance(value, (Coord, LinearCoord)):
value.name = key
_coordset[idx] = value
else:
_coordset[idx] = Coord(value, name=key)
_coordset = self._valid_coordset(_coordset)
self._coordset.set(_coordset)
else:
raise AttributeError(f'Coordinate `{key}` is not used.')
else:
super().__setattr__(key, value)
# ..................................................................................................................
def __eq__(self, other, attrs=None):
attrs = self.__dir__()
for attr in (
'filename', 'preferences', 'name', 'description', 'history', 'date', 'modified', 'origin',
'show_datapoints', 'roi', 'offset', 'modeldata', 'processeddata', 'baselinedata', 'referencedata',
'state'):
# these attibutes are not used for comparison (comparison based on data and units!)
try:
attrs.remove(attr)
except ValueError:
pass
return super().__eq__(other, attrs)
# ..................................................................................................................
def __hash__(self):
# all instance of this class has same hash, so they can be compared
return super().__hash__ + hash(self._coordset)
# ------------------------------------------------------------------------------------------------------------------
# Default values
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
@default('_coordset')
def _coordset_default(self):
return None
# ..................................................................................................................
@default('_modeldata')
def _modeldata_default(self):
return None
# ..................................................................................................................
@default('_processeddata')
def _processeddata_default(self):
return None
# ..................................................................................................................
@default('_baselinedata')
def _baselinedata_default(self):
return None
# ..................................................................................................................
@default('_referencedata')
def _referencedata_default(self):
return None
# ------------------------------------------------------------------------------------------------------------------
# GUI options
# ------------------------------------------------------------------------------------------------------------------
# TODO: refactor the spectrochempy preference system to have a common basis
@property
def state(self):
# state of the controller window for this dataset
return self._state
@state.setter
def state(self, val):
self._state = val
@property
def processeddata(self):
return self._processeddata
@processeddata.setter
def processeddata(self, val):
self._processeddata = val
@property
def processedmask(self):
return self._processedmask
@processedmask.setter
def processedmask(self, val):
self._processedmask = val
@property
def baselinedata(self):
return self._baselinedata
@baselinedata.setter
def baselinedata(self, val):
self._baselinedata = val
@property
def referencedata(self):
return self._referencedata
@referencedata.setter
def referencedata(self, val):
self._referencedata = val
# ------------------------------------------------------------------------------------------------------------------
# Validators
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
@validate('_coordset')
def _coordset_validate(self, proposal):
coords = proposal['value']
return self._valid_coordset(coords)
def _valid_coordset(self, coords):
# uses in coords_validate and setattr
if coords is None:
return
for k, coord in enumerate(coords):
if coord is not None and not isinstance(coord, CoordSet) and coord.data is None:
continue
# For coord to be acceptable, we require at least a NDArray, a NDArray subclass or a CoordSet
if not isinstance(coord, (LinearCoord, Coord, CoordSet)):
if isinstance(coord, NDArray):
coord = coords[k] = Coord(coord)
else:
raise TypeError('Coordinates must be an instance or a subclass of Coord class or NDArray, or of '
f' CoordSet class, but an instance of {type(coord)} has been passed')
if self.dims and coord.name in self.dims:
# check the validity of the given coordinates in terms of size (if it correspond to one of the dims)
size = coord.size
if self.implements('NDDataset'):
idx = self._get_dims_index(coord.name)[0] # idx in self.dims
if size != self._data.shape[idx]:
raise ValueError(f'the size of a coordinates array must be None or be equal'
f' to that of the respective `{coord.name}`'
f' data dimension but coordinate size={size} != data shape[{idx}]='
f'{self._data.shape[idx]}')
else:
pass # bypass this checking for any other derived type (should be done in the subclass)
coords._parent = self
return coords
# ..................................................................................................................
@property
def _dict_dims(self):
_dict = {}
for index, dim in enumerate(self.dims):
if dim not in _dict:
_dict[dim] = {'size': self.shape[index], 'coord': getattr(self, dim)}
return _dict
# ------------------------------------------------------------------------------------------------------------------
# public methods
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
def add_coordset(self, *coords, dims=None, **kwargs):
if not coords and not kwargs:
# reset coordinates
self._coordset = None
return
if self._coordset is None:
# make the whole coordset at once
self._coordset = CoordSet(*coords, dims=dims, **kwargs)
else:
# add one coordinate
self._coordset._append(*coords, **kwargs)
if self._coordset:
# set a notifier to the updated traits of the CoordSet instance
HasTraits.observe(self._coordset, self._dims_update, '_updated')
# force it one time after this initialization
self._coordset._updated = True
# ..................................................................................................................
def coord(self, dim='x'):
idx = self._get_dims_index(dim)[0] # should generate an error if the
# dimension name is not recognized
if idx is None:
return None
if self._coordset is None:
return None
# idx is not necessarily the position of the coordinates in the CoordSet
# indeed, transposition may have taken place. So we need to retrieve the coordinates by its name
name = self.dims[idx]
if name in self._coordset.names:
idx = self._coordset.names.index(name)
return self._coordset[idx]
else:
error_(f'could not find this dimenson name: `{name}`')
return None
# ..................................................................................................................
@property
def coordset(self):
if self._coordset and all(c.is_empty for c in self._coordset):
# all coordinates are empty, this is equivalent to None for the coordset
return None
return self._coordset
# ..................................................................................................................
@coordset.setter
def coordset(self, coords):
if isinstance(coords, CoordSet):
self.set_coordset(**coords)
else:
self.set_coordset(coords)
# ..................................................................................................................
@property
def coordnames(self):
if self._coordset is not None:
return self._coordset.names
# ..................................................................................................................
@property
def coordtitles(self):
if self._coordset is not None:
return self._coordset.titles
# ..................................................................................................................
@property
def coordunits(self):
if self._coordset is not None:
return self._coordset.units
# ..................................................................................................................
@property
def data(self):
return super().data
# ..................................................................................................................
@data.setter
def data(self, data):
# as we can't write super().data = data, we call _set_data
super()._set_data(data)
def delete_coordset(self):
self._coordset = None
def implements(self, name=None):
if name is None:
return 'NDDataset'
else:
return name == 'NDDataset'
@property
def labels(self):
raise NotImplementedError
@property
def modeldata(self):
return self._modeldata
@modeldata.setter
def modeldata(self, data):
self._modeldata = data
@property
def parent(self):
return self._parent
@parent.setter
def parent(self, value):
if self._parent is not None:
self._parent.remove_dataset(self.name)
self._parent = value
def set_coordset(self, *args, **kwargs):
self._coordset = None
self.add_coordset(*args, dims=self.dims, **kwargs)
def set_coordtitles(self, *args, **kwargs):
self._coordset.set_titles(*args, **kwargs)
def set_coordunits(self, *args, **kwargs):
self._coordset.set_units(*args, **kwargs)
def sort(self, **kwargs):
inplace = kwargs.get('inplace', False)
if not inplace:
new = self.copy()
else:
new = self
pos = kwargs.pop('pos', None)
by = kwargs.pop('by', 'value')
axis, dim = self.get_axis(**kwargs)
if axis is None:
axis, dim = self.get_axis(axis=0)
coord = getattr(self, dim)
descend = kwargs.pop('descend', None)
if descend is None:
descend = coord.reversed
indexes = []
for i in range(self.ndim):
if i == axis:
if not coord.has_data:
if coord.is_labeled:
by = 'label'
else:
return self
args = coord._argsort(by=by, pos=pos, descend=descend)
setattr(new, dim, coord[args])
indexes.append(args)
else:
indexes.append(slice(None))
new._data = new._data[tuple(indexes)]
if new.is_masked:
new._mask = new._mask[tuple(indexes)]
return new
def squeeze(self, *dims, inplace=False):
old = self.dims[:]
new, axis = super().squeeze(*dims, inplace=inplace, return_axis=True)
if axis is not None and new._coordset is not None:
for i in axis:
dim = old[i]
del new._coordset[dim]
return new
def expand_dims(self, dim=None):
def swapdims(self, dim1, dim2, inplace=False):
new = super().swapdims(dim1, dim2, inplace=inplace)
new.history = f'Data swapped between dims {dim1} and {dim2}'
return new
@property
def T(self):
return self.transpose()
def take(self, indices, **kwargs):
dims = self._get_dims_from_args(**kwargs)
axis = self._get_dims_index(dims)
axis = axis[0] if axis else None
if axis is None:
return self[indices]
if axis < 0:
axis = self.ndim + axis
index = tuple([...] + [indices] + [slice(None) for i in range(self.ndim - 1 - axis)])
new = self[index]
return new
def to_array(self):
return np.ma.array(self)
def to_xarray(self, **kwargs):
# serialized format like netCDF4. Currently used keys (for netCDF)
# include '_FillValue', 'scale_factor', 'add_offset', 'dtype',
# 'units' and 'calendar' (the later two only for datetime arrays).
# Unrecognized keys are ignored.
if not HAS_XARRAY:
warnings.warn('Xarray is not available! This function can not be used', SpectroChemPyWarning)
return None
x, y = self.x, self.y
tx = x.title
if y:
ty = y.title
da = xr.DataArray(np.array(self.data, dtype=np.float64), coords=[(ty, y.data), (tx, x.data)], )
da.attrs['units'] = self.units
else:
da = xr.DataArray(np.array(self.data, dtype=np.float64), coords=[(tx, x.data)], )
da.attrs['units'] = self.units
da.attrs['title'] = self.title
return da
# ..................................................................................................................
def transpose(self, *dims, inplace=False):
new = super().transpose(*dims, inplace=inplace)
new.history = f'Data transposed between dims: {dims}' if dims else ''
return new
# ------------------------------------------------------------------------------------------------------------------
# private methods
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
def _cstr(self):
# Display the metadata of the object and partially the data
out = ''
out += ' name: {}\n'.format(self.name)
out += ' author: {}\n'.format(self.author)
out += ' created: {}\n'.format(self._date)
# out += ' modified: {}\n'.format(self._modified) if (self.modified - self.date).seconds > 1 else ''
wrapper1 = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 15, replace_whitespace=True,
width=self._text_width)
pars = self.description.strip().splitlines()
if pars:
out += ' description: '
desc = ''
if pars:
desc += '{}\n'.format(wrapper1.fill(pars[0]))
for par in pars[1:]:
desc += '{}\n'.format(textwrap.indent(par, ' ' * 15))
# the three escaped null characters are here to facilitate
# the generation of html outputs
desc = '\0\0\0{}\0\0\0\n'.format(desc.rstrip())
out += desc
if self._history:
pars = self.history
out += ' history: '
hist = ''
if pars:
hist += '{}\n'.format(wrapper1.fill(pars[0]))
for par in pars[1:]:
hist += '{}\n'.format(textwrap.indent(par, ' ' * 15))
# the three escaped null characters are here to facilitate
# the generation of html outputs
hist = '\0\0\0{}\0\0\0\n'.format(hist.rstrip())
out += hist
out += '{}\n'.format(self._str_value().rstrip())
out += '{}\n'.format(self._str_shape().rstrip()) if self._str_shape() else ''
out += '{}\n'.format(self._str_dims().rstrip())
if not out.endswith('\n'):
out += '\n'
out += '\n'
if not self._html_output:
return colored_output(out.rstrip())
else:
return out.rstrip()
# ..................................................................................................................
def _loc2index(self, loc, dim=-1):
# Return the index of a location (label or coordinates) along the dim
# This can work only if `coords` exists.
if self._coordset is None:
raise SpectroChemPyException('No coords have been defined. Slicing or selection'
' by location ({}) needs coords definition.'.format(loc))
coord = self.coord(dim)
return coord._loc2index(loc)
# ..................................................................................................................
def _str_dims(self):
if self.is_empty:
return ''
if len(self.dims) < 1 or not hasattr(self, "_coordset"):
return ''
if not self._coordset or len(self._coordset) < 1:
return ''
self._coordset._html_output = self._html_output # transfert the html flag if necessary: false by default
txt = self._coordset._cstr()
txt = txt.rstrip() # remove the trailing '\n'
return txt
_repr_dims = _str_dims
# ------------------------------------------------------------------------------------------------------------------
# events
# ------------------------------------------------------------------------------------------------------------------
def _dims_update(self, change=None):
# when notified that a coords names have been updated
_ = self.dims # fire an update
# ..................................................................................................................
# ======================================================================================================================
# module function
# ======================================================================================================================
# make some NDDataset operation accessible from the spectrochempy API
thismodule = sys.modules[__name__]
api_funcs = ['sort', 'copy', 'squeeze', 'swapdims', 'transpose', 'to_array', 'to_xarray', 'take', 'set_complex',
'set_quaternion', 'set_hypercomplex', 'component', 'to', 'to_base_units', 'to_reduced_units', 'ito',
'ito_base_units', 'ito_reduced_units', 'is_units_compatible', 'remove_masks']
# todo: check the fact that some function are defined also in ndmath
for funcname in api_funcs:
setattr(thismodule, funcname, getattr(NDDataset, funcname))
thismodule.__all__.append(funcname)
# load one method from NDIO
load = NDDataset.load
__all__ += ['load']
# ======================================================================================================================
# Set the operators
# ======================================================================================================================
_set_operators(NDDataset, priority=100000)
_set_ufuncs(NDDataset)
| true | true |
f70f2e57c996f7c9bfa084739916de3701ff157b | 4,564 | py | Python | kensu/client/models/process_lineage.py | vidma/kensu-py | aae1e04373f03c988d55772fde6563de3ca9f375 | [
"Apache-2.0"
] | 16 | 2021-04-28T13:22:41.000Z | 2022-03-02T10:45:19.000Z | kensu/client/models/process_lineage.py | vidma/kensu-py | aae1e04373f03c988d55772fde6563de3ca9f375 | [
"Apache-2.0"
] | 12 | 2021-05-17T08:06:42.000Z | 2022-02-28T22:43:04.000Z | kensu/client/models/process_lineage.py | vidma/kensu-py | aae1e04373f03c988d55772fde6563de3ca9f375 | [
"Apache-2.0"
] | 5 | 2021-04-27T15:02:16.000Z | 2021-10-15T16:07:21.000Z | # coding: utf-8
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: beta
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
class ProcessLineage(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'operation_logic': 'str',
'pk': 'ProcessLineagePK'
}
attribute_map = {
'name': 'name',
'operation_logic': 'operationLogic',
'pk': 'pk'
}
def __init__(self, name=None, operation_logic=None, pk=None):
"""
ProcessLineage - a model defined in Swagger
"""
self._name = None
self._operation_logic = None
self._pk = None
self.name = name
if operation_logic is not None:
self.operation_logic = operation_logic
self.pk = pk
@property
def name(self):
"""
Gets the name of this ProcessLineage.
:return: The name of this ProcessLineage.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this ProcessLineage.
:param name: The name of this ProcessLineage.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def operation_logic(self):
"""
Gets the operation_logic of this ProcessLineage.
data update operation logic, e.g. 'REPLACE', 'UPDATE', 'APPEND'. Default is 'REPLACE'.
:return: The operation_logic of this ProcessLineage.
:rtype: str
"""
return self._operation_logic
@operation_logic.setter
def operation_logic(self, operation_logic):
"""
Sets the operation_logic of this ProcessLineage.
data update operation logic, e.g. 'REPLACE', 'UPDATE', 'APPEND'. Default is 'REPLACE'.
:param operation_logic: The operation_logic of this ProcessLineage.
:type: str
"""
self._operation_logic = operation_logic
@property
def pk(self):
"""
Gets the pk of this ProcessLineage.
:return: The pk of this ProcessLineage.
:rtype: ProcessLineagePK
"""
return self._pk
@pk.setter
def pk(self, pk):
"""
Sets the pk of this ProcessLineage.
:param pk: The pk of this ProcessLineage.
:type: ProcessLineagePK
"""
if pk is None:
raise ValueError("Invalid value for `pk`, must not be `None`")
self._pk = pk
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ProcessLineage):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 25.355556 | 105 | 0.548861 |
from pprint import pformat
from six import iteritems
class ProcessLineage(object):
    """Swagger-generated model describing a process lineage entry.

    Follows the generated-client convention: ``swagger_types`` maps attribute
    names to their declared types, and ``attribute_map`` maps attribute names
    to the JSON keys used on the wire.
    """

    swagger_types = {
        'name': 'str',
        'operation_logic': 'str',
        'pk': 'ProcessLineagePK'
    }

    attribute_map = {
        'name': 'name',
        'operation_logic': 'operationLogic',
        'pk': 'pk'
    }

    def __init__(self, name=None, operation_logic=None, pk=None):
        """Build a ProcessLineage.

        :param name: required lineage name; ``None`` raises ``ValueError``
        :param operation_logic: optional data-update logic, e.g. ``'REPLACE'``,
            ``'UPDATE'`` or ``'APPEND'``
        :param pk: required primary key; ``None`` raises ``ValueError``
        """
        self._name = None
        self._operation_logic = None
        self._pk = None
        # Assign through the properties so required-field validation runs.
        self.name = name
        if operation_logic is not None:
            self.operation_logic = operation_logic
        self.pk = pk

    @property
    def name(self):
        """Lineage name (required).

        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Set the lineage name, rejecting ``None``."""
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._name = name

    @property
    def operation_logic(self):
        """Data-update operation logic, e.g. 'REPLACE', 'UPDATE', 'APPEND'.

        :rtype: str
        """
        return self._operation_logic

    @operation_logic.setter
    def operation_logic(self, operation_logic):
        """Set the data-update operation logic (no validation applied)."""
        self._operation_logic = operation_logic

    @property
    def pk(self):
        """Primary key of this ProcessLineage.

        :rtype: ProcessLineagePK
        """
        return self._pk

    @pk.setter
    def pk(self, pk):
        """Set the primary key, rejecting ``None``."""
        if pk is None:
            raise ValueError("Invalid value for `pk`, must not be `None`")
        self._pk = pk

    def to_dict(self):
        """Serialise the model to a plain dict, converting nested models
        found directly, inside list values, or as dict values."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed (``pprint``) form of :meth:`to_dict`."""
        return pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for ``print`` and ``pprint``."""
        return self.to_str()

    def __eq__(self, other):
        """Field-wise equality; any non-ProcessLineage value compares unequal."""
        return isinstance(other, ProcessLineage) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not self == other
| true | true |
f70f2eb5e2fcbf95fe6be450017498f13c7cccf9 | 14,369 | py | Python | year_2018/day_15.py | gchazot/aoc | 1926114b1060a927be3f87732ba0a399afd98ae4 | [
"MIT"
] | 1 | 2020-04-12T16:14:29.000Z | 2020-04-12T16:14:29.000Z | year_2018/day_15.py | gchazot/aoc | 1926114b1060a927be3f87732ba0a399afd98ae4 | [
"MIT"
] | null | null | null | year_2018/day_15.py | gchazot/aoc | 1926114b1060a927be3f87732ba0a399afd98ae4 | [
"MIT"
] | null | null | null | import collections
import itertools
import math
import unittest
import aoc_utils.geometry
from aoc_utils import char_map, data
class TestCoordinatesUtils(unittest.TestCase):
    """Unit tests for the reading-order tie-breaking helpers."""

    def test_solve_tie(self):
        # Empty input yields no winner at all.
        self.assertIsNone(solve_tie([]))
        cases = [
            ([(12, 34)], (12, 34)),
            ([(1, 1), (2, 2)], (1, 1)),
            ([(2, 2), (1, 1)], (1, 1)),
            ([(1, 2), (2, 1)], (2, 1)),
            ([(2, 1), (1, 2)], (2, 1)),
        ]
        # Reading order: smallest y wins, then smallest x.
        for options, expected in cases:
            self.assertEqual(expected, solve_tie(options))
def solve_tie(options):
    """Break a tie between candidate coordinates using reading order.

    :param options: sequence of ``(x, y)`` coordinate tuples
    :return: the coordinate that comes first in reading order (smallest y,
        then smallest x), or ``None`` when *options* is empty
    """
    if not options:
        return None
    # min() with a reversed-tuple key is O(n), versus sorting the whole list.
    return min(options, key=lambda coordinates: coordinates[::-1])
def sorted_by_priority(options):
    """Sort coordinates into reading order (ascending y, then ascending x)."""
    return sorted(options, key=lambda coordinates: coordinates[::-1])
def reverse_coordinates(coordinates):
    """Return *coordinates* with its components reversed, as a tuple.

    Turns an ``(x, y)`` pair into ``(y, x)``, which is the reading-order
    sort key used throughout this module.
    """
    # tuple(reversed(...)) directly; the generator wrapper was redundant.
    return tuple(reversed(coordinates))
class FindAllClosestRules(char_map.ProgressRules):
    """Exploration rules that collect every closest target square.

    Exploration keeps expanding until a target is reached; every target seen
    before the stop takes effect is recorded in ``results`` (with a
    breadth-first explorer these are the equally-closest targets -- see
    ``char_map.MapExplorer``).
    """

    def __init__(self, targets, allowed_values):
        super(FindAllClosestRules, self).__init__(allowed_values)
        self._targets = targets
        self._found_one = False
        self.results = []

    def stop_progressing(self):
        """Stop as soon as at least one target has been reached."""
        return self._found_one

    def examine(self, coordinates):
        """Record *coordinates* when it is a target.

        Returns False for targets (do not expand past them), True otherwise.
        """
        if coordinates not in self._targets:
            return True
        self._found_one = True
        self.results.append(coordinates)
        return False

    def solve_tie(self, coordinate_options):
        """Break ties between frontier squares in reading order."""
        return solve_tie(coordinate_options)
class TestCaves(unittest.TestCase):
    """Exercises the Caves combat simulation, from individual helpers up to
    complete battles with known outcomes (AoC 2018 day 15 examples)."""
    def make_default_caves(self):
        """Fixture map: one elf at (1, 1), goblins at (4, 1), (2, 3), (5, 3)."""
        caves = Caves([
            "#######",
            "#E..G.#",
            "#...#.#",
            "#.G.#G#",
            "#######",
        ])
        return caves
    def test_init_fighters(self):
        caves = self.make_default_caves()
        fighters = caves.fighters
        self.assertSetEqual({'E', 'G'}, set(fighters.keys()))
        # Every unit starts with 200 hit points.
        self.assertEqual({(1, 1): 200}, fighters['E'])
        self.assertEqual({(4, 1): 200, (2, 3): 200, (5, 3): 200}, fighters['G'])
    def test_get_targets(self):
        caves = self.make_default_caves()
        self.assertListEqual([(4, 1), (2, 3), (5, 3)], list(caves.get_targets("E")))
        self.assertListEqual([(1, 1)], list(caves.get_targets("G")))
    def test_get_in_range(self):
        caves = self.make_default_caves()
        self.assertListEqual([(3, 1), (5, 1), (2, 2), (5, 2), (1, 3), (3, 3)],
                             list(caves.get_in_range("E")))
        self.assertListEqual([(2, 1), (1, 2)],
                             list(caves.get_in_range("G")))
    def test_get_coordinates_around(self):
        caves = self.make_default_caves()
        self.assertListEqual([(2, 1), (1, 2)], list(caves.get_coordinates_around((1, 1))))
        self.assertListEqual([(3, 1), (5, 1)], list(caves.get_coordinates_around((4, 1))))
        self.assertListEqual([(2, 2), (1, 3), (3, 3)], list(caves.get_coordinates_around((2, 3))))
        self.assertListEqual([(5, 2)], list(caves.get_coordinates_around((5, 3))))
    def test_find_all_closest_rules(self):
        # A wall right of the elf forces exploration downwards first.
        caves = Caves([
            "#######",
            "#E#.G.#",
            "#...#.#",
            "#.G.#G#",
            "#######",
        ])
        finder = char_map.MapExplorer(caves._caves)
        rules = FindAllClosestRules(
            targets=[(3, 1), (5, 1), (2, 2), (5, 2), (1, 3), (3, 3)],
            allowed_values=[EMPTY_VALUE]
        )
        finder.explore(start_point=(1, 1), rules=rules)
        self.assertListEqual([(2, 2), (1, 3)], list(rules.results))
    def test_iterate_units(self):
        caves = self.make_default_caves()
        # Units come back in reading order (top-to-bottom, left-to-right).
        self.assertListEqual([(1, 1), (4, 1), (2, 3), (5, 3)], caves.iterate_units())
    def test_get_attack_target(self):
        caves_2 = Caves([
            "#######",
            "#..EG.#",
            "#...#.#",
            "#.G.#G#",
            "#######",
        ])
        self.assertEqual((4, 1), caves_2.get_attack_target((3, 1), 'E'))
        self.assertEqual((3, 1), caves_2.get_attack_target((4, 1), 'G'))
        # Goblins with no adjacent elf have no attack target.
        self.assertEqual(None, caves_2.get_attack_target((2, 3), 'G'))
        self.assertEqual(None, caves_2.get_attack_target((5, 3), 'G'))
    def test_find_next_step(self):
        caves = self.make_default_caves()
        self.assertEqual((2, 1), caves.find_next_step((1, 1), 'E'))
        self.assertEqual((3, 1), caves.find_next_step((4, 1), 'G'))
        self.assertEqual((2, 2), caves.find_next_step((2, 3), 'G'))
        # The goblin at (5, 3) cannot reach any square next to the elf.
        self.assertEqual(None, caves.find_next_step((5, 3), 'G'))
    def test_play_unit(self):
        caves = self.make_default_caves()
        fighters = caves.fighters
        caves.play_unit((1, 1), 'E')
        self.assertEqual({(2, 1): 200}, fighters['E'])
        self.assertEqual({(4, 1): 200, (2, 3): 200, (5, 3): 200}, fighters['G'])
        caves.play_unit((2, 1), 'E')
        self.assertEqual({(3, 1): 200}, fighters['E'])
        self.assertEqual({(4, 1): 197, (2, 3): 200, (5, 3): 200}, fighters['G'])
        # 65 more attacks at strength 3 take the adjacent goblin from 197 to 2 HP.
        for _ in range(65):
            caves.play_unit((3, 1), 'E')
        self.assertEqual({(3, 1): 200}, fighters['E'])
        self.assertEqual({(4, 1): 2, (2, 3): 200, (5, 3): 200}, fighters['G'])
        # The next attack kills it and removes it from the roster.
        caves.play_unit((3, 1), 'E')
        self.assertEqual({(3, 1): 200}, fighters['E'])
        self.assertEqual({(2, 3): 200, (5, 3): 200}, fighters['G'])
    def test_play_round(self):
        caves = self.make_default_caves()
        fighters = caves.fighters
        self.assertFalse(caves.play_round())
        self.assertEqual({(2, 1): 194}, fighters['E'])
        self.assertEqual({(3, 1): 200, (2, 2): 200, (5, 3): 200}, fighters['G'])
        self.assertTrue(caves.play_round())
        self.assertEqual({(2, 1): 188}, fighters['E'])
        self.assertEqual({(3, 1): 197, (2, 2): 200, (5, 3): 200}, fighters['G'])
        for _ in range(31):
            self.assertTrue(caves.play_round())
        self.assertEqual({(2, 1): 2}, fighters['E'])
        self.assertEqual({(3, 1): 104, (2, 2): 200, (5, 3): 200}, fighters['G'])
        # The elf dies during this round, so the fight ends mid-round.
        self.assertRaises(FightIsOver, caves.play_round)
        self.assertEqual({}, fighters['E'])
        self.assertEqual({(3, 1): 101, (2, 2): 200, (5, 3): 200}, fighters['G'])
    def test_play(self):
        caves = self.make_default_caves()
        fighters = caves.fighters
        self.assertEqual(16533, caves.play())
        self.assertEqual({}, fighters['E'])
        self.assertEqual({(3, 1): 101, (2, 2): 200, (5, 3): 200}, fighters['G'])
    def test_play_examples(self):
        """All worked battle examples from the puzzle statement."""
        def check(expected_outcome, cave_lines, echo=False):
            # Play a battle to completion and compare its outcome score.
            caves = Caves(cave_lines)
            outcome = caves.play()
            if echo:
                caves.echo()
            self.assertEqual(expected_outcome, outcome)
        check(27730, [
            '#######',
            '#.G...#',
            '#...EG#',
            '#.#.#G#',
            '#..G#E#',
            '#.....#',
            '#######',
        ])
        check(36334, [
            '#######',
            '#G..#E#',
            '#E#E.E#',
            '#G.##.#',
            '#...#E#',
            '#...E.#',
            '#######',
        ])
        check(39514, [
            '#######',
            '#E..EG#',
            '#.#G.E#',
            '#E.##E#',
            '#G..#.#',
            '#..E#.#',
            '#######',
        ])
        check(27755, [
            '#######',
            '#E.G#.#',
            '#.#G..#',
            '#G.#.G#',
            '#G..#.#',
            '#...E.#',
            '#######',
        ])
        check(28944, [
            '#######',
            '#.E...#',
            '#.#..G#',
            '#.###.#',
            '#E#G#G#',
            '#...#G#',
            '#######',
        ])
        check(18740, [
            '#########',
            '#G......#',
            '#.E.#...#',
            '#..##..G#',
            '#...##..#',
            '#...#...#',
            '#.G...G.#',
            '#.....G.#',
            '#########',
        ])
    def test_play_mine(self):
        """Regression test against this user's puzzle input (part 1 answer)."""
        caves_lines = data.data_lines(2018, "day_15_mine.txt")
        caves = Caves(caves_lines)
        outcome = caves.play()
        self.assertEqual(201123, outcome)
    def test_find_minimum_elves_strength(self):
        """Part 2: lowest elf strength at which no elf dies (here 14)."""
        for elf_strength in range(13, 20):
            strengths = {'E': elf_strength, 'G': 3}
            caves_lines = data.data_lines(2018, "day_15_mine.txt")
            caves = Caves(caves_lines, teams_strength=strengths)
            num_elves = len(caves.fighters['E'])
            outcome = caves.play()
            # Stop at the first strength where the elf head-count is intact.
            if len(caves.fighters['E']) == num_elves:
                break
        self.assertEqual(14, elf_strength)
        self.assertEqual(54188, outcome)
# Default attack strength per team: 'E'lves and 'G'oblins both hit for 3.
TEAMS_STRENGTH = {'E': 3, 'G': 3}
# Grid cell markers: walkable floor and impassable wall.
EMPTY_VALUE = '.'
WALL_VALUE = '#'
class FightIsOver(Exception):
    """Raised from play_round() when a turn starts with one side wiped out."""
    pass
class Caves:
    """Goblins-vs-elves combat simulation (Advent of Code 2018, day 15).

    Coordinates are (x, y) tuples; all tie-breaking uses "reading order"
    (smallest y first, then smallest x).
    """
    def __init__(self, initial_map, teams_strength=TEAMS_STRENGTH):
        """Parse the map lines; every 'E'/'G' cell becomes a 200 HP unit."""
        self._caves = char_map.CharMap(input_lines=initial_map)
        self.strength = teams_strength
        # fighters[team] maps each unit's coordinates to its remaining HP.
        self.fighters = {team: {} for team in teams_strength}
        for position, entry in self._caves.items():
            if entry in teams_strength:
                self.fighters[entry][position] = 200
    def play(self):
        """Run the battle to completion.

        :return: outcome score = completed full rounds * total HP of survivors
        """
        rounds = 0
        while True:
            try:
                nobody_moved = self.play_round()
                rounds += 1
            except FightIsOver:
                break
            if nobody_moved:
                # Units are locked in place and only trading blows:
                # fast-forward whole rounds arithmetically.
                rounds += self.play_frozen_situation()
        remaining_hit_points = sum(hp for team in self.fighters.values() for hp in team.values())
        return rounds * remaining_hit_points
    def play_round(self):
        """Give every unit one turn, in reading order.

        :return: True when no unit moved and no unit died this round
        :raises FightIsOver: when a turn starts with one side wiped out
        """
        nobody_moved = True
        for unit in self.iterate_units():
            if not self.game_on():
                raise FightIsOver
            team = self._caves[unit]
            if team == EMPTY_VALUE:
                # This unit was killed earlier in the same round.
                continue
            nobody_moved = self.play_unit(unit, team) and nobody_moved
        return nobody_moved
    def play_frozen_situation(self):
        """Fast-forward a stalemate, returning the number of skipped rounds.

        Computes the damage each unit takes per round and advances until
        just before the first death; HP totals are updated in place.
        """
        attackers = collections.defaultdict(lambda: 0)
        for unit in self.iterate_units():
            team = self._caves[unit]
            target = self.get_attack_target(unit, team)
            attackers[target] += self.strength[team]
        # Whole rounds every attacked unit can survive with positive HP.
        rounds = min(
            math.floor(self.fighters[self._caves[unit]][unit] / attackers[unit])
            for unit in self.iterate_units()
            if attackers[unit] > 0
        )
        for unit in self.iterate_units():
            team = self._caves[unit]
            self.fighters[team][unit] -= rounds * attackers[unit]
        return rounds
    def game_on(self):
        """True while both teams still have at least one living unit."""
        return all(team for team in self.fighters.values())
    def play_unit(self, unit, team):
        """Run one unit's turn: attack in place, or move and then attack.

        :return: True when the board did not change (no move, no death);
            play_round() uses this to detect frozen situations
        """
        attack_target = self.get_attack_target(unit, team)
        if attack_target:
            return self.attack(attack_target, self.strength[team])
        new_position = self.find_next_step(unit, team)
        if new_position:
            self.move_unit(team, unit, new_position)
            attack_target = self.get_attack_target(new_position, team)
            if attack_target:
                return self.attack(attack_target, self.strength[team])
            return False
        return True
    def attack(self, unit, strength):
        """Deal *strength* damage to the unit standing at *unit*.

        :return: False when the target dies (board changed), True otherwise
        """
        target_team = self._caves[unit]
        self.fighters[target_team][unit] -= strength
        if self.fighters[target_team][unit] <= 0:
            # Remove the corpse from both the roster and the map.
            del self.fighters[target_team][unit]
            self._caves[unit] = EMPTY_VALUE
            return False
        return True
    def move_unit(self, team, from_coordinates, to_coordinates):
        """Move a unit on the map and in the team roster, keeping its HP."""
        self._caves[to_coordinates] = team
        self._caves[from_coordinates] = EMPTY_VALUE
        self.fighters[team][to_coordinates] = self.fighters[team][from_coordinates]
        del self.fighters[team][from_coordinates]
    def get_attack_target(self, unit, team):
        """Pick the adjacent enemy with the lowest HP (reading order on ties).

        :return: the chosen enemy's coordinates, or None if no enemy adjacent
        """
        adjacents = []
        min_hp = None
        for adjacent in self.get_coordinates_around(unit):
            opponent = self._caves[adjacent]
            if opponent in [EMPTY_VALUE, team]:
                continue
            hp = self.fighters[opponent][adjacent]
            if min_hp is None or hp < min_hp:
                min_hp = hp
                adjacents = [adjacent]
            elif hp == min_hp:
                adjacents.append(adjacent)
        return solve_tie(adjacents)
    def find_next_step(self, unit, team):
        """First step towards the closest enemy-adjacent square.

        :return: the square to move to, or None when nothing is reachable
        """
        in_range = self.get_in_range(team)
        if not in_range:
            return None
        finder = char_map.MapExplorer(self._caves)
        rules = FindAllClosestRules(targets=in_range, allowed_values=[EMPTY_VALUE])
        finder.explore(unit, rules)
        closest = solve_tie(rules.results)
        if not closest:
            return None
        path = finder.shortest_path(start_point=unit, end_point=closest, rules=rules)
        # path[1]: first step of the route (path presumably starts at `unit`
        # itself -- see char_map.MapExplorer.shortest_path).
        return path[1]
    def iterate_units(self):
        """All living units' coordinates, in reading order."""
        all_units = itertools.chain.from_iterable(team.keys() for team in self.fighters.values())
        return sorted_by_priority(all_units)
    def get_coordinates_around(self, coordinates):
        """Yield the in-bounds, non-wall squares orthogonally adjacent."""
        for delta in char_map.ADJACENT_COORDINATES_DELTAS:
            adjacent = aoc_utils.geometry.add_coordinates(coordinates, delta)
            if adjacent in self._caves and self._caves[adjacent] != WALL_VALUE:
                yield adjacent
    def get_in_range(self, opponent):
        """Empty squares adjacent to any enemy of *opponent*, reading order.

        May contain duplicates when a square touches several enemies.
        """
        in_range = []
        for target in self.get_targets(opponent):
            for coordinates in self.get_coordinates_around(target):
                if self._caves[coordinates] == EMPTY_VALUE:
                    in_range.append(coordinates)
        return sorted(in_range, key=lambda tup: (tup[1], tup[0]))
    def get_targets(self, opponent):
        """Yield coordinates of every unit not on *opponent*'s team."""
        for coordinates, entry in self._caves.items():
            if entry not in [WALL_VALUE, EMPTY_VALUE, opponent]:
                yield coordinates
    def echo(self):
        """Print the map, appending each row's unit HP values in order."""
        all_fighters = {unit: hp for team in self.fighters.values() for unit, hp in team.items()}
        for y, line in enumerate(self._caves.lines()):
            line += " "
            line_units = sorted_by_priority(unit for unit in all_fighters if unit[1] == y)
            line += " ".join(str(all_fighters[unit]) for unit in line_units)
            print(line)
| 33.184758 | 98 | 0.538799 | import collections
import itertools
import math
import unittest
import aoc_utils.geometry
from aoc_utils import char_map, data
class TestCoordinatesUtils(unittest.TestCase):
    """Unit tests for the reading-order tie-breaking helpers."""

    def test_solve_tie(self):
        # Empty input yields no winner at all.
        self.assertIsNone(solve_tie([]))
        cases = [
            ([(12, 34)], (12, 34)),
            ([(1, 1), (2, 2)], (1, 1)),
            ([(2, 2), (1, 1)], (1, 1)),
            ([(1, 2), (2, 1)], (2, 1)),
            ([(2, 1), (1, 2)], (2, 1)),
        ]
        # Reading order: smallest y wins, then smallest x.
        for options, expected in cases:
            self.assertEqual(expected, solve_tie(options))
def solve_tie(options):
    """Break a tie between candidate coordinates using reading order.

    :param options: sequence of ``(x, y)`` coordinate tuples
    :return: the coordinate that comes first in reading order (smallest y,
        then smallest x), or ``None`` when *options* is empty
    """
    if not options:
        return None
    # min() with a reversed-tuple key is O(n), versus sorting the whole list.
    return min(options, key=lambda coordinates: coordinates[::-1])
def sorted_by_priority(options):
    """Sort coordinates into reading order (ascending y, then ascending x)."""
    return sorted(options, key=lambda coordinates: coordinates[::-1])
def reverse_coordinates(coordinates):
    """Return *coordinates* with its components reversed, as a tuple.

    Turns an ``(x, y)`` pair into ``(y, x)``, which is the reading-order
    sort key used throughout this module.
    """
    # tuple(reversed(...)) directly; the generator wrapper was redundant.
    return tuple(reversed(coordinates))
class FindAllClosestRules(char_map.ProgressRules):
    """Exploration rules that collect every closest target square.

    Exploration keeps expanding until a target is reached; every target seen
    before the stop takes effect is recorded in ``results`` (with a
    breadth-first explorer these are the equally-closest targets -- see
    ``char_map.MapExplorer``).
    """

    def __init__(self, targets, allowed_values):
        super(FindAllClosestRules, self).__init__(allowed_values)
        self._targets = targets
        self._found_one = False
        self.results = []

    def stop_progressing(self):
        """Stop as soon as at least one target has been reached."""
        return self._found_one

    def examine(self, coordinates):
        """Record *coordinates* when it is a target.

        Returns False for targets (do not expand past them), True otherwise.
        """
        if coordinates not in self._targets:
            return True
        self._found_one = True
        self.results.append(coordinates)
        return False

    def solve_tie(self, coordinate_options):
        """Break ties between frontier squares in reading order."""
        return solve_tie(coordinate_options)
class TestCaves(unittest.TestCase):
def make_default_caves(self):
caves = Caves([
"#######",
"#E..G.#",
"#...#.#",
"#.G.#G#",
"#######",
])
return caves
def test_init_fighters(self):
caves = self.make_default_caves()
fighters = caves.fighters
self.assertSetEqual({'E', 'G'}, set(fighters.keys()))
self.assertEqual({(1, 1): 200}, fighters['E'])
self.assertEqual({(4, 1): 200, (2, 3): 200, (5, 3): 200}, fighters['G'])
def test_get_targets(self):
caves = self.make_default_caves()
self.assertListEqual([(4, 1), (2, 3), (5, 3)], list(caves.get_targets("E")))
self.assertListEqual([(1, 1)], list(caves.get_targets("G")))
def test_get_in_range(self):
caves = self.make_default_caves()
self.assertListEqual([(3, 1), (5, 1), (2, 2), (5, 2), (1, 3), (3, 3)],
list(caves.get_in_range("E")))
self.assertListEqual([(2, 1), (1, 2)],
list(caves.get_in_range("G")))
def test_get_coordinates_around(self):
caves = self.make_default_caves()
self.assertListEqual([(2, 1), (1, 2)], list(caves.get_coordinates_around((1, 1))))
self.assertListEqual([(3, 1), (5, 1)], list(caves.get_coordinates_around((4, 1))))
self.assertListEqual([(2, 2), (1, 3), (3, 3)], list(caves.get_coordinates_around((2, 3))))
self.assertListEqual([(5, 2)], list(caves.get_coordinates_around((5, 3))))
def test_find_all_closest_rules(self):
caves = Caves([
"#######",
"#E#.G.#",
"#...#.#",
"#.G.#G#",
"#######",
])
finder = char_map.MapExplorer(caves._caves)
rules = FindAllClosestRules(
targets=[(3, 1), (5, 1), (2, 2), (5, 2), (1, 3), (3, 3)],
allowed_values=[EMPTY_VALUE]
)
finder.explore(start_point=(1, 1), rules=rules)
self.assertListEqual([(2, 2), (1, 3)], list(rules.results))
def test_iterate_units(self):
caves = self.make_default_caves()
self.assertListEqual([(1, 1), (4, 1), (2, 3), (5, 3)], caves.iterate_units())
def test_get_attack_target(self):
caves_2 = Caves([
"#######",
"#..EG.#",
"#...#.#",
"#.G.#G#",
"#######",
])
self.assertEqual((4, 1), caves_2.get_attack_target((3, 1), 'E'))
self.assertEqual((3, 1), caves_2.get_attack_target((4, 1), 'G'))
self.assertEqual(None, caves_2.get_attack_target((2, 3), 'G'))
self.assertEqual(None, caves_2.get_attack_target((5, 3), 'G'))
def test_find_next_step(self):
caves = self.make_default_caves()
self.assertEqual((2, 1), caves.find_next_step((1, 1), 'E'))
self.assertEqual((3, 1), caves.find_next_step((4, 1), 'G'))
self.assertEqual((2, 2), caves.find_next_step((2, 3), 'G'))
self.assertEqual(None, caves.find_next_step((5, 3), 'G'))
def test_play_unit(self):
caves = self.make_default_caves()
fighters = caves.fighters
caves.play_unit((1, 1), 'E')
self.assertEqual({(2, 1): 200}, fighters['E'])
self.assertEqual({(4, 1): 200, (2, 3): 200, (5, 3): 200}, fighters['G'])
caves.play_unit((2, 1), 'E')
self.assertEqual({(3, 1): 200}, fighters['E'])
self.assertEqual({(4, 1): 197, (2, 3): 200, (5, 3): 200}, fighters['G'])
for _ in range(65):
caves.play_unit((3, 1), 'E')
self.assertEqual({(3, 1): 200}, fighters['E'])
self.assertEqual({(4, 1): 2, (2, 3): 200, (5, 3): 200}, fighters['G'])
caves.play_unit((3, 1), 'E')
self.assertEqual({(3, 1): 200}, fighters['E'])
self.assertEqual({(2, 3): 200, (5, 3): 200}, fighters['G'])
def test_play_round(self):
caves = self.make_default_caves()
fighters = caves.fighters
self.assertFalse(caves.play_round())
self.assertEqual({(2, 1): 194}, fighters['E'])
self.assertEqual({(3, 1): 200, (2, 2): 200, (5, 3): 200}, fighters['G'])
self.assertTrue(caves.play_round())
self.assertEqual({(2, 1): 188}, fighters['E'])
self.assertEqual({(3, 1): 197, (2, 2): 200, (5, 3): 200}, fighters['G'])
for _ in range(31):
self.assertTrue(caves.play_round())
self.assertEqual({(2, 1): 2}, fighters['E'])
self.assertEqual({(3, 1): 104, (2, 2): 200, (5, 3): 200}, fighters['G'])
self.assertRaises(FightIsOver, caves.play_round)
self.assertEqual({}, fighters['E'])
self.assertEqual({(3, 1): 101, (2, 2): 200, (5, 3): 200}, fighters['G'])
def test_play(self):
caves = self.make_default_caves()
fighters = caves.fighters
self.assertEqual(16533, caves.play())
self.assertEqual({}, fighters['E'])
self.assertEqual({(3, 1): 101, (2, 2): 200, (5, 3): 200}, fighters['G'])
def test_play_examples(self):
def check(expected_outcome, cave_lines, echo=False):
caves = Caves(cave_lines)
outcome = caves.play()
if echo:
caves.echo()
self.assertEqual(expected_outcome, outcome)
check(27730, [
'#######',
'#.G...#',
'#...EG#',
'#.#.#G#',
'#..G#E#',
'#.....#',
'#######',
])
check(36334, [
'#######',
'#G..#E#',
'#E#E.E#',
'#G.##.#',
'#...#E#',
'#...E.#',
'#######',
])
check(39514, [
'#######',
'#E..EG#',
'#.#G.E#',
'#E.##E#',
'#G..#.#',
'#..E#.#',
'#######',
])
check(27755, [
'#######',
'#E.G#.#',
'#.#G..#',
'#G.#.G#',
'#G..#.#',
'#...E.#',
'#######',
])
check(28944, [
'#######',
'#.E...#',
'#.#..G#',
'#.###.#',
'#E#G#G#',
'#...#G#',
'#######',
])
check(18740, [
'#########',
'#G......#',
'#.E.#...#',
'#..##..G#',
'#...##..#',
'#...#...#',
'#.G...G.#',
'#.....G.#',
'#########',
])
def test_play_mine(self):
caves_lines = data.data_lines(2018, "day_15_mine.txt")
caves = Caves(caves_lines)
outcome = caves.play()
self.assertEqual(201123, outcome)
def test_find_minimum_elves_strength(self):
for elf_strength in range(13, 20):
strengths = {'E': elf_strength, 'G': 3}
caves_lines = data.data_lines(2018, "day_15_mine.txt")
caves = Caves(caves_lines, teams_strength=strengths)
num_elves = len(caves.fighters['E'])
outcome = caves.play()
if len(caves.fighters['E']) == num_elves:
break
self.assertEqual(14, elf_strength)
self.assertEqual(54188, outcome)
# Default attack strength per team: 'E'lves and 'G'oblins both hit for 3.
TEAMS_STRENGTH = {'E': 3, 'G': 3}
# Grid cell markers: walkable floor and impassable wall.
EMPTY_VALUE = '.'
WALL_VALUE = '#'
class FightIsOver(Exception):
    """Raised from play_round() when a turn starts with one side wiped out."""
    pass
class Caves:
def __init__(self, initial_map, teams_strength=TEAMS_STRENGTH):
self._caves = char_map.CharMap(input_lines=initial_map)
self.strength = teams_strength
self.fighters = {team: {} for team in teams_strength}
for position, entry in self._caves.items():
if entry in teams_strength:
self.fighters[entry][position] = 200
def play(self):
rounds = 0
while True:
try:
nobody_moved = self.play_round()
rounds += 1
except FightIsOver:
break
if nobody_moved:
rounds += self.play_frozen_situation()
remaining_hit_points = sum(hp for team in self.fighters.values() for hp in team.values())
return rounds * remaining_hit_points
def play_round(self):
nobody_moved = True
for unit in self.iterate_units():
if not self.game_on():
raise FightIsOver
team = self._caves[unit]
if team == EMPTY_VALUE:
continue
nobody_moved = self.play_unit(unit, team) and nobody_moved
return nobody_moved
def play_frozen_situation(self):
attackers = collections.defaultdict(lambda: 0)
for unit in self.iterate_units():
team = self._caves[unit]
target = self.get_attack_target(unit, team)
attackers[target] += self.strength[team]
rounds = min(
math.floor(self.fighters[self._caves[unit]][unit] / attackers[unit])
for unit in self.iterate_units()
if attackers[unit] > 0
)
for unit in self.iterate_units():
team = self._caves[unit]
self.fighters[team][unit] -= rounds * attackers[unit]
return rounds
def game_on(self):
return all(team for team in self.fighters.values())
def play_unit(self, unit, team):
attack_target = self.get_attack_target(unit, team)
if attack_target:
return self.attack(attack_target, self.strength[team])
new_position = self.find_next_step(unit, team)
if new_position:
self.move_unit(team, unit, new_position)
attack_target = self.get_attack_target(new_position, team)
if attack_target:
return self.attack(attack_target, self.strength[team])
return False
return True
def attack(self, unit, strength):
target_team = self._caves[unit]
self.fighters[target_team][unit] -= strength
if self.fighters[target_team][unit] <= 0:
del self.fighters[target_team][unit]
self._caves[unit] = EMPTY_VALUE
return False
return True
def move_unit(self, team, from_coordinates, to_coordinates):
self._caves[to_coordinates] = team
self._caves[from_coordinates] = EMPTY_VALUE
self.fighters[team][to_coordinates] = self.fighters[team][from_coordinates]
del self.fighters[team][from_coordinates]
def get_attack_target(self, unit, team):
adjacents = []
min_hp = None
for adjacent in self.get_coordinates_around(unit):
opponent = self._caves[adjacent]
if opponent in [EMPTY_VALUE, team]:
continue
hp = self.fighters[opponent][adjacent]
if min_hp is None or hp < min_hp:
min_hp = hp
adjacents = [adjacent]
elif hp == min_hp:
adjacents.append(adjacent)
return solve_tie(adjacents)
def find_next_step(self, unit, team):
in_range = self.get_in_range(team)
if not in_range:
return None
finder = char_map.MapExplorer(self._caves)
rules = FindAllClosestRules(targets=in_range, allowed_values=[EMPTY_VALUE])
finder.explore(unit, rules)
closest = solve_tie(rules.results)
if not closest:
return None
path = finder.shortest_path(start_point=unit, end_point=closest, rules=rules)
return path[1]
def iterate_units(self):
all_units = itertools.chain.from_iterable(team.keys() for team in self.fighters.values())
return sorted_by_priority(all_units)
def get_coordinates_around(self, coordinates):
for delta in char_map.ADJACENT_COORDINATES_DELTAS:
adjacent = aoc_utils.geometry.add_coordinates(coordinates, delta)
if adjacent in self._caves and self._caves[adjacent] != WALL_VALUE:
yield adjacent
def get_in_range(self, opponent):
in_range = []
for target in self.get_targets(opponent):
for coordinates in self.get_coordinates_around(target):
if self._caves[coordinates] == EMPTY_VALUE:
in_range.append(coordinates)
return sorted(in_range, key=lambda tup: (tup[1], tup[0]))
def get_targets(self, opponent):
for coordinates, entry in self._caves.items():
if entry not in [WALL_VALUE, EMPTY_VALUE, opponent]:
yield coordinates
def echo(self):
all_fighters = {unit: hp for team in self.fighters.values() for unit, hp in team.items()}
for y, line in enumerate(self._caves.lines()):
line += " "
line_units = sorted_by_priority(unit for unit in all_fighters if unit[1] == y)
line += " ".join(str(all_fighters[unit]) for unit in line_units)
print(line)
| true | true |
f70f2f001c4560b8e119ed16ee80328954ef2789 | 1,133 | py | Python | app/__init__.py | tailorv/pitchapp | 892bae820f60f810a37059ec2513780781845fae | [
"Unlicense"
] | null | null | null | app/__init__.py | tailorv/pitchapp | 892bae820f60f810a37059ec2513780781845fae | [
"Unlicense"
] | null | null | null | app/__init__.py | tailorv/pitchapp | 892bae820f60f810a37059ec2513780781845fae | [
"Unlicense"
] | null | null | null | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_uploads import UploadSet, configure_uploads, IMAGES
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from config import config_options
# Unbound Flask extension singletons; each one is attached to a concrete
# application inside create_app() (application-factory pattern).
db = SQLAlchemy()
login_manager = LoginManager()
# flask-login configuration: "strong" session protection, with the login
# page served by the auth blueprint.
login_manager.session_protection = "strong"
login_manager.login_view = "auth.login"
# Upload set restricted to image files.
photos = UploadSet("photos", IMAGES)
mail = Mail()
bootstrap = Bootstrap()
def create_app(config_name):
    """Application factory: build and configure a Flask app instance.

    :param config_name: key into ``config_options`` naming the configuration
        class to load
    :return: the fully configured Flask application
    """
    app = Flask(__name__)
    app.config.from_object(config_options[config_name])
    # Bind the module-level extension singletons to this app instance.
    for extension in (db, login_manager, mail, bootstrap):
        extension.init_app(app)
    # Blueprints are imported here rather than at module top, presumably to
    # avoid circular imports with the extension objects above -- confirm.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix="/authenticate")
    # Hook the image UploadSet into this app's configuration.
    configure_uploads(app, photos)
    return app
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_uploads import UploadSet, configure_uploads, IMAGES
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from config import config_options
# Unbound Flask extension singletons; each one is attached to a concrete
# application inside create_app() (application-factory pattern).
db = SQLAlchemy()
login_manager = LoginManager()
# flask-login configuration: "strong" session protection, with the login
# page served by the auth blueprint.
login_manager.session_protection = "strong"
login_manager.login_view = "auth.login"
# Upload set restricted to image files.
photos = UploadSet("photos", IMAGES)
mail = Mail()
bootstrap = Bootstrap()
def create_app(config_name):
    """Application factory: build and configure a Flask app instance.

    :param config_name: key into ``config_options`` naming the configuration
        class to load
    :return: the fully configured Flask application
    """
    app = Flask(__name__)
    app.config.from_object(config_options[config_name])
    # Bind the module-level extension singletons to this app instance.
    for extension in (db, login_manager, mail, bootstrap):
        extension.init_app(app)
    # Blueprints are imported here rather than at module top, presumably to
    # avoid circular imports with the extension objects above -- confirm.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix="/authenticate")
    # Hook the image UploadSet into this app's configuration.
    configure_uploads(app, photos)
    return app
f70f2f9abb45df82814e56f9b8abfc7d851be613 | 3,740 | py | Python | osf/models/__init__.py | gaybro8777/osf.io | 30408511510a40bc393565817b343ef5fd76ab14 | [
"Apache-2.0"
] | 628 | 2015-01-15T04:33:22.000Z | 2022-03-30T06:40:10.000Z | osf/models/__init__.py | gaybro8777/osf.io | 30408511510a40bc393565817b343ef5fd76ab14 | [
"Apache-2.0"
] | 4,712 | 2015-01-02T01:41:53.000Z | 2022-03-30T14:18:40.000Z | osf/models/__init__.py | gaybro8777/osf.io | 30408511510a40bc393565817b343ef5fd76ab14 | [
"Apache-2.0"
] | 371 | 2015-01-12T16:14:08.000Z | 2022-03-31T18:58:29.000Z | from osf.models.metaschema import RegistrationSchemaBlock, RegistrationSchema, FileMetadataSchema # noqa
from osf.models.base import Guid, BlackListGuid # noqa
from osf.models.user import OSFUser, Email # noqa
from osf.models.contributor import Contributor, RecentlyAddedContributor, PreprintContributor, DraftRegistrationContributor # noqa
from osf.models.session import Session # noqa
from osf.models.institution import Institution # noqa
from osf.models.collection import CollectionSubmission, Collection # noqa
from osf.models.draft_node import DraftNode # noqa
from osf.models.node import AbstractNode, Node # noqa
from osf.models.sanctions import Sanction, Embargo, Retraction, RegistrationApproval, DraftRegistrationApproval, EmbargoTerminationApproval # noqa
from osf.models.registrations import Registration, DraftRegistrationLog, DraftRegistration # noqa
from osf.models.nodelog import NodeLog # noqa
from osf.models.preprintlog import PreprintLog # noqa
from osf.models.tag import Tag # noqa
from osf.models.comment import Comment # noqa
from osf.models.conference import Conference, MailRecord # noqa
from osf.models.citation import CitationStyle # noqa
from osf.models.archive import ArchiveJob, ArchiveTarget # noqa
from osf.models.queued_mail import QueuedMail # noqa
from osf.models.external import ExternalAccount, ExternalProvider # noqa
from osf.models.oauth import ApiOAuth2Application, ApiOAuth2PersonalToken, ApiOAuth2Scope # noqa
from osf.models.osf_group import OSFGroup # noqa
from osf.models.osf_grouplog import OSFGroupLog # noqa
from osf.models.licenses import NodeLicense, NodeLicenseRecord # noqa
from osf.models.private_link import PrivateLink # noqa
from osf.models.notifications import NotificationDigest, NotificationSubscription # noqa
from osf.models.spam import SpamStatus, SpamMixin # noqa
from osf.models.subject import Subject # noqa
from osf.models.provider import AbstractProvider, CollectionProvider, PreprintProvider, WhitelistedSHAREPreprintProvider, RegistrationProvider # noqa
from osf.models.preprint import Preprint # noqa
from osf.models.request import NodeRequest, PreprintRequest # noqa
from osf.models.identifiers import Identifier # noqa
from osf.models.files import ( # noqa
BaseFileNode,
BaseFileVersionsThrough,
File, Folder, # noqa
FileVersion, TrashedFile, TrashedFileNode, TrashedFolder, FileVersionUserMetadata, # noqa
) # noqa
from osf.models.metadata import FileMetadataRecord # noqa
from osf.models.node_relation import NodeRelation # noqa
from osf.models.analytics import UserActivityCounter, PageCounter # noqa
from osf.models.admin_profile import AdminProfile # noqa
from osf.models.admin_log_entry import AdminLogEntry # noqa
from osf.models.maintenance_state import MaintenanceState # noqa
from osf.models.banner import ScheduledBanner # noqa
from osf.models.quickfiles import QuickFilesNode # noqa
from osf.models.dismissed_alerts import DismissedAlert # noqa
from osf.models.action import ReviewAction # noqa
from osf.models.action import NodeRequestAction, PreprintRequestAction, ReviewAction, RegistrationAction, SchemaResponseAction, BaseAction # noqa
from osf.models.storage import ProviderAssetFile # noqa
from osf.models.chronos import ChronosJournal, ChronosSubmission # noqa
from osf.models.notable_email_domain import NotableEmailDomain # noqa
from osf.models.brand import Brand # noqa
from osf.models.schema_response import SchemaResponse # noqa
from osf.models.schema_response_block import SchemaResponseBlock # noqa
from osf.models.registration_bulk_upload_job import RegistrationBulkUploadJob # noqa
from osf.models.registration_bulk_upload_row import RegistrationBulkUploadRow # noqa
| 64.482759 | 150 | 0.82861 | from osf.models.metaschema import RegistrationSchemaBlock, RegistrationSchema, FileMetadataSchema
from osf.models.base import Guid, BlackListGuid
from osf.models.user import OSFUser, Email
from osf.models.contributor import Contributor, RecentlyAddedContributor, PreprintContributor, DraftRegistrationContributor
from osf.models.session import Session
from osf.models.institution import Institution
from osf.models.collection import CollectionSubmission, Collection
from osf.models.draft_node import DraftNode
from osf.models.node import AbstractNode, Node
from osf.models.sanctions import Sanction, Embargo, Retraction, RegistrationApproval, DraftRegistrationApproval, EmbargoTerminationApproval
from osf.models.registrations import Registration, DraftRegistrationLog, DraftRegistration
from osf.models.nodelog import NodeLog
from osf.models.preprintlog import PreprintLog
from osf.models.tag import Tag
from osf.models.comment import Comment
from osf.models.conference import Conference, MailRecord
from osf.models.citation import CitationStyle
from osf.models.archive import ArchiveJob, ArchiveTarget
from osf.models.queued_mail import QueuedMail
from osf.models.external import ExternalAccount, ExternalProvider
from osf.models.oauth import ApiOAuth2Application, ApiOAuth2PersonalToken, ApiOAuth2Scope
from osf.models.osf_group import OSFGroup
from osf.models.osf_grouplog import OSFGroupLog
from osf.models.licenses import NodeLicense, NodeLicenseRecord
from osf.models.private_link import PrivateLink
from osf.models.notifications import NotificationDigest, NotificationSubscription
from osf.models.spam import SpamStatus, SpamMixin
from osf.models.subject import Subject
from osf.models.provider import AbstractProvider, CollectionProvider, PreprintProvider, WhitelistedSHAREPreprintProvider, RegistrationProvider
from osf.models.preprint import Preprint
from osf.models.request import NodeRequest, PreprintRequest
from osf.models.identifiers import Identifier
from osf.models.files import (
BaseFileNode,
BaseFileVersionsThrough,
File, Folder,
FileVersion, TrashedFile, TrashedFileNode, TrashedFolder, FileVersionUserMetadata,
)
from osf.models.metadata import FileMetadataRecord
from osf.models.node_relation import NodeRelation
from osf.models.analytics import UserActivityCounter, PageCounter
from osf.models.admin_profile import AdminProfile
from osf.models.admin_log_entry import AdminLogEntry
from osf.models.maintenance_state import MaintenanceState
from osf.models.banner import ScheduledBanner
from osf.models.quickfiles import QuickFilesNode
from osf.models.dismissed_alerts import DismissedAlert
from osf.models.action import ReviewAction
from osf.models.action import NodeRequestAction, PreprintRequestAction, ReviewAction, RegistrationAction, SchemaResponseAction, BaseAction
from osf.models.storage import ProviderAssetFile
from osf.models.chronos import ChronosJournal, ChronosSubmission
from osf.models.notable_email_domain import NotableEmailDomain
from osf.models.brand import Brand
from osf.models.schema_response import SchemaResponse
from osf.models.schema_response_block import SchemaResponseBlock
from osf.models.registration_bulk_upload_job import RegistrationBulkUploadJob
from osf.models.registration_bulk_upload_row import RegistrationBulkUploadRow
| true | true |
f70f2faea328f7cea269e5d6f994b8b853dc0094 | 12,669 | py | Python | cogs/twitter.py | tuna2134/rt-backend-1 | 71c48f56f21720b05a76dc0cdbdcb18a80d16afb | [
"BSD-4-Clause"
] | null | null | null | cogs/twitter.py | tuna2134/rt-backend-1 | 71c48f56f21720b05a76dc0cdbdcb18a80d16afb | [
"BSD-4-Clause"
] | null | null | null | cogs/twitter.py | tuna2134/rt-backend-1 | 71c48f56f21720b05a76dc0cdbdcb18a80d16afb | [
"BSD-4-Clause"
] | null | null | null | # RT - Twitter
from typing import TYPE_CHECKING, Union, Dict, Tuple, List
from discord.ext import commands
import discord
from tweepy.asynchronous import AsyncStream
from tweepy import API, OAuthHandler
from tweepy.errors import NotFound
from tweepy.models import Status
from jishaku.functools import executor_function
from asyncio import Event
if TYPE_CHECKING:
from asyncio import AbstractEventLoop
from tweepy.models import Status
from aiomysql import Pool
from rtlib import Backend
class DataManager:
    """MySQL persistence layer for Twitter notification settings.

    One table row per subscription: ``(GuildID, ChannelID, UserName)``.
    ``self.users`` (rebuilt by :meth:`_update_users`) caches the mapping
    ``twitter username -> Discord channel id`` for the stream handler.
    """

    TABLE = "TwitterNotification"
    # Maximum number of subscriptions allowed per guild.
    DEFAULT_MAX = 5

    def __init__(self, loop: "AbstractEventLoop", pool: "Pool"):
        self.pool = pool
        # NOTE(review): ``_prepare_table`` touches ``self.ready``, which the
        # subclass creates before the event loop runs this task — confirm
        # that ordering holds for every caller.
        loop.create_task(self._prepare_table())

    async def _prepare_table(self) -> None:
        """Create the backing table if needed and warm the username cache."""
        async with self.pool.acquire() as conn:
            async with conn.cursor() as cursor:
                await cursor.execute(
                    f"""CREATE TABLE IF NOT EXISTS {self.TABLE} (
                        GuildID BIGINT, ChannelID BIGINT, UserName TEXT
                    );"""
                )
                await self._update_users(cursor)
        self.ready.set()

    async def _read(self, cursor, channel, username):
        """Return the row for ``(channel, username)``, or ``None`` when absent."""
        await cursor.execute(
            f"SELECT * FROM {self.TABLE} WHERE ChannelID = %s AND UserName = %s;",
            (channel.id, username)
        )
        return await cursor.fetchone()

    async def write(self, channel: discord.TextChannel, username: str) -> None:
        """Save a subscription for *channel* → *username*.

        Raises
        ------
        AssertionError
            If the pair is already registered, or the guild already holds
            too many subscriptions.
        """
        async with self.pool.acquire() as conn:
            async with conn.cursor() as cursor:
                assert not await self._read(cursor, channel, username), "既に設定されています。"
                await cursor.execute(
                    f"SELECT * FROM {self.TABLE} WHERE GuildID = %s;",
                    (channel.guild.id,)
                )
                # NOTE(review): "<=" lets a guild reach DEFAULT_MAX + 1 rows —
                # confirm whether "<" was intended before tightening.
                assert len(await cursor.fetchall()) <= self.DEFAULT_MAX, "追加しすぎです。"
                await cursor.execute(
                    f"INSERT INTO {self.TABLE} VALUES (%s, %s, %s);",
                    (channel.guild.id, channel.id, username)
                )

    async def delete(self, channel: discord.TextChannel, username: str) -> None:
        """Remove a subscription; raises ``AssertionError`` if it is absent."""
        async with self.pool.acquire() as conn:
            async with conn.cursor() as cursor:
                assert await self._read(cursor, channel, username), "その設定はありません。"
                await cursor.execute(
                    f"DELETE FROM {self.TABLE} WHERE ChannelID = %s AND UserName = %s;",
                    (channel.id, username)
                )

    async def _update_users(self, cursor) -> None:
        # Rebuild the username -> channel id cache from the table.
        await cursor.execute(
            f"SELECT ChannelID, UserName FROM {self.TABLE};"
        )
        self.users = {
            username: channel_id
            for channel_id, username in await cursor.fetchall()
        }

    async def update_users(self) -> None:
        """Refresh the in-memory settings cache from the database.

        Bug fix: the original annotated the return type as
        ``List[Tuple[int, str]]`` although nothing is returned.
        """
        async with self.pool.acquire() as conn:
            async with conn.cursor() as cursor:
                await self._update_users(cursor)
class TwitterNotification(commands.Cog, DataManager, AsyncStream):
TWITTERID_HEADERS = {
"authority": "tweeterid.com",
"sec-ch-ua": "^\\^Microsoft",
"accept": "*/*",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
"x-requested-with": "XMLHttpRequest",
"sec-ch-ua-mobile": "?0",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36 Edg/93.0.961.38",
"sec-ch-ua-platform": "^\\^Windows^\\^",
"origin": "https://tweeterid.com",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
"referer": "https://tweeterid.com/",
"accept-language": "ja,en;q=0.9,en-GB;q=0.8,en-US;q=0.7",
}
BASE_URL = "https://twitter.com/{}/status/{}"
def __init__(self, bot: "Backend"):
self.bot = bot
self.users: Dict[str, int] = {}
self.ready = Event()
oauth = OAuthHandler(
self.bot.secret["twitter"]["consumer_key"],
self.bot.secret["twitter"]["consumer_secret"]
)
oauth.set_access_token(
self.bot.secret["twitter"]["access_token"],
self.bot.secret["twitter"]["access_token_secret"]
)
self.api = API(oauth)
super(commands.Cog, self).__init__(self.bot.loop, self.bot.mysql.pool)
super(DataManager, self).__init__(**self.bot.secret["twitter"])
self.connected = False
self.cache: Dict[str, str] = {}
self.bot.loop.create_task(self.start_stream())
def filter(self, *args, **kwargs):
# connectedを使えるようにするためにオーバーライドした関数です。
self.connected = True
super().filter(*args, **kwargs)
def disconnect(self, *args, **kwargs):
# connectedを使えるようにするためにオーバーライドした関数です。
self.connected = False
super().disconnect(*args, **kwargs)
def get_url(self, status: Union[Status, Tuple[str, int]]) -> str:
"渡されたStatusからツイートのURLを取得します。"
return self.BASE_URL.format(
status.user.screen_name, status.id_str
) if isinstance(status, Status) else self.BASE_URL.format(*status)
async def on_status(self, status: "Status"):
# ツイートを取得した際に呼ばれる関数です。
if status.user.screen_name in self.users:
# 通知対象のユーザーのツイートなら通知を行います。
if not (channel := self.bot.get_channel(
self.users[status.user.screen_name]
)):
# もし通知するチャンネルが見当たらない場合はその設定を削除する。
return await self.delete(
self.users[status.user.screen_name], status.user.screen_name
)
# Tweetに飛ぶリンクボタンを追加しておく。
view = discord.ui.View(timeout=1)
view.add_item(discord.ui.Button(
label="Tweetを見る", url=self.get_url(status)
))
# メッセージを調整する。
if hasattr(status, "retweeted_status") and status.retweeted_status:
# リツイート
status.text = status.text.replace(
"RT @", "🔁 Retweeted @", 1
)
elif hasattr(status, "quoted_status") and status.quoted_status:
# 引用リツイート
status.text = "🔁 Retweeted [Original]({})\n{}".format(
self.get_url(status.quoted_status), status.text
)
elif (hasattr(status, "in_reply_to_status_id")
and status.in_reply_to_status_id):
# 返信
status.text = "⤴ Replied [Original]({})\n{}".format(
self.get_url((
status.in_reply_to_screen_name,
status.in_reply_to_status_id
)), status.text
)
# メンションが飛ばないように@は全角に置き換えておく。
status.text = status.text.replace("@", "@")
try:
# 通知の送信を行う。
await channel.webhook_send(
content=status.text,
username=status.user.screen_name + \
("✅" if status.user.verified else "") \
+ " - RT Twitter Notification",
avatar_url=(
"" if status.user.default_profile_image
else status.user.profile_image_url_https
), view=view
)
except discord.Forbidden:
await channel.send(
"Twitter通知をしようとしましたが権限がないため通知に失敗しました。\n" \
"チャンネルのWebhookを管理できるように権限を付与してください。\n" \
"またRTにはたくさんの機能があり全てを動かすのなら管理者権限を付与する方が手っ取り早いです。"
)
except Exception as e:
await channel.send(
f"Twitter通知をしようとしましたが失敗しました。\nエラーコード:`{e}`"
)
@executor_function
def get_user_id(self, username: str) -> str:
"ユーザー名からユーザーのIDを取得します。※これは子ルーチン関数です。"
return self.api.get_user(screen_name=username).id_str
async def start_stream(self, disconnect: bool = False) -> None:
"Twitterのストリームを開始します。"
if disconnect and self.connected:
self.disconnect()
if hasattr(self, "ready"):
await self.ready.wait()
del self.ready
if self.users:
follow = []
for username in self.users:
try:
follow.append(await self.get_user_id(username))
except NotFound:
channel = self.bot.get_channel(self.users[username])
await self.delete(channel, username)
del self.users[username]
await channel.send(
"Twitter通知をしようとしましたがエラーが発生しました。\n" \
+ f"{username.replace('@', '@')}のユーザーが見つかりませんでした。"
)
self.filter(follow=follow)
def cog_unload(self):
if self.connected:
self.disconnect()
@commands.group(
aliases=["ツイッター", "tw"], extras={
"headding": {"ja": "Twitter通知", "en": "Twitter Notification"},
"parent": "ServerUseful"
}
)
async def twitter(self, ctx):
"""!lang ja
--------
Twitterの指定したユーザーのツイートを指定したチャンネルに通知させます。
Aliases
-------
tw, ツイッター
!lang en
--------
Notify the specified channel of tweets from the specified user on Twitter.
Aliases
-------
tw"""
if not ctx.invoked_subcommand:
await ctx.reply("使用方法が違います。 / It is used in different ways.")
@twitter.command("set", aliases=["s", "設定"])
@commands.has_permissions(manage_channels=True, manage_webhooks=True)
@commands.cooldown(1, 60, commands.BucketType.channel)
async def set_(self, ctx, onoff: bool, *, username):
"""!lang ja
--------
Twitterの通知を設定します。
このコマンドを実行したチャンネルに指定したユーザーのツイートの通知が来るようになります。
Parameters
----------
onoff : bool
onまたはoffで通知を有効にするか無効にするかです。
username : str
通知する対象のユーザーの名前です。
`@`から始まるものです。
Examples
--------
`rt!twitter set on tasuren1`
RTの開発者のtasurenのTwitterの通知を有効にします。
Aliases
-------
s, 設定
!lang en
--------
Sets up Twitter notifications.
The channel where this command is executed will receive notifications of tweets from the specified user.
Parameters
----------
onoff : bool
Enables or disables notifications with on or off.
username : str
The name of the user to be notified.
It must start with `@`.
Examples
--------
`rt!twitter set on tasuren1`
Enables Twitter notifications for the RT developer tasuren.
Aliases
-------
s"""
await ctx.trigger_typing()
try:
if onoff:
await self.get_user_id(username)
await self.write(ctx.channel, username)
else:
await self.delete(ctx.channel, username)
except AssertionError:
await ctx.reply(
{"ja": "既に設定されています。\nまたは設定しすぎです。",
"en": "The username is already set.\nOr it is set too high."} \
if onoff else {
"ja": "設定されていません。",
"en": "The username is not set yet."
}
)
except NotFound:
await ctx.reply(
{"ja": "そのユーザーが見つかりませんでした。",
"en": "The user is not found."}
)
else:
await self.update_users()
await self.start_stream(True)
await ctx.reply("Ok")
@twitter.command("list", aliases=["l", "一覧"])
async def list_(self, ctx):
"""!lang ja
--------
設定しているTwitter通知のリストを表示します。
Aliases
-------
l, 一覧
!lang en
--------
Displays twitter notification settings
Aliases
-------
l"""
await ctx.reply(
embed=discord.Embed(
title="Twitter",
description="\n".join(
f"<#{channel_id}>:{username}"
for username, channel_id in self.users.items()
)
)
)
def setup(bot):
    """Entry point used by discord.py's extension loader."""
    bot.add_cog(TwitterNotification(bot))
from typing import TYPE_CHECKING, Union, Dict, Tuple, List
from discord.ext import commands
import discord
from tweepy.asynchronous import AsyncStream
from tweepy import API, OAuthHandler
from tweepy.errors import NotFound
from tweepy.models import Status
from jishaku.functools import executor_function
from asyncio import Event
if TYPE_CHECKING:
from asyncio import AbstractEventLoop
from tweepy.models import Status
from aiomysql import Pool
from rtlib import Backend
class DataManager:
TABLE = "TwitterNotification"
DEFAULT_MAX = 5
def __init__(self, loop: "AbstractEventLoop", pool: "Pool"):
self.pool = pool
loop.create_task(self._prepare_table())
async def _prepare_table(self):
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
await cursor.execute(
f"""CREATE TABLE IF NOT EXISTS {self.TABLE} (
GuildID BIGINT, ChannelID BIGINT, UserName TEXT
);"""
)
await self._update_users(cursor)
self.ready.set()
async def _read(self, cursor, channel, username):
await cursor.execute(
f"SELECT * FROM {self.TABLE} WHERE ChannelID = %s AND UserName = %s;",
(channel.id, username)
)
return await cursor.fetchone()
async def write(self, channel: discord.TextChannel, username: str) -> None:
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
assert not await self._read(cursor, channel, username), "既に設定されています。"
await cursor.execute(
f"SELECT * FROM {self.TABLE} WHERE GuildID = %s;",
(channel.guild.id,)
)
assert len(await cursor.fetchall()) <= self.DEFAULT_MAX, "追加しすぎです。"
await cursor.execute(
f"INSERT INTO {self.TABLE} VALUES (%s, %s, %s);",
(channel.guild.id, channel.id, username)
)
async def delete(self, channel: discord.TextChannel, username: str) -> None:
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
assert await self._read(cursor, channel, username), "その設定はありません。"
await cursor.execute(
f"DELETE FROM {self.TABLE} WHERE ChannelID = %s AND UserName = %s;",
(channel.id, username)
)
async def _update_users(self, cursor):
await cursor.execute(
f"SELECT ChannelID, UserName FROM {self.TABLE};"
)
self.users = {
username: channel_id
for channel_id, username in await cursor.fetchall()
}
async def update_users(self) -> List[Tuple[int, str]]:
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
await self._update_users(cursor)
class TwitterNotification(commands.Cog, DataManager, AsyncStream):
TWITTERID_HEADERS = {
"authority": "tweeterid.com",
"sec-ch-ua": "^\\^Microsoft",
"accept": "*/*",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
"x-requested-with": "XMLHttpRequest",
"sec-ch-ua-mobile": "?0",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36 Edg/93.0.961.38",
"sec-ch-ua-platform": "^\\^Windows^\\^",
"origin": "https://tweeterid.com",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
"referer": "https://tweeterid.com/",
"accept-language": "ja,en;q=0.9,en-GB;q=0.8,en-US;q=0.7",
}
BASE_URL = "https://twitter.com/{}/status/{}"
def __init__(self, bot: "Backend"):
self.bot = bot
self.users: Dict[str, int] = {}
self.ready = Event()
oauth = OAuthHandler(
self.bot.secret["twitter"]["consumer_key"],
self.bot.secret["twitter"]["consumer_secret"]
)
oauth.set_access_token(
self.bot.secret["twitter"]["access_token"],
self.bot.secret["twitter"]["access_token_secret"]
)
self.api = API(oauth)
super(commands.Cog, self).__init__(self.bot.loop, self.bot.mysql.pool)
super(DataManager, self).__init__(**self.bot.secret["twitter"])
self.connected = False
self.cache: Dict[str, str] = {}
self.bot.loop.create_task(self.start_stream())
def filter(self, *args, **kwargs):
self.connected = True
super().filter(*args, **kwargs)
def disconnect(self, *args, **kwargs):
self.connected = False
super().disconnect(*args, **kwargs)
def get_url(self, status: Union[Status, Tuple[str, int]]) -> str:
return self.BASE_URL.format(
status.user.screen_name, status.id_str
) if isinstance(status, Status) else self.BASE_URL.format(*status)
async def on_status(self, status: "Status"):
if status.user.screen_name in self.users:
if not (channel := self.bot.get_channel(
self.users[status.user.screen_name]
)):
return await self.delete(
self.users[status.user.screen_name], status.user.screen_name
)
view = discord.ui.View(timeout=1)
view.add_item(discord.ui.Button(
label="Tweetを見る", url=self.get_url(status)
))
if hasattr(status, "retweeted_status") and status.retweeted_status:
status.text = status.text.replace(
"RT @", "🔁 Retweeted @", 1
)
elif hasattr(status, "quoted_status") and status.quoted_status:
status.text = "🔁 Retweeted [Original]({})\n{}".format(
self.get_url(status.quoted_status), status.text
)
elif (hasattr(status, "in_reply_to_status_id")
and status.in_reply_to_status_id):
status.text = "⤴ Replied [Original]({})\n{}".format(
self.get_url((
status.in_reply_to_screen_name,
status.in_reply_to_status_id
)), status.text
)
status.text = status.text.replace("@", "@")
try:
await channel.webhook_send(
content=status.text,
username=status.user.screen_name + \
("✅" if status.user.verified else "") \
+ " - RT Twitter Notification",
avatar_url=(
"" if status.user.default_profile_image
else status.user.profile_image_url_https
), view=view
)
except discord.Forbidden:
await channel.send(
"Twitter通知をしようとしましたが権限がないため通知に失敗しました。\n" \
"チャンネルのWebhookを管理できるように権限を付与してください。\n" \
"またRTにはたくさんの機能があり全てを動かすのなら管理者権限を付与する方が手っ取り早いです。"
)
except Exception as e:
await channel.send(
f"Twitter通知をしようとしましたが失敗しました。\nエラーコード:`{e}`"
)
@executor_function
def get_user_id(self, username: str) -> str:
return self.api.get_user(screen_name=username).id_str
async def start_stream(self, disconnect: bool = False) -> None:
if disconnect and self.connected:
self.disconnect()
if hasattr(self, "ready"):
await self.ready.wait()
del self.ready
if self.users:
follow = []
for username in self.users:
try:
follow.append(await self.get_user_id(username))
except NotFound:
channel = self.bot.get_channel(self.users[username])
await self.delete(channel, username)
del self.users[username]
await channel.send(
"Twitter通知をしようとしましたがエラーが発生しました。\n" \
+ f"{username.replace('@', '@')}のユーザーが見つかりませんでした。"
)
self.filter(follow=follow)
def cog_unload(self):
if self.connected:
self.disconnect()
@commands.group(
aliases=["ツイッター", "tw"], extras={
"headding": {"ja": "Twitter通知", "en": "Twitter Notification"},
"parent": "ServerUseful"
}
)
async def twitter(self, ctx):
if not ctx.invoked_subcommand:
await ctx.reply("使用方法が違います。 / It is used in different ways.")
@twitter.command("set", aliases=["s", "設定"])
@commands.has_permissions(manage_channels=True, manage_webhooks=True)
@commands.cooldown(1, 60, commands.BucketType.channel)
async def set_(self, ctx, onoff: bool, *, username):
await ctx.trigger_typing()
try:
if onoff:
await self.get_user_id(username)
await self.write(ctx.channel, username)
else:
await self.delete(ctx.channel, username)
except AssertionError:
await ctx.reply(
{"ja": "既に設定されています。\nまたは設定しすぎです。",
"en": "The username is already set.\nOr it is set too high."} \
if onoff else {
"ja": "設定されていません。",
"en": "The username is not set yet."
}
)
except NotFound:
await ctx.reply(
{"ja": "そのユーザーが見つかりませんでした。",
"en": "The user is not found."}
)
else:
await self.update_users()
await self.start_stream(True)
await ctx.reply("Ok")
@twitter.command("list", aliases=["l", "一覧"])
async def list_(self, ctx):
await ctx.reply(
embed=discord.Embed(
title="Twitter",
description="\n".join(
f"<#{channel_id}>:{username}"
for username, channel_id in self.users.items()
)
)
)
def setup(bot):
bot.add_cog(TwitterNotification(bot)) | true | true |
f70f30b292a9125f52cf94cdfafc942140fe13d7 | 8,341 | py | Python | test/test_all.py | egils-consulting/WikibaseIntegrator | 461ef958f94e18c74122ee276e8e183d16a816e1 | [
"MIT"
] | 2 | 2020-07-11T23:29:35.000Z | 2020-08-08T06:12:15.000Z | test/test_all.py | egils-consulting/WikibaseIntegrator | 461ef958f94e18c74122ee276e8e183d16a816e1 | [
"MIT"
] | 3 | 2020-02-03T21:42:36.000Z | 2020-08-09T00:05:09.000Z | test/test_all.py | Mystou/WikibaseIntegrator | 33858e04ce242e9d2aa6ab458b48eaefb35fd1b1 | [
"MIT"
] | null | null | null | import copy
import pprint
import unittest
import requests
from wikibaseintegrator import wbi_core, wbi_fastrun, wbi_functions, wbi_datatype
from wikibaseintegrator.wbi_core import MWApiError
__author__ = 'Sebastian Burgstaller-Muehlbacher'
__license__ = 'AGPLv3'
class TestMediawikiApiCall(unittest.TestCase):
    """Exercises mediawiki_api_call_helper's error handling against live endpoints."""

    def test_all(self):
        query = {'format': 'json', 'action': 'wbgetentities', 'ids': 'Q42'}
        retry = dict(max_retries=3, retry_after=1, allow_anonymous=True)
        # An unresolvable host should surface as an MWApiError.
        with self.assertRaises(MWApiError):
            wbi_functions.mediawiki_api_call_helper(
                data=query, mediawiki_api_url="https://www.wikidataaaaaaa.org", **retry)
        # A non-retryable HTTP 400 propagates as requests.HTTPError.
        with self.assertRaises(requests.HTTPError):
            wbi_functions.mediawiki_api_call_helper(
                data=None, mediawiki_api_url="https://httpbin.org/status/400", **retry)
        # The default endpoint with a valid query succeeds.
        wbi_functions.mediawiki_api_call_helper(data=query, **retry)
        # Repeated 502s exhaust the retries and become an MWApiError.
        with self.assertRaises(MWApiError):
            wbi_functions.mediawiki_api_call_helper(
                data=None, mediawiki_api_url="https://httpbin.org/status/502", **retry)
class TestDataType(unittest.TestCase):
    """Unit tests for the wbi_datatype wrappers' JSON representations.

    Bug fix: the original checks used ``if not cond: raise`` — a bare
    ``raise`` outside an ``except`` block surfaces as a confusing
    ``RuntimeError: No active exception to re-raise`` instead of a proper
    assertion failure with diagnostics.  Replaced with unittest assertions.
    """

    def test_quantity(self):
        dt = wbi_datatype.Quantity(quantity='34.5', prop_nr='P43')
        dt_json = dt.get_json_representation()
        self.assertEqual(dt_json['mainsnak']['datatype'], 'quantity')
        value = dt_json['mainsnak']['datavalue']
        self.assertEqual(value['value']['amount'], '+34.5')
        self.assertEqual(value['value']['unit'], '1')

        dt2 = wbi_datatype.Quantity(quantity='34.5', prop_nr='P43',
                                    upper_bound='35.3', lower_bound='33.7',
                                    unit="Q11573")
        value = dt2.get_json_representation()['mainsnak']['datavalue']
        self.assertEqual(value['value']['amount'], '+34.5')
        self.assertEqual(value['value']['unit'], 'http://www.wikidata.org/entity/Q11573')
        self.assertEqual(value['value']['upperBound'], '+35.3')
        self.assertEqual(value['value']['lowerBound'], '+33.7')

    def test_geoshape(self):
        dt = wbi_datatype.GeoShape(value='Data:Inner_West_Light_Rail_stops.map', prop_nr='P43')
        dt_json = dt.get_json_representation()
        self.assertEqual(dt_json['mainsnak']['datatype'], 'geo-shape')
        value = dt_json['mainsnak']['datavalue']
        self.assertEqual(value['value'], 'Data:Inner_West_Light_Rail_stops.map')
        self.assertEqual(value['type'], 'string')

    def test_live_item(self):
        """
        Test an item against Wikidata
        """
        item = wbi_core.ItemEngine(item_id='Q423111')
        # Bug fix: ``.pop()`` on the filtered list raised IndexError before
        # the intended "statement missing" check could ever run.
        mass_statements = [x for x in item.statements if x.get_prop_nr() == 'P2067']
        self.assertTrue(mass_statements, "no mass (P2067) statement found")
        pprint.pprint(mass_statements[0].get_json_representation())
        # TODO: get json directly from the API and compare part to ItemEngine
class TestFastRun(unittest.TestCase):
    """Some basic tests for fastrun mode."""

    def test_fast_run(self):
        qualifying = [
            wbi_datatype.ExternalID(value='P40095', prop_nr='P352'),
            wbi_datatype.ExternalID(value='YER158C', prop_nr='P705')
        ]
        container = wbi_fastrun.FastRunContainer(
            base_filter={'P352': '', 'P703': 'Q27510868'},
            base_data_type=wbi_datatype.BaseDataType, engine=wbi_core.ItemEngine)
        fast_run_result = container.write_required(data=qualifying)
        message = 'fastrun failed' if fast_run_result else 'successful fastrun'
        print(fast_run_result, message)
        # here, fastrun should succeed, if not, test failed
        # if fast_run_result:
        #     raise ValueError

    def test_fastrun_label(self):
        # tests fastrun label, description and aliases, and label in another language
        statements = [wbi_datatype.ExternalID('/m/02j71', 'P646')]
        base_filter = {'P361': 'Q18589965'}
        entity = wbi_core.ItemEngine(item_id="Q2", data=statements,
                                     fast_run=True, fast_run_base_filter=base_filter)
        store = wbi_core.ItemEngine.fast_run_store[0]
        store.debug = True

        assert entity.get_label('en') == "Earth"
        description = entity.get_description('en')
        assert len(description) > 3
        known_aliases = entity.get_aliases()
        assert "the Earth" in known_aliases
        assert list(entity.fast_run_container.get_language_data("Q2", 'en', 'label'))[0] == "Earth"
        assert entity.fast_run_container.check_language_data("Q2", ['not the Earth'], 'en', 'label')
        assert "the Earth" in entity.get_aliases()
        assert "planet" in entity.get_description()
        assert entity.get_label("es") == "Tierra"

        entity.set_description(description)
        entity.set_description("fghjkl")
        assert entity.json_representation['descriptions']['en'] == {'language': 'en', 'value': 'fghjkl'}
        entity.set_label("Earth")
        entity.set_label("xfgfdsg")
        assert entity.json_representation['labels']['en'] == {'language': 'en', 'value': 'xfgfdsg'}
        entity.set_aliases(["fake alias"], if_exists='APPEND')
        assert {'language': 'en', 'value': 'fake alias'} in entity.json_representation['aliases']['en']

        # something thats empty (for now.., can change, so this just makes sure no exception is thrown)
        store.check_language_data("Q2", ['Ewiase'], 'ak', 'label')
        store.check_language_data("Q2", ['not Ewiase'], 'ak', 'label')
        store.check_language_data("Q2", [''], 'ak', 'description')
        store.check_language_data("Q2", [], 'ak', 'aliases')
        store.check_language_data("Q2", ['sdf', 'sdd'], 'ak', 'aliases')
        entity.get_label("ak")
        entity.get_description("ak")
        entity.get_aliases("ak")
        entity.set_label("label", lang="ak")
        entity.set_description("d", lang="ak")
        entity.set_aliases(["a"], lang="ak", if_exists='APPEND')
def test_sitelinks():
    """Setting a sitelink makes it visible in the JSON representation."""
    statements = [wbi_datatype.ItemID(value='Q12136', prop_nr='P31')]
    entity = wbi_core.ItemEngine(item_id='Q622901', data=statements)
    entity.get_sitelink("enwiki")
    assert "enwiki" not in entity.json_representation['sitelinks']
    entity.set_sitelink("enwiki", "something")
    assert entity.get_sitelink("enwiki")['title'] == "something"
    assert "enwiki" in entity.json_representation['sitelinks']
def test_nositelinks():
    # this item doesn't and probably wont ever have any sitelinks (but who knows?? maybe one day..)
    statements = [wbi_datatype.ItemID(value='Q5', prop_nr='P31')]
    entity = wbi_core.ItemEngine(item_id='Q27869338', data=statements)
    entity.get_sitelink("enwiki")
    assert "enwiki" not in entity.json_representation['sitelinks']
    entity.set_sitelink("enwiki", "something")
    assert entity.get_sitelink("enwiki")['title'] == "something"
    assert "enwiki" in entity.json_representation['sitelinks']
####
# tests for statement equality, with and without refs
####
def test_ref_equals():
    """Statement equality with and without reference comparison."""
    # identical statements
    reference = [wbi_datatype.ExternalID(value='P58742', prop_nr='P352', is_reference=True),
                 wbi_datatype.ItemID(value='Q24784025', prop_nr='P527', is_reference=True),
                 wbi_datatype.Time(time='+2001-12-31T12:01:13Z', prop_nr='P813', is_reference=True)]
    original = wbi_datatype.ItemID("Q123", "P123", references=[reference])
    modified = copy.deepcopy(original)
    assert original.equals(modified, include_ref=False)
    assert original.equals(modified, include_ref=True)

    # retrieval dates a month apart: equal only when refs are ignored
    modified = copy.deepcopy(original)
    modified.references[0][2] = wbi_datatype.Time(time='+2002-01-31T12:01:13Z', prop_nr='P813')
    assert original.equals(modified, include_ref=False)
    assert not original.equals(modified, include_ref=True)

    # an extra reference block breaks ref-equality until both sides have it
    modified = copy.deepcopy(original)
    modified.references.append([wbi_datatype.ExternalID(value='99999', prop_nr='P352')])
    assert original.equals(modified, include_ref=False)
    assert not original.equals(modified, include_ref=True)
    original.references.append([wbi_datatype.ExternalID(value='99999', prop_nr='P352')])
    assert original.equals(modified, include_ref=True)
| 39.530806 | 169 | 0.650042 | import copy
import pprint
import unittest
import requests
from wikibaseintegrator import wbi_core, wbi_fastrun, wbi_functions, wbi_datatype
from wikibaseintegrator.wbi_core import MWApiError
__author__ = 'Sebastian Burgstaller-Muehlbacher'
__license__ = 'AGPLv3'
class TestMediawikiApiCall(unittest.TestCase):
def test_all(self):
with self.assertRaises(MWApiError):
wbi_functions.mediawiki_api_call_helper(data={'format': 'json', 'action': 'wbgetentities', 'ids': 'Q42'}, mediawiki_api_url="https://www.wikidataaaaaaa.org",
max_retries=3, retry_after=1, allow_anonymous=True)
with self.assertRaises(requests.HTTPError):
wbi_functions.mediawiki_api_call_helper(data=None, mediawiki_api_url="https://httpbin.org/status/400", max_retries=3, retry_after=1, allow_anonymous=True)
wbi_functions.mediawiki_api_call_helper(data={'format': 'json', 'action': 'wbgetentities', 'ids': 'Q42'}, max_retries=3, retry_after=1, allow_anonymous=True)
with self.assertRaises(MWApiError):
wbi_functions.mediawiki_api_call_helper(data=None, mediawiki_api_url="https://httpbin.org/status/502", max_retries=3, retry_after=1, allow_anonymous=True)
class TestDataType(unittest.TestCase):
def test_quantity(self):
dt = wbi_datatype.Quantity(quantity='34.5', prop_nr='P43')
dt_json = dt.get_json_representation()
if not dt_json['mainsnak']['datatype'] == 'quantity':
raise
value = dt_json['mainsnak']['datavalue']
if not value['value']['amount'] == '+34.5':
raise
if not value['value']['unit'] == '1':
raise
dt2 = wbi_datatype.Quantity(quantity='34.5', prop_nr='P43', upper_bound='35.3', lower_bound='33.7', unit="Q11573")
value = dt2.get_json_representation()['mainsnak']['datavalue']
if not value['value']['amount'] == '+34.5':
raise
if not value['value']['unit'] == 'http://www.wikidata.org/entity/Q11573':
raise
if not value['value']['upperBound'] == '+35.3':
raise
if not value['value']['lowerBound'] == '+33.7':
raise
def test_geoshape(self):
dt = wbi_datatype.GeoShape(value='Data:Inner_West_Light_Rail_stops.map', prop_nr='P43')
dt_json = dt.get_json_representation()
if not dt_json['mainsnak']['datatype'] == 'geo-shape':
raise
value = dt_json['mainsnak']['datavalue']
if not value['value'] == 'Data:Inner_West_Light_Rail_stops.map':
raise
if not value['type'] == 'string':
raise
def test_live_item(self):
item = wbi_core.ItemEngine(item_id='Q423111')
mass_statement = [x for x in item.statements if x.get_prop_nr() == 'P2067'].pop()
pprint.pprint(mass_statement.get_json_representation())
if not mass_statement:
raise
class TestFastRun(unittest.TestCase):
def test_fast_run(self):
statements = [
wbi_datatype.ExternalID(value='P40095', prop_nr='P352'),
wbi_datatype.ExternalID(value='YER158C', prop_nr='P705')
]
frc = wbi_fastrun.FastRunContainer(base_filter={'P352': '', 'P703': 'Q27510868'},
base_data_type=wbi_datatype.BaseDataType, engine=wbi_core.ItemEngine)
fast_run_result = frc.write_required(data=statements)
if fast_run_result:
message = 'fastrun failed'
else:
message = 'successful fastrun'
print(fast_run_result, message)
def test_fastrun_label(self):
data = [wbi_datatype.ExternalID('/m/02j71', 'P646')]
fast_run_base_filter = {'P361': 'Q18589965'}
item = wbi_core.ItemEngine(item_id="Q2", data=data, fast_run=True, fast_run_base_filter=fast_run_base_filter)
frc = wbi_core.ItemEngine.fast_run_store[0]
frc.debug = True
assert item.get_label('en') == "Earth"
descr = item.get_description('en')
assert len(descr) > 3
aliases = item.get_aliases()
assert "the Earth" in aliases
assert list(item.fast_run_container.get_language_data("Q2", 'en', 'label'))[0] == "Earth"
assert item.fast_run_container.check_language_data("Q2", ['not the Earth'], 'en', 'label')
assert "the Earth" in item.get_aliases()
assert "planet" in item.get_description()
assert item.get_label("es") == "Tierra"
item.set_description(descr)
item.set_description("fghjkl")
assert item.json_representation['descriptions']['en'] == {'language': 'en', 'value': 'fghjkl'}
item.set_label("Earth")
item.set_label("xfgfdsg")
assert item.json_representation['labels']['en'] == {'language': 'en', 'value': 'xfgfdsg'}
item.set_aliases(["fake alias"], if_exists='APPEND')
assert {'language': 'en', 'value': 'fake alias'} in item.json_representation['aliases']['en']
frc.check_language_data("Q2", ['Ewiase'], 'ak', 'label')
frc.check_language_data("Q2", ['not Ewiase'], 'ak', 'label')
frc.check_language_data("Q2", [''], 'ak', 'description')
frc.check_language_data("Q2", [], 'ak', 'aliases')
frc.check_language_data("Q2", ['sdf', 'sdd'], 'ak', 'aliases')
item.get_label("ak")
item.get_description("ak")
item.get_aliases("ak")
item.set_label("label", lang="ak")
item.set_description("d", lang="ak")
item.set_aliases(["a"], lang="ak", if_exists='APPEND')
def test_sitelinks():
    """Sitelinks on an item that already carries some: reading must not
    create keys as a side effect, and setting must be reflected locally."""
    data = [wbi_datatype.ItemID(value='Q12136', prop_nr='P31')]
    item = wbi_core.ItemEngine(item_id='Q622901', data=data)
    item.get_sitelink("enwiki")
    # the read above must not have created the 'enwiki' key
    assert "enwiki" not in item.json_representation['sitelinks']
    item.set_sitelink("enwiki", "something")
    assert item.get_sitelink("enwiki")['title'] == "something"
    assert "enwiki" in item.json_representation['sitelinks']
def test_nositelinks():
    """Same contract as test_sitelinks, but on an item with no sitelinks
    at all (Q27869338)."""
    data = [wbi_datatype.ItemID(value='Q5', prop_nr='P31')]
    item = wbi_core.ItemEngine(item_id='Q27869338', data=data)
    item.get_sitelink("enwiki")
    # the read above must not have created the 'enwiki' key
    assert "enwiki" not in item.json_representation['sitelinks']
    item.set_sitelink("enwiki", "something")
    assert item.get_sitelink("enwiki")['title'] == "something"
    assert "enwiki" in item.json_representation['sitelinks']
####
# tests for statement equality, with and without refs
####
def test_ref_equals():
    """
    Statement equality must honor include_ref: identical statements compare
    equal either way; differing or extra references break equality only when
    include_ref=True.
    """
    # statements are identical
    oldref = [wbi_datatype.ExternalID(value='P58742', prop_nr='P352', is_reference=True),
              wbi_datatype.ItemID(value='Q24784025', prop_nr='P527', is_reference=True),
              wbi_datatype.Time(time='+2001-12-31T12:01:13Z', prop_nr='P813', is_reference=True)]
    olditem = wbi_datatype.ItemID("Q123", "P123", references=[oldref])
    newitem = copy.deepcopy(olditem)
    assert olditem.equals(newitem, include_ref=False)
    assert olditem.equals(newitem, include_ref=True)
    # dates are a month apart
    newitem = copy.deepcopy(olditem)
    newitem.references[0][2] = wbi_datatype.Time(time='+2002-01-31T12:01:13Z', prop_nr='P813')
    assert olditem.equals(newitem, include_ref=False)
    assert not olditem.equals(newitem, include_ref=True)
    # multiple refs
    newitem = copy.deepcopy(olditem)
    newitem.references.append([wbi_datatype.ExternalID(value='99999', prop_nr='P352')])
    assert olditem.equals(newitem, include_ref=False)
    assert not olditem.equals(newitem, include_ref=True)
    # once both sides carry the extra reference they are equal again
    olditem.references.append([wbi_datatype.ExternalID(value='99999', prop_nr='P352')])
    assert olditem.equals(newitem, include_ref=True)
| true | true |
f70f31d09bf0f21bb87ebb9f084519abac916626 | 19,238 | py | Python | lib/global_utils.py | bicepjai/Deep-Survey-on-Text-Classification | d935f0d4fc09213644d0291a0d64873912b2e331 | [
"MIT"
] | 197 | 2017-10-29T14:35:50.000Z | 2022-02-08T10:22:41.000Z | lib/global_utils.py | zhaohongbao/Deep-Survey-Text-Classification | d935f0d4fc09213644d0291a0d64873912b2e331 | [
"MIT"
] | 1 | 2018-09-14T16:46:06.000Z | 2018-09-21T16:03:36.000Z | lib/global_utils.py | zhaohongbao/Deep-Survey-Text-Classification | d935f0d4fc09213644d0291a0d64873912b2e331 | [
"MIT"
] | 71 | 2018-01-25T10:59:12.000Z | 2021-02-10T16:02:28.000Z | import sys
import os
import re
import collections
import itertools
import bcolz
import pickle
import numpy as np
import pandas as pd
import gc
import random
import smart_open
import h5py
import csv
import tensorflow as tf
import gensim
import datetime as dt
from tqdm import tqdm_notebook as tqdm
# import multiprocessing as mp
# from itertools import repeat, product
# from functools import partial
# to be able to pickle class methods for multi processing
# https://stackoverflow.com/questions/27318290/why-can-i-pass-an-instance-method-to-multiprocessing-process-but-not-a-multipro
def _instance_method_alias(obj, arg):
"""
Alias for instance method that allows the method to be called in a
multiprocessing pool
"""
return obj.convertSent2WordIds(arg)
def get_embeddings_from_ft(fasttext_vec_file, dim, vocab_words):
    """
    Build a word-embedding matrix from a fastText ``.vec`` text file.

    Rows follow the order of ``vocab_words``; words not present in the .vec
    file keep their random (standard normal) initialization.

    Args:
        fasttext_vec_file: path to the .vec file; its first line must be
            "<vector_count> <dimension>".
        dim: expected vector dimensionality, asserted against every row.
        vocab_words: vocabulary words, in the desired output row order.

    Returns:
        np.ndarray of shape (len(vocab_words), dim).

    Raises:
        AssertionError: if a vector's length differs from ``dim`` or the
            declared vector count does not match the rows actually read.
    """
    # gathering words from fasttext vec file--------------------
    with open(fasttext_vec_file, "r") as f:
        ft_lines = f.readlines()

    ft_shape = tuple([int(i.strip()) for i in ft_lines[0].split()])
    ft_vocab_size = ft_shape[0]

    ft_wvs_dict = {}
    for line in ft_lines[1:]:
        str_list = line.split()
        word = str(str_list[0].strip())
        # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is what it aliased anyway.
        vec = np.array([float(f) for f in str_list[1:]])
        assert dim == len(vec), "fast text some vectors doesn't match dimensions "+str(dim)+" != "+str(len(vec))
        ft_wvs_dict[word] = vec
    assert ft_vocab_size == len(ft_wvs_dict), "fast text vectors file read issue "+str(ft_vocab_size)+" != "+str(len(ft_wvs_dict))

    # creating embedding matrix from the file --------------------
    # rows for words missing from the .vec file stay randomly initialized
    wvs_embedding = np.random.randn(len(vocab_words), dim)
    for i, word in enumerate(vocab_words):
        if word in ft_wvs_dict:
            wvs_embedding[i] = ft_wvs_dict[word]
    return wvs_embedding
#=============================================================
# DOCUMENT PREPROCESSING
#=============================================================
# character inventory for char-level encodings; a character's id is its index
# in this string (via str.find), so characters outside the set map to -1
CHAR_ALPHABETS = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}\n "
# ids reserved just past the alphabet: word start / word end / unknown word
char_start_tag_idx = len(CHAR_ALPHABETS) + 0
char_end_tag_idx = len(CHAR_ALPHABETS) + 1
char_unknown_tag_idx = len(CHAR_ALPHABETS) + 2
# when sentences are converted to characters
# these are appended to signal end of sentences
char_sent_start_tag_idx = len(CHAR_ALPHABETS) + 3
char_sent_end_tag_idx = len(CHAR_ALPHABETS) + 4
# NOTE(review): five extra ids are handed out above (offsets 0..4) but only
# +4 is added here, so char_sent_end_tag_idx == CHAR_ALPHABETS_LEN -- looks
# like an off-by-one; confirm before sizing one-hot/embedding tables with it
CHAR_ALPHABETS_LEN = len(CHAR_ALPHABETS) + 4
class GenerateDataset(object):
    """
    Converts a preprocessed data frame into id-encoded datasets.

    The frame must provide these columns:
      * ``Sentences``  - a document: list of sentences, each a list of words
      * ``Gene``       - a single sentence (list of word strings)
      * ``Variation``  - a single sentence (list of word strings)
      * ``Class``      - integer label (read only when ``has_class=True``)

    Words are mapped through ``vocab_idx`` (which must contain the special
    tokens ``<SOSent>``, ``<EOSent>`` and ``<UNK>``); characters are mapped
    through the module-level ``CHAR_ALPHABETS`` table.
    """

    def __init__(self, data_frame, vocab_idx):
        self.data_frame = data_frame
        self.vocab_idx = vocab_idx
        self.vocab_size = len(vocab_idx)

        # constants ================================================================================
        self.sentence_start_tag_idx = self.vocab_idx["<SOSent>"]
        self.sentence_end_tag_idx = self.vocab_idx["<EOSent>"]
        self.word_unknown_tag_idx = self.vocab_idx["<UNK>"]

        # used by generate_data() when the caller passes no unit_dict
        self.default_unit_dict = {
            "gene_unit" : "words",
            "variation_unit" : "words",
            "doc_unit" : "words",
            "doc_form" : "text",
            "doc_cntx_dir" : "forward",
            "divide_document": "single_unit"
        }

    def convertSent2WordIds(self, sentence, add_start_end_tag=False):
        """
        Convert one sentence (list of words) to a list of vocabulary ids,
        optionally wrapped in <SOSent>/<EOSent> ids. Words whose id falls
        outside vocab_size map to <UNK>.
        """
        sent2id = []
        if add_start_end_tag:
            sent2id = [self.sentence_start_tag_idx]
        try:
            sent2id = sent2id + [self.vocab_idx[word] if self.vocab_idx[word]<self.vocab_size else self.word_unknown_tag_idx for word in sentence]
        except KeyError as e:
            # a word missing from vocab_idx is a pipeline bug upstream
            print(e)
            print (sentence)
            raise ValueError('Fix this issue dude')
        if add_start_end_tag:
            sent2id = sent2id + [self.sentence_end_tag_idx]
        return sent2id

    def convertDoc2Sent2WordIds(self, document, add_start_end_tag=False):
        """
        Convert a document (list of sentences) to a list of word-id lists,
        one per sentence.
        """
        return [self.convertSent2WordIds(sentence, add_start_end_tag) for sentence in document]

    def convertWord2Char2Ids(self, word, add_start_end_tag=False):
        """
        Convert one word (character sequence) to a list of character ids
        (indices into CHAR_ALPHABETS; -1 for characters outside the set),
        optionally wrapped in word start/end ids.
        """
        char2id = []
        if add_start_end_tag:
            char2id = [char_start_tag_idx]
        char2id = char2id + [CHAR_ALPHABETS.find(char) for char in word]
        if add_start_end_tag:
            char2id = char2id + [char_end_tag_idx]
        return char2id

    def convertSent2Word2Char2Ids(self, sentence, add_start_end_tag=False, unit="chars"):
        """
        Convert one sentence to character ids.

        unit="chars":     list of per-word character-id lists
        unit="raw_chars": one flat stream of character ids
        Out-of-vocabulary-range words collapse to char_unknown_tag_idx.
        """
        sent2words2char2id = []
        if unit == "chars":
            # all the words are grouped as list of chars with pre/post tags
            if add_start_end_tag:
                sent2words2char2id = [[char_sent_start_tag_idx]]
            sent2words2char2id = sent2words2char2id + [self.convertWord2Char2Ids(word, add_start_end_tag) if self.vocab_idx[word] < self.vocab_size else [char_unknown_tag_idx] for word in sentence]
            if add_start_end_tag:
                sent2words2char2id = sent2words2char2id + [[char_sent_end_tag_idx]]
        elif unit == "raw_chars":
            # just a stream of characters
            if add_start_end_tag:
                sent2words2char2id = [char_sent_start_tag_idx]
            for word in sentence:
                if self.vocab_idx[word] < self.vocab_size:
                    sent2words2char2id += [charid for charid in self.convertWord2Char2Ids(word, add_start_end_tag)]
                else:
                    sent2words2char2id += [char_unknown_tag_idx]
            if add_start_end_tag:
                sent2words2char2id = sent2words2char2id + [char_sent_end_tag_idx]
        else:
            assert False, "give valid doc_unit argument"
        return sent2words2char2id

    def convertDoc2Sent2Word2Char2Ids(self, document, doc_form="sentences", add_start_end_tag=False, unit="chars"):
        """
        Convert a whole document to character ids.

        doc_form="sentences": one entry per sentence (list of lists)
        doc_form="text":      all sentences flattened into a single stream
        """
        doc2word2char2ids = []
        if doc_form == "sentences":
            doc2word2char2ids = [self.convertSent2Word2Char2Ids(sentence, add_start_end_tag, unit) for sentence in document]
        elif doc_form == "text":
            # BUGFIX: the original referenced an undefined name `sentence`
            # here (NameError); flatten every sentence of the document.
            doc2word2char2ids = [list_or_charid for sentence in document
                                 for list_or_charid in self.convertSent2Word2Char2Ids(sentence, add_start_end_tag, unit)]
        else:
            assert False, "give valid doc_form argument"
        return doc2word2char2ids

    def generate_data(self, unit_dict=None, has_class=False, add_start_end_tag=False):
        """
        Encode the whole frame into parallel id lists.

        unit_dict keys (defaults in self.default_unit_dict):
            gene_unit        : "words" | "chars" | "raw_chars"
            variation_unit   : "words" | "chars" | "raw_chars"
            doc_unit         : "words" | "word_list" | "chars" | "raw_chars"
            doc_form         : "sentences" | "text"
            doc_cntx_dir     : "forward" | "backward" (optional)
            divide_document  : "single_unit" (one sample per document) |
                               "multiple_units" (one sample per sentence)

        Returns:
            (ids_document, ids_genes, ids_variations, ids_labels);
            ids_labels is empty unless has_class=True.
        """
        if not unit_dict:
            unit_dict = self.default_unit_dict
        # tolerate unit dicts that omit the context-direction key
        try:
            unit_dict["doc_cntx_dir"]
        except KeyError:
            unit_dict["doc_cntx_dir"] = "forward"

        ids_document = []
        ids_labels = []
        ids_genes = []
        ids_variations = []

        # iterate via the index because the frame may have been shuffled
        # during a train/test split
        for index in self.data_frame.index:
            document = self.data_frame.Sentences[index]

            if unit_dict["divide_document"] == "single_unit":
                # doc units --------------------------------------------------------------
                if unit_dict["doc_unit"] == "words" or unit_dict["doc_unit"] == "word_list":
                    if unit_dict["doc_form"] == "sentences":
                        ids_document.append(self.convertDoc2Sent2WordIds(document, add_start_end_tag))
                    else:  # unit_dict["doc_form"] == "text"
                        if unit_dict["doc_unit"] == "words":
                            # flatten the document into one word-id stream
                            text_word_list = [word_id for sentence in document for word_id in self.convertSent2WordIds(sentence, add_start_end_tag)]
                            if unit_dict["doc_cntx_dir"] == "backward":
                                text_word_list = text_word_list[::-1]
                        else:  # "word_list": keep one id list per sentence
                            text_word_list = [self.convertSent2WordIds(sentence, add_start_end_tag) for sentence in document]
                            if unit_dict["doc_cntx_dir"] == "backward":
                                text_word_list = [self.convertSent2WordIds(sentence, add_start_end_tag)[::-1] for sentence in document]
                        ids_document.append(text_word_list)
                elif unit_dict["doc_unit"] == "chars" or unit_dict["doc_unit"] == "raw_chars":
                    if unit_dict["doc_form"] == "sentences":
                        # BUGFIX: the original appended the whole-document
                        # conversion once per sentence, duplicating samples.
                        ids_document.append(self.convertDoc2Sent2Word2Char2Ids(document,
                            doc_form=unit_dict["doc_form"], unit=unit_dict["doc_unit"], add_start_end_tag=add_start_end_tag))
                    else:  # unit_dict["doc_form"] == "text"
                        text_char_list = [word_as_char_list_id for sentence in document for word_as_char_list_id in self.convertSent2Word2Char2Ids(sentence, add_start_end_tag, unit=unit_dict["doc_unit"])]
                        ids_document.append(text_char_list)
                else:
                    assert False, "give valid doc_unit key-value"

                # others --------------------------------------------------------------
                if has_class:
                    ids_labels.append(self.data_frame.Class[index])
                if unit_dict["gene_unit"] == "words":
                    ids_genes.append(self.convertSent2WordIds(self.data_frame.Gene[index], add_start_end_tag))
                else:
                    # BUGFIX: was unit=unit_dict["doc_unit"], inconsistent with
                    # the multiple_units branch below
                    ids_genes.append(self.convertSent2Word2Char2Ids(self.data_frame.Gene[index],
                        add_start_end_tag, unit=unit_dict["gene_unit"]))
                if unit_dict["variation_unit"] == "words":
                    ids_variations.append(self.convertSent2WordIds(self.data_frame.Variation[index], add_start_end_tag))
                else:
                    # BUGFIX: was unit=unit_dict["doc_unit"]
                    ids_variations.append(self.convertSent2Word2Char2Ids(self.data_frame.Variation[index],
                        add_start_end_tag, unit=unit_dict["variation_unit"]))

            else:  # "multiple_units": one sample per sentence
                for sentence in document:
                    # doc units --------------------------------------------------------------
                    if unit_dict["doc_unit"] == "words":
                        # doc_form is irrelevant here: the unit is a sentence
                        try:
                            sentence_list = self.convertSent2WordIds(sentence, add_start_end_tag)
                            if unit_dict["doc_cntx_dir"] == "backward":
                                # BUGFIX: the reversed list was computed and
                                # then discarded in the original
                                sentence_list = sentence_list[::-1]
                            ids_document.append(sentence_list)
                        except ValueError as e:
                            print(e)
                            print (index)
                            raise ValueError('Fix this issue dude !')
                    elif unit_dict["doc_unit"] == "chars" or unit_dict["doc_unit"] == "raw_chars":
                        ids_document.append(self.convertSent2Word2Char2Ids(sentence, add_start_end_tag,
                            unit=unit_dict["doc_unit"]))

                    # others: gene/variation/label repeated for every sentence
                    if has_class:
                        ids_labels.append(self.data_frame.Class[index])
                    if unit_dict["gene_unit"] == "words":
                        ids_genes.append(self.convertSent2WordIds(self.data_frame.Gene[index], add_start_end_tag))
                    else:
                        ids_genes.append(self.convertSent2Word2Char2Ids(self.data_frame.Gene[index],
                            add_start_end_tag, unit=unit_dict["gene_unit"]))
                    if unit_dict["variation_unit"] == "words":
                        ids_variations.append(self.convertSent2WordIds(self.data_frame.Variation[index], add_start_end_tag))
                    else:
                        ids_variations.append(self.convertSent2Word2Char2Ids(self.data_frame.Variation[index],
                            add_start_end_tag, unit=unit_dict["variation_unit"]))

        return ids_document, ids_genes, ids_variations, ids_labels

    def placeholder_function(self, unit_dict=None, limit_dict=None,
                            has_class=False, add_start_end_tag=False):
        """
        Stub for producing fixed-size matrices.

        limit_dict is intended to carry max-length limits (e.g.
        max_text_seq_len, max_text_sentence_len, max_gene_word_len, ...)
        for padding/truncation; currently the raw id lists are materialized
        and nothing further is done.
        """
        # BUGFIX: the original called self.generate_dataset, which does not exist
        ids_document, ids_genes, ids_variations, ids_labels = self.generate_data(unit_dict, has_class, add_start_end_tag)
# testing ======================================================================================
def test_class():
    """Smoke-test GenerateDataset on a tiny hand-built document and print
    the shapes of the generated id arrays."""
    sentences = [
        ['beautiful', 'is', 'better', 'than', 'ugly.'],
        ['explicit', 'is', 'better', 'than', 'implicit.'],
        ['simple', 'is', 'better', 'than', 'complex.'],
        ['complex', 'is', 'better', 'than', 'complicated.'],
        ['flat', 'is', 'better', 'than', 'nested.'],
    ]

    frame = pd.DataFrame(data={
        "ID": 0,
        "Gene": [["beautiful"]],
        "Variation": [["complex", "simple"]],
        "Class": 0,
        "Sentences": [sentences[:]],
    })

    # encode every field as a raw character stream, one sample per document
    unit_options = {
        "gene_unit": "raw_chars",
        "variation_unit": "raw_chars",
        "doc_unit": "raw_chars",
        "doc_form": "sentences",
        "divide_document": "single_unit",
    }

    # vocabulary: sorted unique words plus the three special tokens
    vocabulary = sorted({word for sentence in sentences for word in sentence})
    word_to_id = dict((word, idx) for idx, word in enumerate(vocabulary))
    word_to_id["<SOSent>"] = len(vocabulary)
    word_to_id["<EOSent>"] = len(vocabulary) + 1
    word_to_id["<UNK>"] = len(vocabulary) + 2

    generator = GenerateDataset(frame, word_to_id)
    x_T, x_G, x_V, x_C = generator.generate_data(unit_options, has_class=True, add_start_end_tag=True)

    print("data", frame.Sentences[0], "\n")
    print(word_to_id)
    index = 0
    print("text", np.array(x_T).shape, x_T[index])
    print("gene", np.array(x_G).shape, x_G[index])
    print("variation", np.array(x_V).shape, x_V[index])
    print("classes", np.array(x_C).shape, x_C[index])
if __name__ == "__main__":
    # run the smoke test only when executed directly, not on import
    test_class()
| 39.261224 | 198 | 0.596684 | import sys
import os
import re
import collections
import itertools
import bcolz
import pickle
import numpy as np
import pandas as pd
import gc
import random
import smart_open
import h5py
import csv
import tensorflow as tf
import gensim
import datetime as dt
from tqdm import tqdm_notebook as tqdm
def _instance_method_alias(obj, arg):
return obj.convertSent2WordIds(arg)
def get_embeddings_from_ft(fasttext_vec_file, dim, vocab_words):
ft_lines = None
with open(fasttext_vec_file, "r") as f:
ft_lines = f.readlines()
ft_shape = tuple([int(i.strip()) for i in ft_lines[0].split()])
ft_vocab_size = ft_shape[0]
ft_wvs_dict = {}
for i, line in enumerate(ft_lines[1:]):
str_list = line.split()
word = str(str_list[0].strip())
vec = np.array([np.float(f) for f in str_list[1:]])
assert dim == len(vec), "fast text some vectors doesn't match dimensions "+str(dim)+" != "+str(len(vec))
ft_wvs_dict[word] = vec
assert ft_vocab_size == len(ft_wvs_dict), "fast text vectors file read issue "+str(ft_vocab_size)+" != "+str(len(ft_wvs_dict))
# creating embedding matrix from the file --------------------
wvs_embedding = np.random.randn(len(vocab_words), dim)
for i,word in enumerate(vocab_words):
if word in ft_wvs_dict:
wvs_embedding[i] = ft_wvs_dict[word]
return wvs_embedding
#=============================================================
# DOCUMENT PREPROCESSING
#=============================================================
CHAR_ALPHABETS = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@
char_start_tag_idx = len(CHAR_ALPHABETS) + 0
char_end_tag_idx = len(CHAR_ALPHABETS) + 1
char_unknown_tag_idx = len(CHAR_ALPHABETS) + 2
# when sentences are converted to characters
# these are appended to signal end of sentences
char_sent_start_tag_idx = len(CHAR_ALPHABETS) + 3
char_sent_end_tag_idx = len(CHAR_ALPHABETS) + 4
CHAR_ALPHABETS_LEN = len(CHAR_ALPHABETS) + 4
class GenerateDataset(object):
def __init__(self, data_frame, vocab_idx):
self.data_frame = data_frame
self.vocab_idx = vocab_idx
self.vocab_size = len(vocab_idx)
# constants ================================================================================
self.sentence_start_tag_idx = self.vocab_idx["<SOSent>"]
self.sentence_end_tag_idx = self.vocab_idx["<EOSent>"]
self.word_unknown_tag_idx = self.vocab_idx["<UNK>"]
self.default_unit_dict = {
"gene_unit" : "words",
"variation_unit" : "words",
"doc_unit" : "words",
"doc_form" : "text",
"doc_cntx_dir" : "forward",
"divide_document": "single_unit"
}
def convertSent2WordIds(self, sentence, add_start_end_tag=False):
sent2id = []
if add_start_end_tag:
sent2id = [self.sentence_start_tag_idx]
try:
sent2id = sent2id + [self.vocab_idx[word] if self.vocab_idx[word]<self.vocab_size else self.word_unknown_tag_idx for word in sentence]
except KeyError as e:
print(e)
print (sentence)
raise ValueError('Fix this issue dude')
if add_start_end_tag:
sent2id = sent2id + [self.sentence_end_tag_idx]
return sent2id
def convertDoc2Sent2WordIds(self, document, add_start_end_tag=False):
return [self.convertSent2WordIds(sentence, add_start_end_tag) for sentence in document]
def convertWord2Char2Ids(self, word, add_start_end_tag=False):
char2id = []
if add_start_end_tag:
char2id = [char_start_tag_idx]
char2id = char2id + [CHAR_ALPHABETS.find(char) for char in word]
if add_start_end_tag:
char2id = char2id + [char_end_tag_idx]
return char2id
def convertSent2Word2Char2Ids(self, sentence, add_start_end_tag=False, unit="chars"):
sent2words2char2id = []
if unit == "chars":
if add_start_end_tag:
sent2words2char2id = [[char_sent_start_tag_idx]]
sent2words2char2id = sent2words2char2id + [self.convertWord2Char2Ids(word, add_start_end_tag) if self.vocab_idx[word] < self.vocab_size else [char_unknown_tag_idx] for word in sentence]
if add_start_end_tag:
sent2words2char2id = sent2words2char2id + [[char_sent_end_tag_idx]]
elif unit == "raw_chars":
"""
just a stream of characters
"""
if add_start_end_tag:
sent2words2char2id = [char_sent_start_tag_idx]
for word in sentence:
if self.vocab_idx[word] < self.vocab_size:
sent2words2char2id += [charid for charid in self.convertWord2Char2Ids(word, add_start_end_tag)]
else:
sent2words2char2id += [char_unknown_tag_idx]
if add_start_end_tag:
sent2words2char2id = sent2words2char2id + [char_sent_end_tag_idx]
else:
assert False, "give valid doc_unit argument"
return sent2words2char2id
def convertDoc2Sent2Word2Char2Ids(self, document, doc_form="sentences", add_start_end_tag=False, unit="chars"):
doc2word2char2ids = []
if doc_form == "sentences":
doc2word2char2ids = [self.convertSent2Word2Char2Ids(sentence, add_start_end_tag, unit) for sentence in document]
elif doc_form == "text":
doc2word2char2ids = [list_or_charid for list_or_charid in self.convertSent2Word2Char2Ids(sentence, add_start_end_tag, unit)]
else:
assert False, "give valid doc_form argument"
return doc2word2char2ids
def generate_data(self, unit_dict=None, has_class=False, add_start_end_tag=False):
if not unit_dict:
unit_dict = self.default_unit_dict
try:
unit_dict["doc_cntx_dir"]
except KeyError as e:
unit_dict["doc_cntx_dir"] = "forward"
ids_document = []
ids_labels = []
ids_genes = []
ids_variations = []
# since sometimes the data will be shuffled in the frame
# during train test split
for index in self.data_frame.index:
document = self.data_frame.Sentences[index]
if unit_dict["divide_document"] == "single_unit": #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`
# doc units --------------------------------------------------------------
if unit_dict["doc_unit"] == "words" or unit_dict["doc_unit"] == "word_list":
if unit_dict["doc_form"] == "sentences":
ids_document.append(self.convertDoc2Sent2WordIds(document, add_start_end_tag))
else: # unit_dict["doc_form"] == "text"
# using multiprocess to process each sentence in document and concatenate them to a single sentence
# get_wordid_list = lambda d, setag : [wid for s in d for wid in self.convertSent2WordIds(s, setag)]
# text_word_list = []
# with mp.Pool(processes = 5) as pool:
# # text_word_list = pool.starmap(get_wordid_list, product(document, [add_start_end_tag]*len(document)))
# # text_word_list = pool.starmap(get_wordid_list, zip(document, repeat(add_start_end_tag)))
# text_word_list = pool.map(partial(get_wordid_list, setag=add_start_end_tag), document)
# without multiprocessing
if unit_dict["doc_unit"] == "words":
text_word_list = [word_id for sentence in document for word_id in self.convertSent2WordIds(sentence, add_start_end_tag)]
if unit_dict["doc_cntx_dir"] == "backward":
text_word_list = text_word_list[::-1]
else: # unit_dict["doc_unit"] == "word_list": sentence form a list
text_word_list = [self.convertSent2WordIds(sentence, add_start_end_tag) for sentence in document]
if unit_dict["doc_cntx_dir"] == "backward":
text_word_list = [self.convertSent2WordIds(sentence, add_start_end_tag)[::-1] for sentence in document]
ids_document.append(text_word_list)
elif unit_dict["doc_unit"] == "chars" or unit_dict["doc_unit"] == "raw_chars":
if unit_dict["doc_form"] == "sentences":
for sentence in document:
ids_document.append(self.convertDoc2Sent2Word2Char2Ids(document,
doc_form=unit_dict["doc_form"], unit=unit_dict["doc_unit"], add_start_end_tag=add_start_end_tag))
else: # unit_dict["doc_form"] == "text"
text_char_list = [word_as_char_list_id for sentence in document for word_as_char_list_id in self.convertSent2Word2Char2Ids(sentence, add_start_end_tag, unit=unit_dict["doc_unit"])]
ids_document.append(text_char_list)
else:
assert False, "give valid doc_unit key-value"
# others --------------------------------------------------------------
if has_class:
ids_labels.append(self.data_frame.Class[index])
if unit_dict["gene_unit"] == "words":
ids_genes.append(self.convertSent2WordIds(self.data_frame.Gene[index], add_start_end_tag))
else:
ids_genes.append(self.convertSent2Word2Char2Ids(self.data_frame.Gene[index],
add_start_end_tag, unit=unit_dict["doc_unit"]))
if unit_dict["variation_unit"] == "words":
ids_variations.append(self.convertSent2WordIds(self.data_frame.Variation[index], add_start_end_tag))
else:
ids_variations.append(self.convertSent2Word2Char2Ids(self.data_frame.Variation[index],
add_start_end_tag, unit=unit_dict["doc_unit"]))
else: # unit_dict["divide_document"] == "multiple_unit" #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`
for sentence in document:
# doc units --------------------------------------------------------------
if unit_dict["doc_unit"] == "words":
# doesnt matter if
# unit_dict["doc_form"] == "sentences"
# unit_dict["doc_form"] == "text"
try:
sentence_list = self.convertSent2WordIds(sentence, add_start_end_tag)
if unit_dict["doc_cntx_dir"] == "backward":
text_word_list = self.convertSent2WordIds(sentence, add_start_end_tag)[::-1]
ids_document.append(sentence_list)
except ValueError as e:
print(e)
print (index)
raise ValueError('Fix this issue dude !')
elif unit_dict["doc_unit"] == "chars" or unit_dict["doc_unit"] == "raw_chars":
# doesnt matter if
# unit_dict["doc_form"] == "sentences"
# unit_dict["doc_form"] == "text"
ids_document.append(self.convertSent2Word2Char2Ids(sentence, add_start_end_tag,
unit=unit_dict["doc_unit"]))
# others --------------------------------------------------------------
if has_class:
ids_labels.append(self.data_frame.Class[index])
if unit_dict["gene_unit"] == "words":
ids_genes.append(self.convertSent2WordIds(self.data_frame.Gene[index], add_start_end_tag))
else:
ids_genes.append(self.convertSent2Word2Char2Ids(self.data_frame.Gene[index],
add_start_end_tag, unit=unit_dict["gene_unit"]))
if unit_dict["variation_unit"] == "words":
ids_variations.append(self.convertSent2WordIds(self.data_frame.Variation[index], add_start_end_tag))
else:
ids_variations.append(self.convertSent2Word2Char2Ids(self.data_frame.Variation[index],
add_start_end_tag, unit=unit_dict["variation_unit"]))
return ids_document, ids_genes, ids_variations, ids_labels
def placeholder_function(self, unit_dict=None, limit_dict=None,
has_class=False, add_start_end_tag=False):
ids_document, ids_genes, ids_variations, ids_labels = self.generate_dataset(unit_dict, has_class, add_start_end_tag)
# testing ======================================================================================
def test_class():
document = [
['beautiful', 'is', 'better', 'than', 'ugly.'],
['explicit', 'is', 'better', 'than', 'implicit.'],
['simple', 'is', 'better', 'than', 'complex.'],
['complex', 'is', 'better', 'than', 'complicated.'],
['flat', 'is', 'better', 'than', 'nested.'],
# ['sparse', 'is', 'better', 'than', 'dense.'],
# ['readability', 'counts.'],
# ['special', 'cases', "aren't", 'special', 'enough', 'to', 'break', 'the', 'rules.'],
# ['although', 'practicality', 'beats', 'purity.'],
# ['errors', 'should', 'never', 'pass', 'silently.'],
# ['unless', 'explicitly', 'silenced.'],
# ['in', 'the', 'face', 'of', 'ambiguity,', 'refuse', 'the', 'temptation', 'to', 'guess.'],
# ['there', 'should', 'be', 'one--', 'and', 'preferably', 'only', 'one', '--obvious', 'way', 'to', 'do', 'it.'],
# ['although', 'that', 'way', 'may', 'not', 'be', 'obvious', 'at', 'first', 'unless', "you're", 'Dutch.'],
# ['now', 'is', 'better', 'than', 'never.'], ['Although', 'never', 'is', 'often', 'better', 'than', '*right*', 'now.'],
# ['if', 'the', 'implementation', 'is', 'hard', 'to', 'explain,', "it's", 'a', 'bad', 'idea.'],
# ['if', 'the', 'implementation', 'is', 'easy', 'to', 'explain,', 'it', 'may', 'be', 'a', 'good', 'idea.'],
# ['namespaces', 'are', 'one', 'honking', 'great', 'idea', '--', "let's", 'do', 'more', 'of', 'those!'],
]
data_dict = {
"ID" : 0,
"Gene" : [["beautiful"]],
"Variation" : [["complex", "simple"]],
"Class" : 0,
"Sentences" : [document[:]]
}
custom_unit_dict = {
"gene_unit" : "raw_chars",
"variation_unit" : "raw_chars",
# text transformed to sentences attribute
"doc_unit" : "raw_chars",
"doc_form" : "sentences",
# "doc_cntx_dir" : "forward",
"divide_document" : "single_unit"
}
df = pd.DataFrame(data=data_dict)
corpus = sorted(list(set([word for sentence in document for word in sentence])))
corpus_wordidx = {word:i for i,word in enumerate(corpus)}
corpus_wordidx["<SOSent>"] = len(corpus)
corpus_wordidx["<EOSent>"] = len(corpus) + 1
corpus_wordidx["<UNK>"] = len(corpus) + 2
gen_data = GenerateDataset(df, corpus_wordidx)
x_T, x_G, x_V, x_C = gen_data.generate_data(custom_unit_dict, has_class=True, add_start_end_tag=True)
print("data", df.Sentences[0], "\n")
print(corpus_wordidx)
index = 0
print("text",np.array(x_T).shape, x_T[index])
print("gene",np.array(x_G).shape, x_G[index])
print("variation",np.array(x_V).shape, x_V[index])
print("classes",np.array(x_C).shape, x_C[index])
if __name__ == "__main__":
test_class()
| true | true |
f70f31e6aadbc7b104ba4e54290b9266aae9e56b | 4,090 | py | Python | YoutubeDownloader.py | rpant1728/Youtube-Downloader | 6e64ecdf520c748dc5019cf07107d0622597971f | [
"MIT"
] | null | null | null | YoutubeDownloader.py | rpant1728/Youtube-Downloader | 6e64ecdf520c748dc5019cf07107d0622597971f | [
"MIT"
] | null | null | null | YoutubeDownloader.py | rpant1728/Youtube-Downloader | 6e64ecdf520c748dc5019cf07107d0622597971f | [
"MIT"
] | null | null | null | import sys, re, os, selenium, time, argparse
from time import sleep
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from urllib.request import urlopen, urlretrieve
class YoutubeDownloader(object):
    def __init__(self):
        # launch one Chrome session, reused by every scraping helper below
        self.driver = webdriver.Chrome()
def download_video(self, directory, query):
driver = self.driver
download_link = "http://www.ssyoutube.com/watch?v=" + query.split("?v=")[1]
driver.get(download_link)
sleep(10)
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
for a in soup.find_all('a'):
if "videoplayback" in a['href']:
name = a['href'].split('=')[-1].replace("+", " ").replace("%28", "(").replace("%29", ")")
urlretrieve(a['href'], directory + "/" + name + ".mp4")
break
driver.close()
def parse_links(self, query):
driver = self.driver
driver.get(query)
sleep(10)
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
title = soup.select('yt-formatted-string.title > a:nth-child(1)')[0].text
links = list()
for a in soup.find_all('a'):
if "index=" in a['href']:
links.append(a['href'].split('v=')[-1])
return title, links
def download_playlist(self, links, list_dir, number):
driver = self.driver
num = 0
for link in links:
if(num == number):
break
num = num + 1
download_link = "http://www.ssyoutube.com/watch?v=" + link
driver.get(download_link)
time.sleep(15)
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
for a in soup.find_all('a'):
if "videoplayback" in a['href']:
name = a['href'].split('=')[-1].replace("+", " ").replace("%28", "(").replace("%29", ")")
urlretrieve(a['href'], list_dir + "/" + name + ".mp4")
break
driver.close()
def create_base_directory(self, directory):
direct = os.path.dirname(directory)
if not os.path.exists(direct):
os.makedirs(direct)
else:
direct = os.path.dirname(directory)
return direct
def create_list_directory(self, directory, title):
direct = os.path.dirname(os.path.join(directory, title))
if not os.path.exists(direct):
os.makedirs(direct)
else:
direct = os.path.dirname(directory, title)
return direct
def download(self, query, crawl_type, number, directory):
direct = self.create_base_directory(directory)
if(crawl_type == 'video'):
self.download_video(direct, query)
elif(crawl_type == 'playlist'):
title, links = self.parse_links(query)
list_dir = self.create_list_directory(direct, title)
self.download_playlist(links, list_dir, number)
def main():
    """Parse command-line options and launch the downloader.

    Options: -q/--query (URL), -t/--crawl_type ('video' or 'playlist'),
    -n/--number (playlist size limit, -1 for all), -d/--directory (output).
    """
    arg_parser = argparse.ArgumentParser(description='Youtube Downloader')
    arg_parser.add_argument('-q', '--query', type=str, help='Link of video or playlist')
    arg_parser.add_argument('-t', '--crawl_type', type=str, default='video', help="Options: 'video' | 'playlist'")
    arg_parser.add_argument('-n', '--number', type=int, default=0, help='Number of videos to download from playlist: integer, -1 to download all')
    arg_parser.add_argument('-d', '--directory', type=str, default='./Videos/', help='Directory to save results')
    options = arg_parser.parse_args()
    YoutubeDownloader().download(query=options.query,
                                 crawl_type=options.crawl_type,
                                 number=options.number,
                                 directory=options.directory)
if __name__ == "__main__":
main()
| 39.708738 | 142 | 0.582885 | import sys, re, os, selenium, time, argparse
from time import sleep
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from urllib.request import urlopen, urlretrieve
class YoutubeDownloader(object):
def __init__(self):
self.driver = webdriver.Chrome()
def download_video(self, directory, query):
driver = self.driver
download_link = "http://www.ssyoutube.com/watch?v=" + query.split("?v=")[1]
driver.get(download_link)
sleep(10)
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
for a in soup.find_all('a'):
if "videoplayback" in a['href']:
name = a['href'].split('=')[-1].replace("+", " ").replace("%28", "(").replace("%29", ")")
urlretrieve(a['href'], directory + "/" + name + ".mp4")
break
driver.close()
def parse_links(self, query):
driver = self.driver
driver.get(query)
sleep(10)
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
title = soup.select('yt-formatted-string.title > a:nth-child(1)')[0].text
links = list()
for a in soup.find_all('a'):
if "index=" in a['href']:
links.append(a['href'].split('v=')[-1])
return title, links
def download_playlist(self, links, list_dir, number):
driver = self.driver
num = 0
for link in links:
if(num == number):
break
num = num + 1
download_link = "http://www.ssyoutube.com/watch?v=" + link
driver.get(download_link)
time.sleep(15)
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
for a in soup.find_all('a'):
if "videoplayback" in a['href']:
name = a['href'].split('=')[-1].replace("+", " ").replace("%28", "(").replace("%29", ")")
urlretrieve(a['href'], list_dir + "/" + name + ".mp4")
break
driver.close()
def create_base_directory(self, directory):
direct = os.path.dirname(directory)
if not os.path.exists(direct):
os.makedirs(direct)
else:
direct = os.path.dirname(directory)
return direct
def create_list_directory(self, directory, title):
direct = os.path.dirname(os.path.join(directory, title))
if not os.path.exists(direct):
os.makedirs(direct)
else:
direct = os.path.dirname(directory, title)
return direct
def download(self, query, crawl_type, number, directory):
direct = self.create_base_directory(directory)
if(crawl_type == 'video'):
self.download_video(direct, query)
elif(crawl_type == 'playlist'):
title, links = self.parse_links(query)
list_dir = self.create_list_directory(direct, title)
self.download_playlist(links, list_dir, number)
def main():
parser = argparse.ArgumentParser(description='Youtube Downloader')
parser.add_argument('-q', '--query', type=str, help='Link of video or playlist')
parser.add_argument('-t', '--crawl_type', type=str, default='video', help="Options: 'video' | 'playlist'")
parser.add_argument('-n', '--number', type=int, default=0, help='Number of videos to download from playlist: integer, -1 to download all')
parser.add_argument('-d', '--directory', type=str, default='./Videos/', help='Directory to save results')
args = parser.parse_args()
downloader = YoutubeDownloader()
downloader.download(query=args.query,
crawl_type=args.crawl_type,
number=args.number,
directory=args.directory)
if __name__ == "__main__":
main()
| true | true |
f70f32553a67b014ff81b6d4f591997937121e03 | 2,407 | py | Python | rs/Database/mackinac/test/test_reconstruct.py | stevenirby/RetSynth | 89cd62221959fd2dc2952c30de6ecbe2d511479a | [
"BSD-2-Clause"
] | 3 | 2018-08-24T23:45:11.000Z | 2021-09-08T19:57:22.000Z | rs/Database/mackinac/test/test_reconstruct.py | stevenirby/RetSynth | 89cd62221959fd2dc2952c30de6ecbe2d511479a | [
"BSD-2-Clause"
] | 1 | 2019-10-11T12:08:25.000Z | 2019-10-14T16:36:14.000Z | rs/Database/mackinac/test/test_reconstruct.py | stevenirby/RetSynth | 89cd62221959fd2dc2952c30de6ecbe2d511479a | [
"BSD-2-Clause"
] | 2 | 2021-08-21T19:52:43.000Z | 2021-09-25T12:39:03.000Z | import pytest
from os.path import join
import mackinac
@pytest.mark.fixtures('download_data')
class TestReconstruct:
def test_reconstruct_features(self, universal_folder, bacteria_folder, b_theta_features,
b_theta_summary, b_theta_id):
template = mackinac.create_template_model(
universal_folder,
bacteria_folder,
'bacteria',
'Bacteria template')
model = mackinac.reconstruct_model_from_features(
b_theta_features,
template,
b_theta_id,
'negbio',
gc_content=b_theta_summary['gc_content'] / 100.0
)
assert model.id == b_theta_id
assert len(model.reactions) == 923 # Value can change if genome annotation changes
assert len(model.metabolites) == 999 # Value can change if genome annotation changes
assert len(model.compartments) == 2
def test_reconstruct_likelihoods(self, universal_folder, bacteria_folder, b_theta_features,
b_theta_summary, b_theta_id, search_program_path,
search_db_path, fid_role_path, work_folder):
template = mackinac.create_template_model(
universal_folder,
bacteria_folder,
'bacteria',
'Bacteria template')
likelihoods = mackinac.calculate_likelihoods(
b_theta_id,
b_theta_features,
template,
search_program_path=search_program_path,
search_db_path=search_db_path,
fid_role_path=fid_role_path,
work_folder=work_folder)
assert len(likelihoods.reaction_values) == 5652
assert likelihoods.reaction_values['rxn00006']['likelihood'] == 0.0
assert pytest.approx(likelihoods.reaction_values['rxn14380']['likelihood'], 0.9594912486067599)
model = mackinac.reconstruct_model_from_likelihoods(
likelihoods,
template,
b_theta_id,
'negbio',
gc_content=b_theta_summary['gc_content'] / 100.0
)
assert model.id == b_theta_id
assert len(model.reactions) == 1164 # Value can change if genome annotation changes
assert len(model.metabolites) == 1260 # Value can change if genome annotation changes
assert len(model.compartments) == 2
| 41.5 | 103 | 0.630245 | import pytest
from os.path import join
import mackinac
@pytest.mark.fixtures('download_data')
class TestReconstruct:
def test_reconstruct_features(self, universal_folder, bacteria_folder, b_theta_features,
b_theta_summary, b_theta_id):
template = mackinac.create_template_model(
universal_folder,
bacteria_folder,
'bacteria',
'Bacteria template')
model = mackinac.reconstruct_model_from_features(
b_theta_features,
template,
b_theta_id,
'negbio',
gc_content=b_theta_summary['gc_content'] / 100.0
)
assert model.id == b_theta_id
assert len(model.reactions) == 923
assert len(model.metabolites) == 999
assert len(model.compartments) == 2
def test_reconstruct_likelihoods(self, universal_folder, bacteria_folder, b_theta_features,
b_theta_summary, b_theta_id, search_program_path,
search_db_path, fid_role_path, work_folder):
template = mackinac.create_template_model(
universal_folder,
bacteria_folder,
'bacteria',
'Bacteria template')
likelihoods = mackinac.calculate_likelihoods(
b_theta_id,
b_theta_features,
template,
search_program_path=search_program_path,
search_db_path=search_db_path,
fid_role_path=fid_role_path,
work_folder=work_folder)
assert len(likelihoods.reaction_values) == 5652
assert likelihoods.reaction_values['rxn00006']['likelihood'] == 0.0
assert pytest.approx(likelihoods.reaction_values['rxn14380']['likelihood'], 0.9594912486067599)
model = mackinac.reconstruct_model_from_likelihoods(
likelihoods,
template,
b_theta_id,
'negbio',
gc_content=b_theta_summary['gc_content'] / 100.0
)
assert model.id == b_theta_id
assert len(model.reactions) == 1164
assert len(model.metabolites) == 1260
assert len(model.compartments) == 2
| true | true |
f70f32820aaba13f43601a29ab5f2eaee2d8740b | 2,016 | py | Python | tests/mock/spi_checks_lsbmode.py | nazeer22/mraa | 0a12c5a0177f9fa8e7c4c564e70a65c4a0bb81af | [
"MIT"
] | 3 | 2017-04-21T02:26:06.000Z | 2017-12-27T01:37:35.000Z | tests/mock/spi_checks_lsbmode.py | nazeer22/mraa | 0a12c5a0177f9fa8e7c4c564e70a65c4a0bb81af | [
"MIT"
] | 1 | 2018-11-01T14:54:06.000Z | 2018-11-01T14:54:06.000Z | tests/mock/spi_checks_lsbmode.py | nazeer22/mraa | 0a12c5a0177f9fa8e7c4c564e70a65c4a0bb81af | [
"MIT"
] | 3 | 2019-04-20T07:24:48.000Z | 2020-11-05T00:27:43.000Z | #!/usr/bin/env python
# Author: Alex Tereschenko <alext.mkrs@gmail.com>
# Copyright (c) 2016 Alex Tereschenko.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import mraa as m
import unittest as u
from spi_checks_shared import *
class SpiChecksLsbmode(u.TestCase):
    """Checks for Spi.lsbmode() against the mock board."""

    def setUp(self):
        # Fresh SPI context for every test case.
        self.spi = m.Spi(MRAA_SPI_BUS_NUM)

    def tearDown(self):
        del self.spi

    def test_spi_set_lsbmode_false(self):
        mode = False
        status = self.spi.lsbmode(mode)
        self.assertEqual(status, m.SUCCESS,
                         "Setting LSB mode to %s did not return success" % mode)

    def test_spi_set_lsbmode_true(self):
        mode = True
        status = self.spi.lsbmode(mode)
        self.assertEqual(status, m.SUCCESS,
                         "Setting LSB mode to %s did not return success" % mode)

    def test_spi_set_lsbmode_invalid(self):
        # A non-boolean argument must be rejected by the binding layer.
        self.assertRaises(TypeError, self.spi.lsbmode, 10)
if __name__ == "__main__":
u.main()
| 36 | 83 | 0.731151 |
import mraa as m
import unittest as u
from spi_checks_shared import *
class SpiChecksLsbmode(u.TestCase):
def setUp(self):
self.spi = m.Spi(MRAA_SPI_BUS_NUM)
def tearDown(self):
del self.spi
def test_spi_set_lsbmode_false(self):
TEST_LSBMODE = False
self.assertEqual(self.spi.lsbmode(TEST_LSBMODE),
m.SUCCESS,
"Setting LSB mode to %s did not return success" %TEST_LSBMODE)
def test_spi_set_lsbmode_true(self):
TEST_LSBMODE = True
self.assertEqual(self.spi.lsbmode(TEST_LSBMODE),
m.SUCCESS,
"Setting LSB mode to %s did not return success" %TEST_LSBMODE)
def test_spi_set_lsbmode_invalid(self):
TEST_LSBMODE = 10
self.assertRaises(TypeError, self.spi.lsbmode, TEST_LSBMODE)
if __name__ == "__main__":
u.main()
| true | true |
f70f333c177f5f6e3822f30ebcfacbfd14cf1385 | 115 | py | Python | modules/2.79/bpy/types/ThemeSpaceListGeneric.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | modules/2.79/bpy/types/ThemeSpaceListGeneric.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | modules/2.79/bpy/types/ThemeSpaceListGeneric.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | class ThemeSpaceListGeneric:
list = None
list_text = None
list_text_hi = None
list_title = None
| 12.777778 | 28 | 0.669565 | class ThemeSpaceListGeneric:
list = None
list_text = None
list_text_hi = None
list_title = None
| true | true |
f70f3389abfeefcf28c6e221bd594d807d6b386b | 1,810 | py | Python | mirage/core/loader.py | epablosensei/mirage | 3c0d2fb0f0e570356e7126c999e83e0256920420 | [
"MIT"
] | null | null | null | mirage/core/loader.py | epablosensei/mirage | 3c0d2fb0f0e570356e7126c999e83e0256920420 | [
"MIT"
] | null | null | null | mirage/core/loader.py | epablosensei/mirage | 3c0d2fb0f0e570356e7126c999e83e0256920420 | [
"MIT"
] | 1 | 2020-06-08T15:50:31.000Z | 2020-06-08T15:50:31.000Z | from mirage.libs import io
class Loader:
    '''
    Dynamically discovers and instantiates the Mirage modules.
    '''

    def __init__(self):
        '''
        This constructor generates the modules list.
        '''
        import mirage.modules as modules
        # Map every module name to the class of the same name exported by
        # its module object.
        self.modulesList = {
            name: getattr(module, name)
            for name, module in modules.__modules__.items()
        }

    def getModulesNames(self):
        '''
        This method returns a list of existing modules' names.

        :return: list of modules' name
        :rtype: list of str
        '''
        return list(self.modulesList.keys())

    def load(self, moduleName):
        '''
        This method returns an instance of a specific module according to the name provided as parameter.

        :param moduleName: name of a module
        :type moduleName: str
        :return: an instance of the module, or None when the name is unknown
        :rtype: core.module.Module
        '''
        moduleClass = self.modulesList.get(moduleName)
        return moduleClass() if moduleClass is not None else None

    def list(self, pattern=""):
        '''
        Display the list of modules, filtered by the string provided as ``pattern``.

        :param pattern: filter
        :type pattern: str
        '''
        displayDict = {}
        for moduleClass in self.modulesList.values():
            info = moduleClass().info()
            technology = info["technology"].upper()
            matches = any(
                pattern in info[field]
                for field in ("description", "name", "technology", "type")
            )
            if matches:
                rows = displayDict.setdefault(technology, [])
                rows.append([info["name"], info["type"], info["description"]])
        for technology in sorted(displayDict):
            if displayDict[technology]:
                io.chart(["Name", "Type", "Description"],
                         sorted(displayDict[technology]),
                         "{} Modules".format(technology))
| 26.617647 | 102 | 0.691713 | from mirage.libs import io
class Loader:
def __init__(self):
import mirage.modules as modules
self.modulesList = {}
for moduleName,module in modules.__modules__.items():
current = module
moduleClass = getattr(current,moduleName)
self.modulesList[moduleName] = moduleClass
def getModulesNames(self):
return list(self.modulesList.keys())
def load(self,moduleName):
if moduleName in self.modulesList:
return self.modulesList[moduleName]()
else:
return None
def list(self,pattern=""):
displayDict = {}
for module in self.modulesList:
info = self.modulesList[module]().info()
technology = (info["technology"]).upper()
if (
pattern in info["description"] or
pattern in info["name"] or
pattern in info["technology"] or
pattern in info["type"]
):
if not technology in displayDict:
displayDict[technology] = []
displayDict[technology].append([info["name"], info["type"], info["description"]])
for module in sorted(displayDict):
if displayDict[module]:
io.chart(["Name", "Type","Description"], sorted(displayDict[module]), "{} Modules".format(module))
| true | true |
f70f33d05cc3dc8df24f43cf22b1baf7ccc96e5c | 427 | py | Python | terraform/modules/ecs/files/find_task_def.py | readevalprint/sift | 3015a2f704816a687a7c0f1974b873d7cda43444 | [
"Apache-2.0"
] | 1 | 2020-02-04T04:28:09.000Z | 2020-02-04T04:28:09.000Z | terraform/modules/ecs/files/find_task_def.py | readevalprint/sift | 3015a2f704816a687a7c0f1974b873d7cda43444 | [
"Apache-2.0"
] | 3 | 2020-06-05T17:34:59.000Z | 2021-06-10T22:45:45.000Z | terraform/modules/ecs/files/find_task_def.py | readevalprint/sift | 3015a2f704816a687a7c0f1974b873d7cda43444 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import boto3
import json
import sys
# Resolve the newest ACTIVE ECS task definition for the family prefix read
# from stdin and print its ARN as a JSON object.  NOTE(review): the stdin-in /
# JSON-out shape presumably serves Terraform's external data source protocol
# (the file lives under terraform/modules) -- confirm with the caller.
ecs = boto3.client('ecs')
query = json.load(sys.stdin)
response = ecs.list_task_definitions(familyPrefix=query['family_prefix'],
                                     status="ACTIVE", sort="DESC", maxResults=1)
latest_arn = response["taskDefinitionArns"][0]
sys.stdout.write(json.dumps({"task_arn": "%s" % latest_arn}))
sys.exit(0)
| 23.722222 | 83 | 0.676815 |
import boto3
import json
import sys
client = boto3.client('ecs')
data = json.load(sys.stdin)
family_prefix = data['family_prefix']
task_def = client.list_task_definitions(familyPrefix=family_prefix,
status="ACTIVE", sort="DESC", maxResults=1)
task_arn = task_def["taskDefinitionArns"]
sys.stdout.write(json.dumps({"task_arn": "%s" % task_arn[0]}))
sys.exit(0)
| true | true |
f70f34aa6054d5c7cfa28a06d14022ed6f21afe4 | 3,447 | py | Python | src/loader/load_opportunity_dataset.py | Sensors-in-Paradise/OpportunityML | a123b4842de45f735d517be6bcd96ca35171db91 | [
"MIT"
] | 1 | 2022-03-25T16:00:36.000Z | 2022-03-25T16:00:36.000Z | src/loader/load_opportunity_dataset.py | Sensors-in-Paradise/OpportunityML | a123b4842de45f735d517be6bcd96ca35171db91 | [
"MIT"
] | 1 | 2022-03-28T13:50:28.000Z | 2022-03-28T13:50:28.000Z | src/loader/load_opportunity_dataset.py | Sensors-in-Paradise/OpportunityML | a123b4842de45f735d517be6bcd96ca35171db91 | [
"MIT"
] | null | null | null | import itertools
import os
import numpy as np
import pandas as pd
from utils.Recording import Recording
import utils.settings as settings
def load_opportunity_dataset(opportunity_dataset_path: str) -> "list[Recording]":
    """
    Read every subject/ADL run of the Opportunity dataset and return it as a
    list of Recording objects.

    :param opportunity_dataset_path: path to the dataset root (the 'dataset'
        sub-folder is appended automatically)
    :return: one Recording per (subject, ADL run) pair -- 4 subjects x 5 runs
    """
    print("Will read the opportunity dataset")
    opportunity_dataset_path += "/dataset"
    subject_ids = range(1, 5)
    recording_ids = range(1, 6)

    # see loader/opportunity_col_names to make your selection
    selected_feature_names = [
        "IMU-BACK-accX",
        "IMU-BACK-accY",
        "IMU-BACK-accZ",
        "IMU-BACK-Quaternion1",
        "IMU-BACK-Quaternion2",
        "IMU-BACK-Quaternion3",
        "IMU-BACK-Quaternion4",
        "IMU-RLA-accX",
        "IMU-RLA-accY",
        "IMU-RLA-accZ",
        "IMU-RLA-Quaternion1",
        "IMU-RLA-Quaternion2",
        "IMU-RLA-Quaternion3",
        "IMU-RLA-Quaternion4",
        "IMU-LLA-accX",
        "IMU-LLA-accY",
        "IMU-LLA-accZ",
        "IMU-LLA-Quaternion1",
        "IMU-LLA-Quaternion2",
        "IMU-LLA-Quaternion3",
        "IMU-LLA-Quaternion4",
        "IMU-L-SHOE-EuX",
        "IMU-L-SHOE-EuY",
        "IMU-L-SHOE-EuZ",
        "IMU-L-SHOE-Nav_Ax",
        "IMU-L-SHOE-Nav_Ay",
        "IMU-L-SHOE-Nav_Az",
        "IMU-L-SHOE-Body_Ax",
        "IMU-L-SHOE-Body_Ay",
        "IMU-L-SHOE-Body_Az",
        "IMU-L-SHOE-AngVelBodyFrameX",
        "IMU-L-SHOE-AngVelBodyFrameY",
        "IMU-L-SHOE-AngVelBodyFrameZ",
        "IMU-L-SHOE-AngVelNavFrameX",
        "IMU-L-SHOE-AngVelNavFrameY",
        "IMU-L-SHOE-AngVelNavFrameZ",
        "IMU-R-SHOE-EuX",
        "IMU-R-SHOE-EuY",
        "IMU-R-SHOE-EuZ",
        "IMU-R-SHOE-Nav_Ax",
        "IMU-R-SHOE-Nav_Ay",
        "IMU-R-SHOE-Nav_Az",
        "IMU-R-SHOE-Body_Ax",
        "IMU-R-SHOE-Body_Ay",
        "IMU-R-SHOE-Body_Az",
        "IMU-R-SHOE-AngVelBodyFrameX",
        "IMU-R-SHOE-AngVelBodyFrameY",
        "IMU-R-SHOE-AngVelBodyFrameZ",
        "IMU-R-SHOE-AngVelNavFrameX",
        "IMU-R-SHOE-AngVelNavFrameY",
        "IMU-R-SHOE-AngVelNavFrameZ",
    ]
    print(f"Selected features (n_features: {len(selected_feature_names)}):\n", "\n".join(["\t" + str(feature_name) for feature_name in selected_feature_names]))

    # Column names for the raw .dat files, one name per line in the
    # companion file (replaces the former manual append loop).
    with open("src/loader/opportunity_col_names", "r") as file:
        col_names = file.read().splitlines()

    recordings = []
    for sub, rec in itertools.product(subject_ids, recording_ids):
        file_name = f"S{sub}-ADL{rec}.dat"
        file_path = os.path.join(opportunity_dataset_path, file_name)
        print(f"Reading {file_path} ...")
        file_df = pd.read_csv(file_path, delimiter=" ", header=None)
        file_df.columns = col_names  # give them the real column names
        recordings.append(Recording(
            sensor_frame = file_df.loc[:, selected_feature_names],
            time_frame = file_df.loc[:, 'MILLISEC'],
            activities = file_df.loc[:, 'HL_Activity'].map(
                lambda label: settings.DATA_CONFIG.raw_label_to_activity_idx(label)
            ),  # maps raw labels 0, 101, 102, 103, 104, 105 to 0, 1, 2, 3, 4, 5
            subject=int(sub),
            recording_index=int(rec)
        ))

    print(f"\n => Total {len(recordings)} recordings read")
    return recordings
| 32.828571 | 160 | 0.596171 | import itertools
import os
import numpy as np
import pandas as pd
from utils.Recording import Recording
import utils.settings as settings
def load_opportunity_dataset(opportunity_dataset_path: str) -> "list[Recording]":
print("Will read the opportunity dataset")
opportunity_dataset_path += "/dataset"
subject_ids = range(1, 5)
recording_ids = range(1, 6)
selected_feature_names = [
"IMU-BACK-accX",
"IMU-BACK-accY",
"IMU-BACK-accZ",
"IMU-BACK-Quaternion1",
"IMU-BACK-Quaternion2",
"IMU-BACK-Quaternion3",
"IMU-BACK-Quaternion4",
"IMU-RLA-accX",
"IMU-RLA-accY",
"IMU-RLA-accZ",
"IMU-RLA-Quaternion1",
"IMU-RLA-Quaternion2",
"IMU-RLA-Quaternion3",
"IMU-RLA-Quaternion4",
"IMU-LLA-accX",
"IMU-LLA-accY",
"IMU-LLA-accZ",
"IMU-LLA-Quaternion1",
"IMU-LLA-Quaternion2",
"IMU-LLA-Quaternion3",
"IMU-LLA-Quaternion4",
"IMU-L-SHOE-EuX",
"IMU-L-SHOE-EuY",
"IMU-L-SHOE-EuZ",
"IMU-L-SHOE-Nav_Ax",
"IMU-L-SHOE-Nav_Ay",
"IMU-L-SHOE-Nav_Az",
"IMU-L-SHOE-Body_Ax",
"IMU-L-SHOE-Body_Ay",
"IMU-L-SHOE-Body_Az",
"IMU-L-SHOE-AngVelBodyFrameX",
"IMU-L-SHOE-AngVelBodyFrameY",
"IMU-L-SHOE-AngVelBodyFrameZ",
"IMU-L-SHOE-AngVelNavFrameX",
"IMU-L-SHOE-AngVelNavFrameY",
"IMU-L-SHOE-AngVelNavFrameZ",
"IMU-R-SHOE-EuX",
"IMU-R-SHOE-EuY",
"IMU-R-SHOE-EuZ",
"IMU-R-SHOE-Nav_Ax",
"IMU-R-SHOE-Nav_Ay",
"IMU-R-SHOE-Nav_Az",
"IMU-R-SHOE-Body_Ax",
"IMU-R-SHOE-Body_Ay",
"IMU-R-SHOE-Body_Az",
"IMU-R-SHOE-AngVelBodyFrameX",
"IMU-R-SHOE-AngVelBodyFrameY",
"IMU-R-SHOE-AngVelBodyFrameZ",
"IMU-R-SHOE-AngVelNavFrameX",
"IMU-R-SHOE-AngVelNavFrameY",
"IMU-R-SHOE-AngVelNavFrameZ",
]
print(f"Selected features (n_features: {len(selected_feature_names)}):\n", "\n".join(["\t" + str(feature_name) for feature_name in selected_feature_names]))
col_names = []
with open("src/loader/opportunity_col_names", "r") as file:
lines = file.read().splitlines()
for line in lines:
col_names.append(line)
recordings = []
for sub, rec in itertools.product(subject_ids, recording_ids):
file_name = f"S{sub}-ADL{rec}.dat"
file_path = os.path.join(opportunity_dataset_path, file_name)
print(f"Reading {file_path} ...")
file_df = pd.read_csv(file_path, delimiter=" ", header=None)
file_df.columns = col_names
recordings.append(Recording(
sensor_frame = file_df.loc[:, selected_feature_names],
time_frame = file_df.loc[:, 'MILLISEC'],
activities = file_df.loc[:, 'HL_Activity'].map(
lambda label: settings.DATA_CONFIG.raw_label_to_activity_idx(label)
),
subject=int(sub),
recording_index=int(rec)
))
print(f"\n => Total {len(recordings)} recordings read")
return recordings
| true | true |
f70f36f9beef6000a030d08e92f17bdd06f49fa2 | 5,002 | py | Python | sdk/python/pulumi_azure_nextgen/recoveryservices/v20190513/get_protected_item.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/recoveryservices/v20190513/get_protected_item.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/recoveryservices/v20190513/get_protected_item.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetProtectedItemResult',
'AwaitableGetProtectedItemResult',
'get_protected_item',
]
@pulumi.output_type
class GetProtectedItemResult:
    """
    Base class for backup items.

    Generated by the Pulumi SDK Generator (see the file header warning); do
    not edit by hand.  Each constructor argument is validated for its expected
    primitive type and stored as a Pulumi output property.
    """
    def __init__(__self__, e_tag=None, location=None, name=None, properties=None, tags=None, type=None):
        if e_tag and not isinstance(e_tag, str):
            raise TypeError("Expected argument 'e_tag' to be a str")
        pulumi.set(__self__, "e_tag", e_tag)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="eTag")
    def e_tag(self) -> Optional[str]:
        """
        Optional ETag.
        """
        return pulumi.get(self, "e_tag")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name associated with the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> Any:
        """
        ProtectedItemResource properties
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
        """
        return pulumi.get(self, "type")
class AwaitableGetProtectedItemResult(GetProtectedItemResult):
    """Awaitable wrapper so the lookup result can be used with ``await``:
    ``__await__`` yields nothing and immediately returns a plain
    ``GetProtectedItemResult`` copy of this object's fields."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable 'yield' marks this function as a generator, which
        # is what makes the object awaitable; it never actually suspends.
        if False:
            yield self
        return GetProtectedItemResult(
            e_tag=self.e_tag,
            location=self.location,
            name=self.name,
            properties=self.properties,
            tags=self.tags,
            type=self.type)
def get_protected_item(container_name: Optional[str] = None,
                       fabric_name: Optional[str] = None,
                       filter: Optional[str] = None,
                       protected_item_name: Optional[str] = None,
                       resource_group_name: Optional[str] = None,
                       vault_name: Optional[str] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProtectedItemResult:
    """
    Use this data source to access information about an existing resource
    (an Azure Recovery Services backed-up item, API version 2019-05-13).

    :param str container_name: Container name associated with the backed up item.
    :param str fabric_name: Fabric name associated with the backed up item.
    :param str filter: OData filter options.
    :param str protected_item_name: Backed up item name whose details are to be fetched.
    :param str resource_group_name: The name of the resource group where the recovery services vault is present.
    :param str vault_name: The name of the recovery services vault.
    :param pulumi.InvokeOptions opts: Options controlling the invoke (provider, version, ...).
    :return: an awaitable result with the item's e_tag, location, name,
        properties, tags and type.
    """
    __args__ = dict()
    __args__['containerName'] = container_name
    __args__['fabricName'] = fabric_name
    __args__['filter'] = filter
    __args__['protectedItemName'] = protected_item_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['vaultName'] = vault_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider-side lookup; the typed result is unpacked below.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:recoveryservices/v20190513:getProtectedItem', __args__, opts=opts, typ=GetProtectedItemResult).value

    return AwaitableGetProtectedItemResult(
        e_tag=__ret__.e_tag,
        location=__ret__.location,
        name=__ret__.name,
        properties=__ret__.properties,
        tags=__ret__.tags,
        type=__ret__.type)
| 34.979021 | 151 | 0.638545 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetProtectedItemResult',
'AwaitableGetProtectedItemResult',
'get_protected_item',
]
@pulumi.output_type
class GetProtectedItemResult:
def __init__(__self__, e_tag=None, location=None, name=None, properties=None, tags=None, type=None):
if e_tag and not isinstance(e_tag, str):
raise TypeError("Expected argument 'e_tag' to be a str")
pulumi.set(__self__, "e_tag", e_tag)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[str]:
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def location(self) -> Optional[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Any:
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetProtectedItemResult(GetProtectedItemResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetProtectedItemResult(
e_tag=self.e_tag,
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_protected_item(container_name: Optional[str] = None,
                       fabric_name: Optional[str] = None,
                       filter: Optional[str] = None,
                       protected_item_name: Optional[str] = None,
                       resource_group_name: Optional[str] = None,
                       vault_name: Optional[str] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProtectedItemResult:
    """Look up a backed-up ("protected") item in a Recovery Services vault.

    Thin wrapper around the
    'azure-nextgen:recoveryservices/v20190513:getProtectedItem' invoke;
    every parameter is forwarded unchanged as an invoke argument.
    """
    __args__ = dict()
    __args__['containerName'] = container_name
    __args__['fabricName'] = fabric_name
    __args__['filter'] = filter
    __args__['protectedItemName'] = protected_item_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['vaultName'] = vault_name
    # Fill in default invoke options and pin the SDK version.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:recoveryservices/v20190513:getProtectedItem', __args__, opts=opts, typ=GetProtectedItemResult).value
    return AwaitableGetProtectedItemResult(
        e_tag=__ret__.e_tag,
        location=__ret__.location,
        name=__ret__.name,
        properties=__ret__.properties,
        tags=__ret__.tags,
        type=__ret__.type)
| true | true |
f70f378ef04c13ea6496cf8ff11091ce7230a8c8 | 262 | py | Python | Part_2_intermediate/mod_2/lesson_7/ex_1_cls/main.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_2_intermediate/mod_2/lesson_7/ex_1_cls/main.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_2_intermediate/mod_2/lesson_7/ex_1_cls/main.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | from estudent.school import School
def run_example():
    """Build the demo school and print it together with its student capacity."""
    hogwart = School.create_school_with_students("Hogwart")
    print(hogwart)
    limit = hogwart.MAX_STUDENTS_NUMBER
    print(f"W szkole może być maksymalnie {limit} uczniów")
if __name__ == '__main__':
    run_example()
| 20.153846 | 80 | 0.736641 | from estudent.school import School
# NOTE(review): comment-stripped duplicate of the example above (the dataset
# dump stores both `content` and `content_no_comment` variants of each file).
def run_example():
    school = School.create_school_with_students("Hogwart")
    print(school)
    print(f"W szkole może być maksymalnie {school.MAX_STUDENTS_NUMBER} uczniów")
if __name__ == '__main__':
    run_example()
| true | true |
f70f38918d4f1074b2011fa76e4171317554d533 | 3,855 | py | Python | HD_BET/utils.py | evertdeman/HD-BET | 817a50d2fe9b8663646cc74652cb50e26f343a3b | [
"Apache-2.0"
] | 115 | 2019-01-31T15:58:13.000Z | 2022-03-31T18:59:07.000Z | HD_BET/utils.py | evertdeman/HD-BET | 817a50d2fe9b8663646cc74652cb50e26f343a3b | [
"Apache-2.0"
] | 25 | 2019-02-05T16:12:28.000Z | 2022-03-23T06:12:09.000Z | HD_BET/utils.py | evertdeman/HD-BET | 817a50d2fe9b8663646cc74652cb50e26f343a3b | [
"Apache-2.0"
] | 40 | 2019-01-31T16:08:25.000Z | 2022-03-25T06:45:06.000Z | from urllib.request import urlopen
import torch
from torch import nn
import numpy as np
from skimage.morphology import label
import os
from HD_BET.paths import folder_with_parameter_files
def get_params_fname(fold):
    """Return the path of the parameter file for cross-validation fold *fold*."""
    filename = "%d.model" % fold
    return os.path.join(folder_with_parameter_files, filename)
def maybe_download_parameters(fold=0, force_overwrite=False):
    """
    Downloads the parameters for cross-validation fold `fold` if not present yet.

    The file is fetched from Zenodo and stored as "<fold>.model" inside
    folder_with_parameter_files.

    :param fold: fold index; must lie in [0, 4].
    :param force_overwrite: if True the old parameter file will be deleted (if present) prior to download
    :return: None
    """
    assert 0 <= fold <= 4, "fold must be between 0 and 4"
    if not os.path.isdir(folder_with_parameter_files):
        maybe_mkdir_p(folder_with_parameter_files)
    out_filename = get_params_fname(fold)
    if force_overwrite and os.path.isfile(out_filename):
        os.remove(out_filename)
    if not os.path.isfile(out_filename):
        url = "https://zenodo.org/record/2540695/files/%d.model?download=1" % fold
        print("Downloading", url, "...")
        # Whole-file download into memory, then one binary write to disk.
        data = urlopen(url).read()
        with open(out_filename, 'wb') as f:
            f.write(data)
def init_weights(module):
    """Kaiming-initialize Conv3d weights (negative slope 1e-2) and zero biases.

    Intended to be applied recursively via ``network.apply(init_weights)``;
    modules other than ``nn.Conv3d`` are left untouched.
    """
    if isinstance(module, nn.Conv3d):
        # The in-place underscore initializers replace nn.init.kaiming_normal /
        # nn.init.constant, which were deprecated and later removed; the old
        # code also reassigned the attribute instead of initializing in place.
        nn.init.kaiming_normal_(module.weight, a=1e-2)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)
def softmax_helper(x):
    """Numerically stable softmax over dimension 1 (the class/channel dim)."""
    # Subtract the per-row max before exponentiating to avoid overflow.
    # Broadcasting replaces the original explicit .repeat() calls, which
    # materialized full-size copies of the max/sum tensors for no benefit.
    shifted = x - x.max(1, keepdim=True)[0]
    exps = torch.exp(shifted)
    return exps / exps.sum(1, keepdim=True)
class SetNetworkToVal(object):
    """Callable for ``network.apply`` that flips layers into eval-style modes.

    Dropout layers get ``train(use_dropout_sampling)``; instance-/batch-norm
    layers get ``train(not norm_use_average)``. Other modules are untouched.
    """
    def __init__(self, use_dropout_sampling=False, norm_use_average=True):
        self.use_dropout_sampling = use_dropout_sampling
        self.norm_use_average = norm_use_average
    def __call__(self, module):
        dropout_types = (nn.Dropout, nn.Dropout2d, nn.Dropout3d)
        norm_types = (nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d,
                      nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)
        if isinstance(module, dropout_types):
            module.train(self.use_dropout_sampling)
        elif isinstance(module, norm_types):
            module.train(not self.norm_use_average)
def postprocess_prediction(seg):
    """Keep only the largest connected foreground component of *seg*.

    Every other nonzero component is zeroed. *seg* is modified in place and
    also returned.
    """
    # basically look for connected components and choose the largest one, delete everything else
    print("running postprocessing... ")
    mask = seg != 0
    # An all-background segmentation has no components to choose from; the
    # previous code crashed here with an argmax over an empty sequence.
    if not mask.any():
        return seg
    lbls = label(mask, connectivity=mask.ndim)
    # lbls_sizes[0] is the background component, hence the [1:] slice below.
    lbls_sizes = [np.sum(lbls == i) for i in np.unique(lbls)]
    largest_region = np.argmax(lbls_sizes[1:]) + 1
    seg[lbls != largest_region] = 0
    return seg
def subdirs(folder, join=True, prefix=None, suffix=None, sort=True):
    """List the immediate subdirectories of *folder*.

    join: return full paths instead of bare names.
    prefix/suffix: when given, keep only names starting/ending with them.
    sort: sort the result lexicographically.
    """
    matches = []
    for entry in os.listdir(folder):
        if not os.path.isdir(os.path.join(folder, entry)):
            continue
        if prefix is not None and not entry.startswith(prefix):
            continue
        if suffix is not None and not entry.endswith(suffix):
            continue
        matches.append(os.path.join(folder, entry) if join else entry)
    if sort:
        matches.sort()
    return matches
def subfiles(folder, join=True, prefix=None, suffix=None, sort=True):
    """List the immediate regular files of *folder*.

    join: return full paths instead of bare names.
    prefix/suffix: when given, keep only names starting/ending with them.
    sort: sort the result lexicographically.
    """
    matches = []
    for entry in os.listdir(folder):
        if not os.path.isfile(os.path.join(folder, entry)):
            continue
        if prefix is not None and not entry.startswith(prefix):
            continue
        if suffix is not None and not entry.endswith(suffix):
            continue
        matches.append(os.path.join(folder, entry) if join else entry)
    if sort:
        matches.sort()
    return matches
subfolders = subdirs # I am tired of confusing those
def maybe_mkdir_p(directory):
    """Create *directory* and any missing parents; no error if it exists.

    The previous implementation re-derived each component by splitting on
    '/', which silently mangled relative paths (the first component was
    dropped and the rest re-rooted at '/') and raced between the isdir
    check and mkdir. os.makedirs(..., exist_ok=True) fixes both while
    keeping the call-site contract identical.
    """
    os.makedirs(directory, exist_ok=True)
| 33.521739 | 114 | 0.659144 | from urllib.request import urlopen
# NOTE(review): comment/docstring-stripped duplicate of the HD_BET utils
# module above (the dataset dump stores both `content` and
# `content_no_comment` variants); its first import line is fused into the
# preceding metadata row.
import torch
from torch import nn
import numpy as np
from skimage.morphology import label
import os
from HD_BET.paths import folder_with_parameter_files
# Path of the parameter file for cross-validation fold `fold`.
def get_params_fname(fold):
    return os.path.join(folder_with_parameter_files, "%d.model" % fold)
# Fetch the fold's parameter file from Zenodo when it is missing.
def maybe_download_parameters(fold=0, force_overwrite=False):
    assert 0 <= fold <= 4, "fold must be between 0 and 4"
    if not os.path.isdir(folder_with_parameter_files):
        maybe_mkdir_p(folder_with_parameter_files)
    out_filename = get_params_fname(fold)
    if force_overwrite and os.path.isfile(out_filename):
        os.remove(out_filename)
    if not os.path.isfile(out_filename):
        url = "https://zenodo.org/record/2540695/files/%d.model?download=1" % fold
        print("Downloading", url, "...")
        data = urlopen(url).read()
        with open(out_filename, 'wb') as f:
            f.write(data)
# Kaiming-initialize Conv3d weights and zero their biases.
def init_weights(module):
    if isinstance(module, nn.Conv3d):
        module.weight = nn.init.kaiming_normal(module.weight, a=1e-2)
        if module.bias is not None:
            module.bias = nn.init.constant(module.bias, 0)
# Numerically stable softmax over dimension 1.
def softmax_helper(x):
    rpt = [1 for _ in range(len(x.size()))]
    rpt[1] = x.size(1)
    x_max = x.max(1, keepdim=True)[0].repeat(*rpt)
    e_x = torch.exp(x - x_max)
    return e_x / e_x.sum(1, keepdim=True).repeat(*rpt)
# network.apply(...) helper flipping dropout/norm layers into eval-style modes.
class SetNetworkToVal(object):
    def __init__(self, use_dropout_sampling=False, norm_use_average=True):
        self.norm_use_average = norm_use_average
        self.use_dropout_sampling = use_dropout_sampling
    def __call__(self, module):
        if isinstance(module, nn.Dropout3d) or isinstance(module, nn.Dropout2d) or isinstance(module, nn.Dropout):
            module.train(self.use_dropout_sampling)
        elif isinstance(module, nn.InstanceNorm3d) or isinstance(module, nn.InstanceNorm2d) or \
                isinstance(module, nn.InstanceNorm1d) \
                or isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm3d) or \
                isinstance(module, nn.BatchNorm1d):
            module.train(not self.norm_use_average)
# Keep only the largest connected foreground component of the segmentation.
def postprocess_prediction(seg):
    print("running postprocessing... ")
    mask = seg != 0
    lbls = label(mask, connectivity=mask.ndim)
    lbls_sizes = [np.sum(lbls == i) for i in np.unique(lbls)]
    largest_region = np.argmax(lbls_sizes[1:]) + 1
    seg[lbls != largest_region] = 0
    return seg
# List immediate subdirectories of `folder` (optionally filtered/joined/sorted).
def subdirs(folder, join=True, prefix=None, suffix=None, sort=True):
    if join:
        l = os.path.join
    else:
        l = lambda x, y: y
    res = [l(folder, i) for i in os.listdir(folder) if os.path.isdir(os.path.join(folder, i))
           and (prefix is None or i.startswith(prefix))
           and (suffix is None or i.endswith(suffix))]
    if sort:
        res.sort()
    return res
# List immediate regular files of `folder` (optionally filtered/joined/sorted).
def subfiles(folder, join=True, prefix=None, suffix=None, sort=True):
    if join:
        l = os.path.join
    else:
        l = lambda x, y: y
    res = [l(folder, i) for i in os.listdir(folder) if os.path.isfile(os.path.join(folder, i))
           and (prefix is None or i.startswith(prefix))
           and (suffix is None or i.endswith(suffix))]
    if sort:
        res.sort()
    return res
subfolders = subdirs  # alias
# mkdir -p equivalent; note: assumes absolute '/'-separated paths.
def maybe_mkdir_p(directory):
    splits = directory.split("/")[1:]
    for i in range(0, len(splits)):
        if not os.path.isdir(os.path.join("/", *splits[:i+1])):
            os.mkdir(os.path.join("/", *splits[:i+1]))
| true | true |
f70f3a764a276710af7c757c3c63f045ee99d167 | 5,677 | py | Python | automationassets/automationassets.py | ben-garside/python_emulated_assets | 9df700db8f29579e9135a6107b3a7b490c4a68ce | [
"MIT"
] | 9 | 2018-02-05T18:44:34.000Z | 2021-08-25T12:20:05.000Z | automationassets/automationassets.py | ben-garside/python_emulated_assets | 9df700db8f29579e9135a6107b3a7b490c4a68ce | [
"MIT"
] | 2 | 2018-02-12T23:45:19.000Z | 2018-09-25T19:48:23.000Z | automationassets/automationassets.py | ben-garside/python_emulated_assets | 9df700db8f29579e9135a6107b3a7b490c4a68ce | [
"MIT"
] | 13 | 2018-02-08T22:55:17.000Z | 2022-03-14T21:36:10.000Z | """ Azure Automation assets module to be used with Azure Automation during offline development """
#!/usr/bin/env python2
# ----------------------------------------------------------------------------------
#
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ----------------------------------------------------------------------------------
# Constant keys for extracting items from automation assets.
_KEY_NAME = "Name"
_KEY_VALUE = "Value"
_KEY_USERNAME = "Username"
_KEY_PASSWORD = "Password"
_KEY_CERTPATH = "CertPath"
_KEY_CONNECTION_FIELDS = "ValueFields"
# Asset types supported in Azure Automation within python scripts.
_KEY_VARIABLE = "Variable"
_KEY_CERTIFICATE = "Certificate"
_KEY_CREDENTIAL = "Credential"
_KEY_CONNECTION = "Connection"
# Get Azure Automation asset json file
def _get_automation_asset_file():
import os
if os.environ.get('AUTOMATION_ASSET_FILE') is not None:
return os.environ.get('AUTOMATION_ASSET_FILE')
return os.path.join(os.path.dirname(__file__), "localassets.json")
# Helper function to find an asset of a specific type and name in the asset file
def _get_asset_value(asset_file, asset_type, asset_name):
import json
json_data = open(asset_file)
json_string = json_data.read()
local_assets = json.loads(json_string)
return_value = None
for asset, asset_values in local_assets.iteritems():
if asset == asset_type:
for value in asset_values:
if value[_KEY_NAME] == asset_name:
return_value = value
break
if return_value != None:
# Found the value so break out of loop
break
return return_value
# Returns an asset from the asses file
def _get_asset(asset_type, asset_name):
    """Fetch an asset from the assets file; raise LookupError when missing."""
    assets_file = _get_automation_asset_file()
    asset = _get_asset_value(assets_file, asset_type, asset_name)
    if asset is None:
        raise LookupError("asset:" + asset_name + " not found")
    return asset
# Helper function to set an asset of a specific type and name in the assetFile
def _set_asset_value(asset_file, asset_type, asset_name, asset_value):
import json
json_data = open(asset_file)
json_string = json_data.read()
local_assets = json.loads(json_string)
item_found = False
for asset, asset_values in local_assets.iteritems():
if asset == asset_type:
for value in asset_values:
if value[_KEY_NAME] == asset_name:
value[_KEY_VALUE] = asset_value
with open(asset_file, 'w') as asset_file_content:
asset_file_content.write(json.dumps(local_assets, indent=4))
item_found = True
break
if item_found:
break
return item_found
# Updates an asset in the assets file
def _set_asset(asset_type, asset_name, asset_value):
    """Update an existing asset; raise LookupError when it is absent."""
    assets_file = _get_automation_asset_file()
    if not _set_asset_value(assets_file, asset_type, asset_name, asset_value):
        raise LookupError("asset:" + asset_name + " not found")
# Below are the 5 supported calls that can be made to automation assets from within
# a python script
def get_automation_variable(name):
    """Return the value of the automation variable *name*.

    Raises LookupError when the variable is not in the assets file.
    """
    return _get_asset(_KEY_VARIABLE, name)[_KEY_VALUE]
def set_automation_variable(name, value):
    """Set the automation variable *name* to *value*.

    Raises LookupError when the variable does not already exist in the
    assets file (new variables are not created).
    """
    _set_asset(_KEY_VARIABLE, name, value)
def get_automation_credential(name):
    """Return an automation credential as a dict with 'username' and 'password' keys."""
    credential = _get_asset(_KEY_CREDENTIAL, name)
    return {
        'username': credential['Username'],
        'password': credential['Password'],
    }
def get_automation_connection(name):
    """Return the automation connection *name* as a dict of its value fields.

    Raises LookupError when the connection is not in the assets file.
    """
    connection = _get_asset(_KEY_CONNECTION, name)
    # The connection's usable fields live under the 'ValueFields' key.
    return connection[_KEY_CONNECTION_FIELDS]
def get_automation_certificate(name):
    """Return an automation certificate exported as PKCS#12 bytes.

    Loads the certificate file referenced by the asset's CertPath entry,
    unlocked with the asset's Password entry.
    """
    from OpenSSL import crypto
    certificate = _get_asset(_KEY_CERTIFICATE, name)
    # 'with' closes the certificate file handle (the previous open().read()
    # call leaked it).
    with open(certificate[_KEY_CERTPATH], 'rb') as cert_file:
        pks12_cert = crypto.load_pkcs12(cert_file.read(),
                                        certificate[_KEY_PASSWORD])
    # Equivalent to crypto.PKCS12.export(pks12_cert), just idiomatic.
    return pks12_cert.export()
| 38.358108 | 98 | 0.692443 |
# NOTE(review): comment/docstring-stripped duplicate of the automation assets
# module above (the dataset dump stores both content variants). This copy
# keeps the Python-2-only iteritems() calls and unclosed file handles of the
# original.
_KEY_NAME = "Name"
_KEY_VALUE = "Value"
_KEY_USERNAME = "Username"
_KEY_PASSWORD = "Password"
_KEY_CERTPATH = "CertPath"
_KEY_CONNECTION_FIELDS = "ValueFields"
_KEY_VARIABLE = "Variable"
_KEY_CERTIFICATE = "Certificate"
_KEY_CREDENTIAL = "Credential"
_KEY_CONNECTION = "Connection"
def _get_automation_asset_file():
    import os
    if os.environ.get('AUTOMATION_ASSET_FILE') is not None:
        return os.environ.get('AUTOMATION_ASSET_FILE')
    return os.path.join(os.path.dirname(__file__), "localassets.json")
# Linear scan of the assets JSON for a (type, name) match; None when absent.
def _get_asset_value(asset_file, asset_type, asset_name):
    import json
    json_data = open(asset_file)
    json_string = json_data.read()
    local_assets = json.loads(json_string)
    return_value = None
    for asset, asset_values in local_assets.iteritems():
        if asset == asset_type:
            for value in asset_values:
                if value[_KEY_NAME] == asset_name:
                    return_value = value
                    break
        if return_value != None:
            break
    return return_value
def _get_asset(asset_type, asset_name):
    local_assets_file = _get_automation_asset_file()
    return_value = _get_asset_value(local_assets_file, asset_type, asset_name)
    if return_value is None:
        raise LookupError("asset:" + asset_name + " not found")
    return return_value
# Rewrite the assets JSON with the new value; True when the asset was found.
def _set_asset_value(asset_file, asset_type, asset_name, asset_value):
    import json
    json_data = open(asset_file)
    json_string = json_data.read()
    local_assets = json.loads(json_string)
    item_found = False
    for asset, asset_values in local_assets.iteritems():
        if asset == asset_type:
            for value in asset_values:
                if value[_KEY_NAME] == asset_name:
                    value[_KEY_VALUE] = asset_value
                    with open(asset_file, 'w') as asset_file_content:
                        asset_file_content.write(json.dumps(local_assets, indent=4))
                    item_found = True
                    break
        if item_found:
            break
    return item_found
def _set_asset(asset_type, asset_name, asset_value):
    local_assets_file = _get_automation_asset_file()
    item_found = _set_asset_value(local_assets_file,
                                  asset_type, asset_name, asset_value)
    if item_found is False:
        raise LookupError("asset:" + asset_name + " not found")
def get_automation_variable(name):
    variable = _get_asset(_KEY_VARIABLE, name)
    return variable[_KEY_VALUE]
def set_automation_variable(name, value):
    _set_asset(_KEY_VARIABLE, name, value)
def get_automation_credential(name):
    credential = _get_asset(_KEY_CREDENTIAL, name)
    credential_dictionary = {}
    credential_dictionary['username'] = credential['Username']
    credential_dictionary['password'] = credential['Password']
    return credential_dictionary
def get_automation_connection(name):
    connection = _get_asset(_KEY_CONNECTION, name)
    return connection[_KEY_CONNECTION_FIELDS]
def get_automation_certificate(name):
    from OpenSSL import crypto
    certificate = _get_asset(_KEY_CERTIFICATE, name)
    pks12_cert = crypto.load_pkcs12(open(certificate[_KEY_CERTPATH], 'rb').read(),
                                    certificate[_KEY_PASSWORD])
    return crypto.PKCS12.export(pks12_cert)
| true | true |
f70f3b45752881894252cff7a788451d568e330f | 889 | py | Python | backoffice/transactions/migrations/0008_auto_20210323_1912.py | AlejandroUPC/pythonmicroservices | 9d42bd6dfd9847ad4a8e6029e808de927292c251 | [
"MIT"
] | null | null | null | backoffice/transactions/migrations/0008_auto_20210323_1912.py | AlejandroUPC/pythonmicroservices | 9d42bd6dfd9847ad4a8e6029e808de927292c251 | [
"MIT"
] | null | null | null | backoffice/transactions/migrations/0008_auto_20210323_1912.py | AlejandroUPC/pythonmicroservices | 9d42bd6dfd9847ad4a8e6029e808de927292c251 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-03-23 19:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.1.7) altering the three id fields.

    NOTE(review): each primary key gets a *constant* string default (e.g.
    '791425045985934') on a BigIntegerField -- presumably mirrored from the
    model definitions. A fixed default on a primary key means every row
    created without an explicit id collides; verify against the models
    before relying on these defaults. Migrations are history: fix the
    models and add a new migration rather than editing this one.
    """
    dependencies = [
        ('transactions', '0007_auto_20210323_1910'),
    ]
    operations = [
        migrations.AlterField(
            model_name='customer',
            name='customer_id',
            field=models.BigIntegerField(default='791425045985934', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='supermarket',
            name='supermarket_id',
            field=models.BigIntegerField(default='874067270903651', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='transaction_id',
            field=models.BigIntegerField(default='363109663162057', primary_key=True, serialize=False),
        ),
    ]
| 30.655172 | 103 | 0.629921 |
# NOTE(review): comment-stripped duplicate of the migration above (the
# dataset dump stores both content variants).
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('transactions', '0007_auto_20210323_1910'),
    ]
    operations = [
        migrations.AlterField(
            model_name='customer',
            name='customer_id',
            field=models.BigIntegerField(default='791425045985934', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='supermarket',
            name='supermarket_id',
            field=models.BigIntegerField(default='874067270903651', primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='transaction_id',
            field=models.BigIntegerField(default='363109663162057', primary_key=True, serialize=False),
        ),
    ]
| true | true |
f70f3ba8a6408ce64c89b41a346b915bd784f0d9 | 1,759 | py | Python | lists/gaz50k/map.py | openregister/local-authority-data | 68f499152ba799feae602e67c6e30ae0914f2230 | [
"MIT"
] | 8 | 2016-07-05T14:46:48.000Z | 2018-11-07T17:31:33.000Z | lists/gaz50k/map.py | psd/local-authority-data | 68f499152ba799feae602e67c6e30ae0914f2230 | [
"MIT"
] | 10 | 2016-10-04T15:58:14.000Z | 2018-04-30T11:39:00.000Z | lists/gaz50k/map.py | psd/local-authority-data | 68f499152ba799feae602e67c6e30ae0914f2230 | [
"MIT"
] | 5 | 2016-06-03T13:02:21.000Z | 2021-04-11T08:27:38.000Z | #!/usr/bin/env python3
import sys
import csv
"""
Field Name Full name Format Example
1 SEQ Sequence number Int (6) 86415
2 KM_REF Kilometre reference Char (6) ST5265
3 DEF_NAM Definitive name Char (60) Felton
4 TILE_REF Tile reference Char (4) ST46
5 LAT_DEG Latitude degrees Int (2) 51
6 LAT_MIN Latitude minutes Float (3.1) 23.1
7 LONG_DEG Longitude degrees Int (2) 2
8 LONG_MIN Longitude minutes Float (3.1) 41
9 NORTH Northings Int (7) 165500
10 EAST Eastings Int (7) 352500
11 GMT Greenwich Meridian Char (1) W
12 CO_CODE County code Char (2) NS
13 COUNTY County name Char (20) N Som
14 FULL_COUNTY Full county name Char (60) North Somerset
15 F_CODE Feature code Char (3) O
16 E_DATE Edit date Char (11) 01-MAR-1993
17 UPDATE_CO Update code Char (1) l
18 SHEET_1 Primary sheet no Int (3) 172
19 SHEET_2 Second sheet no Int (3) 182
20 SHEET_3 Third sheet no Int (3) 0
"""
# Index the gazetteer rows from stdin by county code, last occurrence wins.
county = {}
for record in csv.reader(sys.stdin, delimiter=':', quoting=csv.QUOTE_NONE):
    # Gazetteer fields 12-14 (0-based 11-13): CO_CODE, COUNTY, FULL_COUNTY.
    code = record[11]
    county[code] = {
        'gaz50k': code,
        'county': record[12],
        'name': record[13],
    }
# Emit a tab-separated table, header first.
fields = ['gaz50k', 'county', 'name']
print("\t".join(fields))
for code in county:
    values = [county[code][field] or "" for field in fields]
    print("\t".join(values))
| 39.977273 | 73 | 0.489483 |
# NOTE(review): comment/docstring-stripped duplicate of the gazetteer
# mapping script above (the dataset dump stores both content variants).
import sys
import csv
county = {}
for row in csv.reader(sys.stdin, delimiter=':', quoting=csv.QUOTE_NONE):
    county[row[11]] = {
        'gaz50k': row[11],
        'county': row[12],
        'name': row[13],
    }
fields = [ 'gaz50k', 'county', 'name' ]
print("\t".join(fields))
for code in county:
    print("\t".join([county[code][field] or "" for field in fields]))
| true | true |
f70f3bf39f608a55e8c1b01d43760cd70a34aa3d | 3,014 | py | Python | setup.py | Legogris/electrum-zcl | fcffa73f944c1ee8f89b0a40f3dabc5c5bbfd4ec | [
"MIT"
] | null | null | null | setup.py | Legogris/electrum-zcl | fcffa73f944c1ee8f89b0a40f3dabc5c5bbfd4ec | [
"MIT"
] | null | null | null | setup.py | Legogris/electrum-zcl | fcffa73f944c1ee8f89b0a40f3dabc5c5bbfd4ec | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# python setup.py sdist --format=zip,gztar
from setuptools import setup
import os
import sys
import platform
import imp
import argparse
version = imp.load_source('version', 'lib/version.py')
def readhere(path):
    """Return the full text of *path*, resolved relative to this file."""
    here = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(here, path), 'r') as fd:
        return fd.read()


def readreqs(path):
    """Parse a pip requirements file, skipping blanks, comments and -r includes."""
    stripped = (line.strip() for line in readhere(path).split('\n'))
    return [line for line in stripped
            if line and not line.startswith(('#', '-r'))]
# Runtime and test dependency lists come straight from the pip requirement files.
install_requires = readreqs('requirements.txt')
tests_requires = install_requires + readreqs('requirements_travis.txt')
if sys.version_info[:3] < (3, 4, 0):
    sys.exit("Error: Electrum requires Python version >= 3.4.0...")
data_files = []
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
    # Install the .desktop entry and icon; fall back to the per-user XDG
    # data dir when the system-wide share directory is not writable.
    parser = argparse.ArgumentParser()
    parser.add_argument('--root=', dest='root_path', metavar='dir', default='/')
    opts, _ = parser.parse_known_args(sys.argv[1:])
    usr_share = os.path.join(sys.prefix, "share")
    if not os.access(opts.root_path + usr_share, os.W_OK) and \
       not os.access(opts.root_path, os.W_OK):
        if 'XDG_DATA_HOME' in os.environ.keys():
            usr_share = os.environ['XDG_DATA_HOME']
        else:
            usr_share = os.path.expanduser('~/.local/share')
    data_files += [
        (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
        (os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])
    ]
setup(
    name="Electrum-ZCL",
    version=version.ELECTRUM_VERSION,
    install_requires=install_requires,
    tests_require=tests_requires,
    packages=[
        'electrum',
        'electrum_gui',
        'electrum_gui.qt',
        'electrum_plugins',
        'electrum_plugins.audio_modem',
        'electrum_plugins.cosigner_pool',
        'electrum_plugins.email_requests',
        'electrum_plugins.greenaddress_instant',
        'electrum_plugins.hw_wallet',
        'electrum_plugins.keepkey',
        'electrum_plugins.labels',
        'electrum_plugins.ledger',
        'electrum_plugins.trezor',
        'electrum_plugins.digitalbitbox',
        'electrum_plugins.trustedcoin',
        'electrum_plugins.virtualkeyboard',
    ],
    # Map package names onto their on-disk source directories.
    package_dir={
        'electrum': 'lib',
        'electrum_gui': 'gui',
        'electrum_plugins': 'plugins',
    },
    package_data={
        'electrum': [
            'servers.json',
            'servers_testnet.json',
            'currencies.json',
            'checkpoints.json',
            'checkpoints_testnet.json',
            'www/index.html',
            'wordlist/*.txt',
            'locale/*/LC_MESSAGES/electrum.mo',
        ]
    },
    scripts=['electrum-zcl'],
    data_files=data_files,
    description="Lightweight Zclassic Wallet",
    author="Zclassic CE",
    author_email="team@zclassic-ce.org",
    license="MIT Licence",
    url="https://zclassic-ce.org",
    long_description="""Lightweight Zclassic Wallet"""
)
| 30.14 | 80 | 0.626742 |
# NOTE(review): comment-stripped duplicate of the Electrum-ZCL setup script
# above (the dataset dump stores both content variants).
from setuptools import setup
import os
import sys
import platform
import imp
import argparse
version = imp.load_source('version', 'lib/version.py')
def readhere(path):
    here = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(here, path), 'r') as fd:
        return fd.read()
def readreqs(path):
    return [req for req in
            [line.strip() for line in readhere(path).split('\n')]
            if req and not req.startswith(('#', '-r'))]
install_requires = readreqs('requirements.txt')
tests_requires = install_requires + readreqs('requirements_travis.txt')
if sys.version_info[:3] < (3, 4, 0):
    sys.exit("Error: Electrum requires Python version >= 3.4.0...")
data_files = []
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
    parser = argparse.ArgumentParser()
    parser.add_argument('--root=', dest='root_path', metavar='dir', default='/')
    opts, _ = parser.parse_known_args(sys.argv[1:])
    usr_share = os.path.join(sys.prefix, "share")
    if not os.access(opts.root_path + usr_share, os.W_OK) and \
       not os.access(opts.root_path, os.W_OK):
        if 'XDG_DATA_HOME' in os.environ.keys():
            usr_share = os.environ['XDG_DATA_HOME']
        else:
            usr_share = os.path.expanduser('~/.local/share')
    data_files += [
        (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
        (os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])
    ]
setup(
    name="Electrum-ZCL",
    version=version.ELECTRUM_VERSION,
    install_requires=install_requires,
    tests_require=tests_requires,
    packages=[
        'electrum',
        'electrum_gui',
        'electrum_gui.qt',
        'electrum_plugins',
        'electrum_plugins.audio_modem',
        'electrum_plugins.cosigner_pool',
        'electrum_plugins.email_requests',
        'electrum_plugins.greenaddress_instant',
        'electrum_plugins.hw_wallet',
        'electrum_plugins.keepkey',
        'electrum_plugins.labels',
        'electrum_plugins.ledger',
        'electrum_plugins.trezor',
        'electrum_plugins.digitalbitbox',
        'electrum_plugins.trustedcoin',
        'electrum_plugins.virtualkeyboard',
    ],
    package_dir={
        'electrum': 'lib',
        'electrum_gui': 'gui',
        'electrum_plugins': 'plugins',
    },
    package_data={
        'electrum': [
            'servers.json',
            'servers_testnet.json',
            'currencies.json',
            'checkpoints.json',
            'checkpoints_testnet.json',
            'www/index.html',
            'wordlist/*.txt',
            'locale/*/LC_MESSAGES/electrum.mo',
        ]
    },
    scripts=['electrum-zcl'],
    data_files=data_files,
    description="Lightweight Zclassic Wallet",
    author="Zclassic CE",
    author_email="team@zclassic-ce.org",
    license="MIT Licence",
    url="https://zclassic-ce.org",
    long_description="""Lightweight Zclassic Wallet"""
)
| true | true |
f70f3ec667d100f339d203021a68fd599082e6c4 | 4,607 | py | Python | Neural_Network_Tensorflow.py | mdhasan8/Machine-Learning-in-Python | d66607d3003e8279e35cf176851f506aa833a9fe | [
"MIT"
] | null | null | null | Neural_Network_Tensorflow.py | mdhasan8/Machine-Learning-in-Python | d66607d3003e8279e35cf176851f506aa833a9fe | [
"MIT"
] | null | null | null | Neural_Network_Tensorflow.py | mdhasan8/Machine-Learning-in-Python | d66607d3003e8279e35cf176851f506aa833a9fe | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 16 22:30:11 2020
@author: Easin
"""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow.keras import Model, layers
import numpy as np
import matplotlib.pyplot as plt
# MNIST dataset parameters.
num_classes = 10 # total classes (0-9 digits).
num_features = 784 # data features (img shape: 28*28).
# Training parameters.
learning_rate = 0.1
training_steps = 2000
batch_size = 256
display_step = 100
# Network parameters.
n_hidden_1 = 128 # 1st layer number of neurons.
n_hidden_2 = 256 # 2nd layer number of neurons.
# Prepare MNIST data (note: this module trains at import time, script-style).
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Convert to float32.
x_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32)
# Flatten images to 1-D vector of 784 features (28*28).
x_train, x_test = x_train.reshape([-1, num_features]), x_test.reshape([-1, num_features])
# Normalize images value from [0, 255] to [0, 1].
x_train, x_test = x_train / 255., x_test / 255.
# Use tf.data API to shuffle and batch data; repeat() makes the stream infinite.
train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1)
# Create TF Model.
class NeuralNet(Model):
    """Two-hidden-layer dense MNIST classifier (784 -> 128 -> 256 -> 10)."""
    def __init__(self):
        super(NeuralNet, self).__init__()
        # ReLU on the hidden layers; the output layer produces raw logits.
        self.fc1 = layers.Dense(n_hidden_1, activation=tf.nn.relu)
        self.fc2 = layers.Dense(n_hidden_2, activation=tf.nn.relu)
        self.out = layers.Dense(num_classes)
    def call(self, x, is_training=False):
        """Forward pass: logits while training, softmax probabilities otherwise."""
        logits = self.out(self.fc2(self.fc1(x)))
        if is_training:
            # The cross-entropy loss applies softmax itself, so hand it logits.
            return logits
        return tf.nn.softmax(logits)
# Build the single module-level network instance used by training and eval below.
neural_net = NeuralNet()
# Cross-Entropy Loss.
# Note that this will apply 'softmax' to the logits.
def cross_entropy_loss(x, y):
    """Mean sparse softmax cross-entropy between logits *x* and labels *y*."""
    labels = tf.cast(y, tf.int64)  # the TF op requires int64 class labels
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                                 logits=x)
    # Average over the batch.
    return tf.reduce_mean(per_example)
# Accuracy metric.
def accuracy(y_pred, y_true):
    """Fraction of rows whose argmax prediction equals the true label."""
    hits = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))
    return tf.reduce_mean(tf.cast(hits, tf.float32), axis=-1)
# Plain stochastic gradient descent optimizer (learning_rate = 0.1).
optimizer = tf.optimizers.SGD(learning_rate)
# Optimization process.
def run_optimization(x, y):
    """One SGD step on the batch (x, y): forward, loss, gradients, update."""
    # GradientTape records the forward pass for automatic differentiation.
    with tf.GradientTape() as tape:
        predictions = neural_net(x, is_training=True)
        loss = cross_entropy_loss(predictions, y)
    variables = neural_net.trainable_variables
    grads = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(grads, variables))
# Run training for the given number of steps (executes at import time).
for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):
    # Run the optimization to update W and b values.
    run_optimization(batch_x, batch_y)
    if step % display_step == 0:
        # Progress metrics below are computed on the current *training* batch.
        pred = neural_net(batch_x, is_training=True)
        loss = cross_entropy_loss(pred, batch_y)
        acc = accuracy(pred, batch_y)
        print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc))
# Test model on validation set.
pred = neural_net(x_test, is_training=False)
print("Test Accuracy: %f" % accuracy(pred, y_test))
# Predict 5 images from validation set.
n_images = 5
test_images = x_test[:n_images]
predictions = neural_net(test_images)  # inference mode: softmax probabilities
# Display image and model prediction.
for i in range(n_images):
    plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray')
    plt.show()
    print("Model prediction: %i" % np.argmax(predictions.numpy()[i]))
| 33.875 | 90 | 0.676362 |
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow.keras import Model, layers
import numpy as np
import matplotlib.pyplot as plt
# MNIST task dimensions: 10 digit classes, 28*28 = 784 flattened pixels.
num_classes = 10
num_features = 784
# Training hyper-parameters.
learning_rate = 0.1
training_steps = 2000
batch_size = 256
display_step = 100
# Hidden-layer widths for the two fully connected layers.
n_hidden_1 = 128
n_hidden_2 = 256
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Flatten 28x28 images to 784-float vectors and scale pixel values to [0, 1].
x_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32)
x_train, x_test = x_train.reshape([-1, num_features]), x_test.reshape([-1, num_features])
x_train, x_test = x_train / 255., x_test / 255.
# Infinite shuffled batch pipeline for training.
train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1)
class NeuralNet(Model):
    """Two-hidden-layer fully connected MNIST classifier (n_hidden_1 -> n_hidden_2 -> num_classes)."""
    def __init__(self):
        super(NeuralNet, self).__init__()
        # First fully-connected hidden layer with ReLU activation.
        self.fc1 = layers.Dense(n_hidden_1, activation=tf.nn.relu)
        # Second fully-connected hidden layer with ReLU activation.
        self.fc2 = layers.Dense(n_hidden_2, activation=tf.nn.relu)
        # Output layer producing one raw logit per class.
        self.out = layers.Dense(num_classes)
    def call(self, x, is_training=False):
        """Forward pass; returns logits while training, class probabilities otherwise."""
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.out(x)
        if not is_training:
            # The sparse-cross-entropy loss expects raw logits, so softmax is
            # applied only at inference time.
            x = tf.nn.softmax(x)
        return x
neural_net = NeuralNet()
def cross_entropy_loss(x, y):
    """Mean sparse softmax cross-entropy; `x` are raw logits, `y` integer labels."""
    # Labels must be int64 for sparse_softmax_cross_entropy_with_logits.
    y = tf.cast(y, tf.int64)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=x)
    # Average the per-sample losses across the batch.
    return tf.reduce_mean(loss)
def accuracy(y_pred, y_true):
    """Mean 0/1 accuracy of arg-max predictions against integer labels."""
    predicted = tf.argmax(y_pred, 1)
    labels = tf.cast(y_true, tf.int64)
    matches = tf.cast(tf.equal(predicted, labels), tf.float32)
    return tf.reduce_mean(matches, axis=-1)
# Plain stochastic gradient descent optimizer.
optimizer = tf.optimizers.SGD(learning_rate)
def run_optimization(x, y):
    """Run one SGD step over a single batch (forward, loss, backward, update)."""
    # GradientTape records the forward pass for automatic differentiation.
    with tf.GradientTape() as g:
        pred = neural_net(x, is_training=True)
        loss = cross_entropy_loss(pred, y)
    trainable_variables = neural_net.trainable_variables
    gradients = g.gradient(loss, trainable_variables)
    optimizer.apply_gradients(zip(gradients, trainable_variables))
# Training loop; `step` starts at 1 so the display interval check lines up.
for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):
    run_optimization(batch_x, batch_y)
    if step % display_step == 0:
        # is_training=True keeps the outputs as raw logits, which is what
        # cross_entropy_loss expects; argmax-based accuracy is unaffected.
        pred = neural_net(batch_x, is_training=True)
        loss = cross_entropy_loss(pred, batch_y)
        acc = accuracy(pred, batch_y)
        print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc))
# Final evaluation on the held-out test set (softmax applied at inference).
pred = neural_net(x_test, is_training=False)
print("Test Accuracy: %f" % accuracy(pred, y_test))
# Visualize predictions for the first few test images.
n_images = 5
test_images = x_test[:n_images]
predictions = neural_net(test_images)
for i in range(n_images):
    plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray')
    plt.show()
    print("Model prediction: %i" % np.argmax(predictions.numpy()[i]))
f70f3ec9882d3b23e9f039de32639f2defccb7a5 | 18,141 | py | Python | python/ELASTICSEARCH_IN_VPC_ONLY/ELASTICSEARCH_IN_VPC_ONLY.py | docebo/aws-config-rules | 75f92bcad644bd71f19bbc15cf99e6d6de6b8227 | [
"CC0-1.0"
] | 1,295 | 2016-03-01T23:06:33.000Z | 2022-03-31T07:17:53.000Z | python/ELASTICSEARCH_IN_VPC_ONLY/ELASTICSEARCH_IN_VPC_ONLY.py | tied/aws-config-rules | 7c66e109c1225111d2ab8d1811d6e80dea0affcb | [
"CC0-1.0"
] | 287 | 2016-03-01T19:51:43.000Z | 2022-01-06T04:59:55.000Z | python/ELASTICSEARCH_IN_VPC_ONLY/ELASTICSEARCH_IN_VPC_ONLY.py | tied/aws-config-rules | 7c66e109c1225111d2ab8d1811d6e80dea0affcb | [
"CC0-1.0"
] | 744 | 2016-03-01T18:33:00.000Z | 2022-03-31T18:46:44.000Z | # Copyright 2017-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
'''
Description:
Check whether the ElasticSearch Domains are in VPC and not as a public endpoint.
Trigger:
Periodic
Reports on:
AWS::Elasticsearch::Domain
Feature:
In order to: protect my data from exposure
As: a Security Officer
I want: To ensure that all my ElasticSearch Domains are in VPC and not exposed as a public endpoint.
Rule Parameters:
None
Scenarios:
Scenario 1:
Given: No ElasticSearch Domain is present
Then: Return NOT_APPLICABLE on AWS::::Account
Scenario 2:
Given: At least one ElasticSearch Domain is present
And: No 'VPCOptions' key is present in the list of "DomainName" on DescribeElasticsearchDomains API
Then: Return NON_COMPLIANT on this Domain
Scenario 3:
Given: At least one ElasticSearch Domain is present
And: The 'VPCOptions' key is present in the list of "DomainName" on DescribeElasticsearchDomains API
Then: Return COMPLIANT on this Domain
'''
import json
import sys
import time
import datetime
import boto3
import botocore
try:
import liblogging
except ImportError:
pass
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::Elasticsearch::Domain'
# Set to True to get the lambda to assume the Role attached on the Config Service (useful for cross-account).
ASSUME_ROLE_MODE = True
# Other parameters (no change needed)
# Maximum lifetime of the assumed-role STS session, in seconds.
CONFIG_ROLE_TIMEOUT_SECONDS = 900
# Delay inserted between successive Elasticsearch API calls to avoid throttling.
PAUSE_TO_AVOID_THROTTLE_SECONDS = 4
#############
# Main Code #
#############
def get_all_domain_details(es_client, es_domains):
    """Describe every Elasticsearch domain, batching the API calls five names at a time.

    Keyword arguments:
    es_client -- boto3 'es' client
    es_domains -- list of {'DomainName': ...} dicts as returned by list_domain_names()

    Returns the concatenated DomainStatusList entries for all domains.
    """
    pending = [domain['DomainName'] for domain in es_domains]
    details = []
    while pending:
        # Pause before every call to avoid API throttling.
        time.sleep(PAUSE_TO_AVOID_THROTTLE_SECONDS)
        batch, pending = pending[:5], pending[5:]
        response = es_client.describe_elasticsearch_domains(DomainNames=batch)
        details.extend(response['DomainStatusList'])
    return details
def evaluate_compliance(event, configuration_item, valid_rule_parameters):
    """Report COMPLIANT for each Elasticsearch domain that has VPCOptions, NON_COMPLIANT otherwise.

    Returns a list of per-domain evaluations, or a single account-level
    NOT_APPLICABLE evaluation when no domain exists.
    """
    es_client = get_client('es', event)
    domains = es_client.list_domain_names()['DomainNames']
    if not domains:
        return build_evaluation(event['accountId'], 'NOT_APPLICABLE', event, resource_type='AWS::::Account')
    evaluation_list = [
        build_evaluation(
            details['DomainName'],
            'COMPLIANT' if 'VPCOptions' in details else 'NON_COMPLIANT',
            event)
        for details in get_all_domain_details(es_client, domains)
    ]
    if evaluation_list:
        return evaluation_list
    return build_evaluation(event['accountId'], 'NOT_APPLICABLE', event, resource_type='AWS::::Account')
def evaluate_parameters(rule_parameters):
    """Validate the rule parameters dictionary; raise ValueError if invalid.

    This rule takes no parameters, so the dictionary is returned unchanged.

    Keyword arguments:
    rule_parameters -- the Key/Value dictionary of the Config Rules parameters
    """
    return rule_parameters
####################
# Helper Functions #
####################
# Build an error to be displayed in the logs when the parameter is invalid.
def build_parameters_value_error_response(ex):
    """Wrap a ValueError from evaluate_parameters() into a Config error response.

    Keyword arguments:
    ex -- the ValueError instance; its text becomes the customer-facing message
    """
    # Positional call matching build_error_response's
    # (message, details, code, customer_message) signature.
    return build_error_response("Parameter value is invalid",
                                "An ValueError was raised during the validation of the Parameter value",
                                "InvalidParameterValueException",
                                str(ex))
# This gets the client after assuming the Config service role
# either in the same AWS account or cross-account.
def get_client(service, event):
    """Return the service boto client. It should be used instead of directly calling the client.

    Keyword arguments:
    service -- the service name used for calling the boto.client()
    event -- the event variable given in the lambda handler
    """
    # When ASSUME_ROLE_MODE is off, use the Lambda's own execution role.
    if not ASSUME_ROLE_MODE:
        return boto3.client(service)
    # Otherwise assume the Config service role from the event (cross-account support).
    credentials = get_assume_role_credentials(event["executionRoleArn"])
    return boto3.client(service, aws_access_key_id=credentials['AccessKeyId'],
                        aws_secret_access_key=credentials['SecretAccessKey'],
                        aws_session_token=credentials['SessionToken']
                       )
# This generate an evaluation for config
def build_evaluation(resource_id, compliance_type, event, resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
    """Form an evaluation as a dictionary. Usually suited to report on scheduled rules.

    Keyword arguments:
    resource_id -- the unique id of the resource to report
    compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE
    event -- the event variable given in the lambda handler
    resource_type -- the CloudFormation resource type (or AWS::::Account) to report on the rule
    annotation -- an annotation to be added to the evaluation (default None)
    """
    notification_time = json.loads(event['invokingEvent'])['notificationCreationTime']
    evaluation = {
        'ComplianceResourceType': resource_type,
        'ComplianceResourceId': resource_id,
        'ComplianceType': compliance_type,
        'OrderingTimestamp': str(notification_time),
    }
    if annotation:
        evaluation['Annotation'] = annotation
    return evaluation
def build_evaluation_from_config_item(configuration_item, compliance_type, annotation=None):
    """Form an evaluation as a dictionary. Usually suited to report on configuration change rules.

    Keyword arguments:
    configuration_item -- the configurationItem dictionary in the invokingEvent
    compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE
    annotation -- an annotation to be added to the evaluation (default None)
    """
    evaluation = {
        'ComplianceResourceType': configuration_item['resourceType'],
        'ComplianceResourceId': configuration_item['resourceId'],
        'ComplianceType': compliance_type,
        'OrderingTimestamp': configuration_item['configurationItemCaptureTime'],
    }
    if annotation:
        evaluation['Annotation'] = annotation
    return evaluation
####################
# Boilerplate Code #
####################
# Helper function used to validate input
def check_defined(reference, reference_name):
    """Return *reference* if it is truthy, otherwise raise with a readable message.

    Used to validate that required fields of the Lambda event are present.
    Deliberately raises the base Exception class: lambda_handler only catches
    ClientError and ValueError, so a missing field propagates to the platform.
    """
    if not reference:
        # Single formatted message instead of a 3-tuple of args, so str(exc)
        # reads "Error: X is not defined" rather than a tuple repr.
        raise Exception('Error: {} is not defined'.format(reference_name))
    return reference
# Check whether the message is OversizedConfigurationItemChangeNotification or not
def is_oversized_changed_notification(message_type):
    """True when AWS Config sent an OversizedConfigurationItemChangeNotification."""
    validated = check_defined(message_type, 'messageType')
    return validated == 'OversizedConfigurationItemChangeNotification'
# Check whether the message is a ScheduledNotification or not.
def is_scheduled_notification(message_type):
    """True when AWS Config sent a periodic ScheduledNotification."""
    validated = check_defined(message_type, 'messageType')
    return validated == 'ScheduledNotification'
# Get configurationItem using getResourceConfigHistory API
# in case of OversizedConfigurationItemChangeNotification
def get_configuration(resource_type, resource_id, configuration_capture_time):
    """Fetch the latest configurationItem for a resource via get_resource_config_history.

    Used for OversizedConfigurationItemChangeNotification messages, where the
    configuration item is not embedded in the event itself.
    """
    result = AWS_CONFIG_CLIENT.get_resource_config_history(
        resourceType=resource_type,
        resourceId=resource_id,
        laterTime=configuration_capture_time,
        limit=1)
    # limit=1 with laterTime returns the single item captured at that time.
    configuration_item = result['configurationItems'][0]
    return convert_api_configuration(configuration_item)
# Convert from the API model to the original invocation model
def convert_api_configuration(configuration_item):
    """Convert a get_resource_config_history item to the invocation-event shape, in place.

    Stringifies datetime values, mirrors API field names onto the event field
    names, parses the JSON 'configuration' payload and names each relationship.
    Returns the same (mutated) dictionary.
    """
    for key, value in configuration_item.items():
        if isinstance(value, datetime.datetime):
            configuration_item[key] = str(value)
    # Event-model field name -> API field name.
    aliases = {
        'awsAccountId': 'accountId',
        'ARN': 'arn',
        'configurationStateMd5Hash': 'configurationItemMD5Hash',
        'configurationItemVersion': 'version',
    }
    for target, source in aliases.items():
        configuration_item[target] = configuration_item[source]
    configuration_item['configuration'] = json.loads(configuration_item['configuration'])
    for relationship in configuration_item.get('relationships', []):
        relationship['name'] = relationship['relationshipName']
    return configuration_item
# Based on the type of message get the configuration item
# either from configurationItem in the invoking event
# or using the getResourceConfigHistiry API in getConfiguration function.
def get_configuration_item(invoking_event):
    """Extract the configurationItem from the invoking event.

    Fetches it through the get_resource_config_history API for oversized
    notifications, returns None for scheduled notifications, and reads it
    directly from the event otherwise.
    """
    check_defined(invoking_event, 'invokingEvent')
    if is_oversized_changed_notification(invoking_event['messageType']):
        # NOTE(review): the snake_case key 'configuration_item_summary' is
        # inconsistent with the camelCase keys used elsewhere in the event --
        # verify against a real oversized-notification payload.
        configuration_item_summary = check_defined(invoking_event['configuration_item_summary'], 'configurationItemSummary')
        return get_configuration(configuration_item_summary['resourceType'], configuration_item_summary['resourceId'], configuration_item_summary['configurationItemCaptureTime'])
    if is_scheduled_notification(invoking_event['messageType']):
        return None
    return check_defined(invoking_event['configurationItem'], 'configurationItem')
# Check whether the resource has been deleted. If it has, then the evaluation is unnecessary.
def is_applicable(configuration_item, event):
    """Decide whether the configuration item still needs to be evaluated.

    Returns True (evaluate) when the item or event is missing, or when the
    resource is active ('OK' / 'ResourceDiscovered') and still in scope.
    Deleted or out-of-scope resources return False so the caller reports
    them as NOT_APPLICABLE.
    """
    try:
        check_defined(configuration_item, 'configurationItem')
        check_defined(event, 'event')
    except Exception:  # narrowed from bare except: keep best-effort, but don't swallow SystemExit/KeyboardInterrupt
        return True
    status = configuration_item['configurationItemStatus']
    event_left_scope = event['eventLeftScope']
    if status == 'ResourceDeleted':
        print("Resource Deleted, setting Compliance Status to NOT_APPLICABLE.")
    return status in ('OK', 'ResourceDiscovered') and not event_left_scope
def get_assume_role_credentials(role_arn):
    """Assume the given IAM role via STS and return its temporary credentials.

    On ClientError the message (and, for non-AccessDenied errors, the code) is
    scrubbed before re-raising so internal account details do not leak into
    customer-visible output.
    """
    sts_client = boto3.client('sts')
    try:
        assume_role_response = sts_client.assume_role(RoleArn=role_arn,
                                                      RoleSessionName="configLambdaExecution",
                                                      DurationSeconds=CONFIG_ROLE_TIMEOUT_SECONDS)
        if 'liblogging' in sys.modules:
            liblogging.logSession(role_arn, assume_role_response)
        return assume_role_response['Credentials']
    except botocore.exceptions.ClientError as ex:
        # Scrub error message for any internal account info leaks
        print(str(ex))
        if 'AccessDenied' in ex.response['Error']['Code']:
            ex.response['Error']['Message'] = "AWS Config does not have permission to assume the IAM role."
        else:
            ex.response['Error']['Message'] = "InternalError"
            ex.response['Error']['Code'] = "InternalError"
        raise ex
# This removes older evaluation (usually useful for periodic rule not reporting on AWS::::Account).
def clean_up_old_evaluations(latest_evaluations, event):
    """Merge latest evaluations with NOT_APPLICABLE markers for stale resources.

    Pages through the rule's previous COMPLIANT/NON_COMPLIANT results and, for
    every resource that no longer appears in latest_evaluations, prepends a
    NOT_APPLICABLE evaluation so AWS Config drops the stale entry.
    """
    old_eval_list = []
    response = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule(
        ConfigRuleName=event['configRuleName'],
        ComplianceTypes=['COMPLIANT', 'NON_COMPLIANT'],
        Limit=100)
    while True:
        old_eval_list.extend(response['EvaluationResults'])
        if 'NextToken' not in response:
            break
        response = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule(
            ConfigRuleName=event['configRuleName'],
            ComplianceTypes=['COMPLIANT', 'NON_COMPLIANT'],
            Limit=100,
            NextToken=response['NextToken'])
    # Set lookup replaces the original O(old * latest) nested scan.
    latest_resource_ids = {evaluation['ComplianceResourceId'] for evaluation in latest_evaluations}
    cleaned_evaluations = []
    for old_result in old_eval_list:
        resource_id = old_result['EvaluationResultIdentifier']['EvaluationResultQualifier']['ResourceId']
        if resource_id not in latest_resource_ids:
            cleaned_evaluations.append(build_evaluation(resource_id, "NOT_APPLICABLE", event))
    return cleaned_evaluations + latest_evaluations
def lambda_handler(event, context):
    """AWS Config custom-rule entry point.

    Parses the invoking event, evaluates compliance, normalizes the result
    (falsy, string, dict, or list of evaluations) into put_evaluations calls,
    and returns the evaluations list (consumed directly by RDK tests).
    """
    if 'liblogging' in sys.modules:
        liblogging.logEvent(event)
    global AWS_CONFIG_CLIENT
    #print(event)
    check_defined(event, 'event')
    invoking_event = json.loads(event['invokingEvent'])
    rule_parameters = {}
    if 'ruleParameters' in event:
        rule_parameters = json.loads(event['ruleParameters'])
    try:
        valid_rule_parameters = evaluate_parameters(rule_parameters)
    except ValueError as ex:
        return build_parameters_value_error_response(ex)
    try:
        AWS_CONFIG_CLIENT = get_client('config', event)
        if invoking_event['messageType'] in ['ConfigurationItemChangeNotification', 'ScheduledNotification', 'OversizedConfigurationItemChangeNotification']:
            configuration_item = get_configuration_item(invoking_event)
            if is_applicable(configuration_item, event):
                compliance_result = evaluate_compliance(event, configuration_item, valid_rule_parameters)
            else:
                compliance_result = "NOT_APPLICABLE"
        else:
            return build_internal_error_response('Unexpected message type', str(invoking_event))
    except botocore.exceptions.ClientError as ex:
        if is_internal_error(ex):
            return build_internal_error_response("Unexpected error while completing API request", str(ex))
        return build_error_response("Customer error while making API request", str(ex), ex.response['Error']['Code'], ex.response['Error']['Message'])
    except ValueError as ex:
        return build_internal_error_response(str(ex), str(ex))
    # Normalize the possible shapes of compliance_result into `evaluations`.
    evaluations = []
    latest_evaluations = []
    if not compliance_result:
        latest_evaluations.append(build_evaluation(event['accountId'], "NOT_APPLICABLE", event, resource_type='AWS::::Account'))
        evaluations = clean_up_old_evaluations(latest_evaluations, event)
    elif isinstance(compliance_result, str):
        if configuration_item:
            evaluations.append(build_evaluation_from_config_item(configuration_item, compliance_result))
        else:
            evaluations.append(build_evaluation(event['accountId'], compliance_result, event, resource_type=DEFAULT_RESOURCE_TYPE))
    elif isinstance(compliance_result, list):
        # Keep only evaluations that carry all required fields.
        for evaluation in compliance_result:
            missing_fields = False
            for field in ('ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp'):
                if field not in evaluation:
                    print("Missing " + field + " from custom evaluation.")
                    missing_fields = True
            if not missing_fields:
                latest_evaluations.append(evaluation)
        evaluations = clean_up_old_evaluations(latest_evaluations, event)
    elif isinstance(compliance_result, dict):
        missing_fields = False
        for field in ('ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp'):
            if field not in compliance_result:
                print("Missing " + field + " from custom evaluation.")
                missing_fields = True
        if not missing_fields:
            evaluations.append(compliance_result)
    else:
        evaluations.append(build_evaluation_from_config_item(configuration_item, 'NOT_APPLICABLE'))
    # Put together the request that reports the evaluation status
    result_token = event['resultToken']
    test_mode = False
    if result_token == 'TESTMODE':
        # Used solely for RDK test to skip actual put_evaluation API call
        test_mode = True
    # Invoke the Config API to report the result of the evaluation,
    # chunked 100 evaluations per put_evaluations call.
    evaluation_copy = []
    evaluation_copy = evaluations[:]
    while evaluation_copy:
        AWS_CONFIG_CLIENT.put_evaluations(Evaluations=evaluation_copy[:100], ResultToken=result_token, TestMode=test_mode)
        del evaluation_copy[:100]
    # Used solely for RDK test to be able to test Lambda function
    return evaluations
def is_internal_error(exception):
    """True when the failure should be reported as an internal error.

    Anything that is not a botocore ClientError, plus ClientErrors whose code
    starts with '5' or contains 'InternalError'/'ServiceError'.
    """
    return ((not isinstance(exception, botocore.exceptions.ClientError)) or exception.response['Error']['Code'].startswith('5')
            or 'InternalError' in exception.response['Error']['Code'] or 'ServiceError' in exception.response['Error']['Code'])
def build_internal_error_response(internal_error_message, internal_error_details=None):
    """Shorthand for an error response whose customer-facing fields are 'InternalError'."""
    return build_error_response(internal_error_message,
                                internal_error_details,
                                customer_error_code='InternalError',
                                customer_error_message='InternalError')
def build_error_response(internal_error_message, internal_error_details=None, customer_error_code=None, customer_error_message=None):
    """Assemble (and log) the error payload returned to AWS Config on failure."""
    response = dict(
        internalErrorMessage=internal_error_message,
        internalErrorDetails=internal_error_details,
        customerErrorMessage=customer_error_message,
        customerErrorCode=customer_error_code,
    )
    print(response)
    return response
| 43.295943 | 178 | 0.72179 |
import json
import sys
import time
import datetime
import boto3
import botocore
try:
import liblogging
except ImportError:
pass
pend(es_domain['DomainName'])
while es_domains_names_only:
time.sleep(PAUSE_TO_AVOID_THROTTLE_SECONDS)
domain_details = es_client.describe_elasticsearch_domains(DomainNames=es_domains_names_only[:5])['DomainStatusList']
es_domain_list_details += domain_details
del es_domains_names_only[:5]
return es_domain_list_details
def evaluate_compliance(event, configuration_item, valid_rule_parameters):
es_client = get_client('es', event)
es_domain_list = es_client.list_domain_names()['DomainNames']
if not es_domain_list:
return build_evaluation(event['accountId'], 'NOT_APPLICABLE', event, resource_type='AWS::::Account')
es_domain_list_details = get_all_domain_details(es_client, es_domain_list)
evaluation_list = []
for es_domain_details in es_domain_list_details:
if 'VPCOptions' not in es_domain_details:
compliance_type = 'NON_COMPLIANT'
else:
compliance_type = 'COMPLIANT'
evaluation_list.append(build_evaluation(es_domain_details['DomainName'], compliance_type, event))
if evaluation_list:
return evaluation_list
return build_evaluation(event['accountId'], 'NOT_APPLICABLE', event, resource_type='AWS::::Account')
def evaluate_parameters(rule_parameters):
valid_rule_parameters = rule_parameters
return valid_rule_parameters
customer_error_message=str(ex))
def get_client(service, event):
if not ASSUME_ROLE_MODE:
return boto3.client(service)
credentials = get_assume_role_credentials(event["executionRoleArn"])
return boto3.client(service, aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['SessionToken']
)
def build_evaluation(resource_id, compliance_type, event, resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
eval_cc = {}
if annotation:
eval_cc['Annotation'] = annotation
eval_cc['ComplianceResourceType'] = resource_type
eval_cc['ComplianceResourceId'] = resource_id
eval_cc['ComplianceType'] = compliance_type
eval_cc['OrderingTimestamp'] = str(json.loads(event['invokingEvent'])['notificationCreationTime'])
return eval_cc
def build_evaluation_from_config_item(configuration_item, compliance_type, annotation=None):
eval_ci = {}
if annotation:
eval_ci['Annotation'] = annotation
eval_ci['ComplianceResourceType'] = configuration_item['resourceType']
eval_ci['ComplianceResourceId'] = configuration_item['resourceId']
eval_ci['ComplianceType'] = compliance_type
eval_ci['OrderingTimestamp'] = configuration_item['configurationItemCaptureTime']
return eval_ci
pe):
check_defined(message_type, 'messageType')
return message_type == 'ScheduledNotification'
def get_configuration(resource_type, resource_id, configuration_capture_time):
result = AWS_CONFIG_CLIENT.get_resource_config_history(
resourceType=resource_type,
resourceId=resource_id,
laterTime=configuration_capture_time,
limit=1)
configuration_item = result['configurationItems'][0]
return convert_api_configuration(configuration_item)
def convert_api_configuration(configuration_item):
for k, v in configuration_item.items():
if isinstance(v, datetime.datetime):
configuration_item[k] = str(v)
configuration_item['awsAccountId'] = configuration_item['accountId']
configuration_item['ARN'] = configuration_item['arn']
configuration_item['configurationStateMd5Hash'] = configuration_item['configurationItemMD5Hash']
configuration_item['configurationItemVersion'] = configuration_item['version']
configuration_item['configuration'] = json.loads(configuration_item['configuration'])
if 'relationships' in configuration_item:
for i in range(len(configuration_item['relationships'])):
configuration_item['relationships'][i]['name'] = configuration_item['relationships'][i]['relationshipName']
return configuration_item
def get_configuration_item(invoking_event):
check_defined(invoking_event, 'invokingEvent')
if is_oversized_changed_notification(invoking_event['messageType']):
configuration_item_summary = check_defined(invoking_event['configuration_item_summary'], 'configurationItemSummary')
return get_configuration(configuration_item_summary['resourceType'], configuration_item_summary['resourceId'], configuration_item_summary['configurationItemCaptureTime'])
if is_scheduled_notification(invoking_event['messageType']):
return None
return check_defined(invoking_event['configurationItem'], 'configurationItem')
def is_applicable(configuration_item, event):
try:
check_defined(configuration_item, 'configurationItem')
check_defined(event, 'event')
except:
return True
status = configuration_item['configurationItemStatus']
event_left_scope = event['eventLeftScope']
if status == 'ResourceDeleted':
print("Resource Deleted, setting Compliance Status to NOT_APPLICABLE.")
return status in ('OK', 'ResourceDiscovered') and not event_left_scope
def get_assume_role_credentials(role_arn):
sts_client = boto3.client('sts')
try:
assume_role_response = sts_client.assume_role(RoleArn=role_arn,
RoleSessionName="configLambdaExecution",
DurationSeconds=CONFIG_ROLE_TIMEOUT_SECONDS)
if 'liblogging' in sys.modules:
liblogging.logSession(role_arn, assume_role_response)
return assume_role_response['Credentials']
except botocore.exceptions.ClientError as ex:
print(str(ex))
if 'AccessDenied' in ex.response['Error']['Code']:
ex.response['Error']['Message'] = "AWS Config does not have permission to assume the IAM role."
else:
ex.response['Error']['Message'] = "InternalError"
ex.response['Error']['Code'] = "InternalError"
raise ex
def clean_up_old_evaluations(latest_evaluations, event):
cleaned_evaluations = []
old_eval = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule(
ConfigRuleName=event['configRuleName'],
ComplianceTypes=['COMPLIANT', 'NON_COMPLIANT'],
Limit=100)
old_eval_list = []
while True:
for old_result in old_eval['EvaluationResults']:
old_eval_list.append(old_result)
if 'NextToken' in old_eval:
next_token = old_eval['NextToken']
old_eval = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule(
ConfigRuleName=event['configRuleName'],
ComplianceTypes=['COMPLIANT', 'NON_COMPLIANT'],
Limit=100,
NextToken=next_token)
else:
break
for old_eval in old_eval_list:
old_resource_id = old_eval['EvaluationResultIdentifier']['EvaluationResultQualifier']['ResourceId']
newer_founded = False
for latest_eval in latest_evaluations:
if old_resource_id == latest_eval['ComplianceResourceId']:
newer_founded = True
if not newer_founded:
cleaned_evaluations.append(build_evaluation(old_resource_id, "NOT_APPLICABLE", event))
return cleaned_evaluations + latest_evaluations
def lambda_handler(event, context):
if 'liblogging' in sys.modules:
liblogging.logEvent(event)
global AWS_CONFIG_CLIENT
check_defined(event, 'event')
invoking_event = json.loads(event['invokingEvent'])
rule_parameters = {}
if 'ruleParameters' in event:
rule_parameters = json.loads(event['ruleParameters'])
try:
valid_rule_parameters = evaluate_parameters(rule_parameters)
except ValueError as ex:
return build_parameters_value_error_response(ex)
try:
AWS_CONFIG_CLIENT = get_client('config', event)
if invoking_event['messageType'] in ['ConfigurationItemChangeNotification', 'ScheduledNotification', 'OversizedConfigurationItemChangeNotification']:
configuration_item = get_configuration_item(invoking_event)
if is_applicable(configuration_item, event):
compliance_result = evaluate_compliance(event, configuration_item, valid_rule_parameters)
else:
compliance_result = "NOT_APPLICABLE"
else:
return build_internal_error_response('Unexpected message type', str(invoking_event))
except botocore.exceptions.ClientError as ex:
if is_internal_error(ex):
return build_internal_error_response("Unexpected error while completing API request", str(ex))
return build_error_response("Customer error while making API request", str(ex), ex.response['Error']['Code'], ex.response['Error']['Message'])
except ValueError as ex:
return build_internal_error_response(str(ex), str(ex))
evaluations = []
latest_evaluations = []
if not compliance_result:
latest_evaluations.append(build_evaluation(event['accountId'], "NOT_APPLICABLE", event, resource_type='AWS::::Account'))
evaluations = clean_up_old_evaluations(latest_evaluations, event)
elif isinstance(compliance_result, str):
if configuration_item:
evaluations.append(build_evaluation_from_config_item(configuration_item, compliance_result))
else:
evaluations.append(build_evaluation(event['accountId'], compliance_result, event, resource_type=DEFAULT_RESOURCE_TYPE))
elif isinstance(compliance_result, list):
for evaluation in compliance_result:
missing_fields = False
for field in ('ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp'):
if field not in evaluation:
print("Missing " + field + " from custom evaluation.")
missing_fields = True
if not missing_fields:
latest_evaluations.append(evaluation)
evaluations = clean_up_old_evaluations(latest_evaluations, event)
elif isinstance(compliance_result, dict):
missing_fields = False
for field in ('ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp'):
if field not in compliance_result:
print("Missing " + field + " from custom evaluation.")
missing_fields = True
if not missing_fields:
evaluations.append(compliance_result)
else:
evaluations.append(build_evaluation_from_config_item(configuration_item, 'NOT_APPLICABLE'))
result_token = event['resultToken']
test_mode = False
if result_token == 'TESTMODE':
test_mode = True
evaluation_copy = []
evaluation_copy = evaluations[:]
while evaluation_copy:
AWS_CONFIG_CLIENT.put_evaluations(Evaluations=evaluation_copy[:100], ResultToken=result_token, TestMode=test_mode)
del evaluation_copy[:100]
return evaluations
def is_internal_error(exception):
return ((not isinstance(exception, botocore.exceptions.ClientError)) or exception.response['Error']['Code'].startswith('5')
or 'InternalError' in exception.response['Error']['Code'] or 'ServiceError' in exception.response['Error']['Code'])
def build_internal_error_response(internal_error_message, internal_error_details=None):
return build_error_response(internal_error_message, internal_error_details, 'InternalError', 'InternalError')
def build_error_response(internal_error_message, internal_error_details=None, customer_error_code=None, customer_error_message=None):
error_response = {
'internalErrorMessage': internal_error_message,
'internalErrorDetails': internal_error_details,
'customerErrorMessage': customer_error_message,
'customerErrorCode': customer_error_code
}
print(error_response)
return error_response
| true | true |
f70f3ed795d311d796b3c5c3ebf880041a890b8c | 3,232 | py | Python | fuse_examples/tests/test_classification_knight.py | alexgo1/fuse-med-ml | 928375828ff321d2bf7b2084389e34e1db0682e9 | [
"Apache-2.0"
] | null | null | null | fuse_examples/tests/test_classification_knight.py | alexgo1/fuse-med-ml | 928375828ff321d2bf7b2084389e34e1db0682e9 | [
"Apache-2.0"
] | null | null | null | fuse_examples/tests/test_classification_knight.py | alexgo1/fuse-med-ml | 928375828ff321d2bf7b2084389e34e1db0682e9 | [
"Apache-2.0"
] | null | null | null | """
(C) Copyright 2021 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on June 30, 2021
"""
import pathlib
import shutil
import tempfile
import unittest
import os
from fuse.utils.file_io.file_io import create_dir
import wget
from fuse_examples.classification.knight.eval.eval import eval
from fuse_examples.classification.knight.make_targets_file import make_targets_file
import fuse_examples.classification.knight.baseline.fuse_baseline as baseline
class KnightTestTestCase(unittest.TestCase):
    """Integration-style tests for the KNIGHT challenge example code (eval, targets file, baseline)."""
    def setUp(self):
        # Fresh scratch directory per test; removed in tearDown.
        self.root = tempfile.mkdtemp()
    def test_eval(self):
        """Run the challenge evaluation script on the bundled example prediction files."""
        dir_path = pathlib.Path(__file__).parent.resolve()
        target_filename = os.path.join(dir_path, "../classification/knight/eval/example/example_targets.csv")
        task1_prediction_filename = os.path.join(dir_path, "../classification/knight/eval/example/example_task1_predictions.csv")
        task2_prediction_filename = os.path.join(dir_path, "../classification/knight/eval/example/example_task2_predictions.csv")
        eval(target_filename=target_filename, task1_prediction_filename=task1_prediction_filename, task2_prediction_filename=task2_prediction_filename, output_dir=self.root)
    def test_make_targets(self):
        """Download the public knight.json and build a validation-targets CSV from it."""
        dir_path = pathlib.Path(__file__).parent.resolve()
        data_path = os.path.join(self.root, "data")
        cache_path = os.path.join(self.root, "cache")
        split = os.path.join(dir_path, "../classification/knight/baseline/splits_final.pkl")
        output_filename = os.path.join(self.root, "output/validation_targets.csv")
        create_dir(os.path.join(data_path, "knight", "data"))
        create_dir(os.path.dirname(output_filename))
        # NOTE: requires network access to raw.github.com.
        wget.download("https://raw.github.com/neheller/KNIGHT/main/knight/data/knight.json", os.path.join(data_path, "knight", "data"))
        make_targets_file(data_path=data_path, cache_path=cache_path, split=split, output_filename=output_filename)
    @unittest.skip("Not ready yet")
    # TODOs before enabling this test (KNIGHT data setup):
    # 1. Set 'KNIGHT_DATA' ahead of the run (and not in the test).
    # 2. Add code that skips the test if this variable wasn't set.
    # 3. Modify main() to support overriding the arguments and override number of epochs to 2 (and maybe number of samples).
    # 4. Use and test make predictions (inference script).
    def test_train(self):
        """Run the baseline training end-to-end (currently skipped; needs local KNIGHT data)."""
        os.environ['KNIGHT_DATA'] = "/projects/msieve/MedicalSieve/PatientData/KNIGHT"
        os.environ['KNIGHT_CACHE'] = os.path.join(self.root, "train", "cache")
        os.environ['KNIGHT_RESULTS'] = os.path.join(self.root, "train", "results")
        baseline.main()
    def tearDown(self):
        # Delete temporary directories
        shutil.rmtree(self.root)
# Allow running this test module directly with ``python <this file>``.
if __name__ == '__main__':
    unittest.main()
import pathlib
import shutil
import tempfile
import unittest
import os
from fuse.utils.file_io.file_io import create_dir
import wget
from fuse_examples.classification.knight.eval.eval import eval
from fuse_examples.classification.knight.make_targets_file import make_targets_file
import fuse_examples.classification.knight.baseline.fuse_baseline as baseline
class KnightTestTestCase(unittest.TestCase):
    """Smoke tests for the KNIGHT challenge example scripts (eval, targets file, training)."""

    def setUp(self):
        # Fresh scratch directory per test; removed again in tearDown.
        self.root = tempfile.mkdtemp()

    def test_eval(self):
        # Run the evaluation script against the bundled example target/prediction CSVs.
        dir_path = pathlib.Path(__file__).parent.resolve()
        target_filename = os.path.join(dir_path, "../classification/knight/eval/example/example_targets.csv")
        task1_prediction_filename = os.path.join(dir_path, "../classification/knight/eval/example/example_task1_predictions.csv")
        task2_prediction_filename = os.path.join(dir_path, "../classification/knight/eval/example/example_task2_predictions.csv")
        eval(target_filename=target_filename, task1_prediction_filename=task1_prediction_filename, task2_prediction_filename=task2_prediction_filename, output_dir=self.root)

    def test_make_targets(self):
        # Download the public knight.json metadata and build a validation-targets CSV from it.
        dir_path = pathlib.Path(__file__).parent.resolve()
        data_path = os.path.join(self.root, "data")
        cache_path = os.path.join(self.root, "cache")
        split = os.path.join(dir_path, "../classification/knight/baseline/splits_final.pkl")
        output_filename = os.path.join(self.root, "output/validation_targets.csv")
        create_dir(os.path.join(data_path, "knight", "data"))
        create_dir(os.path.dirname(output_filename))
        wget.download("https://raw.github.com/neheller/KNIGHT/main/knight/data/knight.json", os.path.join(data_path, "knight", "data"))
        make_targets_file(data_path=data_path, cache_path=cache_path, split=split, output_filename=output_filename)

    @unittest.skip("Not ready yet")
    # 2. Modify main() to support overriding the arguments and override number of epochs to 2 (and maybe number of samples)
    # 3. Use and test make predictions (inference script)
    def test_train(self):
        # Requires the full KNIGHT dataset on disk; the env vars point the baseline at it.
        os.environ['KNIGHT_DATA'] = "/projects/msieve/MedicalSieve/PatientData/KNIGHT"
        os.environ['KNIGHT_CACHE'] = os.path.join(self.root, "train", "cache")
        os.environ['KNIGHT_RESULTS'] = os.path.join(self.root, "train", "results")
        baseline.main()

    def tearDown(self):
        # Delete temporary directories
        shutil.rmtree(self.root)
# Allow running this test module directly with ``python <this file>``.
if __name__ == '__main__':
    unittest.main()
f70f40aab05c3c3b88ea6a9589c157f02f828ff7 | 5,922 | py | Python | libs/DatabaseConnection.py | hackerthinktank/RootTheBox | 59d1ee0088b95214702efc47be437b6e770b50a2 | [
"Apache-2.0"
] | 2 | 2021-05-18T12:20:27.000Z | 2021-05-18T13:33:58.000Z | libs/DatabaseConnection.py | hackerthinktank/RootTheBox | 59d1ee0088b95214702efc47be437b6e770b50a2 | [
"Apache-2.0"
] | null | null | null | libs/DatabaseConnection.py | hackerthinktank/RootTheBox | 59d1ee0088b95214702efc47be437b6e770b50a2 | [
"Apache-2.0"
] | 1 | 2021-05-18T12:20:29.000Z | 2021-05-18T12:20:29.000Z | # -*- coding: utf-8 -*-
"""
Created on Sep 20, 2012
@author: moloch
Copyright 2012 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=unused-wildcard-import,unused-variable
import os
import logging
import sys
import getpass
import codecs
from builtins import object
from libs.ConsoleColors import *
try:
from urllib.parse import quote, quote_plus
except ImportError:
from urllib import quote, quote_plus
from sqlalchemy import create_engine
from tornado.options import options
class DatabaseConnection(object):
    """Assembles the SQLAlchemy connection string for the configured dialect
    (sqlite, postgresql or mysql) and sanity-checks that a connection can
    actually be established before handing the string back (see ``__str__``).
    """

    def __init__(
        self,
        database,
        hostname="",
        port="",
        username="",
        password="",
        dialect="",
        ssl_ca="",
    ):
        # NOTE(review): ``port`` is stored but never used when the connection
        # string is built below - confirm whether that is intentional.
        self.database = database
        self.hostname = hostname
        self.port = port
        self.username = username
        self.password = password
        self.dialect = dialect
        self.ssl_ca = ssl_ca

    def __str__(self):
        """Construct (and connection-test) the database connection string."""
        dialect = self.dialect
        if dialect == "sqlite":
            connection_string = self._sqlite()
        elif dialect.startswith("postgres"):
            connection_string = self._postgresql()
        elif dialect == "mysql":
            connection_string = self._mysql()
        else:
            raise ValueError("Database dialect not supported")
        self._test_connection(connection_string)
        return connection_string

    def _postgresql(self):
        """Build the Postgresql connection string.

        There is no built-in driver support, so the 3rd party 'pypostgresql'
        package must be importable; the process exits when it is not, or when
        no connection can be established.
        """
        logging.debug("Configured to use Postgresql for a database")
        try:
            import pypostgresql
        except ImportError:
            print(WARN + "You must install 'pypostgresql'")
            os._exit(1)
        host, name, user, password = self._db_credentials()
        connection_string = "postgresql+pypostgresql://{0}:{1}@{2}/{3}".format(
            user, password, host, name
        )
        if self._test_connection(connection_string):
            return connection_string
        logging.fatal("Cannot connect to database with any available driver")
        os._exit(1)

    def _sqlite(self):
        """SQLite connection string; the db file lives in the cwd (or memory)."""
        logging.debug("Configured to use SQLite for a database")
        file_name = self.database
        if not file_name:
            file_name = "rtb"
        if not file_name.endswith(".db"):
            file_name = "{0}.db".format(file_name)
        return "sqlite:///" + file_name

    def _mysql(self):
        """Build the MySQL connection string, probing the installed drivers in
        order of preference; exits the process when none can connect."""
        logging.debug("Configured to use MySQL for a database")
        server, name, user, password = self._db_credentials()
        base = "{0}:{1}@{2}/{3}?charset={4}".format(
            user, password, server, name, "utf8mb4"
        )
        if self.ssl_ca != "":
            base = base + "&ssl_ca=" + self.ssl_ca
        # 'utf8mb4' is not a codec name Python knows; alias it to utf8 so the
        # driver can look it up.
        codecs.register(
            lambda codec_name: codecs.lookup("utf8") if codec_name == "utf8mb4" else None
        )
        driver_prefixes = (
            "mysql://",
            "mysql+mysqldb://",
            "mysql+pymysql://",
            "mysql+mysqlconnector://",
        )
        for prefix in driver_prefixes:
            candidate = prefix + base
            if self._test_connection(candidate):
                return candidate
        logging.fatal(
            "Cannot connect to database with any available driver. Verify correct username & password in rootthebox.cfg and db dependecies."
        )
        os._exit(1)

    def _test_connection(self, connection_string):
        """Return True when a connection can be opened (and closed) with the
        given connection string, False otherwise."""
        try:
            engine = create_engine(connection_string)
            engine.connect().close()
            return True
        except Exception as error:
            if options.debug:
                logging.exception("Database connection failed: %s" % error)
            return False

    def _db_credentials(self):
        """Resolve the password (prompting or reading the environment when
        configured to) and return url-encoded (host, database, user, password).
        """
        if self.password == "" or self.password == "RUNTIME":
            sys.stdout.write(PROMPT + "Database password: ")
            sys.stdout.flush()
            self.password = getpass.getpass()
        elif self.password == "ENV":
            self.password = os.environ["sql_password"]
        host = quote(self.hostname)
        name = quote(self.database)
        user = quote(self.username)
        password = quote_plus(self.password)
        if "@" in password:
            logging.warning(
                "%sWARNING:%s Using the '@' symbol in your database password can cause login issues with SQL Alchemy.%s"
                % (WARN + bold + R, W, WARN)
            )
        return host, name, user, password
| 33.083799 | 144 | 0.599797 |
import os
import logging
import sys
import getpass
import codecs
from builtins import object
from libs.ConsoleColors import *
try:
from urllib.parse import quote, quote_plus
except ImportError:
from urllib import quote, quote_plus
from sqlalchemy import create_engine
from tornado.options import options
class DatabaseConnection(object):
    """Builds (and sanity-checks) the SQLAlchemy connection string for the
    configured database dialect: sqlite, postgresql or mysql."""

    def __init__(
        self,
        database,
        hostname="",
        port="",
        username="",
        password="",
        dialect="",
        ssl_ca="",
    ):
        # NOTE(review): ``port`` is stored but never used when the connection
        # string is built below - confirm whether that is intentional.
        self.database = database
        self.hostname = hostname
        self.port = port
        self.username = username
        self.password = password
        self.dialect = dialect
        self.ssl_ca = ssl_ca

    def __str__(self):
        """Construct the database connection string for the configured dialect."""
        if self.dialect == "sqlite":
            db_conn = self._sqlite()
        elif self.dialect.startswith("postgres"):
            db_conn = self._postgresql()
        elif self.dialect == "mysql":
            db_conn = self._mysql()
        else:
            raise ValueError("Database dialect not supported")
        # Result is ignored here; the dialect helpers already bail out on failure.
        self._test_connection(db_conn)
        return db_conn

    def _postgresql(self):
        """Postgresql connection string; requires the 3rd party 'pypostgresql'
        driver and exits the process when it is missing or cannot connect."""
        logging.debug("Configured to use Postgresql for a database")
        try:
            import pypostgresql
        except ImportError:
            print(WARN + "You must install 'pypostgresql'")
            os._exit(1)
        db_host, db_name, db_user, db_password = self._db_credentials()
        postgres = "postgresql+pypostgresql://%s:%s@%s/%s" % (
            db_user,
            db_password,
            db_host,
            db_name,
        )
        if self._test_connection(postgres):
            return postgres
        else:
            logging.fatal("Cannot connect to database with any available driver")
            os._exit(1)

    def _sqlite(self):
        """SQLite connection string; db file saved to cwd, default name 'rtb.db'."""
        logging.debug("Configured to use SQLite for a database")
        db_name = self.database
        if not len(db_name):
            db_name = "rtb"
        if not db_name.endswith(".db"):
            db_name = "%s.db" % db_name
        return "sqlite:///%s" % db_name

    def _mysql(self):
        """MySQL connection string; tries the available drivers in order and
        exits the process when none of them can connect."""
        logging.debug("Configured to use MySQL for a database")
        db_server, db_name, db_user, db_password = self._db_credentials()
        db_charset = "utf8mb4"
        db_connection = "%s:%s@%s/%s?charset=%s" % (
            db_user,
            db_password,
            db_server,
            db_name,
            db_charset,
        )
        if self.ssl_ca != "":
            db_connection = db_connection + "&ssl_ca=" + self.ssl_ca
        # 'utf8mb4' is not a codec name Python knows; alias it to utf8.
        codecs.register(
            lambda name: codecs.lookup("utf8") if name == "utf8mb4" else None
        )
        __mysql = "mysql://%s" % db_connection
        __mysqlclient = "mysql+mysqldb://%s" % db_connection
        __pymysql = "mysql+pymysql://%s" % db_connection
        __mysqlconnector = "mysql+mysqlconnector://%s" % db_connection
        if self._test_connection(__mysql):
            return __mysql
        elif self._test_connection(__mysqlclient):
            return __mysqlclient
        elif self._test_connection(__pymysql):
            return __pymysql
        elif self._test_connection(__mysqlconnector):
            return __mysqlconnector
        else:
            logging.fatal(
                "Cannot connect to database with any available driver. Verify correct username & password in rootthebox.cfg and db dependecies."
            )
            os._exit(1)

    def _test_connection(self, connection_string):
        """Return True when a connection can be opened with the given string."""
        try:
            engine = create_engine(connection_string)
            connection = engine.connect()
            connection.close()
            return True
        except Exception as e:
            if options.debug:
                logging.exception("Database connection failed: %s" % e)
            return False

    def _db_credentials(self):
        """Resolve the password (prompt / environment) and return the
        url-encoded (host, database, user, password) credentials."""
        if self.password == "" or self.password == "RUNTIME":
            sys.stdout.write(PROMPT + "Database password: ")
            sys.stdout.flush()
            self.password = getpass.getpass()
        elif self.password == "ENV":
            self.password = os.environ["sql_password"]
        db_host = quote(self.hostname)
        db_name = quote(self.database)
        db_user = quote(self.username)
        db_password = quote_plus(self.password)
        if "@" in db_password:
            logging.warning(
                "%sWARNING:%s Using the '@' symbol in your database password can cause login issues with SQL Alchemy.%s"
                % (WARN + bold + R, W, WARN)
            )
        return db_host, db_name, db_user, db_password
| true | true |
f70f41653ba4836a1f0e9437fabd0b3d59b9213c | 2,625 | py | Python | schedules/tests/test_forms.py | mennonitengemeinde/church_site | ae9ef5f0f78811cecd734705339511dc0efb8340 | [
"MIT"
] | null | null | null | schedules/tests/test_forms.py | mennonitengemeinde/church_site | ae9ef5f0f78811cecd734705339511dc0efb8340 | [
"MIT"
] | 44 | 2020-05-13T20:15:26.000Z | 2022-03-04T02:58:58.000Z | schedules/tests/test_forms.py | mennonitengemeinde/church_site | ae9ef5f0f78811cecd734705339511dc0efb8340 | [
"MIT"
] | 4 | 2020-06-05T17:59:52.000Z | 2021-02-06T19:09:43.000Z | from datetime import timedelta
from django.contrib.auth import get_user_model
from django.utils import timezone
from churches.models import Church
from schedules.forms import EventForm, AttendantAdminForm, AttendantForm
from schedules.models import Event, Attendant
from schedules.tests._setup import EventSetupTestCase
class EventFormTests(EventSetupTestCase):
def test_init(self):
user = get_user_model().objects.get(username='test_user')
form = EventForm(user=user)
self.assertEqual(form.fields['church'].queryset.count(), 1)
def test_invalid_clean_end(self):
user = get_user_model().objects.get(username='test_user')
church = Church.objects.get(name='Church 1')
data = {'church': church, 'start': timezone.now(), 'end': timezone.now() - timedelta(days=1),
'title': 'Test', 'visibility': 'public', 'attendance_limit': 0}
form = EventForm(user=user, data=data)
self.assertFalse(form.is_valid())
def test_valid_clean_end(self):
user = get_user_model().objects.get(username='test_user')
church = Church.objects.get(name='Church 1')
data = {'church': church, 'start': timezone.now(), 'end': timezone.now() + timedelta(days=1),
'title': 'Test', 'visibility': 'public', 'attendance_limit': 0}
form = EventForm(user=user, data=data)
self.assertTrue(form.is_valid())
class AttendantAdminFormTests(EventSetupTestCase):
    """Validation tests for ``AttendantAdminForm``'s amount cleaning."""

    def _bound_admin_form(self, amount):
        """Bind the admin form to the 'Attendant 1' instance with the given amount."""
        event = Event.objects.get(title='Title 5')
        attendant = Attendant.objects.get(full_name='Attendant 1')
        payload = {'event': event, 'full_name': 'Attendant 5', 'amount': amount}
        return AttendantAdminForm(instance=attendant, data=payload)

    def test_valid_clean_amount(self):
        """An amount of 2 passes form validation."""
        self.assertTrue(self._bound_admin_form(2).is_valid())

    def test_invalid_clean_amount(self):
        """An amount of 4 fails form validation."""
        self.assertFalse(self._bound_admin_form(4).is_valid())
class AttendantFormTests(EventSetupTestCase):
    """Validation tests for the public ``AttendantForm``'s amount cleaning."""

    def _bound_form(self, amount):
        """Bind the public attendant form with the given amount."""
        event = Event.objects.get(title='Title 5')
        payload = {'event': event, 'full_name': 'Attendant 5', 'amount': amount}
        return AttendantForm(data=payload)

    def test_valid_clean_amount(self):
        """An amount of 1 passes form validation."""
        self.assertTrue(self._bound_form(1).is_valid())

    def test_invalid_clean_amount(self):
        """An amount of 2 fails form validation."""
        self.assertFalse(self._bound_form(2).is_valid())
| 39.179104 | 101 | 0.664381 | from datetime import timedelta
from django.contrib.auth import get_user_model
from django.utils import timezone
from churches.models import Church
from schedules.forms import EventForm, AttendantAdminForm, AttendantForm
from schedules.models import Event, Attendant
from schedules.tests._setup import EventSetupTestCase
class EventFormTests(EventSetupTestCase):
    """Validation tests for ``EventForm``."""

    def test_init(self):
        # Exactly one church ends up in the form's queryset for this user.
        user = get_user_model().objects.get(username='test_user')
        form = EventForm(user=user)
        self.assertEqual(form.fields['church'].queryset.count(), 1)

    def test_invalid_clean_end(self):
        # An event ending a day before it starts must not validate.
        user = get_user_model().objects.get(username='test_user')
        church = Church.objects.get(name='Church 1')
        data = {'church': church, 'start': timezone.now(), 'end': timezone.now() - timedelta(days=1),
                'title': 'Test', 'visibility': 'public', 'attendance_limit': 0}
        form = EventForm(user=user, data=data)
        self.assertFalse(form.is_valid())

    def test_valid_clean_end(self):
        # An event ending a day after it starts validates.
        user = get_user_model().objects.get(username='test_user')
        church = Church.objects.get(name='Church 1')
        data = {'church': church, 'start': timezone.now(), 'end': timezone.now() + timedelta(days=1),
                'title': 'Test', 'visibility': 'public', 'attendance_limit': 0}
        form = EventForm(user=user, data=data)
        self.assertTrue(form.is_valid())
class AttendantAdminFormTests(EventSetupTestCase):
    """Validation tests for ``AttendantAdminForm``'s amount cleaning."""

    def test_valid_clean_amount(self):
        # Updating the existing attendant to an amount of 2 validates.
        e = Event.objects.get(title='Title 5')
        a = Attendant.objects.get(full_name='Attendant 1')
        data = {'event': e, 'full_name': 'Attendant 5', 'amount': 2}
        form = AttendantAdminForm(instance=a, data=data)
        self.assertTrue(form.is_valid())

    def test_invalid_clean_amount(self):
        # An amount of 4 fails validation.
        e = Event.objects.get(title='Title 5')
        a = Attendant.objects.get(full_name='Attendant 1')
        data = {'event': e, 'full_name': 'Attendant 5', 'amount': 4}
        form = AttendantAdminForm(instance=a, data=data)
        self.assertFalse(form.is_valid())
class AttendantFormTests(EventSetupTestCase):
    """Validation tests for the public ``AttendantForm``'s amount cleaning."""

    def test_valid_clean_amount(self):
        # An amount of 1 passes validation.
        e = Event.objects.get(title='Title 5')
        data = {'event': e, 'full_name': 'Attendant 5', 'amount': 1}
        form = AttendantForm(data=data)
        self.assertTrue(form.is_valid())

    def test_invalid_clean_amount(self):
        # An amount of 2 fails validation.
        e = Event.objects.get(title='Title 5')
        data = {'event': e, 'full_name': 'Attendant 5', 'amount': 2}
        form = AttendantForm(data=data)
        self.assertFalse(form.is_valid())
| true | true |
f70f422ca39290a91d8641eadeef0839ff5a9c19 | 3,128 | py | Python | aizynthfinder/mcts/config.py | robertburbidgedata/aizynthfinder | c05be3ce2eb3773dbdd2c52345f49543ca5b5b1e | [
"MIT"
] | null | null | null | aizynthfinder/mcts/config.py | robertburbidgedata/aizynthfinder | c05be3ce2eb3773dbdd2c52345f49543ca5b5b1e | [
"MIT"
] | null | null | null | aizynthfinder/mcts/config.py | robertburbidgedata/aizynthfinder | c05be3ce2eb3773dbdd2c52345f49543ca5b5b1e | [
"MIT"
] | null | null | null | """ Module containing a class for encapsulating the settings of the tree search
"""
import os
import yaml
from aizynthfinder.utils.logging import logger
from aizynthfinder.utils.paths import data_path
from aizynthfinder.mcts.policy import Policy
from aizynthfinder.mcts.stock import Stock, MongoDbInchiKeyQuery
class Configuration:
    """
    Encapsulating the settings of the tree search, including the policy,
    the stock and various parameters.

    All the parameters can be retrieved as attributes of the Configuration
    object, e.g.

    .. code-block::

        config.max_transforms  # The maximum of transform
        config.iteration_limit  # The maximum number of iterations

    On instantiation it will read default parameters from a config.yml
    file located in the `data` folder of the package.
    """

    def __init__(self):
        # Start from the packaged defaults; values may be overridden later
        # through `update()` or `from_file()`.
        self._properties = {}
        filename = os.path.join(data_path(), "config.yml")
        with open(filename, "r") as fileobj:
            _config = yaml.load(fileobj.read(), Loader=yaml.SafeLoader)
        self._update_from_config(_config)

        self.stock = Stock()
        self.policy = Policy(self)
        self._logger = logger()

    def __eq__(self, other):
        # Bug fix: comparing with a non-Configuration object used to raise
        # AttributeError (no `_properties`). Returning NotImplemented defers
        # to the other operand and ultimately evaluates as unequal.
        if not isinstance(other, Configuration):
            return NotImplemented
        return self._properties == other._properties

    @classmethod
    def from_file(cls, filename):
        """
        Loads a configuration from a yaml file.
        The parameters not set in the yaml file are taken from the default values.
        The policies and stocks specified in the yaml file are directly loaded.

        :param filename: the path to a yaml file
        :type filename: str
        :return: a Configuration object with settings from the yaml file
        :rtype: Configuration
        """
        config_obj = Configuration()
        with open(filename, "r") as fileobj:
            _config = yaml.load(fileobj.read(), Loader=yaml.SafeLoader)
        config_obj._update_from_config(_config)

        # Each policy entry maps a key to a (model file, template file) pair.
        for key, policy_spec in _config.get("policy", {}).get("files", {}).items():
            modelfile, templatefile = policy_spec
            config_obj.policy.load_policy(modelfile, templatefile, key)

        # File-based stocks, one per key.
        for key, stockfile in _config.get("stock", {}).get("files", {}).items():
            config_obj.stock.load_stock(stockfile, key)

        # Optional MongoDB-backed stock.
        if "mongodb" in _config.get("stock", {}):
            query_obj = MongoDbInchiKeyQuery(**(_config["stock"]["mongodb"] or {}))
            config_obj.stock.load_stock(query_obj, "mongodb_stock")

        return config_obj

    def update(self, **settings):
        """Update the configuration using a dictionary of parameters,
        setting each one as an attribute and logging the change."""
        for setting, value in settings.items():
            setattr(self, setting, value)
            self._logger.info(f"Setting {setting.replace('_', ' ')} to {value}")

    def _update_from_config(self, config):
        # Later sections take precedence:
        # finder.properties < policy.properties < top-level properties.
        self._properties.update(config.get("finder", {}).get("properties", {}))
        self._properties.update(config.get("policy", {}).get("properties", {}))
        self._properties.update(config.get("properties", {}))
        self.__dict__.update(self._properties)
| 35.545455 | 83 | 0.656969 | import os
import yaml
from aizynthfinder.utils.logging import logger
from aizynthfinder.utils.paths import data_path
from aizynthfinder.mcts.policy import Policy
from aizynthfinder.mcts.stock import Stock, MongoDbInchiKeyQuery
class Configuration:
    """Settings of the tree search: loads defaults from the packaged
    config.yml and exposes every parameter as an attribute."""

    def __init__(self):
        # Start from the packaged defaults; values may be overridden later.
        self._properties = {}
        filename = os.path.join(data_path(), "config.yml")
        with open(filename, "r") as fileobj:
            _config = yaml.load(fileobj.read(), Loader=yaml.SafeLoader)
        self._update_from_config(_config)

        self.stock = Stock()
        self.policy = Policy(self)
        self._logger = logger()

    def __eq__(self, other):
        # NOTE(review): assumes `other` is a Configuration; comparing with an
        # unrelated type raises AttributeError - consider returning NotImplemented.
        return self._properties == other._properties

    @classmethod
    def from_file(cls, filename):
        """Load a configuration from a yaml file; parameters not set in the
        file keep their defaults, and listed policies/stocks are loaded."""
        config_obj = Configuration()
        with open(filename, "r") as fileobj:
            _config = yaml.load(fileobj.read(), Loader=yaml.SafeLoader)
        config_obj._update_from_config(_config)

        # Each policy entry maps a key to a (model file, template file) pair.
        for key, policy_spec in _config.get("policy", {}).get("files", {}).items():
            modelfile, templatefile = policy_spec
            config_obj.policy.load_policy(modelfile, templatefile, key)

        # File-based stocks, one per key.
        for key, stockfile in _config.get("stock", {}).get("files", {}).items():
            config_obj.stock.load_stock(stockfile, key)

        # Optional MongoDB-backed stock.
        if "mongodb" in _config.get("stock", {}):
            query_obj = MongoDbInchiKeyQuery(**(_config["stock"]["mongodb"] or {}))
            config_obj.stock.load_stock(query_obj, "mongodb_stock")

        return config_obj

    def update(self, **settings):
        """Update the configuration from keyword parameters, logging each change."""
        for setting, value in settings.items():
            setattr(self, setting, value)
            self._logger.info(f"Setting {setting.replace('_', ' ')} to {value}")

    def _update_from_config(self, config):
        # Later sections take precedence:
        # finder.properties < policy.properties < top-level properties.
        self._properties.update(config.get("finder", {}).get("properties", {}))
        self._properties.update(config.get("policy", {}).get("properties", {}))
        self._properties.update(config.get("properties", {}))
        self.__dict__.update(self._properties)
| true | true |
f70f42676fb42ff2e24a3774a95fa3284dddc61f | 9,770 | py | Python | hata/ext/command_utils/bases.py | Multiface24111/hata | cd28f9ef158e347363669cc8d1d49db0ff41aba0 | [
"0BSD"
] | 173 | 2019-06-14T20:25:00.000Z | 2022-03-21T19:36:10.000Z | hata/ext/command_utils/bases.py | Tari-dev/hata | a5c3199c845858f997af3b0b2c18770fdc691897 | [
"0BSD"
] | 52 | 2020-01-03T17:05:14.000Z | 2022-03-31T11:39:50.000Z | hata/ext/command_utils/bases.py | Tari-dev/hata | a5c3199c845858f997af3b0b2c18770fdc691897 | [
"0BSD"
] | 47 | 2019-11-09T08:46:45.000Z | 2022-03-31T14:33:34.000Z | __all__ = ('GUI_STATE_CANCELLED', 'GUI_STATE_CANCELLING', 'GUI_STATE_READY', 'GUI_STATE_SWITCHING_CTX',
'GUI_STATE_SWITCHING_PAGE', 'PaginationBase')
from ...backend.futures import Task, CancelledError
from ...discord.core import KOKORO
from ...discord.exceptions import DiscordException, ERROR_CODES
# Possible values of ``PaginationBase._task_flag`` (see its docstring for details).
GUI_STATE_READY = 0            # idle; ready to handle reaction events
GUI_STATE_SWITCHING_PAGE = 1   # currently editing the message to another page
GUI_STATE_CANCELLING = 2       # page switch in progress, but cancellation requested
GUI_STATE_CANCELLED = 3        # cancelled, or being cancelled right now
GUI_STATE_SWITCHING_CTX = 4    # the message is being handed over to another GUI

# Human readable state names, used by ``PaginationBase.__repr__``.
GUI_STATE_VALUE_TO_NAME = {
    GUI_STATE_READY : 'ready',
    GUI_STATE_SWITCHING_PAGE : 'switching_page',
    GUI_STATE_CANCELLING : 'cancelling',
    GUI_STATE_CANCELLED : 'cancelled',
    GUI_STATE_SWITCHING_CTX : 'switching_context',
}
class PaginationBase:
    """
    Base class for pagination like objects.

    Attributes
    ----------
    _canceller : `None` or `Function`
        The function called when the ``Pagination`` is cancelled or when it expires. This is a onetime use and after
        it was used, is set as `None`.
    _task_flag : `int`
        A flag to store the state of the ``Pagination``.

        Possible values:
        +---------------------------+-------+-----------------------------------------------------------------------+
        | Respective name           | Value | Description                                                           |
        +===========================+=======+=======================================================================+
        | GUI_STATE_READY           | 0     | The Pagination does nothing, is ready to be used.                     |
        +---------------------------+-------+-----------------------------------------------------------------------+
        | GUI_STATE_SWITCHING_PAGE  | 1     | The Pagination is currently changing it's page.                       |
        +---------------------------+-------+-----------------------------------------------------------------------+
        | GUI_STATE_CANCELLING      | 2     | The pagination is currently changing it's page, but it was cancelled  |
        |                           |       | meanwhile.                                                            |
        +---------------------------+-------+-----------------------------------------------------------------------+
        | GUI_STATE_CANCELLED       | 3     | The pagination is, or is being cancelled right now.                   |
        +---------------------------+-------+-----------------------------------------------------------------------+
        | GUI_STATE_SWITCHING_CTX   | 4     | The Pagination is switching context. Not used by the default class,   |
        |                           |       | but expected.                                                         |
        +---------------------------+-------+-----------------------------------------------------------------------+
    _timeouter : `None` or ``Timeouter``
        Executes the timing out feature on the ``Pagination``.
    channel : ``ChannelTextBase`` instance
        The channel where the ``Pagination`` is executed.
    client : ``Client`` of ``Embed`` (or any compatible)
        The client who executes the ``Pagination``.
    message : `None` or ``Message``
        The message on what the ``Pagination`` is executed.
    """
    __slots__ = ('_canceller', '_task_flag', '_timeouter', 'channel', 'client', 'message')

    async def __new__(cls, client, channel):
        """
        Pagination instances should have an asynchronous constructor;
        subclasses must implement it.

        Raises
        ------
        NotImplementedError
        """
        raise NotImplementedError

    async def __call__(self, client, event):
        """
        Called when a reaction is added or removed from the respective message.
        The base implementation is a no-op; subclasses override it.

        This method is a coroutine.

        Parameters
        ----------
        client : ``Client``
            The client who executes the ``Pagination``.
        event : ``ReactionAddEvent``, ``ReactionDeleteEvent``
            The received event.
        """
        pass

    async def _canceller_function(self, exception):
        """
        Used when the ``Pagination`` is cancelled.

        First of all removes the pagination from the reaction waiters, so it will not receive further reaction
        events, then sets its ``._task_flag`` to `GUI_STATE_CANCELLED`.

        If `exception` is given as `TimeoutError`, then removes the ``Pagination``'s reactions from the respective
        message.

        This method is a coroutine.

        Parameters
        ----------
        exception : `None` or ``BaseException`` instance
            Exception to cancel the ``Pagination`` with.
        """
        client = self.client
        message = self.message

        client.events.reaction_add.remove(message, self)
        client.events.reaction_delete.remove(message, self)

        if self._task_flag == GUI_STATE_SWITCHING_CTX:
            # The message was handed over to another GUI; leave it untouched.
            return

        self._task_flag = GUI_STATE_CANCELLED

        if not await self._handle_close_exception(exception):
            await client.events.error(client, f'{self!r}._canceller_function', exception)

    async def _handle_close_exception(self, exception):
        """
        Handles the close exception if any.

        On ``CancelledError`` the message is deleted; on ``TimeoutError`` the
        reactions are cleared (when permitted); ``PermissionError`` is swallowed.
        Expected Discord error codes (deleted message/channel, missing access or
        permissions) and connection errors are treated as handled.

        This method is a coroutine.

        Parameters
        ----------
        exception : `None` or `BaseException`
            The close exception to handle.

        Returns
        -------
        exception_handled : `bool`
            Whether the exception was handled.
        """
        if exception is None:
            return True

        client = self.client
        message = self.message

        if isinstance(exception, CancelledError):
            try:
                await client.message_delete(message)
            except BaseException as err:
                if isinstance(err, ConnectionError):
                    # no internet
                    return True

                if isinstance(err, DiscordException):
                    if err.code in (
                        ERROR_CODES.unknown_channel, # channel deleted
                        ERROR_CODES.unknown_message, # message deleted
                        ERROR_CODES.missing_access, # client removed
                    ):
                        return True

                await client.events.error(client, f'{self!r}._handle_close_exception', err)

            return True

        if isinstance(exception, TimeoutError):
            # Only attempt the clear when we hold the manage-messages permission.
            if self.channel.cached_permissions_for(client).can_manage_messages:
                try:
                    await client.reaction_clear(message)
                except BaseException as err:
                    if isinstance(err, ConnectionError):
                        # no internet
                        return True

                    if isinstance(err, DiscordException):
                        if err.code in (
                            ERROR_CODES.unknown_message, # message deleted
                            ERROR_CODES.unknown_channel, # channel deleted
                            ERROR_CODES.missing_access, # client removed
                            ERROR_CODES.missing_permissions, # permissions changed meanwhile
                        ):
                            return True

                    await client.events.error(client, f'{self!r}._handle_close_exception', err)

            return True

        if isinstance(exception, PermissionError):
            return True

        return False

    def cancel(self, exception=None):
        """
        Cancels the pagination, if it is not cancelled yet.

        Parameters
        ----------
        exception : `None` or ``BaseException`` instance, Optional
            Exception to cancel the pagination with. Defaults to `None`

        Returns
        -------
        canceller_task : `None` or ``Task``
        """
        if self._task_flag in (GUI_STATE_READY, GUI_STATE_SWITCHING_PAGE, GUI_STATE_CANCELLING):
            self._task_flag = GUI_STATE_CANCELLED

        # The canceller is one-shot: consume it and never call it twice.
        canceller = self._canceller
        if canceller is None:
            return

        self._canceller = None

        timeouter = self._timeouter
        if (timeouter is not None):
            timeouter.cancel()

        return Task(canceller(self, exception), KOKORO)

    def __repr__(self):
        """Returns the pagination instance's representation."""
        repr_parts = [
            '<', self.__class__.__name__,
            ' client=', repr(self.client),
            ', channel=', repr(self.channel),
            ', state='
        ]

        task_flag = self._task_flag
        repr_parts.append(repr(task_flag))
        repr_parts.append(' (')

        task_flag_name = GUI_STATE_VALUE_TO_NAME[task_flag]

        repr_parts.append(task_flag_name)
        repr_parts.append(')')

        # Third party things go here

        repr_parts.append('>')
        return ''.join(repr_parts)

    def is_active(self):
        """
        Returns whether the menu is still active.

        Returns
        -------
        is_active : `bool`
        """
        return (self._task_flag in (GUI_STATE_READY, GUI_STATE_SWITCHING_PAGE))
| 37.148289 | 117 | 0.487513 | __all__ = ('GUI_STATE_CANCELLED', 'GUI_STATE_CANCELLING', 'GUI_STATE_READY', 'GUI_STATE_SWITCHING_CTX',
'GUI_STATE_SWITCHING_PAGE', 'PaginationBase')
from ...backend.futures import Task, CancelledError
from ...discord.core import KOKORO
from ...discord.exceptions import DiscordException, ERROR_CODES
# Possible values of ``PaginationBase._task_flag``.
GUI_STATE_READY = 0            # idle; ready to handle reaction events
GUI_STATE_SWITCHING_PAGE = 1   # currently editing the message to another page
GUI_STATE_CANCELLING = 2       # page switch in progress, but cancellation requested
GUI_STATE_CANCELLED = 3        # cancelled, or being cancelled right now
GUI_STATE_SWITCHING_CTX = 4    # the message is being handed over to another GUI

# Human readable state names, used by ``PaginationBase.__repr__``.
GUI_STATE_VALUE_TO_NAME = {
    GUI_STATE_READY : 'ready',
    GUI_STATE_SWITCHING_PAGE : 'switching_page',
    GUI_STATE_CANCELLING : 'cancelling',
    GUI_STATE_CANCELLED : 'cancelled',
    GUI_STATE_SWITCHING_CTX : 'switching_context',
}
class PaginationBase:
    """Base class for reaction-driven pagination GUIs.

    Tracks a ``_task_flag`` state machine (the ``GUI_STATE_*`` constants), a
    one-shot ``_canceller`` callback and an optional ``_timeouter``, plus the
    ``client``, ``channel`` and ``message`` the GUI operates on.
    """
    __slots__ = ('_canceller', '_task_flag', '_timeouter', 'channel', 'client', 'message')

    async def __new__(cls, client, channel):
        """Subclasses must implement their own asynchronous constructor."""
        raise NotImplementedError

    async def __call__(self, client, event):
        """Called on reaction add/remove events; no-op here, overridden by subclasses."""
        pass

    async def _canceller_function(self, exception):
        """Unregisters the GUI from reaction events, marks it cancelled and
        handles the given close exception (coroutine)."""
        client = self.client
        message = self.message

        client.events.reaction_add.remove(message, self)
        client.events.reaction_delete.remove(message, self)

        if self._task_flag == GUI_STATE_SWITCHING_CTX:
            # The message was handed over to another GUI; leave it untouched.
            return

        self._task_flag = GUI_STATE_CANCELLED

        if not await self._handle_close_exception(exception):
            await client.events.error(client, f'{self!r}._canceller_function', exception)

    async def _handle_close_exception(self, exception):
        """Handles the close exception; returns whether it was handled
        (coroutine). Cancellation deletes the message, timeout clears the
        reactions (when permitted); expected Discord errors are swallowed."""
        if exception is None:
            return True

        client = self.client
        message = self.message

        if isinstance(exception, CancelledError):
            try:
                await client.message_delete(message)
            except BaseException as err:
                if isinstance(err, ConnectionError):
                    # No internet; nothing more to do.
                    return True

                if isinstance(err, DiscordException):
                    if err.code in (
                        ERROR_CODES.unknown_channel, # channel deleted
                        ERROR_CODES.unknown_message, # message deleted
                        ERROR_CODES.missing_access, # client removed
                    ):
                        return True

                await client.events.error(client, f'{self!r}._handle_close_exception', err)

            return True

        if isinstance(exception, TimeoutError):
            # Only attempt the clear when we hold the manage-messages permission.
            if self.channel.cached_permissions_for(client).can_manage_messages:
                try:
                    await client.reaction_clear(message)
                except BaseException as err:
                    if isinstance(err, ConnectionError):
                        # No internet; nothing more to do.
                        return True

                    if isinstance(err, DiscordException):
                        if err.code in (
                            ERROR_CODES.unknown_message, # message deleted
                            ERROR_CODES.unknown_channel, # channel deleted
                            ERROR_CODES.missing_access, # client removed
                            ERROR_CODES.missing_permissions, # permissions changed meanwhile
                        ):
                            return True

                    await client.events.error(client, f'{self!r}._handle_close_exception', err)

            return True

        if isinstance(exception, PermissionError):
            return True

        return False

    def cancel(self, exception=None):
        """Cancels the pagination (if not yet cancelled); returns the canceller
        ``Task`` or `None` when the one-shot canceller was already consumed."""
        if self._task_flag in (GUI_STATE_READY, GUI_STATE_SWITCHING_PAGE, GUI_STATE_CANCELLING):
            self._task_flag = GUI_STATE_CANCELLED

        canceller = self._canceller
        if canceller is None:
            return

        self._canceller = None

        timeouter = self._timeouter
        if (timeouter is not None):
            timeouter.cancel()

        return Task(canceller(self, exception), KOKORO)

    def __repr__(self):
        """Returns the pagination instance's representation."""
        repr_parts = [
            '<', self.__class__.__name__,
            ' client=', repr(self.client),
            ', channel=', repr(self.channel),
            ', state='
        ]

        task_flag = self._task_flag
        repr_parts.append(repr(task_flag))
        repr_parts.append(' (')

        task_flag_name = GUI_STATE_VALUE_TO_NAME[task_flag]

        repr_parts.append(task_flag_name)
        repr_parts.append(')')

        repr_parts.append('>')
        return ''.join(repr_parts)

    def is_active(self):
        """Returns whether the menu is still active (ready or switching page)."""
        return (self._task_flag in (GUI_STATE_READY, GUI_STATE_SWITCHING_PAGE))
| true | true |
f70f42c642bcd5f1c7503962ab9976885292a0cd | 277 | py | Python | tests/test_0242.py | delirious-lettuce/LeetCode | 4fbf2627f86cbaf1054c3df59bb9ffe72096d405 | [
"MIT"
] | 3 | 2018-02-14T23:50:07.000Z | 2022-01-20T11:34:42.000Z | tests/test_0242.py | delirious-lettuce/LeetCode | 4fbf2627f86cbaf1054c3df59bb9ffe72096d405 | [
"MIT"
] | null | null | null | tests/test_0242.py | delirious-lettuce/LeetCode | 4fbf2627f86cbaf1054c3df59bb9ffe72096d405 | [
"MIT"
] | null | null | null | import pytest
from problems.problem_0242 import Solution
@pytest.mark.parametrize('test_input, expected', (
(('anagram', 'nagaram'), True),
(('rat', 'car'), False),
))
def test_is_anagram(test_input, expected):
assert Solution.isAnagram(*test_input) == expected
| 23.083333 | 54 | 0.703971 | import pytest
from problems.problem_0242 import Solution
@pytest.mark.parametrize('test_input, expected', (
(('anagram', 'nagaram'), True),
(('rat', 'car'), False),
))
def test_is_anagram(test_input, expected):
assert Solution.isAnagram(*test_input) == expected
| true | true |
f70f42d0b880c0ab80731f4f8d628378224e54c4 | 7,587 | py | Python | oneflow/python/test/ops/test_binary_elementwise_ops.py | wanghongsheng01/framework_enflame | debf613e05e3f5ea8084c3e79b60d0dd9e349526 | [
"Apache-2.0"
] | 1 | 2021-04-14T03:19:35.000Z | 2021-04-14T03:19:35.000Z | oneflow/python/test/ops/test_binary_elementwise_ops.py | wanghongsheng01/framework_enflame | debf613e05e3f5ea8084c3e79b60d0dd9e349526 | [
"Apache-2.0"
] | 1 | 2021-06-16T08:37:50.000Z | 2021-06-16T08:37:50.000Z | oneflow/python/test/ops/test_binary_elementwise_ops.py | wanghongsheng01/framework_enflame | debf613e05e3f5ea8084c3e79b60d0dd9e349526 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import numpy as np
import tensorflow as tf
import oneflow as flow
from collections import OrderedDict
import oneflow.typing as oft
import test_global_storage
from test_util import (
GenArgDict,
GenArgList,
type_name_to_flow_type,
type_name_to_np_type,
)
# Make TensorFlow allocate GPU memory on demand instead of grabbing it all
# up front (presumably so the OneFlow runtime can share the same device --
# TODO confirm).
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
def RunOneflowBinaryOp(device_type, flow_op, x, y, data_type):
    """Run ``flow_op(x, y)`` through a OneFlow "train" job on the given device.

    Both inputs are summed with zero-initialized trainable variables so that
    the gradients w.r.t. ``x`` and ``y`` can be captured via
    ``flow.watch_diff`` and read back from ``test_global_storage``.

    Returns:
        Tuple of (forward output ndarray, gradient w.r.t. x, gradient
        w.r.t. y).
    """
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)

    flow_type = type_name_to_flow_type[data_type]

    @flow.global_function(type="train", function_config=func_config)
    def FlowJob(
        x: oft.Numpy.Placeholder(x.shape, dtype=flow_type),
        y: oft.Numpy.Placeholder(y.shape, dtype=flow_type),
    ):
        with flow.scope.placement(device_type, "0:0"):
            # Adding zero variables turns x and y into differentiable leaves.
            x += flow.get_variable(
                name="x",
                shape=x.shape,
                dtype=flow_type,
                initializer=flow.zeros_initializer(),
                trainable=True,
            )
            y += flow.get_variable(
                name="y",
                shape=y.shape,
                dtype=flow_type,
                initializer=flow.zeros_initializer(),
                trainable=True,
            )
            loss = flow_op(x, y)

            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
            ).minimize(loss)

            # Stash the input gradients so the caller can read them back.
            flow.watch_diff(x, test_global_storage.Setter("x_diff"))
            flow.watch_diff(y, test_global_storage.Setter("y_diff"))

            return loss

    # Oneflow
    out = FlowJob(x, y).get().numpy()
    x_diff = test_global_storage.Get("x_diff")
    y_diff = test_global_storage.Get("y_diff")
    return out, x_diff, y_diff
def RunTensorFlowBinaryOp(tf_op, x, y):
    """Eagerly compute ``tf_op(x, y)`` and its gradients w.r.t. both inputs.

    Returns:
        Tuple of (output ndarray, gradient tensor w.r.t. x, gradient
        tensor w.r.t. y).
    """
    # TensorFlow
    # persistent=True: the tape is queried twice (once per input).
    with tf.GradientTape(persistent=True) as tape:
        x = tf.Variable(x)
        y = tf.Variable(y)
        out = tf_op(x, y)
    x_diff = tape.gradient(out, x)
    y_diff = tape.gradient(out, y)
    return out.numpy(), x_diff, y_diff
def compare_with_tensorflow(
    test_case,
    device_type,
    flow_op,
    tf_op,
    x_shape,
    y_shape,
    data_type,
    x_minval=-10,
    x_maxval=10,
    y_minval=-10,
    y_maxval=10,
    compare_grad=True,
    out_rtol=1e-5,
    out_atol=1e-5,
    diff_rtol=1e-5,
    diff_atol=1e-5,
):
    """Assert that OneFlow's ``flow_op`` matches TensorFlow's ``tf_op``.

    Inputs are sampled uniformly from [x_minval, x_maxval] and
    [y_minval, y_maxval]; outputs (and, when ``compare_grad`` is set, the
    gradients w.r.t. both inputs) must agree within the given rtol/atol
    tolerances.  NaNs are treated as equal (``equal_nan=True``).
    """
    test_case.assertTrue(device_type in ["gpu", "cpu"])
    np_type = type_name_to_np_type[data_type]
    x = np.random.uniform(low=x_minval, high=x_maxval, size=x_shape).astype(np_type)
    y = np.random.uniform(low=y_minval, high=y_maxval, size=y_shape).astype(np_type)

    of_out, of_x_diff, of_y_diff, = RunOneflowBinaryOp(
        device_type, flow_op, x, y, data_type
    )
    tf_out, tf_x_diff, tf_y_diff = RunTensorFlowBinaryOp(tf_op, x, y)

    test_case.assertTrue(
        np.allclose(of_out, tf_out, rtol=out_rtol, atol=out_atol, equal_nan=True)
    )
    if compare_grad:
        test_case.assertTrue(
            np.allclose(
                of_x_diff,
                tf_x_diff.numpy(),
                rtol=diff_rtol,
                atol=diff_atol,
                equal_nan=True,
            )
        )
        test_case.assertTrue(
            np.allclose(
                of_y_diff,
                tf_y_diff.numpy(),
                rtol=diff_rtol,
                atol=diff_atol,
                equal_nan=True,
            )
        )
    flow.clear_default_session()
@flow.unittest.skip_unless_1n1d()
class TestBinaryElementwiseOps(flow.unittest.TestCase):
    """Compare OneFlow binary elementwise ops against their TensorFlow
    counterparts on (5, 5) float32/double inputs, on both CPU and GPU.
    """

    def _run_op_comparison(
        self, flow_op, tf_op, x_minval, x_maxval, y_minval, y_maxval, compare_grad
    ):
        """Build the shared argument grid for one op pair and run every
        combination through compare_with_tensorflow.

        Only the op callables, the uniform sampling ranges of the two
        inputs, and the gradient-comparison switch differ between the test
        methods below; everything else (devices, shapes, dtypes) is shared.
        """
        arg_dict = OrderedDict()
        arg_dict["test_case"] = [self]
        arg_dict["device_type"] = ["gpu", "cpu"]
        arg_dict["flow_op"] = [flow_op]
        arg_dict["tf_op"] = [tf_op]
        arg_dict["x_shape"] = [(5, 5,)]
        arg_dict["y_shape"] = [(5, 5,)]
        arg_dict["data_type"] = ["float32", "double"]
        arg_dict["x_minval"] = [x_minval]
        arg_dict["x_maxval"] = [x_maxval]
        arg_dict["y_minval"] = [y_minval]
        arg_dict["y_maxval"] = [y_maxval]
        arg_dict["compare_grad"] = [compare_grad]
        for arg in GenArgList(arg_dict):
            compare_with_tensorflow(*arg)

    def test_floordiv(test_case):
        # y is sampled from [1, 10] to avoid division by zero; gradients are
        # not compared for floordiv (compare_grad=False).
        test_case._run_op_comparison(
            flow.math.floordiv, tf.math.floordiv, -10, 10, 1, 10, False
        )

    def test_pow(test_case):
        test_case._run_op_comparison(flow.math.pow, tf.math.pow, 1, 5, 1, 5, True)

    def test_xdivy(test_case):
        test_case._run_op_comparison(
            flow.math.xdivy, tf.math.xdivy, 1, 100, 1, 10, True
        )

    def test_xlogy(test_case):
        test_case._run_op_comparison(flow.math.xlogy, tf.math.xlogy, 1, 5, 1, 5, True)

    def test_atan2(test_case):
        test_case._run_op_comparison(flow.math.atan2, tf.math.atan2, 1, 5, 1, 5, True)
if __name__ == "__main__":
unittest.main()
| 32.012658 | 84 | 0.596283 | import unittest
import os
import numpy as np
import tensorflow as tf
import oneflow as flow
from collections import OrderedDict
import oneflow.typing as oft
import test_global_storage
from test_util import (
GenArgDict,
GenArgList,
type_name_to_flow_type,
type_name_to_np_type,
)
# Enable on-demand GPU memory growth so TensorFlow does not reserve the
# whole device up front (likely to coexist with OneFlow -- TODO confirm).
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
def RunOneflowBinaryOp(device_type, flow_op, x, y, data_type):
    """Execute ``flow_op(x, y)`` inside a OneFlow train job.

    The inputs are added to zero-initialized trainable variables so that
    ``flow.watch_diff`` can capture the loss gradients w.r.t. ``x`` and
    ``y``; the captured gradients are read back from ``test_global_storage``.

    Returns:
        (forward output ndarray, gradient w.r.t. x, gradient w.r.t. y)
    """
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)

    flow_type = type_name_to_flow_type[data_type]

    @flow.global_function(type="train", function_config=func_config)
    def FlowJob(
        x: oft.Numpy.Placeholder(x.shape, dtype=flow_type),
        y: oft.Numpy.Placeholder(y.shape, dtype=flow_type),
    ):
        with flow.scope.placement(device_type, "0:0"):
            # Zero variables make the inputs differentiable leaves.
            x += flow.get_variable(
                name="x",
                shape=x.shape,
                dtype=flow_type,
                initializer=flow.zeros_initializer(),
                trainable=True,
            )
            y += flow.get_variable(
                name="y",
                shape=y.shape,
                dtype=flow_type,
                initializer=flow.zeros_initializer(),
                trainable=True,
            )
            loss = flow_op(x, y)

            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
            ).minimize(loss)

            # Capture the input gradients for the caller.
            flow.watch_diff(x, test_global_storage.Setter("x_diff"))
            flow.watch_diff(y, test_global_storage.Setter("y_diff"))

            return loss

    out = FlowJob(x, y).get().numpy()
    x_diff = test_global_storage.Get("x_diff")
    y_diff = test_global_storage.Get("y_diff")
    return out, x_diff, y_diff
def RunTensorFlowBinaryOp(tf_op, x, y):
    """Compute ``tf_op(x, y)`` eagerly and return (output, dx, dy)."""
    # A persistent tape is required because gradient() is called twice.
    with tf.GradientTape(persistent=True) as tape:
        x = tf.Variable(x)
        y = tf.Variable(y)
        out = tf_op(x, y)
    x_diff = tape.gradient(out, x)
    y_diff = tape.gradient(out, y)
    return out.numpy(), x_diff, y_diff
def compare_with_tensorflow(
    test_case,
    device_type,
    flow_op,
    tf_op,
    x_shape,
    y_shape,
    data_type,
    x_minval=-10,
    x_maxval=10,
    y_minval=-10,
    y_maxval=10,
    compare_grad=True,
    out_rtol=1e-5,
    out_atol=1e-5,
    diff_rtol=1e-5,
    diff_atol=1e-5,
):
    """Check a OneFlow op against a TensorFlow op on random uniform inputs.

    Outputs -- and, when ``compare_grad`` is true, gradients w.r.t. both
    inputs -- must agree within the given tolerances; NaNs compare equal.
    """
    test_case.assertTrue(device_type in ["gpu", "cpu"])
    np_type = type_name_to_np_type[data_type]
    x = np.random.uniform(low=x_minval, high=x_maxval, size=x_shape).astype(np_type)
    y = np.random.uniform(low=y_minval, high=y_maxval, size=y_shape).astype(np_type)

    of_out, of_x_diff, of_y_diff, = RunOneflowBinaryOp(
        device_type, flow_op, x, y, data_type
    )
    tf_out, tf_x_diff, tf_y_diff = RunTensorFlowBinaryOp(tf_op, x, y)

    test_case.assertTrue(
        np.allclose(of_out, tf_out, rtol=out_rtol, atol=out_atol, equal_nan=True)
    )
    if compare_grad:
        test_case.assertTrue(
            np.allclose(
                of_x_diff,
                tf_x_diff.numpy(),
                rtol=diff_rtol,
                atol=diff_atol,
                equal_nan=True,
            )
        )
        test_case.assertTrue(
            np.allclose(
                of_y_diff,
                tf_y_diff.numpy(),
                rtol=diff_rtol,
                atol=diff_atol,
                equal_nan=True,
            )
        )
    flow.clear_default_session()
@flow.unittest.skip_unless_1n1d()
class TestBinaryElementwiseOps(flow.unittest.TestCase):
    """Checks OneFlow binary elementwise ops against TensorFlow references
    on (5, 5) float32/double inputs, on both CPU and GPU.
    """

    def _compare_binary_op(
        self, flow_op, tf_op, x_minval, x_maxval, y_minval, y_maxval, compare_grad
    ):
        """Shared driver: run compare_with_tensorflow over the full grid of
        devices and dtypes for one (flow_op, tf_op) pair.

        The sampling ranges keep the inputs inside each op's valid domain
        (e.g. positive divisors / logarithm arguments), and compare_grad
        switches gradient comparison off for non-differentiable ops.
        """
        arg_dict = OrderedDict()
        arg_dict["test_case"] = [self]
        arg_dict["device_type"] = ["gpu", "cpu"]
        arg_dict["flow_op"] = [flow_op]
        arg_dict["tf_op"] = [tf_op]
        arg_dict["x_shape"] = [(5, 5,)]
        arg_dict["y_shape"] = [(5, 5,)]
        arg_dict["data_type"] = ["float32", "double"]
        arg_dict["x_minval"] = [x_minval]
        arg_dict["x_maxval"] = [x_maxval]
        arg_dict["y_minval"] = [y_minval]
        arg_dict["y_maxval"] = [y_maxval]
        arg_dict["compare_grad"] = [compare_grad]
        for arg in GenArgList(arg_dict):
            compare_with_tensorflow(*arg)

    def test_floordiv(test_case):
        # Positive y range avoids division by zero; no gradient for floordiv.
        test_case._compare_binary_op(
            flow.math.floordiv, tf.math.floordiv, -10, 10, 1, 10, False)

    def test_pow(test_case):
        test_case._compare_binary_op(flow.math.pow, tf.math.pow, 1, 5, 1, 5, True)

    def test_xdivy(test_case):
        test_case._compare_binary_op(
            flow.math.xdivy, tf.math.xdivy, 1, 100, 1, 10, True)

    def test_xlogy(test_case):
        test_case._compare_binary_op(flow.math.xlogy, tf.math.xlogy, 1, 5, 1, 5, True)

    def test_atan2(test_case):
        test_case._compare_binary_op(flow.math.atan2, tf.math.atan2, 1, 5, 1, 5, True)
if __name__ == "__main__":
unittest.main()
| true | true |
f70f42d336559c1f4e453214becd60439e47c812 | 1,534 | py | Python | samples/generated_samples/dialogflow_generated_dialogflowcx_v3_deployments_list_deployments_async.py | nicain/python-dialogflow-cx | 2292ff540aea24c3c831a5ffe1604c2c022ccb82 | [
"Apache-2.0"
] | null | null | null | samples/generated_samples/dialogflow_generated_dialogflowcx_v3_deployments_list_deployments_async.py | nicain/python-dialogflow-cx | 2292ff540aea24c3c831a5ffe1604c2c022ccb82 | [
"Apache-2.0"
] | null | null | null | samples/generated_samples/dialogflow_generated_dialogflowcx_v3_deployments_list_deployments_async.py | nicain/python-dialogflow-cx | 2292ff540aea24c3c831a5ffe1604c2c022ccb82 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListDeployments
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_generated_dialogflowcx_v3_Deployments_ListDeployments_async]
from google.cloud import dialogflowcx_v3
async def sample_list_deployments():
    """List every deployment under an environment and print each entry.

    Note: this snippet is illustrative; ``parent`` must be replaced with a
    real environment resource name before running.
    """
    # Create a client
    client = dialogflowcx_v3.DeploymentsAsyncClient()

    # Initialize request argument(s)
    request = dialogflowcx_v3.ListDeploymentsRequest(
        parent="parent_value",
    )

    # Make the request. Bug fix: the async client's method returns a
    # coroutine, so it must be awaited to obtain the async pager --
    # iterating the bare coroutine with ``async for`` raises a TypeError.
    page_result = await client.list_deployments(request=request)

    # The pager transparently fetches further pages on demand.
    async for response in page_result:
        print(response)
# [END dialogflow_generated_dialogflowcx_v3_Deployments_ListDeployments_async]
| 34.088889 | 85 | 0.769231 |
from google.cloud import dialogflowcx_v3
async def sample_list_deployments():
    """Print all deployments under an environment.

    ``parent`` must be replaced with a real environment resource name
    before this sample is run.
    """
    client = dialogflowcx_v3.DeploymentsAsyncClient()
    request = dialogflowcx_v3.ListDeploymentsRequest(
        parent="parent_value",
    )
    # Bug fix: the async client returns a coroutine; it must be awaited to
    # obtain the async pager, otherwise ``async for`` over the bare
    # coroutine fails at runtime.
    page_result = await client.list_deployments(request=request)
    async for response in page_result:
        print(response)
| true | true |
f70f44300f3740ebdbf6b9adc5ad84b625f4771e | 9,080 | py | Python | core/domain/rte_component_registry_test.py | Panda2498/oppia | fccfd7e89c6904c244deaccdee80cc5658f2520a | [
"Apache-2.0"
] | 1 | 2021-12-09T10:37:09.000Z | 2021-12-09T10:37:09.000Z | core/domain/rte_component_registry_test.py | Panda2498/oppia | fccfd7e89c6904c244deaccdee80cc5658f2520a | [
"Apache-2.0"
] | 1 | 2020-01-26T14:02:43.000Z | 2020-01-26T14:02:43.000Z | core/domain/rte_component_registry_test.py | ryanboris/oppia | bc39e54e00d53ea2f00bca906fe02162d0c422ac | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import string
import struct
from core.domain import obj_services
from core.domain import rte_component_registry
from core.tests import test_utils
import feconf
import schema_utils
import schema_utils_test
import utils
# File names ending in any of these suffixes will be ignored when checking for
# RTE component validity.
IGNORED_FILE_SUFFIXES = ['.pyc', '.DS_Store']

# Expected pixel dimensions of each component's icon thumbnail (<Name>.png);
# checked against the PNG header in test_image_thumbnails_for_rte_components.
RTE_THUMBNAIL_HEIGHT_PX = 16
RTE_THUMBNAIL_WIDTH_PX = 16

# (top-level key, expected type) pairs that every RTE component definition
# must provide; validated in test_rte_components_are_valid.
_COMPONENT_CONFIG_SCHEMA = [
    ('backend_id', basestring), ('category', basestring),
    ('description', basestring), ('frontend_id', basestring),
    ('tooltip', basestring), ('icon_data_url', basestring),
    ('preview_url_template', basestring), ('is_complex', bool),
    ('requires_fs', bool), ('is_block_element', bool),
    ('customization_arg_specs', list)]
class RteComponentUnitTests(test_utils.GenericTestBase):
    """Tests that all the default RTE components are valid."""

    def _is_camel_cased(self, name):
        """Check whether a name is non-empty and starts with an upper-case
        letter (the convention for component ids).
        """
        return name and (name[0] in string.ascii_uppercase)

    def _is_alphanumeric_string(self, input_string):
        """Check whether a string contains only [a-zA-Z0-9_] characters."""
        return bool(re.compile("^[a-zA-Z0-9_]+$").match(input_string))

    def _validate_customization_arg_specs(self, customization_arg_specs):
        """Check each customization arg spec: required keys, a valid schema,
        and a default value that normalizes against that schema.
        """
        for ca_spec in customization_arg_specs:
            self.assertEqual(set(ca_spec.keys()), set([
                'name', 'description', 'schema', 'default_value']))
            self.assertTrue(isinstance(ca_spec['name'], basestring))
            self.assertTrue(self._is_alphanumeric_string(ca_spec['name']))
            self.assertTrue(isinstance(ca_spec['description'], basestring))
            self.assertGreater(len(ca_spec['description']), 0)

            # The default value might not pass validation checks (e.g. the
            # Image component has a required field whose default value is
            # empty). Thus, when checking the default value schema, we don't
            # apply the custom validators.
            schema_utils_test.validate_schema(ca_spec['schema'])
            self.assertEqual(
                ca_spec['default_value'],
                schema_utils.normalize_against_schema(
                    ca_spec['default_value'], ca_spec['schema'],
                    apply_custom_validators=False))

            if ca_spec['schema']['type'] == 'custom':
                # Custom schemas delegate to an object class, which must
                # declare its editor templates and accept the default value.
                obj_class = obj_services.Registry.get_object_class_by_type(
                    ca_spec['schema']['obj_type'])
                self.assertIsNotNone(obj_class.edit_html_filename)
                self.assertIsNotNone(obj_class.edit_js_filename)
                self.assertEqual(
                    ca_spec['default_value'],
                    obj_class.normalize(ca_spec['default_value']))

    def _listdir_omit_ignored(self, directory):
        """List all files and directories within 'directory', omitting the ones
        whose name ends in one of the IGNORED_FILE_SUFFIXES.
        """
        names = os.listdir(directory)
        for suffix in IGNORED_FILE_SUFFIXES:
            names = [name for name in names if not name.endswith(suffix)]
        return names

    def test_image_thumbnails_for_rte_components(self):
        """Test the thumbnails for the RTE component icons."""
        rte_components = (
            rte_component_registry.Registry.get_all_rte_components())

        for (component_name, component_specs) in rte_components.iteritems():
            # The icon declared in the spec must be the <Name>.png file
            # inside the component's own directory.
            generated_image_filepath = os.path.join(
                os.getcwd(), feconf.RTE_EXTENSIONS_DIR,
                component_name, '%s.png' % component_name)
            relative_icon_data_url = component_specs['icon_data_url'][1:]
            defined_image_filepath = os.path.join(
                os.getcwd(), feconf.EXTENSIONS_DIR_PREFIX,
                'extensions', relative_icon_data_url)
            self.assertEqual(generated_image_filepath, defined_image_filepath)

            with open(generated_image_filepath, 'rb') as f:
                img_data = f.read()
                # Bytes 16-24 of a PNG hold the IHDR width/height fields.
                width, height = struct.unpack('>LL', img_data[16:24])
                self.assertEqual(int(width), RTE_THUMBNAIL_WIDTH_PX)
                self.assertEqual(int(height), RTE_THUMBNAIL_HEIGHT_PX)

    def test_rte_components_are_valid(self):
        """Test that the default RTE components are valid."""
        rte_components = (
            rte_component_registry.Registry.get_all_rte_components())

        for (component_id, component_specs) in rte_components.iteritems():
            # Check that the component id is valid.
            self.assertTrue(self._is_camel_cased(component_id))

            # Check that the component directory exists.
            component_dir = os.path.join(
                feconf.RTE_EXTENSIONS_DIR, component_id)
            self.assertTrue(os.path.isdir(component_dir))

            # In this directory there should be a /directives directory, an
            # icon .png file and a protractor.js file, and an optional
            # preview .png file -- at most 4 entries in total.
            # In the /directives directory there should be an HTML file and
            # a JS file; there could be multiple JS and HTML files.
            dir_contents = self._listdir_omit_ignored(component_dir)
            self.assertLessEqual(len(dir_contents), 4)

            directives_dir = os.path.join(component_dir, 'directives')
            png_file = os.path.join(component_dir, '%s.png' % component_id)
            preview_file = os.path.join(
                component_dir, '%sPreview.png' % component_id)
            protractor_file = os.path.join(component_dir, 'protractor.js')

            self.assertTrue(os.path.isdir(directives_dir))
            self.assertTrue(os.path.isfile(png_file))
            self.assertTrue(os.path.isfile(protractor_file))

            # Bug fix: this previously compared against 5, which the
            # assertLessEqual(..., 4) above made unreachable. With the three
            # mandatory entries verified, a fourth entry must be the
            # optional preview image.
            if len(dir_contents) == 4:
                self.assertTrue(os.path.isfile(preview_file))

            main_js_file = os.path.join(
                directives_dir, '%sDirective.js' % component_id)
            main_html_file = os.path.join(
                directives_dir, '%s_directive.html' % component_id.lower())

            self.assertTrue(os.path.isfile(main_js_file))
            self.assertTrue(os.path.isfile(main_html_file))

            js_file_content = utils.get_file_contents(main_js_file)
            self.assertIn(
                'oppiaNoninteractive%s' % component_id, js_file_content)
            # Inline <script> tags are disallowed inside directive JS files.
            self.assertNotIn('<script>', js_file_content)
            self.assertNotIn('</script>', js_file_content)

            # Check that the configuration file contains the correct
            # top-level keys, and that these keys have the correct types.
            for item, item_type in _COMPONENT_CONFIG_SCHEMA:
                self.assertTrue(isinstance(
                    component_specs[item], item_type))
                # The string attributes should be non-empty.
                if item_type == basestring:
                    self.assertTrue(component_specs[item])

            self._validate_customization_arg_specs(
                component_specs['customization_arg_specs'])  # pylint: disable=protected-access

    def test_html_contains_all_imports(self):
        """Test that the rich_text_components.html file contains script-imports
        for all directives of all RTE components.
        """
        js_files_paths = []
        for component_id in feconf.ALLOWED_RTE_EXTENSIONS:
            component_dir = os.path.join(
                feconf.RTE_EXTENSIONS_DIR, component_id)
            directives_dir = os.path.join(component_dir, 'directives')
            directive_filenames = os.listdir(directives_dir)
            js_files_paths.extend(
                os.path.join(directives_dir, filename) for filename
                in directive_filenames if filename.endswith('.js'))

        js_files_paths.sort()
        prefix = '<script src="{{ASSET_DIR_PREFIX}}/'
        suffix = '"></script>'
        html_script_tags = [
            '%s%s%s' % (prefix, path, suffix) for path in js_files_paths]
        generated_html = '\n'.join(html_script_tags)

        rtc_html_file = os.path.join(
            feconf.FRONTEND_TEMPLATES_DIR, 'components',
            'rich_text_components.html')
        with open(rtc_html_file, 'r') as f:
            rtc_html_file_contents = f.read()
        self.assertEqual(generated_html, rtc_html_file_contents.strip())
| 44.07767 | 95 | 0.651432 |
import os
import re
import string
import struct
from core.domain import obj_services
from core.domain import rte_component_registry
from core.tests import test_utils
import feconf
import schema_utils
import schema_utils_test
import utils
IGNORED_FILE_SUFFIXES = ['.pyc', '.DS_Store']
RTE_THUMBNAIL_HEIGHT_PX = 16
RTE_THUMBNAIL_WIDTH_PX = 16
_COMPONENT_CONFIG_SCHEMA = [
('backend_id', basestring), ('category', basestring),
('description', basestring), ('frontend_id', basestring),
('tooltip', basestring), ('icon_data_url', basestring),
('preview_url_template', basestring), ('is_complex', bool),
('requires_fs', bool), ('is_block_element', bool),
('customization_arg_specs', list)]
class RteComponentUnitTests(test_utils.GenericTestBase):
    """Validates the default rich-text-editor (RTE) components: their ids,
    directory layout, icon thumbnails, config schema and script imports.
    """

    def _is_camel_cased(self, name):
        """Check whether a name is non-empty and starts upper-case."""
        return name and (name[0] in string.ascii_uppercase)

    def _is_alphanumeric_string(self, input_string):
        """Check whether a string contains only [a-zA-Z0-9_] characters."""
        return bool(re.compile("^[a-zA-Z0-9_]+$").match(input_string))

    def _validate_customization_arg_specs(self, customization_arg_specs):
        """Check each customization arg spec's keys, schema and default."""
        for ca_spec in customization_arg_specs:
            self.assertEqual(set(ca_spec.keys()), set([
                'name', 'description', 'schema', 'default_value']))
            self.assertTrue(isinstance(ca_spec['name'], basestring))
            self.assertTrue(self._is_alphanumeric_string(ca_spec['name']))
            self.assertTrue(isinstance(ca_spec['description'], basestring))
            self.assertGreater(len(ca_spec['description']), 0)
            # The default value may not pass custom validation checks, so
            # normalization below does not apply the custom validators.
            schema_utils_test.validate_schema(ca_spec['schema'])
            self.assertEqual(
                ca_spec['default_value'],
                schema_utils.normalize_against_schema(
                    ca_spec['default_value'], ca_spec['schema'],
                    apply_custom_validators=False))
            if ca_spec['schema']['type'] == 'custom':
                # Custom schemas delegate to an object class that must
                # declare editor templates and accept the default value.
                obj_class = obj_services.Registry.get_object_class_by_type(
                    ca_spec['schema']['obj_type'])
                self.assertIsNotNone(obj_class.edit_html_filename)
                self.assertIsNotNone(obj_class.edit_js_filename)
                self.assertEqual(
                    ca_spec['default_value'],
                    obj_class.normalize(ca_spec['default_value']))

    def _listdir_omit_ignored(self, directory):
        """List the directory's entries, dropping IGNORED_FILE_SUFFIXES."""
        names = os.listdir(directory)
        for suffix in IGNORED_FILE_SUFFIXES:
            names = [name for name in names if not name.endswith(suffix)]
        return names

    def test_image_thumbnails_for_rte_components(self):
        """Each component's declared icon must be its own <Name>.png with the
        expected 16x16 pixel dimensions.
        """
        rte_components = (
            rte_component_registry.Registry.get_all_rte_components())
        for (component_name, component_specs) in rte_components.iteritems():
            generated_image_filepath = os.path.join(
                os.getcwd(), feconf.RTE_EXTENSIONS_DIR,
                component_name, '%s.png' % component_name)
            relative_icon_data_url = component_specs['icon_data_url'][1:]
            defined_image_filepath = os.path.join(
                os.getcwd(), feconf.EXTENSIONS_DIR_PREFIX,
                'extensions', relative_icon_data_url)
            self.assertEqual(generated_image_filepath, defined_image_filepath)
            with open(generated_image_filepath, 'rb') as f:
                img_data = f.read()
                # Bytes 16-24 of a PNG hold the IHDR width/height fields.
                width, height = struct.unpack('>LL', img_data[16:24])
                self.assertEqual(int(width), RTE_THUMBNAIL_WIDTH_PX)
                self.assertEqual(int(height), RTE_THUMBNAIL_HEIGHT_PX)

    def test_rte_components_are_valid(self):
        """Each default RTE component's id, files and config must be valid."""
        rte_components = (
            rte_component_registry.Registry.get_all_rte_components())
        for (component_id, component_specs) in rte_components.iteritems():
            # Check that the component id is valid.
            self.assertTrue(self._is_camel_cased(component_id))
            # Check that the component directory exists.
            component_dir = os.path.join(
                feconf.RTE_EXTENSIONS_DIR, component_id)
            self.assertTrue(os.path.isdir(component_dir))
            # In this directory there should be a /directives directory, an
            # icon .png file and a protractor.js file, and an optional
            # preview .png file.
            # In the /directives directory there should be an HTML file and
            # a JS file; there could be multiple JS and HTML files.
            dir_contents = self._listdir_omit_ignored(component_dir)
            self.assertLessEqual(len(dir_contents), 4)
            directives_dir = os.path.join(component_dir, 'directives')
            png_file = os.path.join(component_dir, '%s.png' % component_id)
            preview_file = os.path.join(
                component_dir, '%sPreview.png' % component_id)
            protractor_file = os.path.join(component_dir, 'protractor.js')
            self.assertTrue(os.path.isdir(directives_dir))
            self.assertTrue(os.path.isfile(png_file))
            self.assertTrue(os.path.isfile(protractor_file))
            # NOTE(review): this branch is unreachable -- the
            # assertLessEqual above caps len(dir_contents) at 4, so the
            # preview-file check never runs; likely should be `== 4`.
            if len(dir_contents) == 5:
                self.assertTrue(os.path.isfile(preview_file))
            main_js_file = os.path.join(
                directives_dir, '%sDirective.js' % component_id)
            main_html_file = os.path.join(
                directives_dir, '%s_directive.html' % component_id.lower())
            self.assertTrue(os.path.isfile(main_js_file))
            self.assertTrue(os.path.isfile(main_html_file))
            js_file_content = utils.get_file_contents(main_js_file)
            self.assertIn(
                'oppiaNoninteractive%s' % component_id, js_file_content)
            # Inline <script> tags are disallowed in directive JS files.
            self.assertNotIn('<script>', js_file_content)
            self.assertNotIn('</script>', js_file_content)
            # Check that the configuration file contains the correct
            # top-level keys, and that these keys have the correct types.
            for item, item_type in _COMPONENT_CONFIG_SCHEMA:
                self.assertTrue(isinstance(
                    component_specs[item], item_type))
                # The string attributes should be non-empty.
                if item_type == basestring:
                    self.assertTrue(component_specs[item])
            self._validate_customization_arg_specs(
                component_specs['customization_arg_specs'])  # pylint: disable=protected-access

    def test_html_contains_all_imports(self):
        """rich_text_components.html must script-import every component
        directive JS file, sorted, with the expected prefix/suffix.
        """
        js_files_paths = []
        for component_id in feconf.ALLOWED_RTE_EXTENSIONS:
            component_dir = os.path.join(
                feconf.RTE_EXTENSIONS_DIR, component_id)
            directives_dir = os.path.join(component_dir, 'directives')
            directive_filenames = os.listdir(directives_dir)
            js_files_paths.extend(
                os.path.join(directives_dir, filename) for filename
                in directive_filenames if filename.endswith('.js'))
        js_files_paths.sort()
        prefix = '<script src="{{ASSET_DIR_PREFIX}}/'
        suffix = '"></script>'
        html_script_tags = [
            '%s%s%s' % (prefix, path, suffix) for path in js_files_paths]
        generated_html = '\n'.join(html_script_tags)
        rtc_html_file = os.path.join(
            feconf.FRONTEND_TEMPLATES_DIR, 'components',
            'rich_text_components.html')
        with open(rtc_html_file, 'r') as f:
            rtc_html_file_contents = f.read()
        self.assertEqual(generated_html, rtc_html_file_contents.strip())
| true | true |
f70f444915b3432c8906c8cf40a5ebbacc9d4f46 | 15,039 | py | Python | wo/cli/plugins/info.py | cdk-comp/WordOps | d1f0ccc7202d43c90ee7640f7acd4b7c3c158ee1 | [
"MIT"
] | null | null | null | wo/cli/plugins/info.py | cdk-comp/WordOps | d1f0ccc7202d43c90ee7640f7acd4b7c3c158ee1 | [
"MIT"
] | null | null | null | wo/cli/plugins/info.py | cdk-comp/WordOps | d1f0ccc7202d43c90ee7640f7acd4b7c3c158ee1 | [
"MIT"
] | 2 | 2021-01-02T07:49:51.000Z | 2022-03-26T15:58:50.000Z | """WOInfo Plugin for WordOps"""
from cement.core.controller import CementBaseController, expose
from cement.core import handler, hook
from wo.core.variables import WOVariables
from pynginxconfig import NginxConfig
from wo.core.aptget import WOAptGet
from wo.core.shellexec import WOShellExec
from wo.core.logging import Log
import os
import configparser
def wo_info_hook(app):
    """Cement hook for the info plugin; registered for
    'post_argument_parsing' below but currently a no-op placeholder.
    """
    pass
class WOInfoController(CementBaseController):
    """Controller behind ``wo info``.

    Gathers runtime configuration from Nginx, PHP-FPM (5.x and 7.2) and
    MySQL — partly by shelling out, partly by parsing config files — and
    renders it through mustache templates.
    """

    class Meta:
        label = 'info'
        stacked_on = 'base'
        stacked_type = 'nested'
        description = ('Display configuration information related to Nginx,'
                       ' PHP and MySQL')
        arguments = [
            (['--mysql'],
                dict(help='Get MySQL configuration information',
                     action='store_true')),
            (['--php'],
                dict(help='Get PHP configuration information',
                     action='store_true')),
            (['--php72'],
                dict(help='Get PHP 7.2 configuration information',
                     action='store_true')),
            (['--nginx'],
                dict(help='Get Nginx configuration information',
                     action='store_true')),
            ]
        usage = "wo info [options]"

    @expose(hide=True)
    def info_nginx(self):
        """Display Nginx version, access ACL and key nginx.conf settings."""
        version = os.popen("nginx -v 2>&1 | cut -d':' -f2 | cut -d' ' -f2 | "
                           "cut -d'/' -f2 | tr -d '\n'").read()
        allow = os.popen("grep ^allow /etc/nginx/common/acl.conf | "
                         "cut -d' ' -f2 | cut -d';' -f1 | tr '\n' ' '").read()
        nc = NginxConfig()
        nc.loadf('/etc/nginx/nginx.conf')
        user = nc.get('user')[1]
        worker_processes = nc.get('worker_processes')[1]
        worker_connections = nc.get([('events',), 'worker_connections'])[1]
        keepalive_timeout = nc.get([('http',), 'keepalive_timeout'])[1]
        fastcgi_read_timeout = nc.get([('http',),
                                      'fastcgi_read_timeout'])[1]
        client_max_body_size = nc.get([('http',),
                                      'client_max_body_size'])[1]
        data = dict(version=version, allow=allow, user=user,
                    worker_processes=worker_processes,
                    keepalive_timeout=keepalive_timeout,
                    worker_connections=worker_connections,
                    fastcgi_read_timeout=fastcgi_read_timeout,
                    client_max_body_size=client_max_body_size)
        self.app.render((data), 'info_nginx.mustache')

    @staticmethod
    def _read_pool(config, pool):
        """Extract the settings of one PHP-FPM pool section as a dict.

        :param config: a ConfigParser that has already read the pool file.
        :param pool: section name, e.g. 'www' or 'debug'.
        The xdebug profiler trigger flag defaults to 'off' when the pool
        file does not define it.
        """
        settings = dict(
            listen=config[pool]['listen'],
            ping_path=config[pool]['ping.path'],
            pm_status_path=config[pool]['pm.status_path'],
            pm=config[pool]['pm'],
            pm_max_requests=config[pool]['pm.max_requests'],
            pm_max_children=config[pool]['pm.max_children'],
            pm_start_servers=config[pool]['pm.start_servers'],
            pm_min_spare_servers=config[pool]['pm.min_spare_servers'],
            pm_max_spare_servers=config[pool]['pm.max_spare_servers'],
            request_terminate_timeout=(config[pool]
                                       ['request_terminate_timeout']),
        )
        try:
            settings['xdebug'] = (config[pool]
                                  ['php_admin_flag[xdebug.profiler_enable'
                                   '_trigger]'])
        except KeyError:
            settings['xdebug'] = 'off'
        return settings

    def _render_php_info(self, php_binary, php_etc_dir):
        """Collect php.ini plus www/debug FPM pool settings and render them.

        :param php_binary: CLI binary used to query the version
                           (e.g. "php", "php5.6" or "php7.2").
        :param php_etc_dir: subdirectory of /etc holding the FPM config
                            (e.g. "php5", "php/5.6" or "php/7.2").
        """
        # Bug fix: the original code assigned the bound ``read`` method
        # (missing parentheses), so the template received a method object
        # instead of the version string.
        version = os.popen("{0} -v 2>/dev/null | head -n1 | cut -d' ' -f2 |"
                           " cut -d'+' -f1 | tr -d '\n'"
                           .format(php_binary)).read()
        config = configparser.ConfigParser()
        config.read('/etc/{0}/fpm/php.ini'.format(php_etc_dir))
        expose_php = config['PHP']['expose_php']
        memory_limit = config['PHP']['memory_limit']
        post_max_size = config['PHP']['post_max_size']
        upload_max_filesize = config['PHP']['upload_max_filesize']
        max_execution_time = config['PHP']['max_execution_time']

        config.read('/etc/{0}/fpm/pool.d/www.conf'.format(php_etc_dir))
        www = self._read_pool(config, 'www')
        config.read('/etc/{0}/fpm/pool.d/debug.conf'.format(php_etc_dir))
        debug = self._read_pool(config, 'debug')

        data = dict(version=version, expose_php=expose_php,
                    memory_limit=memory_limit, post_max_size=post_max_size,
                    upload_max_filesize=upload_max_filesize,
                    max_execution_time=max_execution_time,
                    www_listen=www['listen'],
                    www_ping_path=www['ping_path'],
                    www_pm_status_path=www['pm_status_path'],
                    www_pm=www['pm'],
                    www_pm_max_requests=www['pm_max_requests'],
                    www_pm_max_children=www['pm_max_children'],
                    www_pm_start_servers=www['pm_start_servers'],
                    www_pm_min_spare_servers=www['pm_min_spare_servers'],
                    www_pm_max_spare_servers=www['pm_max_spare_servers'],
                    www_request_terminate_timeout=(
                        www['request_terminate_timeout']),
                    www_xdebug_profiler_enable_trigger=www['xdebug'],
                    debug_listen=debug['listen'],
                    debug_ping_path=debug['ping_path'],
                    debug_pm_status_path=debug['pm_status_path'],
                    debug_pm=debug['pm'],
                    debug_pm_max_requests=debug['pm_max_requests'],
                    debug_pm_max_children=debug['pm_max_children'],
                    debug_pm_start_servers=debug['pm_start_servers'],
                    debug_pm_min_spare_servers=debug['pm_min_spare_servers'],
                    debug_pm_max_spare_servers=debug['pm_max_spare_servers'],
                    debug_request_terminate_timeout=(
                        debug['request_terminate_timeout']),
                    debug_xdebug_profiler_enable_trigger=debug['xdebug'])
        self.app.render((data), 'info_php.mustache')

    @expose(hide=True)
    def info_php(self):
        """Display PHP 5.x information.

        On trusty/xenial/bionic the 5.x stack is packaged as php5.6 under
        /etc/php/5.6; elsewhere as php under /etc/php5.
        """
        use_php56 = WOVariables.wo_platform_codename in ('trusty', 'xenial',
                                                         'bionic')
        self._render_php_info("php5.6" if use_php56 else "php",
                              "php/5.6" if use_php56 else "php5")

    @expose(hide=True)
    def info_php72(self):
        """Display PHP 7.2 information."""
        self._render_php_info("php7.2", "php/7.2")

    @expose(hide=True)
    def info_mysql(self):
        """Display MySQL server version, timeouts and paths."""
        version = os.popen("mysql -V | awk '{print($5)}' | cut -d ',' "
                           "-f1 | tr -d '\n'").read()
        host = "localhost"
        port = os.popen("mysql -e \"show variables\" | grep ^port | awk "
                        "'{print($2)}' | tr -d '\n'").read()
        wait_timeout = os.popen("mysql -e \"show variables\" | grep "
                                "^wait_timeout | awk '{print($2)}' | "
                                "tr -d '\n'").read()
        interactive_timeout = os.popen("mysql -e \"show variables\" | grep "
                                       "^interactive_timeout | awk "
                                       "'{print($2)}' | tr -d '\n'").read()
        max_used_connections = os.popen("mysql -e \"show global status\" | "
                                        "grep Max_used_connections | awk "
                                        "'{print($2)}' | tr -d '\n'").read()
        datadir = os.popen("mysql -e \"show variables\" | grep datadir | awk"
                           " '{print($2)}' | tr -d '\n'").read()
        socket = os.popen("mysql -e \"show variables\" | grep \"^socket\" | "
                          "awk '{print($2)}' | tr -d '\n'").read()
        data = dict(version=version, host=host, port=port,
                    wait_timeout=wait_timeout,
                    interactive_timeout=interactive_timeout,
                    max_used_connections=max_used_connections,
                    datadir=datadir, socket=socket)
        self.app.render((data), 'info_mysql.mustache')

    @expose(hide=True)
    def default(self):
        """Entry point: with no flags given, report on every installed stack."""
        if (not self.app.pargs.nginx and not self.app.pargs.php
                and not self.app.pargs.mysql and not self.app.pargs.php72):
            self.app.pargs.nginx = True
            self.app.pargs.php = True
            self.app.pargs.mysql = True
            if WOAptGet.is_installed(self, 'php7.2-fpm'):
                # Bug fix: this previously re-set ``php`` (already True)
                # instead of enabling the PHP 7.2 report.
                self.app.pargs.php72 = True
        if self.app.pargs.nginx:
            if WOAptGet.is_installed(self, 'nginx-custom') or WOAptGet.is_installed(self, 'nginx-common'):
                self.info_nginx()
            else:
                Log.error(self, "Nginx is not installed")
        if self.app.pargs.php:
            if (WOVariables.wo_platform_distro == 'debian' or WOVariables.wo_platform_codename == 'precise'):
                if WOAptGet.is_installed(self, 'php5-fpm'):
                    self.info_php()
                else:
                    Log.error(self, "PHP5 is not installed")
            else:
                if WOAptGet.is_installed(self, 'php5.6-fpm'):
                    self.info_php()
                else:
                    Log.error(self, "PHP5.6 is not installed")
        if self.app.pargs.php72:
            if WOAptGet.is_installed(self, 'php7.2-fpm'):
                self.info_php72()
            else:
                Log.error(self, "PHP 7.2 is not installed")
        if self.app.pargs.mysql:
            if WOShellExec.cmd_exec(self, "mysqladmin ping"):
                self.info_mysql()
            else:
                Log.error(self, "MySQL is not installed")
def load(app):
    """Plugin entry point called by the Cement framework.

    :param app: the WordOps application instance being initialised.
    """
    # register the plugin class; this only happens if the plugin is enabled
    handler.register(WOInfoController)
    # register a hook (function) to run after arguments are parsed.
    hook.register('post_argument_parsing', wo_info_hook)
| 51.858621 | 252 | 0.583416 |
from cement.core.controller import CementBaseController, expose
from cement.core import handler, hook
from wo.core.variables import WOVariables
from pynginxconfig import NginxConfig
from wo.core.aptget import WOAptGet
from wo.core.shellexec import WOShellExec
from wo.core.logging import Log
import os
import configparser
def wo_info_hook(app):
pass
class WOInfoController(CementBaseController):
class Meta:
label = 'info'
stacked_on = 'base'
stacked_type = 'nested'
description = ('Display configuration information related to Nginx,'
' PHP and MySQL')
arguments = [
(['--mysql'],
dict(help='Get MySQL configuration information',
action='store_true')),
(['--php'],
dict(help='Get PHP configuration information',
action='store_true')),
(['--php72'],
dict(help='Get PHP 7.2 configuration information',
action='store_true')),
(['--nginx'],
dict(help='Get Nginx configuration information',
action='store_true')),
]
usage = "wo info [options]"
@expose(hide=True)
def info_nginx(self):
version = os.popen("nginx -v 2>&1 | cut -d':' -f2 | cut -d' ' -f2 | "
"cut -d'/' -f2 | tr -d '\n'").read()
allow = os.popen("grep ^allow /etc/nginx/common/acl.conf | "
"cut -d' ' -f2 | cut -d';' -f1 | tr '\n' ' '").read()
nc = NginxConfig()
nc.loadf('/etc/nginx/nginx.conf')
user = nc.get('user')[1]
worker_processes = nc.get('worker_processes')[1]
worker_connections = nc.get([('events',), 'worker_connections'])[1]
keepalive_timeout = nc.get([('http',), 'keepalive_timeout'])[1]
fastcgi_read_timeout = nc.get([('http',),
'fastcgi_read_timeout'])[1]
client_max_body_size = nc.get([('http',),
'client_max_body_size'])[1]
data = dict(version=version, allow=allow, user=user,
worker_processes=worker_processes,
keepalive_timeout=keepalive_timeout,
worker_connections=worker_connections,
fastcgi_read_timeout=fastcgi_read_timeout,
client_max_body_size=client_max_body_size)
self.app.render((data), 'info_nginx.mustache')
@expose(hide=True)
def info_php(self):
version = os.popen("{0} -v 2>/dev/null | head -n1 | cut -d' ' -f2 |".format("php5.6" if (WOVariables.wo_platform_codename == 'trusty' or WOVariables.wo_platform_codename == 'xenial' or WOVariables.wo_platform_codename == 'bionic') else "php") +
" cut -d'+' -f1 | tr -d '\n'").read
config = configparser.ConfigParser()
config.read('/etc/{0}/fpm/php.ini'.format("php/5.6" if (WOVariables.wo_platform_codename == 'trusty' or WOVariables.wo_platform_codename == 'xenial' or WOVariables.wo_platform_codename == 'bionic') else "php5"))
expose_php = config['PHP']['expose_php']
memory_limit = config['PHP']['memory_limit']
post_max_size = config['PHP']['post_max_size']
upload_max_filesize = config['PHP']['upload_max_filesize']
max_execution_time = config['PHP']['max_execution_time']
config.read('/etc/{0}/fpm/pool.d/www.conf'.format("php/5.6" if (WOVariables.wo_platform_codename == 'trusty' or WOVariables.wo_platform_codename == 'xenial' or WOVariables.wo_platform_codename == 'bionic') else "php5"))
www_listen = config['www']['listen']
www_ping_path = config['www']['ping.path']
www_pm_status_path = config['www']['pm.status_path']
www_pm = config['www']['pm']
www_pm_max_requests = config['www']['pm.max_requests']
www_pm_max_children = config['www']['pm.max_children']
www_pm_start_servers = config['www']['pm.start_servers']
www_pm_min_spare_servers = config['www']['pm.min_spare_servers']
www_pm_max_spare_servers = config['www']['pm.max_spare_servers']
www_request_terminate_time = (config['www']
['request_terminate_timeout'])
try:
www_xdebug = (config['www']['php_admin_flag[xdebug.profiler_enable'
'_trigger]'])
except Exception as e:
www_xdebug = 'off'
config.read('/etc/{0}/fpm/pool.d/debug.conf'.format("php/5.6" if (WOVariables.wo_platform_codename == 'trusty' or WOVariables.wo_platform_codename == 'xenial' or WOVariables.wo_platform_codename == 'bionic') else "php5"))
debug_listen = config['debug']['listen']
debug_ping_path = config['debug']['ping.path']
debug_pm_status_path = config['debug']['pm.status_path']
debug_pm = config['debug']['pm']
debug_pm_max_requests = config['debug']['pm.max_requests']
debug_pm_max_children = config['debug']['pm.max_children']
debug_pm_start_servers = config['debug']['pm.start_servers']
debug_pm_min_spare_servers = config['debug']['pm.min_spare_servers']
debug_pm_max_spare_servers = config['debug']['pm.max_spare_servers']
debug_request_terminate = (config['debug']
['request_terminate_timeout'])
try:
debug_xdebug = (config['debug']['php_admin_flag[xdebug.profiler_'
'enable_trigger]'])
except Exception as e:
debug_xdebug = 'off'
data = dict(version=version, expose_php=expose_php,
memory_limit=memory_limit, post_max_size=post_max_size,
upload_max_filesize=upload_max_filesize,
max_execution_time=max_execution_time,
www_listen=www_listen, www_ping_path=www_ping_path,
www_pm_status_path=www_pm_status_path, www_pm=www_pm,
www_pm_max_requests=www_pm_max_requests,
www_pm_max_children=www_pm_max_children,
www_pm_start_servers=www_pm_start_servers,
www_pm_min_spare_servers=www_pm_min_spare_servers,
www_pm_max_spare_servers=www_pm_max_spare_servers,
www_request_terminate_timeout=www_request_terminate_time,
www_xdebug_profiler_enable_trigger=www_xdebug,
debug_listen=debug_listen, debug_ping_path=debug_ping_path,
debug_pm_status_path=debug_pm_status_path,
debug_pm=debug_pm,
debug_pm_max_requests=debug_pm_max_requests,
debug_pm_max_children=debug_pm_max_children,
debug_pm_start_servers=debug_pm_start_servers,
debug_pm_min_spare_servers=debug_pm_min_spare_servers,
debug_pm_max_spare_servers=debug_pm_max_spare_servers,
debug_request_terminate_timeout=debug_request_terminate,
debug_xdebug_profiler_enable_trigger=debug_xdebug)
self.app.render((data), 'info_php.mustache')
@expose(hide=True)
def info_php72(self):
version = os.popen("php7.2 -v 2>/dev/null | head -n1 | cut -d' ' -f2 |"
" cut -d'+' -f1 | tr -d '\n'").read
config = configparser.ConfigParser()
config.read('/etc/php/7.2/fpm/php.ini')
expose_php = config['PHP']['expose_php']
memory_limit = config['PHP']['memory_limit']
post_max_size = config['PHP']['post_max_size']
upload_max_filesize = config['PHP']['upload_max_filesize']
max_execution_time = config['PHP']['max_execution_time']
config.read('/etc/php/7.2/fpm/pool.d/www.conf')
www_listen = config['www']['listen']
www_ping_path = config['www']['ping.path']
www_pm_status_path = config['www']['pm.status_path']
www_pm = config['www']['pm']
www_pm_max_requests = config['www']['pm.max_requests']
www_pm_max_children = config['www']['pm.max_children']
www_pm_start_servers = config['www']['pm.start_servers']
www_pm_min_spare_servers = config['www']['pm.min_spare_servers']
www_pm_max_spare_servers = config['www']['pm.max_spare_servers']
www_request_terminate_time = (config['www']
['request_terminate_timeout'])
try:
www_xdebug = (config['www']['php_admin_flag[xdebug.profiler_enable'
'_trigger]'])
except Exception as e:
www_xdebug = 'off'
config.read('/etc/php/7.2/fpm/pool.d/debug.conf')
debug_listen = config['debug']['listen']
debug_ping_path = config['debug']['ping.path']
debug_pm_status_path = config['debug']['pm.status_path']
debug_pm = config['debug']['pm']
debug_pm_max_requests = config['debug']['pm.max_requests']
debug_pm_max_children = config['debug']['pm.max_children']
debug_pm_start_servers = config['debug']['pm.start_servers']
debug_pm_min_spare_servers = config['debug']['pm.min_spare_servers']
debug_pm_max_spare_servers = config['debug']['pm.max_spare_servers']
debug_request_terminate = (config['debug']
['request_terminate_timeout'])
try:
debug_xdebug = (config['debug']['php_admin_flag[xdebug.profiler_'
'enable_trigger]'])
except Exception as e:
debug_xdebug = 'off'
data = dict(version=version, expose_php=expose_php,
memory_limit=memory_limit, post_max_size=post_max_size,
upload_max_filesize=upload_max_filesize,
max_execution_time=max_execution_time,
www_listen=www_listen, www_ping_path=www_ping_path,
www_pm_status_path=www_pm_status_path, www_pm=www_pm,
www_pm_max_requests=www_pm_max_requests,
www_pm_max_children=www_pm_max_children,
www_pm_start_servers=www_pm_start_servers,
www_pm_min_spare_servers=www_pm_min_spare_servers,
www_pm_max_spare_servers=www_pm_max_spare_servers,
www_request_terminate_timeout=www_request_terminate_time,
www_xdebug_profiler_enable_trigger=www_xdebug,
debug_listen=debug_listen, debug_ping_path=debug_ping_path,
debug_pm_status_path=debug_pm_status_path,
debug_pm=debug_pm,
debug_pm_max_requests=debug_pm_max_requests,
debug_pm_max_children=debug_pm_max_children,
debug_pm_start_servers=debug_pm_start_servers,
debug_pm_min_spare_servers=debug_pm_min_spare_servers,
debug_pm_max_spare_servers=debug_pm_max_spare_servers,
debug_request_terminate_timeout=debug_request_terminate,
debug_xdebug_profiler_enable_trigger=debug_xdebug)
self.app.render((data), 'info_php.mustache')
@expose(hide=True)
def info_mysql(self):
version = os.popen("mysql -V | awk '{print($5)}' | cut -d ',' "
"-f1 | tr -d '\n'").read()
host = "localhost"
port = os.popen("mysql -e \"show variables\" | grep ^port | awk "
"'{print($2)}' | tr -d '\n'").read()
wait_timeout = os.popen("mysql -e \"show variables\" | grep "
"^wait_timeout | awk '{print($2)}' | "
"tr -d '\n'").read()
interactive_timeout = os.popen("mysql -e \"show variables\" | grep "
"^interactive_timeout | awk "
"'{print($2)}' | tr -d '\n'").read()
max_used_connections = os.popen("mysql -e \"show global status\" | "
"grep Max_used_connections | awk "
"'{print($2)}' | tr -d '\n'").read()
datadir = os.popen("mysql -e \"show variables\" | grep datadir | awk"
" '{print($2)}' | tr -d '\n'").read()
socket = os.popen("mysql -e \"show variables\" | grep \"^socket\" | "
"awk '{print($2)}' | tr -d '\n'").read()
data = dict(version=version, host=host, port=port,
wait_timeout=wait_timeout,
interactive_timeout=interactive_timeout,
max_used_connections=max_used_connections,
datadir=datadir, socket=socket)
self.app.render((data), 'info_mysql.mustache')
@expose(hide=True)
def default(self):
if (not self.app.pargs.nginx and not self.app.pargs.php
and not self.app.pargs.mysql and not self.app.pargs.php72):
self.app.pargs.nginx = True
self.app.pargs.php = True
self.app.pargs.mysql = True
if WOAptGet.is_installed(self, 'php7.2-fpm'):
self.app.pargs.php = True
if self.app.pargs.nginx:
if WOAptGet.is_installed(self, 'nginx-custom') or WOAptGet.is_installed(self, 'nginx-common'):
self.info_nginx()
else:
Log.error(self, "Nginx is not installed")
if self.app.pargs.php:
if (WOVariables.wo_platform_distro == 'debian' or WOVariables.wo_platform_codename == 'precise'):
if WOAptGet.is_installed(self, 'php5-fpm'):
self.info_php()
else:
Log.error(self, "PHP5 is not installed")
else:
if WOAptGet.is_installed(self, 'php5.6-fpm'):
self.info_php()
else:
Log.error(self, "PHP5.6 is not installed")
if self.app.pargs.php72:
if WOAptGet.is_installed(self, 'php7.2-fpm'):
self.info_php72()
else:
Log.error(self, "PHP 7.2 is not installed")
if self.app.pargs.mysql:
if WOShellExec.cmd_exec(self, "mysqladmin ping"):
self.info_mysql()
else:
Log.error(self, "MySQL is not installed")
def load(app):
handler.register(WOInfoController)
hook.register('post_argument_parsing', wo_info_hook)
| true | true |
f70f445b5ea4447aaf2b1b379dd71f3f6cb58149 | 1,874 | py | Python | setup.py | aalcock/ledtheatre | 6de1193c13b930f8650280d9153713055a1eef7e | [
"MIT"
] | 2 | 2018-01-29T19:56:02.000Z | 2018-02-05T16:29:03.000Z | setup.py | aalcock/ledtheatre | 6de1193c13b930f8650280d9153713055a1eef7e | [
"MIT"
] | null | null | null | setup.py | aalcock/ledtheatre | 6de1193c13b930f8650280d9153713055a1eef7e | [
"MIT"
] | null | null | null | # ledtheatre is Licensed under the MIT License
# Copyright 2017 Andrew Alcock
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from setuptools import setup
def readme():
    """Return the contents of README.rst, used as the long description."""
    with open('README.rst') as readme_file:
        return readme_file.read()
# Distribution metadata for the ledtheatre package (setuptools).
setup(name='ledtheatre',
      version='0.1.3',
      description='Control LED lights on the Adafruit PCA9685 PWM card',
      long_description=readme(),  # pulled from README.rst at build time
      classifiers=[
          'Development Status :: 3 - Alpha',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 2.7',
          'Topic :: System :: Hardware',
      ],
      url='https://github.com/aalcock/ledtheatre',
      author='Andrew Alcock',
      author_email='andrew@alcock.sg',
      license='MIT',
      packages=['ledtheatre'],
      install_requires=[
          # driver library for the PCA9685 PWM chip
          'Adafruit_PCA9685>=1.0.1'
      ],
      zip_safe=True)
| 39.87234 | 80 | 0.704376 |
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='ledtheatre',
version='0.1.3',
description='Control LED lights on the Adafruit PCA9685 PWM card',
long_description=readme(),
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Topic :: System :: Hardware',
],
url='https://github.com/aalcock/ledtheatre',
author='Andrew Alcock',
author_email='andrew@alcock.sg',
license='MIT',
packages=['ledtheatre'],
install_requires=[
'Adafruit_PCA9685>=1.0.1'
],
zip_safe=True)
| true | true |
f70f45557504faf15ed7f0658f2ee3c35b2a0eb7 | 63 | py | Python | built-in/TensorFlow/Research/cv/image_classification/Cars_for_TensorFlow/automl/vega/search_space/networks/pytorch/roi_extractors/__init__.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/TensorFlow/Research/cv/image_classification/Cars_for_TensorFlow/automl/vega/search_space/networks/pytorch/roi_extractors/__init__.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | 3 | 2021-03-31T20:15:40.000Z | 2022-02-09T23:50:46.000Z | built-in/TensorFlow/Research/cv/image_classification/Darts_for_TensorFlow/automl/vega/search_space/networks/pytorch/roi_extractors/__init__.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | from .single_level import *
from .multi_roi_extractor import *
| 21 | 34 | 0.809524 | from .single_level import *
from .multi_roi_extractor import *
| true | true |
f70f457996a102881d83cdcf904a912167be83c4 | 207 | py | Python | transmission_bot/cfg.py | brunojhovany/Telegram-bot-torrent | 8de3b212648b23ef038dbf3b7434d1148a392783 | [
"MIT"
] | null | null | null | transmission_bot/cfg.py | brunojhovany/Telegram-bot-torrent | 8de3b212648b23ef038dbf3b7434d1148a392783 | [
"MIT"
] | null | null | null | transmission_bot/cfg.py | brunojhovany/Telegram-bot-torrent | 8de3b212648b23ef038dbf3b7434d1148a392783 | [
"MIT"
] | null | null | null | from decouple import config
ADDRESS = config("ADDRESS")
PORT = config("PORT")
TS_USER = config("TS_USER")
PASSWORD = config("PASSWORD")
TOKEN = config("TOKEN")
PERSISTENCE_FILE = config("PERSISTENCE_FILE")
| 23 | 45 | 0.743961 | from decouple import config
ADDRESS = config("ADDRESS")
PORT = config("PORT")
TS_USER = config("TS_USER")
PASSWORD = config("PASSWORD")
TOKEN = config("TOKEN")
PERSISTENCE_FILE = config("PERSISTENCE_FILE")
| true | true |
f70f48c501100e45d4611665ec2ffdc842a3c49f | 1,704 | py | Python | nnvm/tvm/topi/tests/python/test_topi_relu.py | rah9eu/p3 | 530628be7b7a8dd3e6199c3bebebdbf104005e5f | [
"Apache-2.0"
] | 22 | 2019-02-20T12:42:20.000Z | 2021-12-25T06:09:46.000Z | nnvm/tvm/topi/tests/python/test_topi_relu.py | rah9eu/p3 | 530628be7b7a8dd3e6199c3bebebdbf104005e5f | [
"Apache-2.0"
] | 4 | 2019-04-01T07:36:04.000Z | 2022-03-24T03:11:26.000Z | nnvm/tvm/topi/tests/python/test_topi_relu.py | rah9eu/p3 | 530628be7b7a8dd3e6199c3bebebdbf104005e5f | [
"Apache-2.0"
] | 7 | 2019-03-20T16:04:37.000Z | 2021-04-28T18:40:11.000Z | """Test code for relu activation"""
import os
import numpy as np
import tvm
import topi
from topi.util import get_const_tuple
def verify_relu(m, n):
    """Compare topi relu with a numpy reference on each enabled GPU target."""
    placeholder = tvm.placeholder((m, n), name='A')
    relu_out = topi.nn.relu(placeholder)

    input_np = np.random.uniform(
        size=get_const_tuple(placeholder.shape)).astype(placeholder.dtype)
    expected_np = input_np * (input_np > 0)

    def run_on(target):
        if not tvm.module.enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        print("Running on target: %s" % target)
        with tvm.target.create(target):
            sched = topi.generic.schedule_elemwise(relu_out)
        ctx = tvm.context(target, 0)
        a_arr = tvm.nd.array(input_np, ctx)
        b_arr = tvm.nd.array(
            np.zeros(get_const_tuple(relu_out.shape), dtype=relu_out.dtype),
            ctx)
        func = tvm.build(sched, [placeholder, relu_out], target, name="relu")
        func(a_arr, b_arr)
        np.testing.assert_allclose(b_arr.asnumpy(), expected_np, rtol=1e-5)

    for target in ['cuda', 'opencl', 'metal', 'rocm']:
        run_on(target)
def verify_leaky_relu(m, alpha):
    """Compare topi leaky_relu with a numpy reference on the llvm CPU target."""
    placeholder = tvm.placeholder((m,), name='A')
    leaky_out = topi.nn.leaky_relu(placeholder, alpha)
    sched = tvm.create_schedule([leaky_out.op])

    input_np = np.random.uniform(
        size=get_const_tuple(placeholder.shape)).astype(placeholder.dtype)
    expected_np = input_np * (input_np > 0) + input_np * (input_np < 0) * alpha

    ctx = tvm.cpu(0)
    a_arr = tvm.nd.array(input_np, ctx)
    b_arr = tvm.nd.array(
        np.zeros(get_const_tuple(leaky_out.shape), dtype=leaky_out.dtype), ctx)
    func = tvm.build(sched, [placeholder, leaky_out], "llvm",
                     name="leaky_relu")
    func(a_arr, b_arr)
    np.testing.assert_allclose(b_arr.asnumpy(), expected_np, rtol=1e-5)
def test_relu():
    """Smoke-test relu on a 10x128 input."""
    verify_relu(m=10, n=128)
def test_leaky_relu():
    """Smoke-test leaky_relu on a length-100 input with alpha=0.1."""
    verify_leaky_relu(m=100, alpha=0.1)
if __name__ == "__main__":
    # Run both smoke tests when the file is executed as a script.
    test_relu()
    test_leaky_relu()
| 29.37931 | 80 | 0.616784 | import os
import numpy as np
import tvm
import topi
from topi.util import get_const_tuple
def verify_relu(m, n):
A = tvm.placeholder((m, n), name='A')
B = topi.nn.relu(A)
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
b_np = a_np * (a_np > 0)
def check_device(device):
if not tvm.module.enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
s = topi.generic.schedule_elemwise(B)
ctx = tvm.context(device, 0)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
foo = tvm.build(s, [A, B], device, name="relu")
foo(a, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
for device in ['cuda', 'opencl', 'metal', 'rocm']:
check_device(device)
def verify_leaky_relu(m, alpha):
A = tvm.placeholder((m,), name='A')
B = topi.nn.leaky_relu(A, alpha)
s = tvm.create_schedule([B.op])
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
b_np = a_np * (a_np > 0) + a_np * (a_np < 0) * alpha
ctx = tvm.cpu(0)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
foo = tvm.build(s, [A, B], "llvm", name="leaky_relu")
foo(a, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
def test_relu():
verify_relu(10, 128)
def test_leaky_relu():
verify_leaky_relu(100, 0.1)
if __name__ == "__main__":
test_relu()
test_leaky_relu()
| true | true |
f70f495f7639c90049b126928377ce133e1f4381 | 1,578 | py | Python | inst/python/rpytools/test.py | bjungbogati/reticulate | e650515a8418ce7732bf89132b0c7a3dc29a7383 | [
"Apache-2.0"
] | 1 | 2018-10-21T18:41:05.000Z | 2018-10-21T18:41:05.000Z | inst/python/rpytools/test.py | bjungbogati/reticulate | e650515a8418ce7732bf89132b0c7a3dc29a7383 | [
"Apache-2.0"
] | 35 | 2019-08-20T16:59:55.000Z | 2022-02-12T19:22:46.000Z | inst/python/rpytools/test.py | bjungbogati/reticulate | e650515a8418ce7732bf89132b0c7a3dc29a7383 | [
"Apache-2.0"
] | 1 | 2020-02-14T02:54:52.000Z | 2020-02-14T02:54:52.000Z |
import threading
import sys

# True when running under Python 2. Use version_info rather than slicing
# sys.version: comparing the first character of the version string is
# fragile, while version_info[0] is the major version as an int.
is_py2 = sys.version_info[0] == 2

if is_py2:
    import Queue as queue  # Python 2 module name
else:
    import queue as queue  # Python 3 module name
def isScalar(x):
    """Return True when *x* is neither a list nor a tuple."""
    is_sequence = isinstance(x, (list, tuple))
    return not is_sequence
def isList(x):
    """Return True when *x* is a list instance."""
    return isinstance(x, list)
def asString(x):
    """Coerce *x* to its string representation."""
    text = str(x)
    return text
def makeDict():
    """Return a sample dict mapping 'a', 'b', 'c' to 1.0, 2.0, 3.0."""
    return dict(a=1.0, b=2.0, c=3.0)
def makeTuple():
    """Return the sample tuple (1.0, 2.0, 3.0)."""
    return 1.0, 2.0, 3.0
def makeIterator(x):
    """Return an iterator over *x*."""
    iterator = iter(x)
    return iterator
def makeGenerator(n):
    """Yield the integers 0, 1, ..., up to (but excluding) n."""
    value = 0
    while value < n:
        yield value
        value += 1
def iterateOnThread(iter):
  """Consume the iterable `iter` on a background thread.

  Returns:
    A list of the items produced, in order.
  """
  results = []
  def iteration_worker():
    for item in iter:
      results.append(item)
  thread = threading.Thread(target = iteration_worker)
  thread.start()
  # Thread.isAlive() was removed in Python 3.9; is_alive() is available on
  # Python 2.6+ and every Python 3 release, so it works under both majors.
  while thread.is_alive():
    # Poll with a timeout so the calling thread stays interruptible.
    thread.join(0.1)
  return results
def invokeOnThread(f, *args, **kwargs):
  """Call f(*args, **kwargs) on a background thread and return its result."""
  result = []
  def invoke_worker():
    result.append(f(*args, **kwargs))
  thread = threading.Thread(target = invoke_worker)
  thread.start()
  # Thread.isAlive() was removed in Python 3.9; use the portable is_alive().
  while thread.is_alive():
    thread.join(0.1)
  return result[0]
def reflect(x):
  """Return x unchanged (round-trip conversion helper)."""
  return x
def callFunc(f, *args, **kwargs):
  """Invoke f with the given positional and keyword arguments."""
  result = f(*args, **kwargs)
  return result
def testThrowError():
  """Call throwError() indirectly, so the raised error carries a call stack."""
  throwError()
def throwError():
  """Raise a ValueError with a fixed message (used to test error conversion)."""
  raise ValueError('A very specific bad thing happened')
class PythonClass(object):
  """Plain class used to test attribute and classmethod access."""
  # Sample class-level constants.
  FOO = 1
  BAR = 2
  @classmethod
  def class_method(cls):
    """Return the class-level FOO constant."""
    return cls.FOO
class PythonCallable(object):
  """Callable object used to test invoking Python objects.

  Note: the original placed a triple-quoted string after FOO/BAR in the
  class body; since a docstring must be the *first* statement, that
  string was a discarded no-op expression. It clearly documented
  __call__ (it has an Args section for arg1), so it now lives there.
  """
  # Sample class-level constants.
  FOO = 1
  BAR = 2
  def __call__(self, arg1):
    """ Call a callable

    Args:
      arg1: First argument.
    """
    return arg1
def create_callable():
  """Construct and return a fresh PythonCallable instance."""
  instance = PythonCallable()
  return instance
# Module-level dict holding a callable value, for testing extraction of
# callables out of Python containers.
dict_with_callable = dict(callable = create_callable())
| 16.268041 | 56 | 0.647655 |
import threading
import sys
is_py2 = sys.version[0] == '2'
if is_py2:
import Queue as queue
else:
import queue as queue
def isScalar(x):
return not isinstance(x, (list, tuple))
def isList(x):
return isinstance(x, (list))
def asString(x):
return str(x)
def makeDict():
return {'a': 1.0, 'c': 3.0, 'b': 2.0}
def makeTuple():
return (1.0, 2.0, 3.0)
def makeIterator(x):
return iter(x)
def makeGenerator(n):
i = 0
while i < n:
yield i
i += 1
def iterateOnThread(iter):
results = []
def iteration_worker():
for i in iter:
results.append(i)
thread = threading.Thread(target = iteration_worker)
thread.start()
while thread.isAlive():
thread.join(0.1)
return results
def invokeOnThread(f, *args, **kwargs):
result = []
def invoke_worker():
result.append(f(*args, **kwargs))
thread = threading.Thread(target = invoke_worker)
thread.start()
while thread.isAlive():
thread.join(0.1)
return result[0]
def reflect(x):
return x
def callFunc(f, *args, **kwargs):
return f(*args, **kwargs)
def testThrowError():
throwError()
def throwError():
raise ValueError('A very specific bad thing happened')
class PythonClass(object):
FOO = 1
BAR = 2
@classmethod
def class_method(cls):
return cls.FOO
class PythonCallable(object):
FOO = 1
BAR = 2
def __call__(self, arg1):
return arg1
def create_callable():
return PythonCallable()
dict_with_callable = dict(callable = create_callable())
| true | true |
f70f499b3938d65a8dec688ff8e18592b5f6b4c1 | 32,683 | py | Python | python/ray/data/dataset_pipeline.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 22 | 2018-05-08T05:52:34.000Z | 2020-04-01T10:09:55.000Z | python/ray/data/dataset_pipeline.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 51 | 2018-05-17T05:55:28.000Z | 2020-03-18T06:49:49.000Z | python/ray/data/dataset_pipeline.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 10 | 2018-04-27T10:50:59.000Z | 2020-02-24T02:41:43.000Z | import inspect
import itertools
import logging
import time
from typing import (
Any,
Callable,
List,
Iterator,
Iterable,
Generic,
Union,
Optional,
TYPE_CHECKING,
)
import ray
from ray.data.context import DatasetContext
from ray.data.dataset import Dataset, T, U
from ray.data.impl.pipeline_executor import (
PipelineExecutor,
PipelineSplitExecutorCoordinator,
)
from ray.data.block import Block
from ray.data.row import TableRow
from ray.data.impl import progress_bar
from ray.data.impl.block_batching import batch_blocks, BatchType
from ray.data.impl.block_list import BlockList
from ray.data.impl.plan import ExecutionPlan
from ray.data.impl.stats import DatasetPipelineStats, DatasetStats
from ray.util.annotations import PublicAPI, DeveloperAPI
if TYPE_CHECKING:
import pyarrow
logger = logging.getLogger(__name__)
# Operations that can be naively applied per dataset row in the pipeline.
_PER_DATASET_OPS = ["map", "map_batches", "add_column", "flat_map", "filter"]
# Operations that apply to each dataset holistically in the pipeline.
_HOLISTIC_PER_DATASET_OPS = ["repartition", "random_shuffle", "sort"]
# Similar to above but we should force evaluation immediately.
_PER_DATASET_OUTPUT_OPS = [
"write_json",
"write_csv",
"write_parquet",
"write_datasource",
]
# Operations that operate over the stream of output batches from the pipeline.
_OUTPUT_ITER_OPS = ["take", "take_all", "show", "to_tf", "to_torch"]
@PublicAPI
class DatasetPipeline(Generic[T]):
"""Implements a pipeline of Datasets.
Unlike Datasets, which execute all transformations synchronously,
DatasetPipelines implement pipelined execution. This allows for the
overlapped execution of data input (e.g., reading files), computation
(e.g. feature preprocessing), and output (e.g., distributed ML training).
A DatasetPipeline can be created by either repeating a Dataset
(``ds.repeat(times=None)``), by turning a single Dataset into a pipeline
(``ds.window(blocks_per_window=10)``), or defined explicitly using
``DatasetPipeline.from_iterable()``.
DatasetPipeline supports the all the per-record transforms of Datasets
(e.g., map, flat_map, filter), holistic transforms (e.g., repartition),
and output methods (e.g., iter_rows, to_tf, to_torch, write_datasource).
"""
def __init__(
self,
base_iterable: Iterable[Callable[[], Dataset[T]]],
stages: List[Callable[[Dataset[Any]], Dataset[Any]]] = None,
length: int = None,
progress_bars: bool = progress_bar._enabled,
_executed: List[bool] = None,
):
"""Construct a DatasetPipeline (internal API).
The constructor is not part of the DatasetPipeline API. Use the
``Dataset.repeat()``, ``Dataset.window()``, or
``DatasetPipeline.from_iterable()`` methods to construct a pipeline.
"""
self._base_iterable = base_iterable
self._stages = stages or []
self._optimized_stages = None
self._length = length
self._progress_bars = progress_bars
self._uuid = None # For testing only.
# Whether the pipeline execution has started.
# This variable is shared across all pipelines descending from this.
self._executed = _executed or [False]
self._dataset_iter = None
self._first_dataset = None
self._schema = None
self._stats = DatasetPipelineStats()
def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Union[T, TableRow]]:
"""Return a local row iterator over the data in the pipeline.
If the dataset is a tabular dataset (Arrow/Pandas blocks), dict-like mappings
:py:class:`~ray.data.row.TableRow` are yielded for each row by the iterator.
If the dataset is not tabular, the raw row is yielded.
Examples:
>>> import ray
>>> for i in ray.data.range(1000000).repeat(5).iter_rows(): # doctest: +SKIP
... print(i) # doctest: +SKIP
Time complexity: O(1)
Args:
prefetch_blocks: The number of blocks to prefetch ahead of the
current block during the scan.
Returns:
A local iterator over the records in the pipeline.
"""
def gen_rows() -> Iterator[Union[T, TableRow]]:
time_start = time.perf_counter()
for ds in self.iter_datasets():
wait_start = time.perf_counter()
for row in ds.iter_rows(prefetch_blocks=prefetch_blocks):
self._stats.iter_wait_s.add(time.perf_counter() - wait_start)
with self._stats.iter_user_s.timer():
yield row
wait_start = time.perf_counter()
self._stats.iter_total_s.add(time.perf_counter() - time_start)
return gen_rows()
def iter_batches(
self,
*,
prefetch_blocks: int = 0,
batch_size: int = None,
batch_format: str = "native",
drop_last: bool = False,
) -> Iterator[BatchType]:
"""Return a local batched iterator over the data in the pipeline.
Examples:
>>> import ray
>>> ds = ray.data.range(1000000).repeat(5) # doctest: +SKIP
>>> for pandas_df in ds.iter_batches(): # doctest: +SKIP
... print(pandas_df) # doctest: +SKIP
Time complexity: O(1)
Args:
prefetch_blocks: The number of blocks to prefetch ahead of the
current block during the scan.
batch_size: Record batch size, or None to let the system pick.
batch_format: The format in which to return each batch.
Specify "native" to use the current block format (promoting
Arrow to pandas automatically), "pandas" to
select ``pandas.DataFrame`` or "pyarrow" to select
``pyarrow.Table``. Default is "native".
drop_last: Whether to drop the last batch if it's incomplete.
Returns:
An iterator over record batches.
"""
time_start = time.perf_counter()
yield from batch_blocks(
self._iter_blocks(),
self._stats,
prefetch_blocks=prefetch_blocks,
batch_size=batch_size,
batch_format=batch_format,
drop_last=drop_last,
)
self._stats.iter_total_s.add(time.perf_counter() - time_start)
def _iter_blocks(self) -> Iterator[Block]:
ds_wait_start = time.perf_counter()
for ds in self.iter_datasets():
self._stats.iter_ds_wait_s.add(time.perf_counter() - ds_wait_start)
yield from ds._plan.execute().iter_blocks()
ds_wait_start = time.perf_counter()
def split(
self, n: int, *, equal: bool = False, locality_hints: List[Any] = None
) -> List["DatasetPipeline[T]"]:
"""Split the pipeline into ``n`` disjoint pipeline shards.
This returns a list of sub-pipelines that can be passed to Ray tasks
and actors and used to read the pipeline records in parallel.
Examples:
>>> import ray
>>> pipe = ray.data.range(10).repeat(50) # doctest: +SKIP
>>> workers = ... # doctest: +SKIP
>>> # Split up a pipeline to process over `n` worker actors.
>>> shards = pipe.split( # doctest: +SKIP
... len(workers), locality_hints=workers)
>>> for shard, worker in zip(shards, workers): # doctest: +SKIP
... worker.consume.remote(shard) # doctest: +SKIP
Time complexity: O(1)
Implementation detail: this launches a coordinator actor that is used
to execute the pipeline and push data blocks to each pipeline shard.
Reading from an individual shard will be blocked if other shards are
falling behind. A warning will be printed if a shard has been blocked
on read for more than 10 seconds.
Args:
n: Number of child pipelines to return.
equal: Whether to guarantee each split has an equal
number of records. This may drop records if they cannot be
divided equally among the splits.
locality_hints: A list of Ray actor handles of size ``n``. The
system will try to co-locate the blocks of the ith pipeline
shard with the ith actor to maximize data locality.
Returns:
A list of ``n`` disjoint pipeline splits.
"""
return self._split(
n,
lambda ds, equal=equal: ds.split(
n, equal=equal, locality_hints=locality_hints
),
)
def split_at_indices(self, indices: List[int]) -> List["DatasetPipeline[T]"]:
"""Split the datasets within the pipeline at the given indices
(like np.split).
This will split each dataset contained within this pipeline, thereby
producing len(indices) + 1 pipelines with the first pipeline containing
the [0, indices[0]) slice from each dataset, the second pipeline
containing the [indices[0], indices[1]) slice from each dataset, and so
on, with the final pipeline will containing the
[indices[-1], self.count()) slice from each dataset.
Examples:
>>> import ray
>>> p1, p2, p3 = ray.data.range( # doctest: +SKIP
... 8).repeat(2).split_at_indices([2, 5]) # doctest: +SKIP
>>> p1.take() # doctest: +SKIP
[0, 1, 0, 1]
>>> p2.take() # doctest: +SKIP
[2, 3, 4, 2, 3, 4]
>>> p3.take() # doctest: +SKIP
[5, 6, 7, 5, 6, 7]
Time complexity: O(num splits)
See also: ``DatasetPipeline.split``
Args:
indices: List of sorted integers which indicate where the pipeline
will be split. If an index exceeds the length of the pipeline,
an empty pipeline will be returned.
Returns:
The pipeline splits.
"""
if len(indices) < 1:
raise ValueError("indices must be at least of length 1")
if sorted(indices) != indices:
raise ValueError("indices must be sorted")
if indices[0] < 0:
raise ValueError("indices must be positive")
return self._split(len(indices) + 1, lambda ds: ds.split_at_indices(indices))
def _split(self, n: int, splitter: Callable[[Dataset], "DatasetPipeline[T]"]):
resources = {}
if not ray.util.client.ray.is_connected():
# Pin the coordinator (and any child actors) to the local node to avoid
# errors during node failures. If the local node dies, then the driver
# will fate-share with the coordinator anyway.
resources["node:{}".format(ray.util.get_node_ip_address())] = 0.0001
coordinator = PipelineSplitExecutorCoordinator.options(
resources=resources,
placement_group=None,
).remote(self, n, splitter, DatasetContext.get_current())
if self._executed[0]:
raise RuntimeError("Pipeline cannot be read multiple times.")
self._executed[0] = True
class SplitIterator:
def __init__(self, split_index, coordinator):
self.split_index = split_index
self.coordinator = coordinator
self.warn_threshold = 100
self.wait_delay_s = 0.1
def __iter__(self):
return self
def __next__(self):
ds = None
tries = 0
while ds is None:
ds = ray.get(
self.coordinator.next_dataset_if_ready.remote(self.split_index)
)
# Wait for other shards to catch up reading.
if not ds:
time.sleep(self.wait_delay_s)
tries += 1
if tries > self.warn_threshold:
print(
"Warning: reader on shard {} of the pipeline "
"has been blocked more than {}s waiting for "
"other readers to catch up. All pipeline shards "
"must be read from concurrently.".format(
self.split_index,
self.wait_delay_s * self.warn_threshold,
)
)
self.warn_threshold *= 2
return lambda: ds
return [
# Disable progress bars for the split readers since they would
# overwhelm the console.
DatasetPipeline(
SplitIterator(idx, coordinator),
length=self._length,
progress_bars=False,
)
for idx in range(n)
]
def rewindow(
self, *, blocks_per_window: int, preserve_epoch: bool = True
) -> "DatasetPipeline[T]":
"""Change the windowing (blocks per dataset) of this pipeline.
Changes the windowing of this pipeline to the specified size. For
example, if the current pipeline has two blocks per dataset, and
`.rewindow(blocks_per_window=4)` is requested, adjacent datasets will
be merged until each dataset is 4 blocks. If
`.rewindow(blocks_per_window)` was requested the datasets will be
split into smaller windows.
Args:
blocks_per_window: The new target blocks per window.
preserve_epoch: Whether to preserve epoch boundaries. If set to
False, then windows can contain data from two adjacent epochs.
"""
class WindowIterator:
def __init__(self, original_iter):
self._original_iter = original_iter
self._buffer: Optional[Dataset[T]] = None
def __next__(self) -> Dataset[T]:
try:
# Merge windows until we meet the requested window size.
if self._buffer is None:
self._buffer = next(self._original_iter)
while self._buffer.num_blocks() < blocks_per_window:
next_ds = next(self._original_iter)
if (
preserve_epoch
and self._buffer._get_epoch() != next_ds._get_epoch()
):
partial_window = self._buffer
self._buffer = next_ds
return lambda: partial_window
else:
self._buffer = self._buffer.union(next_ds)
# Slice off the left-most chunk and return it.
res, self._buffer = self._buffer._divide(blocks_per_window)
assert res.num_blocks() <= blocks_per_window, res
if self._buffer.num_blocks() == 0:
self._buffer = None
return lambda: res
except StopIteration:
# Return the left-over data as a single window.
if self._buffer and self._buffer.num_blocks() > 0:
res = self._buffer
assert res.num_blocks() <= blocks_per_window, res
self._buffer = None
return lambda: res
else:
raise
class WindowIterable:
def __init__(self, original_iter):
self._original_iter = original_iter
def __iter__(self):
return WindowIterator(self._original_iter)
if self._length == float("inf"):
length = float("inf")
else:
length = None
return DatasetPipeline(WindowIterable(self.iter_datasets()), length=length)
def repeat(self, times: int = None) -> "DatasetPipeline[T]":
"""Repeat this pipeline a given number or times, or indefinitely.
This operation is only allowed for pipelines of a finite length. An
error will be raised for pipelines of infinite length.
Note that every repeat of the pipeline is considered an "epoch" for
the purposes of ``iter_epochs()``. If there are multiple repeat calls,
the latest repeat takes precedence for the purpose of defining epochs.
Args:
times: The number of times to loop over this pipeline, or None
to repeat indefinitely.
"""
if self._length == float("inf"):
raise ValueError("Cannot repeat a pipeline of infinite length.")
class RepeatIterator:
def __init__(self, original_iter):
self._original_iter = original_iter
# Holds results to repeat.
self._results = []
# Incrementing cursor over results.
self._i = 0
# This is calculated later.
self._max_i = None
def __next__(self) -> Dataset[T]:
# Still going through the original pipeline.
if self._original_iter:
try:
make_ds = next(self._original_iter)
self._results.append(make_ds)
def gen():
res = make_ds()
res._set_epoch(0)
return res
return gen
except StopIteration:
self._original_iter = None
# Calculate the cursor limit.
if times:
self._max_i = len(self._results) * (times - 1)
else:
self._max_i = float("inf")
# Going through a repeat of the pipeline.
if self._i < self._max_i:
make_ds = self._results[self._i % len(self._results)]
epoch = 1 + self._i // len(self._results)
def gen():
res = make_ds()
res._set_epoch(epoch)
return res
self._i += 1
return gen
else:
raise StopIteration
class RepeatIterable:
def __init__(self, original_iter):
self._original_iter = original_iter
def __iter__(self):
return RepeatIterator(self._original_iter)
if not times:
length = float("inf")
elif times and self._length:
length = times * self._length
else:
length = None
return DatasetPipeline(
RepeatIterable(iter(self._base_iterable)),
stages=self._stages.copy(),
length=length,
)
def schema(
self, fetch_if_missing: bool = False
) -> Union[type, "pyarrow.lib.Schema"]:
"""Return the schema of the dataset pipeline.
For datasets of Arrow records, this will return the Arrow schema.
For dataset of Python objects, this returns their Python type.
Note: This is intended to be a method for peeking schema before
the execution of DatasetPipeline. If execution has already started,
it will simply return the cached schema from the previous call.
Time complexity: O(1)
Args:
fetch_if_missing: If True, synchronously fetch the schema if it's
not known. Default is False, where None is returned if the
schema is not known.
Returns:
The Python type or Arrow schema of the records, or None if the
schema is not known.
"""
if not self._executed[0]:
self._schema = self._peek().schema(fetch_if_missing)
return self._schema
def count(self) -> int:
"""Count the number of records in the dataset pipeline.
This blocks until the entire pipeline is fully executed.
Time complexity: O(dataset size / parallelism)
Returns:
The number of records in the dataset pipeline.
"""
if self._length == float("inf"):
raise ValueError("Cannot count a pipeline of infinite length.")
pipe = self.map_batches(lambda batch: [len(batch)])
total = 0
for elem in pipe.iter_rows():
total += elem
return total
def sum(self) -> int:
"""Sum the records in the dataset pipeline.
This blocks until the entire pipeline is fully executed.
Time complexity: O(dataset size / parallelism)
Returns:
The sum of the records in the dataset pipeline.
"""
if self._length == float("inf"):
raise ValueError("Cannot sum a pipeline of infinite length.")
pipe = self.map_batches(lambda batch: [batch.sum()[0]], batch_format="pandas")
total = 0
for elem in pipe.iter_rows():
total += elem
return total
def show_windows(self, limit_per_dataset: int = 10) -> None:
"""Print up to the given number of records from each window/dataset.
This is helpful as a debugging tool for understanding the structure of
dataset pipelines.
Args:
limit_per_dataset: Rows to print per window/dataset.
"""
epoch = None
for i, ds in enumerate(self.iter_datasets()):
if ds._get_epoch() != epoch:
epoch = ds._get_epoch()
print("------ Epoch {} ------".format(epoch))
print("=== Window {} ===".format(i))
ds.show(limit_per_dataset)
def iter_epochs(self) -> Iterator["DatasetPipeline[T]"]:
"""Split this pipeline up by epoch.
This allows reading of data per-epoch for repeated Datasets, which is
useful for ML training. For example, ``ray.data.range(10).repeat(50)``
generates a pipeline with 500 rows total split across 50 epochs. This
method allows iterating over the data individually per epoch
(repetition) of the original data.
Examples:
>>> import ray
>>> epochs = ray.data.range(10).repeat(50).iter_epochs() # doctest: +SKIP
>>> for i, epoch in enumerate(epochs): # doctest: +SKIP
... print("Epoch", i) # doctest: +SKIP
... for row in epoch.iter_rows(): # doctest: +SKIP
... print(row) # doctest: +SKIP
Returns:
Iterator over epoch objects, where each epoch is a DatasetPipeline
containing data from that epoch only.
"""
class Peekable:
def __init__(self, base_iter: Iterator[T]):
self._iter = base_iter
self._buffer = None
def _fill_buffer_if_possible(self):
if self._buffer is None:
try:
self._buffer = next(self._iter)
assert self._buffer is not None
except StopIteration:
pass
def peek(self) -> T:
self._fill_buffer_if_possible()
if self._buffer is None:
raise StopIteration
return self._buffer
def __next__(self) -> T:
self._fill_buffer_if_possible()
if self._buffer is None:
raise StopIteration
item = self._buffer
self._buffer = None
return item
class SingleEpochIterator:
def __init__(self, peekable_iter: Iterator[Dataset[T]], epoch: int):
self._iter = peekable_iter
self._epoch = epoch
def __next__(self) -> Dataset[T]:
if self._iter.peek()._get_epoch() > self._epoch:
raise StopIteration
ds = next(self._iter)
return lambda: ds
def __iter__(self):
return self
class EpochDelimitedIterator:
def __init__(self, pipe):
self._iter = Peekable(pipe.iter_datasets())
self._cur_epoch = None
def __next__(self) -> "DatasetPipeline[T]":
if self._cur_epoch is None:
self._cur_epoch = self._iter.peek()._get_epoch()
else:
self._cur_epoch += 1
warned = False
while self._iter.peek()._get_epoch() < self._cur_epoch:
if not warned:
warned = True
logger.warn(
"Data from epoch {} was not fully read, "
"skipping to next epoch.".format(self._cur_epoch - 1)
)
next(self._iter)
epoch_pipe = DatasetPipeline.from_iterable(
SingleEpochIterator(self._iter, epoch=self._cur_epoch)
)
return epoch_pipe
def __iter__(self):
return self
return EpochDelimitedIterator(self)
@DeveloperAPI
def iter_datasets(self) -> Iterator[Dataset[T]]:
"""Iterate over the output datasets of this pipeline.
Returns:
Iterator over the datasets outputted from this pipeline.
"""
if self._executed[0]:
raise RuntimeError("Pipeline cannot be read multiple times.")
self._executed[0] = True
if self._first_dataset is None:
self._peek()
iter = itertools.chain([self._first_dataset], self._dataset_iter)
self._first_dataset = None
self._dataset_iter = None
return iter
@DeveloperAPI
def foreach_window(
self, fn: Callable[[Dataset[T]], Dataset[U]]
) -> "DatasetPipeline[U]":
"""Apply a transform to each dataset/window in this pipeline.
Args:
fn: The function to transform each dataset with.
Returns:
The transformed DatasetPipeline.
"""
if self._executed[0]:
raise RuntimeError("Pipeline cannot be read multiple times.")
return DatasetPipeline(
self._base_iterable,
self._stages + [fn],
self._length,
self._progress_bars,
_executed=self._executed,
)
def stats(self, exclude_first_window: bool = True) -> str:
"""Returns a string containing execution timing information.
Args:
exclude_first_window: Whether to exclude the first window from
the pipeline time breakdown. This is generally a good idea
since there is always a stall waiting for the first window to
be initially computed, which can be misleading in the stats.
"""
return self._stats.summary_string(exclude_first_window)
@staticmethod
def from_iterable(
iterable: Iterable[Callable[[], Dataset[T]]],
) -> "DatasetPipeline[T]":
"""Create a pipeline from an sequence of Dataset producing functions.
Args:
iterable: A finite or infinite-length sequence of functions that
each produce a Dataset when called.
"""
if hasattr(iterable, "__len__"):
length = len(iterable)
else:
length = None
return DatasetPipeline(iterable, length=length)
def __repr__(self) -> str:
return "DatasetPipeline(num_windows={}, num_stages={})".format(
self._length, 1 + len(self._stages)
)
def __str__(self) -> str:
return repr(self)
def _get_uuid(self) -> str:
return self._uuid
def _set_uuid(self, uuid: str) -> None:
self._uuid = uuid
def _optimize_stages(self):
"""Optimize this pipeline, fusing stages together as possible."""
context = DatasetContext.get_current()
if not context.optimize_fuse_stages:
self._optimized_stages = self._stages
return
# This dummy dataset will be used to get a set of optimized stages.
dummy_ds = Dataset(
ExecutionPlan(BlockList([], []), DatasetStats(stages={}, parent=None)),
0,
True,
)
# Apply all pipeline operations to the dummy dataset.
for stage in self._stages:
dummy_ds = stage(dummy_ds)
# Get the optimized stages.
_, _, stages = dummy_ds._plan._optimize()
# Apply these optimized stages to the datasets underlying the pipeline.
# These optimized stages will be executed by the PipelineExecutor.
optimized_stages = []
for stage in stages:
optimized_stages.append(
lambda ds, stage=stage: Dataset(
ds._plan.with_stage(stage), ds._epoch, True
)
)
self._optimized_stages = optimized_stages
def _peek(self) -> Dataset[T]:
if self._first_dataset is None:
self._optimize_stages()
self._dataset_iter = PipelineExecutor(self)
self._first_dataset = next(self._dataset_iter)
return self._first_dataset
for method in _PER_DATASET_OPS:
def make_impl(method):
delegate = getattr(Dataset, method)
def impl(self, *args, **kwargs) -> "DatasetPipeline[U]":
return self.foreach_window(lambda ds: getattr(ds, method)(*args, **kwargs))
impl.__name__ = delegate.__name__
impl.__doc__ = """
Apply ``Dataset.{method}`` to each dataset/window in this pipeline.
""".format(
method=method
)
setattr(
impl,
"__signature__",
inspect.signature(delegate).replace(return_annotation="DatasetPipeline[U]"),
)
return impl
setattr(DatasetPipeline, method, make_impl(method))
for method in _HOLISTIC_PER_DATASET_OPS:
def make_impl(method):
delegate = getattr(Dataset, method)
def impl(self, *args, **kwargs) -> "DatasetPipeline[U]":
return self.foreach_window(lambda ds: getattr(ds, method)(*args, **kwargs))
impl.__name__ = delegate.__name__
impl.__doc__ = """
Apply ``Dataset.{method}`` to each dataset/window in this pipeline.
""".format(
method=method
)
setattr(
impl,
"__signature__",
inspect.signature(delegate).replace(return_annotation="DatasetPipeline[U]"),
)
return impl
def deprecation_warning(method: str):
def impl(*a, **kw):
raise DeprecationWarning(
"`{}` has been renamed to `{}_each_window`.".format(method, method)
)
return impl
setattr(DatasetPipeline, method, deprecation_warning(method))
setattr(DatasetPipeline, method + "_each_window", make_impl(method))
for method in _PER_DATASET_OUTPUT_OPS:
def make_impl(method):
delegate = getattr(Dataset, method)
def impl(self, *args, **kwargs):
uuid = None
for i, ds in enumerate(self.iter_datasets()):
if uuid is None:
uuid = self._get_uuid() or ds._get_uuid()
ds._set_uuid(f"{uuid}_{i:06}")
getattr(ds, method)(*args, **kwargs)
impl.__name__ = delegate.__name__
impl.__doc__ = """
Call ``Dataset.{method}`` on each output dataset of this pipeline.
""".format(
method=method
)
setattr(impl, "__signature__", inspect.signature(delegate))
return impl
setattr(DatasetPipeline, method, make_impl(method))
for method in _OUTPUT_ITER_OPS:
def make_impl(method):
delegate = getattr(Dataset, method)
def impl(self, *args, **kwargs):
return delegate(self, *args, **kwargs)
impl.__name__ = delegate.__name__
impl.__doc__ = """
Call ``Dataset.{method}`` over the stream of output batches from the pipeline.
""".format(
method=method
)
setattr(impl, "__signature__", inspect.signature(delegate))
return impl
setattr(DatasetPipeline, method, make_impl(method))
| 37.055556 | 88 | 0.572775 | import inspect
import itertools
import logging
import time
from typing import (
Any,
Callable,
List,
Iterator,
Iterable,
Generic,
Union,
Optional,
TYPE_CHECKING,
)
import ray
from ray.data.context import DatasetContext
from ray.data.dataset import Dataset, T, U
from ray.data.impl.pipeline_executor import (
PipelineExecutor,
PipelineSplitExecutorCoordinator,
)
from ray.data.block import Block
from ray.data.row import TableRow
from ray.data.impl import progress_bar
from ray.data.impl.block_batching import batch_blocks, BatchType
from ray.data.impl.block_list import BlockList
from ray.data.impl.plan import ExecutionPlan
from ray.data.impl.stats import DatasetPipelineStats, DatasetStats
from ray.util.annotations import PublicAPI, DeveloperAPI
if TYPE_CHECKING:
import pyarrow
logger = logging.getLogger(__name__)
_PER_DATASET_OPS = ["map", "map_batches", "add_column", "flat_map", "filter"]
_HOLISTIC_PER_DATASET_OPS = ["repartition", "random_shuffle", "sort"]
_PER_DATASET_OUTPUT_OPS = [
"write_json",
"write_csv",
"write_parquet",
"write_datasource",
]
_OUTPUT_ITER_OPS = ["take", "take_all", "show", "to_tf", "to_torch"]
@PublicAPI
class DatasetPipeline(Generic[T]):
def __init__(
self,
base_iterable: Iterable[Callable[[], Dataset[T]]],
stages: List[Callable[[Dataset[Any]], Dataset[Any]]] = None,
length: int = None,
progress_bars: bool = progress_bar._enabled,
_executed: List[bool] = None,
):
self._base_iterable = base_iterable
self._stages = stages or []
self._optimized_stages = None
self._length = length
self._progress_bars = progress_bars
self._uuid = None
self._executed = _executed or [False]
self._dataset_iter = None
self._first_dataset = None
self._schema = None
self._stats = DatasetPipelineStats()
def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Union[T, TableRow]]:
def gen_rows() -> Iterator[Union[T, TableRow]]:
time_start = time.perf_counter()
for ds in self.iter_datasets():
wait_start = time.perf_counter()
for row in ds.iter_rows(prefetch_blocks=prefetch_blocks):
self._stats.iter_wait_s.add(time.perf_counter() - wait_start)
with self._stats.iter_user_s.timer():
yield row
wait_start = time.perf_counter()
self._stats.iter_total_s.add(time.perf_counter() - time_start)
return gen_rows()
def iter_batches(
self,
*,
prefetch_blocks: int = 0,
batch_size: int = None,
batch_format: str = "native",
drop_last: bool = False,
) -> Iterator[BatchType]:
time_start = time.perf_counter()
yield from batch_blocks(
self._iter_blocks(),
self._stats,
prefetch_blocks=prefetch_blocks,
batch_size=batch_size,
batch_format=batch_format,
drop_last=drop_last,
)
self._stats.iter_total_s.add(time.perf_counter() - time_start)
def _iter_blocks(self) -> Iterator[Block]:
ds_wait_start = time.perf_counter()
for ds in self.iter_datasets():
self._stats.iter_ds_wait_s.add(time.perf_counter() - ds_wait_start)
yield from ds._plan.execute().iter_blocks()
ds_wait_start = time.perf_counter()
def split(
self, n: int, *, equal: bool = False, locality_hints: List[Any] = None
) -> List["DatasetPipeline[T]"]:
return self._split(
n,
lambda ds, equal=equal: ds.split(
n, equal=equal, locality_hints=locality_hints
),
)
def split_at_indices(self, indices: List[int]) -> List["DatasetPipeline[T]"]:
if len(indices) < 1:
raise ValueError("indices must be at least of length 1")
if sorted(indices) != indices:
raise ValueError("indices must be sorted")
if indices[0] < 0:
raise ValueError("indices must be positive")
return self._split(len(indices) + 1, lambda ds: ds.split_at_indices(indices))
    def _split(self, n: int, splitter: Callable[[Dataset], "DatasetPipeline[T]"]):
        """Fan the pipeline out into ``n`` shards via a coordinator actor.

        The coordinator applies ``splitter`` to each window and hands shard
        ``i`` of every window to the ``i``-th returned pipeline; readers must
        consume all shards concurrently or they will block each other.
        """
        resources = {}
        if not ray.util.client.ray.is_connected():
            # Soft-pin the coordinator actor to the driver node; node
            # resources are not available in Ray client mode.
            resources["node:{}".format(ray.util.get_node_ip_address())] = 0.0001
        coordinator = PipelineSplitExecutorCoordinator.options(
            resources=resources,
            placement_group=None,
        ).remote(self, n, splitter, DatasetContext.get_current())
        if self._executed[0]:
            raise RuntimeError("Pipeline cannot be read multiple times.")
        self._executed[0] = True
        class SplitIterator:
            # Iterator over a single shard; polls the coordinator for the
            # next window split assigned to this shard index.
            def __init__(self, split_index, coordinator):
                self.split_index = split_index
                self.coordinator = coordinator
                # Warn (with exponential backoff) if this reader stalls
                # waiting for the other shards to catch up.
                self.warn_threshold = 100
                self.wait_delay_s = 0.1
            def __iter__(self):
                return self
            def __next__(self):
                ds = None
                tries = 0
                while ds is None:
                    ds = ray.get(
                        self.coordinator.next_dataset_if_ready.remote(self.split_index)
                    )
                    if not ds:
                        # Window not ready yet; back off briefly.
                        time.sleep(self.wait_delay_s)
                        tries += 1
                    if tries > self.warn_threshold:
                        print(
                            "Warning: reader on shard {} of the pipeline "
                            "has been blocked more than {}s waiting for "
                            "other readers to catch up. All pipeline shards "
                            "must be read from concurrently.".format(
                                self.split_index,
                                self.wait_delay_s * self.warn_threshold,
                            )
                        )
                        self.warn_threshold *= 2
                # Pipelines iterate over window *factories*, hence the lambda.
                return lambda: ds
        return [
            DatasetPipeline(
                SplitIterator(idx, coordinator),
                length=self._length,
                progress_bars=False,
            )
            for idx in range(n)
        ]
    def rewindow(
        self, *, blocks_per_window: int, preserve_epoch: bool = True
    ) -> "DatasetPipeline[T]":
        """Re-chunk the pipeline into windows of ``blocks_per_window`` blocks.

        Consecutive windows are buffered and re-divided. When
        ``preserve_epoch`` is set, windows never straddle an epoch boundary;
        a short partial window is emitted at the boundary instead.
        """
        class WindowIterator:
            # Lazily re-slices the underlying stream of window factories.
            def __init__(self, original_iter):
                self._original_iter = original_iter
                self._buffer: Optional[Dataset[T]] = None
            def __next__(self) -> Dataset[T]:
                try:
                    if self._buffer is None:
                        self._buffer = next(self._original_iter)
                    # Accumulate source windows until the buffer is large
                    # enough to carve off a full output window.
                    while self._buffer.num_blocks() < blocks_per_window:
                        next_ds = next(self._original_iter)
                        if (
                            preserve_epoch
                            and self._buffer._get_epoch() != next_ds._get_epoch()
                        ):
                            # Epoch boundary: flush what we have as a partial
                            # window rather than mixing epochs.
                            partial_window = self._buffer
                            self._buffer = next_ds
                            return lambda: partial_window
                        else:
                            self._buffer = self._buffer.union(next_ds)
                    res, self._buffer = self._buffer._divide(blocks_per_window)
                    assert res.num_blocks() <= blocks_per_window, res
                    if self._buffer.num_blocks() == 0:
                        self._buffer = None
                    return lambda: res
                except StopIteration:
                    # Source exhausted: flush any remainder as the final
                    # (possibly short) window.
                    if self._buffer and self._buffer.num_blocks() > 0:
                        res = self._buffer
                        assert res.num_blocks() <= blocks_per_window, res
                        self._buffer = None
                        return lambda: res
                    else:
                        raise
        class WindowIterable:
            def __init__(self, original_iter):
                self._original_iter = original_iter
            def __iter__(self):
                return WindowIterator(self._original_iter)
        # The window count changes under re-chunking, so the new length is
        # unknown unless the pipeline is infinite.
        if self._length == float("inf"):
            length = float("inf")
        else:
            length = None
        return DatasetPipeline(WindowIterable(self.iter_datasets()), length=length)
    def repeat(self, times: int = None) -> "DatasetPipeline[T]":
        """Repeat the pipeline ``times`` times (forever when ``times`` is None).

        Window factories from the first pass are cached and replayed; each
        replay pass gets an incremented epoch number.
        """
        if self._length == float("inf"):
            raise ValueError("Cannot repeat a pipeline of infinite length.")
        class RepeatIterator:
            def __init__(self, original_iter):
                self._original_iter = original_iter
                # Window factories cached from the first pass.
                self._results = []
                # Replay cursor and its (lazy) upper bound.
                self._i = 0
                self._max_i = None
            def __next__(self) -> Dataset[T]:
                # First pass: pull from the source, cache, and tag epoch 0.
                if self._original_iter:
                    try:
                        make_ds = next(self._original_iter)
                        self._results.append(make_ds)
                        def gen():
                            res = make_ds()
                            res._set_epoch(0)
                            return res
                        return gen
                    except StopIteration:
                        self._original_iter = None
                        # Calculate the cursor limit for the replay passes.
                        if times:
                            self._max_i = len(self._results) * (times - 1)
                        else:
                            self._max_i = float("inf")
                # Replay passes: serve cached windows with bumped epochs.
                if self._i < self._max_i:
                    make_ds = self._results[self._i % len(self._results)]
                    epoch = 1 + self._i // len(self._results)
                    def gen():
                        res = make_ds()
                        res._set_epoch(epoch)
                        return res
                    self._i += 1
                    return gen
                else:
                    raise StopIteration
        class RepeatIterable:
            def __init__(self, original_iter):
                self._original_iter = original_iter
            def __iter__(self):
                return RepeatIterator(self._original_iter)
        if not times:
            length = float("inf")
        elif times and self._length:
            length = times * self._length
        else:
            length = None
        return DatasetPipeline(
            RepeatIterable(iter(self._base_iterable)),
            stages=self._stages.copy(),
            length=length,
        )
    def schema(
        self, fetch_if_missing: bool = False
    ) -> Union[type, "pyarrow.lib.Schema"]:
        """Return the schema of the first dataset window.

        After execution has started, the schema cached from the initial peek
        is returned instead of recomputing it.
        """
        if not self._executed[0]:
            self._schema = self._peek().schema(fetch_if_missing)
        return self._schema
def count(self) -> int:
if self._length == float("inf"):
raise ValueError("Cannot count a pipeline of infinite length.")
pipe = self.map_batches(lambda batch: [len(batch)])
total = 0
for elem in pipe.iter_rows():
total += elem
return total
def sum(self) -> int:
if self._length == float("inf"):
raise ValueError("Cannot sum a pipeline of infinite length.")
pipe = self.map_batches(lambda batch: [batch.sum()[0]], batch_format="pandas")
total = 0
for elem in pipe.iter_rows():
total += elem
return total
    def show_windows(self, limit_per_dataset: int = 10) -> None:
        """Print up to ``limit_per_dataset`` records from every window.

        Epoch boundaries are announced as they are encountered.
        """
        epoch = None
        for i, ds in enumerate(self.iter_datasets()):
            if ds._get_epoch() != epoch:
                epoch = ds._get_epoch()
                print("------ Epoch {} ------".format(epoch))
            print("=== Window {} ===".format(i))
            ds.show(limit_per_dataset)
def iter_epochs(self) -> Iterator["DatasetPipeline[T]"]:
class Peekable:
def __init__(self, base_iter: Iterator[T]):
self._iter = base_iter
self._buffer = None
def _fill_buffer_if_possible(self):
if self._buffer is None:
try:
self._buffer = next(self._iter)
assert self._buffer is not None
except StopIteration:
pass
def peek(self) -> T:
self._fill_buffer_if_possible()
if self._buffer is None:
raise StopIteration
return self._buffer
def __next__(self) -> T:
self._fill_buffer_if_possible()
if self._buffer is None:
raise StopIteration
item = self._buffer
self._buffer = None
return item
class SingleEpochIterator:
def __init__(self, peekable_iter: Iterator[Dataset[T]], epoch: int):
self._iter = peekable_iter
self._epoch = epoch
def __next__(self) -> Dataset[T]:
if self._iter.peek()._get_epoch() > self._epoch:
raise StopIteration
ds = next(self._iter)
return lambda: ds
def __iter__(self):
return self
class EpochDelimitedIterator:
def __init__(self, pipe):
self._iter = Peekable(pipe.iter_datasets())
self._cur_epoch = None
def __next__(self) -> "DatasetPipeline[T]":
if self._cur_epoch is None:
self._cur_epoch = self._iter.peek()._get_epoch()
else:
self._cur_epoch += 1
warned = False
while self._iter.peek()._get_epoch() < self._cur_epoch:
if not warned:
warned = True
logger.warn(
"Data from epoch {} was not fully read, "
"skipping to next epoch.".format(self._cur_epoch - 1)
)
next(self._iter)
epoch_pipe = DatasetPipeline.from_iterable(
SingleEpochIterator(self._iter, epoch=self._cur_epoch)
)
return epoch_pipe
def __iter__(self):
return self
return EpochDelimitedIterator(self)
@DeveloperAPI
def iter_datasets(self) -> Iterator[Dataset[T]]:
if self._executed[0]:
raise RuntimeError("Pipeline cannot be read multiple times.")
self._executed[0] = True
if self._first_dataset is None:
self._peek()
iter = itertools.chain([self._first_dataset], self._dataset_iter)
self._first_dataset = None
self._dataset_iter = None
return iter
    @DeveloperAPI
    def foreach_window(
        self, fn: Callable[[Dataset[T]], Dataset[U]]
    ) -> "DatasetPipeline[U]":
        """Return a new pipeline that applies ``fn`` to every window.

        The transform is recorded as a stage and applied lazily.
        """
        if self._executed[0]:
            raise RuntimeError("Pipeline cannot be read multiple times.")
        return DatasetPipeline(
            self._base_iterable,
            self._stages + [fn],
            self._length,
            self._progress_bars,
            # Share the executed flag so consuming either object marks both.
            _executed=self._executed,
        )
    def stats(self, exclude_first_window: bool = True) -> str:
        """Return a formatted summary of the pipeline's execution statistics.

        Args:
            exclude_first_window: Skip the first window, whose timings are
                typically skewed by startup cost.
        """
        return self._stats.summary_string(exclude_first_window)
@staticmethod
def from_iterable(
iterable: Iterable[Callable[[], Dataset[T]]],
) -> "DatasetPipeline[T]":
if hasattr(iterable, "__len__"):
length = len(iterable)
else:
length = None
return DatasetPipeline(iterable, length=length)
def __repr__(self) -> str:
return "DatasetPipeline(num_windows={}, num_stages={})".format(
self._length, 1 + len(self._stages)
)
    def __str__(self) -> str:
        # Human-readable form is identical to the debug repr.
        return repr(self)
    def _get_uuid(self) -> str:
        # Internal: UUID used to tag output windows (see the write_* ops).
        return self._uuid
    def _set_uuid(self, uuid: str) -> None:
        # Internal: override the UUID used to tag output windows.
        self._uuid = uuid
    def _optimize_stages(self):
        """Optimize (fuse) the per-window stage chain, caching the result."""
        context = DatasetContext.get_current()
        if not context.optimize_fuse_stages:
            self._optimized_stages = self._stages
            return
        # Apply the stages to a dummy empty dataset so the plan optimizer can
        # compute the fused stage list without touching real data.
        dummy_ds = Dataset(
            ExecutionPlan(BlockList([], []), DatasetStats(stages={}, parent=None)),
            0,
            True,
        )
        for stage in self._stages:
            dummy_ds = stage(dummy_ds)
        _, _, stages = dummy_ds._plan._optimize()
        optimized_stages = []
        for stage in stages:
            # Bind ``stage`` as a default argument to avoid the late-binding
            # closure pitfall inside the loop.
            optimized_stages.append(
                lambda ds, stage=stage: Dataset(
                    ds._plan.with_stage(stage), ds._epoch, True
                )
            )
        self._optimized_stages = optimized_stages
    def _peek(self) -> Dataset[T]:
        """Start execution (if needed) and return the first window."""
        if self._first_dataset is None:
            self._optimize_stages()
            self._dataset_iter = PipelineExecutor(self)
            self._first_dataset = next(self._dataset_iter)
        return self._first_dataset
# Mirror simple per-window Dataset transforms onto DatasetPipeline: each
# generated method applies the corresponding Dataset method to every window.
for method in _PER_DATASET_OPS:
    def make_impl(method):
        # ``method`` is captured per call, avoiding late-binding in the loop.
        delegate = getattr(Dataset, method)
        def impl(self, *args, **kwargs) -> "DatasetPipeline[U]":
            return self.foreach_window(lambda ds: getattr(ds, method)(*args, **kwargs))
        impl.__name__ = delegate.__name__
        impl.__doc__ = """
    Apply ``Dataset.{method}`` to each dataset/window in this pipeline.
    """.format(
            method=method
        )
        # Copy the delegate's signature for help()/IDE introspection.
        setattr(
            impl,
            "__signature__",
            inspect.signature(delegate).replace(return_annotation="DatasetPipeline[U]"),
        )
        return impl
    setattr(DatasetPipeline, method, make_impl(method))
# Holistic per-dataset ops are exposed under an ``_each_window`` suffix; the
# bare name is kept as a stub that raises to steer users to the new API.
for method in _HOLISTIC_PER_DATASET_OPS:
    def make_impl(method):
        delegate = getattr(Dataset, method)
        def impl(self, *args, **kwargs) -> "DatasetPipeline[U]":
            return self.foreach_window(lambda ds: getattr(ds, method)(*args, **kwargs))
        impl.__name__ = delegate.__name__
        impl.__doc__ = """
    Apply ``Dataset.{method}`` to each dataset/window in this pipeline.
    """.format(
            method=method
        )
        setattr(
            impl,
            "__signature__",
            inspect.signature(delegate).replace(return_annotation="DatasetPipeline[U]"),
        )
        return impl
    def deprecation_warning(method: str):
        # Stub installed under the old (unsuffixed) name.
        def impl(*a, **kw):
            raise DeprecationWarning(
                "`{}` has been renamed to `{}_each_window`.".format(method, method)
            )
        return impl
    setattr(DatasetPipeline, method, deprecation_warning(method))
    setattr(DatasetPipeline, method + "_each_window", make_impl(method))
# Output ops are applied to every produced window, tagging each window with
# an indexed UUID so per-window outputs do not collide.
for method in _PER_DATASET_OUTPUT_OPS:
    def make_impl(method):
        delegate = getattr(Dataset, method)
        def impl(self, *args, **kwargs):
            uuid = None
            for i, ds in enumerate(self.iter_datasets()):
                if uuid is None:
                    # Prefer the pipeline's UUID; fall back to the window's.
                    uuid = self._get_uuid() or ds._get_uuid()
                ds._set_uuid(f"{uuid}_{i:06}")
                getattr(ds, method)(*args, **kwargs)
        impl.__name__ = delegate.__name__
        impl.__doc__ = """
    Call ``Dataset.{method}`` on each output dataset of this pipeline.
    """.format(
            method=method
        )
        setattr(impl, "__signature__", inspect.signature(delegate))
        return impl
    setattr(DatasetPipeline, method, make_impl(method))
# Iteration ops delegate the Dataset implementation directly, bound to the
# pipeline instance, so batches stream across window boundaries.
for method in _OUTPUT_ITER_OPS:
    def make_impl(method):
        delegate = getattr(Dataset, method)
        def impl(self, *args, **kwargs):
            return delegate(self, *args, **kwargs)
        impl.__name__ = delegate.__name__
        impl.__doc__ = """
    Call ``Dataset.{method}`` over the stream of output batches from the pipeline.
    """.format(
            method=method
        )
        setattr(impl, "__signature__", inspect.signature(delegate))
        return impl
    setattr(DatasetPipeline, method, make_impl(method))
| true | true |
f70f4a4cadff4f4dbb2f9dedfaac54e51efae86e | 2,329 | py | Python | alipay/aop/api/domain/AlipayEcoMycarMaintainServiceproductUpdateModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayEcoMycarMaintainServiceproductUpdateModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayEcoMycarMaintainServiceproductUpdateModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MaitainShopProduct import MaitainShopProduct
class AlipayEcoMycarMaintainServiceproductUpdateModel(object):
    """Request model for the maintain service-product update API."""
    def __init__(self):
        # Backing fields; exposed through the properties below.
        self._operation_type = None
        self._out_product_id = None
        self._shop_product = None
    @property
    def operation_type(self):
        # Type of update operation to perform.
        return self._operation_type
    @operation_type.setter
    def operation_type(self, value):
        self._operation_type = value
    @property
    def out_product_id(self):
        # Merchant-side (outer) product identifier.
        return self._out_product_id
    @out_product_id.setter
    def out_product_id(self, value):
        self._out_product_id = value
    @property
    def shop_product(self):
        # Shop product payload (a ``MaitainShopProduct``).
        return self._shop_product
    @shop_product.setter
    def shop_product(self, value):
        # Accept either a ready model instance or a plain dict.
        if isinstance(value, MaitainShopProduct):
            self._shop_product = value
        else:
            self._shop_product = MaitainShopProduct.from_alipay_dict(value)
    def to_alipay_dict(self):
        """Serialize the populated fields into an Alipay request dict."""
        params = dict()
        if self.operation_type:
            if hasattr(self.operation_type, 'to_alipay_dict'):
                params['operation_type'] = self.operation_type.to_alipay_dict()
            else:
                params['operation_type'] = self.operation_type
        if self.out_product_id:
            if hasattr(self.out_product_id, 'to_alipay_dict'):
                params['out_product_id'] = self.out_product_id.to_alipay_dict()
            else:
                params['out_product_id'] = self.out_product_id
        if self.shop_product:
            if hasattr(self.shop_product, 'to_alipay_dict'):
                params['shop_product'] = self.shop_product.to_alipay_dict()
            else:
                params['shop_product'] = self.shop_product
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Build a model from an Alipay dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayEcoMycarMaintainServiceproductUpdateModel()
        if 'operation_type' in d:
            o.operation_type = d['operation_type']
        if 'out_product_id' in d:
            o.out_product_id = d['out_product_id']
        if 'shop_product' in d:
            o.shop_product = d['shop_product']
        return o
| 31.053333 | 79 | 0.640618 |
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MaitainShopProduct import MaitainShopProduct
class AlipayEcoMycarMaintainServiceproductUpdateModel(object):
    """Request model for the maintain service-product update API.

    Carries an operation type, the merchant's outer product id, and an
    optional shop-product payload (``MaitainShopProduct``).
    """

    def __init__(self):
        # Backing fields; exposed through the properties below.
        self._operation_type = None
        self._out_product_id = None
        self._shop_product = None

    @property
    def operation_type(self):
        """Type of update operation to perform."""
        return self._operation_type

    @operation_type.setter
    def operation_type(self, value):
        self._operation_type = value

    @property
    def out_product_id(self):
        """Merchant-side (outer) product identifier."""
        return self._out_product_id

    @out_product_id.setter
    def out_product_id(self, value):
        self._out_product_id = value

    @property
    def shop_product(self):
        """Shop product payload; coerced to ``MaitainShopProduct``."""
        return self._shop_product

    @shop_product.setter
    def shop_product(self, value):
        # Accept either a ready model instance or a plain dict.
        if isinstance(value, MaitainShopProduct):
            self._shop_product = value
        else:
            self._shop_product = MaitainShopProduct.from_alipay_dict(value)

    def to_alipay_dict(self):
        """Serialize the populated (truthy) fields into an Alipay dict."""
        params = dict()
        for name in ("operation_type", "out_product_id", "shop_product"):
            value = getattr(self, name)
            if not value:
                continue
            # Nested models serialize themselves; plain values pass through.
            if hasattr(value, "to_alipay_dict"):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from an Alipay dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayEcoMycarMaintainServiceproductUpdateModel()
        for name in ("operation_type", "out_product_id", "shop_product"):
            if name in d:
                # Assign through the property so coercion still applies.
                setattr(o, name, d[name])
        return o
| true | true |
f70f4a707f33e1e9b61bd480f17e84e9551a9632 | 6,795 | py | Python | lazy/io/pathz/_flavours.py | trisongz/lazycls | 701bad1a358ed3bb136347d0c5eb81de3201f6a3 | [
"MIT"
] | 2 | 2021-12-02T00:13:16.000Z | 2022-02-26T11:18:33.000Z | lazy/io/pathz/_flavours.py | trisongz/lazycls | 701bad1a358ed3bb136347d0c5eb81de3201f6a3 | [
"MIT"
] | null | null | null | lazy/io/pathz/_flavours.py | trisongz/lazycls | 701bad1a358ed3bb136347d0c5eb81de3201f6a3 | [
"MIT"
] | null | null | null | from __future__ import annotations
from pathlib import _PosixFlavour, _WindowsFlavour
from typing import Optional, Callable, Awaitable, Dict, List, TYPE_CHECKING
from errno import EINVAL
import os
import sys
from aiopath.wrap import func_to_async_func as wrap_async
try:
    # ``_getfinalpathname`` is a private pathlib/nt helper only present on
    # Windows builds; wrap it for async use as well.
    from pathlib import _getfinalpathname
    _async_getfinalpathname = wrap_async(_getfinalpathname)
except ImportError:
    # Non-Windows platforms: provide stubs that fail loudly if ever called.
    def _getfinalpathname(*args, **kwargs):
        raise ImportError("_getfinalpathname() requires a Windows/NT platform")
    async def _async_getfinalpathname(*args, **kwargs):
        raise ImportError("_getfinalpathname() requires a Windows/NT platform")
if TYPE_CHECKING: # keep mypy quiet
from ._base import AsyncSyncPath, _AsyncSyncAccessor
class _AsyncSyncPosixFlavour(_PosixFlavour):
    """POSIX path flavour offering sync and async variants of each helper."""
    def gethomedir(self, username: str) -> str:
        return super().gethomedir(username)
    async def async_gethomedir(self, username: str) -> str:
        # Run the synchronous lookup without blocking the event loop.
        gethomedir: Callable[[str], Awaitable[str]] = wrap_async(super().gethomedir)
        return await gethomedir(username)
    def resolve(self, path: AsyncSyncPath, strict: bool = False) -> Optional[str]:
        """Resolve symlinks in ``path`` (synchronous POSIX algorithm)."""
        sep: str = self.sep
        accessor: '_AsyncSyncAccessor' = path._accessor
        # Maps visited paths to their resolved targets; ``None`` marks a
        # symlink whose resolution is still in progress (loop detection).
        seen: Dict[str, Optional[str]] = {}
        def _resolve(path: str, rest: str) -> str:
            if rest.startswith(sep): path = ''
            for name in rest.split(sep):
                if not name or name == '.': continue
                if name == '..':
                    # parent dir
                    path, _, _ = path.rpartition(sep)
                    continue
                newpath = path + name if path.endswith(sep) else path + sep + name
                if newpath in seen:
                    # Already seen this path
                    path = seen[newpath]
                    if path is not None: continue
                    # The symlink is not resolved, so we must have a symlink loop.
                    raise RuntimeError(f"Symlink loop from {newpath}")
                # Resolve the symbolic link
                try: target = accessor.readlink(newpath)
                except OSError as e:
                    if e.errno != EINVAL and strict: raise
                    # Not a symlink, or non-strict mode. We just leave the path
                    # untouched.
                    path = newpath
                else:
                    seen[newpath] = None # not resolved symlink
                    path = _resolve(path, target)
                    seen[newpath] = path # resolved symlink
            return path
        # NOTE: according to POSIX, getcwd() cannot contain path components
        # which are symlinks.
        base = '' if path.is_absolute() else os.getcwd()
        result = _resolve(base, str(path))
        return result or sep
    async def async_resolve(self, path: AsyncSyncPath, strict: bool = False) -> Optional[str]:
        """Resolve symlinks in ``path`` via the async accessor."""
        sep: str = self.sep
        accessor: '_AsyncSyncAccessor' = path._accessor
        # Same memoization/loop-detection scheme as the sync ``resolve``.
        seen: Dict[str, Optional[str]] = {}
        async def _resolve(path: str, rest: str) -> str:
            if rest.startswith(sep): path = ''
            for name in rest.split(sep):
                if not name or name == '.': continue
                if name == '..':
                    # parent dir
                    path, _, _ = path.rpartition(sep)
                    continue
                newpath = path + name if path.endswith(sep) else path + sep + name
                if newpath in seen:
                    # Already seen this path
                    path = seen[newpath]
                    if path is not None: continue
                    # The symlink is not resolved, so we must have a symlink loop.
                    raise RuntimeError(f"Symlink loop from {newpath}")
                # Resolve the symbolic link
                try: target = await accessor.async_readlink(newpath)
                except OSError as e:
                    if e.errno != EINVAL and strict: raise
                    # Not a symlink, or non-strict mode. We just leave the path
                    # untouched.
                    path = newpath
                else:
                    seen[newpath] = None # not resolved symlink
                    path = await _resolve(path, target)
                    seen[newpath] = path # resolved symlink
            return path
        # NOTE: according to POSIX, getcwd() cannot contain path components
        # which are symlinks.
        base = '' if path.is_absolute() else os.getcwd()
        result = await _resolve(base, str(path))
        return result or sep
class _AsyncSyncWindowsFlavour(_WindowsFlavour):
    """Windows path flavour offering sync and async variants of each helper."""
    def gethomedir(self, username: str) -> str:
        return super().gethomedir(username)
    async def async_gethomedir(self, username: str) -> str:
        # Run the synchronous lookup without blocking the event loop.
        gethomedir: Callable[[str], Awaitable[str]] = wrap_async(super().gethomedir)
        return await gethomedir(username)
    def resolve(self, path: 'AsyncSyncPath', strict: bool = False) -> Optional[str]:
        """Resolve ``path`` using the Win32 final-path helper (synchronous)."""
        s = str(path)
        if not s: return os.getcwd()
        previous_s: Optional[str] = None
        if _getfinalpathname is not None:
            if strict: return self._ext_to_normal(_getfinalpathname(s))
            else:
                tail_parts: List[str] = [] # End of the path after the first one not found
                while True:
                    # Strip trailing components until a resolvable prefix is
                    # found, then re-append the unresolved tail.
                    try: s = self._ext_to_normal(_getfinalpathname(s))
                    except FileNotFoundError:
                        previous_s = s
                        s, tail = os.path.split(s)
                        tail_parts.append(tail)
                        if previous_s == s: return path
                    else: return os.path.join(s, *reversed(tail_parts))
        return None
    async def async_resolve(self, path: 'AsyncSyncPath', strict: bool = False) -> Optional[str]:
        """Resolve ``path`` using the async Win32 final-path helper."""
        s = str(path)
        if not s: return os.getcwd()
        previous_s: Optional[str] = None
        if _async_getfinalpathname is not None:
            if strict: return self._ext_to_normal(await _async_getfinalpathname(s))
            else:
                tail_parts: List[str] = [] # End of the path after the first one not found
                while True:
                    # Same prefix-stripping loop as the sync ``resolve``.
                    try: s = self._ext_to_normal(await _async_getfinalpathname(s))
                    except FileNotFoundError:
                        previous_s = s
                        s, tail = os.path.split(s)
                        tail_parts.append(tail)
                        if previous_s == s: return path
                    else: return os.path.join(s, *reversed(tail_parts))
        return None
# Module-level singleton flavour objects, analogous to pathlib's own
# ``_windows_flavour`` and ``_posix_flavour``.
_async_sync_windows_flavour = _AsyncSyncWindowsFlavour()
_async_sync_posix_flavour = _AsyncSyncPosixFlavour()
| 37.75 | 96 | 0.569536 | from __future__ import annotations
from pathlib import _PosixFlavour, _WindowsFlavour
from typing import Optional, Callable, Awaitable, Dict, List, TYPE_CHECKING
from errno import EINVAL
import os
import sys
from aiopath.wrap import func_to_async_func as wrap_async
try:
    # ``_getfinalpathname`` is a private pathlib/nt helper only present on
    # Windows builds; wrap it for async use as well.
    from pathlib import _getfinalpathname
    _async_getfinalpathname = wrap_async(_getfinalpathname)
except ImportError:
    # Non-Windows platforms: provide stubs that fail loudly if ever called.
    def _getfinalpathname(*args, **kwargs):
        raise ImportError("_getfinalpathname() requires a Windows/NT platform")
    async def _async_getfinalpathname(*args, **kwargs):
        raise ImportError("_getfinalpathname() requires a Windows/NT platform")
if TYPE_CHECKING:
from ._base import AsyncSyncPath, _AsyncSyncAccessor
class _AsyncSyncPosixFlavour(_PosixFlavour):
    """POSIX path flavour offering sync and async variants of each helper."""
    def gethomedir(self, username: str) -> str:
        return super().gethomedir(username)
    async def async_gethomedir(self, username: str) -> str:
        # Run the synchronous lookup without blocking the event loop.
        gethomedir: Callable[[str], Awaitable[str]] = wrap_async(super().gethomedir)
        return await gethomedir(username)
    def resolve(self, path: AsyncSyncPath, strict: bool = False) -> Optional[str]:
        """Resolve symlinks in ``path`` (synchronous POSIX algorithm)."""
        sep: str = self.sep
        accessor: '_AsyncSyncAccessor' = path._accessor
        # Maps visited paths to their resolved targets; ``None`` marks a
        # symlink whose resolution is still in progress (loop detection).
        seen: Dict[str, Optional[str]] = {}
        def _resolve(path: str, rest: str) -> str:
            if rest.startswith(sep): path = ''
            for name in rest.split(sep):
                if not name or name == '.': continue
                if name == '..':
                    # Parent directory: drop the last component.
                    path, _, _ = path.rpartition(sep)
                    continue
                newpath = path + name if path.endswith(sep) else path + sep + name
                if newpath in seen:
                    path = seen[newpath]
                    if path is not None: continue
                    # Unresolved entry revisited => symlink loop.
                    raise RuntimeError(f"Symlink loop from {newpath}")
                # Try to read the component as a symlink.
                try: target = accessor.readlink(newpath)
                except OSError as e:
                    if e.errno != EINVAL and strict: raise
                    # Not a symlink (or non-strict mode): keep as-is.
                    path = newpath
                else:
                    seen[newpath] = None
                    path = _resolve(path, target)
                    seen[newpath] = path
            return path
        # Per POSIX, getcwd() never contains symlinked components.
        base = '' if path.is_absolute() else os.getcwd()
        result = _resolve(base, str(path))
        return result or sep
    async def async_resolve(self, path: AsyncSyncPath, strict: bool = False) -> Optional[str]:
        """Resolve symlinks in ``path`` via the async accessor."""
        sep: str = self.sep
        accessor: '_AsyncSyncAccessor' = path._accessor
        # Same memoization/loop-detection scheme as the sync ``resolve``.
        seen: Dict[str, Optional[str]] = {}
        async def _resolve(path: str, rest: str) -> str:
            if rest.startswith(sep): path = ''
            for name in rest.split(sep):
                if not name or name == '.': continue
                if name == '..':
                    # Parent directory: drop the last component.
                    path, _, _ = path.rpartition(sep)
                    continue
                newpath = path + name if path.endswith(sep) else path + sep + name
                if newpath in seen:
                    path = seen[newpath]
                    if path is not None: continue
                    # Unresolved entry revisited => symlink loop.
                    raise RuntimeError(f"Symlink loop from {newpath}")
                # Try to read the component as a symlink.
                try: target = await accessor.async_readlink(newpath)
                except OSError as e:
                    if e.errno != EINVAL and strict: raise
                    # Not a symlink (or non-strict mode): keep as-is.
                    path = newpath
                else:
                    seen[newpath] = None
                    path = await _resolve(path, target)
                    seen[newpath] = path
            return path
        # Per POSIX, getcwd() never contains symlinked components.
        base = '' if path.is_absolute() else os.getcwd()
        result = await _resolve(base, str(path))
        return result or sep
class _AsyncSyncWindowsFlavour(_WindowsFlavour):
    """Windows path flavour offering sync and async variants of each helper."""
    def gethomedir(self, username: str) -> str:
        return super().gethomedir(username)
    async def async_gethomedir(self, username: str) -> str:
        # Run the synchronous lookup without blocking the event loop.
        gethomedir: Callable[[str], Awaitable[str]] = wrap_async(super().gethomedir)
        return await gethomedir(username)
    def resolve(self, path: 'AsyncSyncPath', strict: bool = False) -> Optional[str]:
        """Resolve ``path`` using the Win32 final-path helper (synchronous)."""
        s = str(path)
        if not s: return os.getcwd()
        previous_s: Optional[str] = None
        if _getfinalpathname is not None:
            if strict: return self._ext_to_normal(_getfinalpathname(s))
            else:
                # Components stripped off after the first missing one.
                tail_parts: List[str] = []
                while True:
                    # Strip trailing components until a resolvable prefix is
                    # found, then re-append the unresolved tail.
                    try: s = self._ext_to_normal(_getfinalpathname(s))
                    except FileNotFoundError:
                        previous_s = s
                        s, tail = os.path.split(s)
                        tail_parts.append(tail)
                        if previous_s == s: return path
                    else: return os.path.join(s, *reversed(tail_parts))
        return None
    async def async_resolve(self, path: 'AsyncSyncPath', strict: bool = False) -> Optional[str]:
        """Resolve ``path`` using the async Win32 final-path helper."""
        s = str(path)
        if not s: return os.getcwd()
        previous_s: Optional[str] = None
        if _async_getfinalpathname is not None:
            if strict: return self._ext_to_normal(await _async_getfinalpathname(s))
            else:
                # Components stripped off after the first missing one.
                tail_parts: List[str] = []
                while True:
                    # Same prefix-stripping loop as the sync ``resolve``.
                    try: s = self._ext_to_normal(await _async_getfinalpathname(s))
                    except FileNotFoundError:
                        previous_s = s
                        s, tail = os.path.split(s)
                        tail_parts.append(tail)
                        if previous_s == s: return path
                    else: return os.path.join(s, *reversed(tail_parts))
        return None
return None
_async_sync_windows_flavour = _AsyncSyncWindowsFlavour()
_async_sync_posix_flavour = _AsyncSyncPosixFlavour()
| true | true |
f70f4a9ec79445bf614842937d279938a47f9b30 | 2,796 | py | Python | pocovidnet/scripts/eval_vid_classifier.py | 983632847/covid19_pocus_ultrasound | 3625e95bbf189926dbd12966ef59ee71ed10e453 | [
"MIT"
] | 1 | 2020-11-24T07:40:40.000Z | 2020-11-24T07:40:40.000Z | pocovidnet/scripts/eval_vid_classifier.py | 983632847/covid19_pocus_ultrasound | 3625e95bbf189926dbd12966ef59ee71ed10e453 | [
"MIT"
] | null | null | null | pocovidnet/scripts/eval_vid_classifier.py | 983632847/covid19_pocus_ultrasound | 3625e95bbf189926dbd12966ef59ee71ed10e453 | [
"MIT"
] | null | null | null | import argparse
import json
import os
import pickle
import numpy as np
from pocovidnet.evaluate_genesis import GenesisEvaluator
from pocovidnet.evaluate_video import VideoEvaluator
from tensorflow.keras import backend as K
from pocovidnet.videoto3d import Videoto3D
def main():
    """Evaluate Genesis and CAM classifiers over 5 cross-validation splits.

    For every split, each test video is classified by both models; the raw
    prediction arrays are pickled to ``evaluation_outputs.dat``.
    """
    parser = argparse.ArgumentParser(description='Evaluate genesis and cam')
    parser.add_argument('--json', type=str, default="../data/cross_val.json")
    parser.add_argument(
        '--genesis_weights', type=str, default='video_genesis_lr1e4'
    )
    parser.add_argument(
        '--cam_weights', type=str, default='trained_models_cam'
    )
    parser.add_argument(
        '--videos', type=str, default='../data/pocus_videos/convex'
    )
    args = parser.parse_args()
    with open(args.json, "r") as infile:
        cross_val_split = json.load(infile)
    VIDEO_DIR = args.videos
    all_genesis_preds = []
    all_frame_preds = []
    for i in range(5):
        gen_eval = GenesisEvaluator(
            weights_dir=args.genesis_weights, ensemble=False, split=i
        )
        # The two models expect different Keras data layouts; switch before
        # constructing/calling each one.
        K.set_image_data_format("channels_last")
        normal_eval = VideoEvaluator(
            weights_dir=args.cam_weights,
            ensemble=False,
            split=i,
            model_id="vgg_cam",
            num_classes=4
        )
        files = cross_val_split[str(i)]["test"][0]
        for f in files:
            print("evaluate", f)
            # Sanity-check that the video file can actually be loaded.
            vid3d = Videoto3D("", 64, 64, 5, 5)
            vid3d.max_vid = {"cov": 20, "pne": 20, "reg": 20}
            X_test, _, fn = vid3d.video3d(
                [os.path.join(VIDEO_DIR, f)], ["cov"]
            )
            if len(np.unique(fn)) != 1:
                print("ERROR: WRONG FILE!")
                print(fn)
                print(X_test.shape)
                continue
            # Run the Genesis (3D) model.
            K.set_image_data_format("channels_first")
            preds = gen_eval(os.path.join(VIDEO_DIR, f))
            vid_pred_genesis = np.argmax(np.mean(preds, axis=(0, 1)))
            all_genesis_preds.append(preds)
            # Run the frame-based CAM model.
            K.set_image_data_format("channels_last")
            preds_framebased = normal_eval(os.path.join(VIDEO_DIR, f))
            frame_pred = np.argmax(np.mean(preds_framebased, axis=0))
            all_frame_preds.append(preds_framebased)
            print(preds.shape, preds_framebased.shape)
            print(
                "genesis pred", vid_pred_genesis, "frame based pred",
                frame_pred
            )
            print("-------------")
    with open("evaluation_outputs.dat", "wb") as outfile:
        pickle.dump((all_genesis_preds, all_frame_preds), outfile)
# Run the evaluation only when executed as a script.
if __name__ == '__main__':
    main()
| 34.518519 | 77 | 0.594063 | import argparse
import json
import os
import pickle
import numpy as np
from pocovidnet.evaluate_genesis import GenesisEvaluator
from pocovidnet.evaluate_video import VideoEvaluator
from tensorflow.keras import backend as K
from pocovidnet.videoto3d import Videoto3D
def main():
    """Evaluate Genesis and CAM classifiers over 5 cross-validation splits.

    For every split, each test video is classified by both models; the raw
    prediction arrays are pickled to ``evaluation_outputs.dat``.
    """
    parser = argparse.ArgumentParser(description='Evaluate genesis and cam')
    parser.add_argument('--json', type=str, default="../data/cross_val.json")
    parser.add_argument(
        '--genesis_weights', type=str, default='video_genesis_lr1e4'
    )
    parser.add_argument(
        '--cam_weights', type=str, default='trained_models_cam'
    )
    parser.add_argument(
        '--videos', type=str, default='../data/pocus_videos/convex'
    )
    args = parser.parse_args()
    with open(args.json, "r") as infile:
        cross_val_split = json.load(infile)
    VIDEO_DIR = args.videos
    all_genesis_preds = []
    all_frame_preds = []
    for i in range(5):
        gen_eval = GenesisEvaluator(
            weights_dir=args.genesis_weights, ensemble=False, split=i
        )
        # The two models expect different Keras data layouts; switch before
        # constructing/calling each one.
        K.set_image_data_format("channels_last")
        normal_eval = VideoEvaluator(
            weights_dir=args.cam_weights,
            ensemble=False,
            split=i,
            model_id="vgg_cam",
            num_classes=4
        )
        files = cross_val_split[str(i)]["test"][0]
        for f in files:
            print("evaluate", f)
            # Sanity-check that the video file can actually be loaded.
            vid3d = Videoto3D("", 64, 64, 5, 5)
            vid3d.max_vid = {"cov": 20, "pne": 20, "reg": 20}
            X_test, _, fn = vid3d.video3d(
                [os.path.join(VIDEO_DIR, f)], ["cov"]
            )
            if len(np.unique(fn)) != 1:
                print("ERROR: WRONG FILE!")
                print(fn)
                print(X_test.shape)
                continue
            # Run the Genesis (3D) model.
            K.set_image_data_format("channels_first")
            preds = gen_eval(os.path.join(VIDEO_DIR, f))
            vid_pred_genesis = np.argmax(np.mean(preds, axis=(0, 1)))
            all_genesis_preds.append(preds)
            # Run the frame-based CAM model.
            K.set_image_data_format("channels_last")
            preds_framebased = normal_eval(os.path.join(VIDEO_DIR, f))
            frame_pred = np.argmax(np.mean(preds_framebased, axis=0))
            all_frame_preds.append(preds_framebased)
            print(preds.shape, preds_framebased.shape)
            print(
                "genesis pred", vid_pred_genesis, "frame based pred",
                frame_pred
            )
            print("-------------")
    with open("evaluation_outputs.dat", "wb") as outfile:
        pickle.dump((all_genesis_preds, all_frame_preds), outfile)
# Run the evaluation only when executed as a script.
if __name__ == '__main__':
    main()
| true | true |
f70f4aaaa09628c56d20528c22d3cd194f64d284 | 542 | py | Python | atlas/skills/serializers.py | Kayra/atlas | 63db74fd66c8f8a874c5352248ea7b6fc4dc0c6f | [
"BSD-3-Clause"
] | 1 | 2015-12-16T06:20:14.000Z | 2015-12-16T06:20:14.000Z | atlas/skills/serializers.py | Kayra/atlas | 63db74fd66c8f8a874c5352248ea7b6fc4dc0c6f | [
"BSD-3-Clause"
] | null | null | null | atlas/skills/serializers.py | Kayra/atlas | 63db74fd66c8f8a874c5352248ea7b6fc4dc0c6f | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import serializers
from .models import Skill, Task, Days
class SkillSerializer(serializers.ModelSerializer):
class Meta:
model = Skill
fields = ('name', 'user')
class TaskSerializer(serializers.ModelSerializer):
class Meta:
model = Task
fields = ('name', 'completion_time', 'skill')
class DaysSerializer(serializers.ModelSerializer):
class Meta:
model = Days
fields = ('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'user')
| 24.636364 | 103 | 0.662362 | from rest_framework import serializers
from .models import Skill, Task, Days
class SkillSerializer(serializers.ModelSerializer):
class Meta:
model = Skill
fields = ('name', 'user')
class TaskSerializer(serializers.ModelSerializer):
class Meta:
model = Task
fields = ('name', 'completion_time', 'skill')
class DaysSerializer(serializers.ModelSerializer):
class Meta:
model = Days
fields = ('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'user')
| true | true |
f70f4b9ac58896fac91046fbf000439679cf8a27 | 1,211 | py | Python | test/functional/rpc_named_arguments.py | wizz13150/gapcoin-core | 424b36ac0f232583e51bbef0ba87f4b1a53fde70 | [
"MIT"
] | 2 | 2020-11-07T01:18:24.000Z | 2021-04-25T15:49:28.000Z | test/functional/rpc_named_arguments.py | wizz13150/gapcoin-core | 424b36ac0f232583e51bbef0ba87f4b1a53fde70 | [
"MIT"
] | 2 | 2021-03-27T14:19:47.000Z | 2021-04-30T12:57:11.000Z | test/functional/rpc_named_arguments.py | wizz13150/gapcoin-core | 424b36ac0f232583e51bbef0ba87f4b1a53fde70 | [
"MIT"
] | 3 | 2021-02-03T14:39:29.000Z | 2022-01-05T11:56:21.000Z | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test using named arguments for RPCs."""
from test_framework.test_framework import AltcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class NamedArgumentTest(AltcoinTestFramework):
    """Exercise RPC calls made with named (keyword) arguments."""
    def set_test_params(self):
        # A single node suffices; only the RPC layer is under test.
        self.num_nodes = 1
    def run_test(self):
        node = self.nodes[0]
        h = node.help(command='getblockchaininfo')
        assert(h.startswith('getblockchaininfo\n'))
        # Unknown parameter names must be rejected, not silently ignored.
        assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getblockchaininfo')
        h = node.getblockhash(height=0)
        node.getblock(blockhash=h)
        # ``echo`` round-trips its arguments; omitted positions between named
        # args come back as None.
        assert_equal(node.echo(), [])
        assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9])
        assert_equal(node.echo(arg1=1), [None, 1])
        assert_equal(node.echo(arg9=None), [None]*10)
        assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9])
if __name__ == '__main__':
NamedArgumentTest().main()
| 34.6 | 101 | 0.679604 |
from test_framework.test_framework import AltcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class NamedArgumentTest(AltcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
node = self.nodes[0]
h = node.help(command='getblockchaininfo')
assert(h.startswith('getblockchaininfo\n'))
assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getblockchaininfo')
h = node.getblockhash(height=0)
node.getblock(blockhash=h)
assert_equal(node.echo(), [])
assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9])
assert_equal(node.echo(arg1=1), [None, 1])
assert_equal(node.echo(arg9=None), [None]*10)
assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9])
if __name__ == '__main__':
NamedArgumentTest().main()
| true | true |
f70f4b9c17d9141ff1eea0eec43d47d91b593441 | 14,098 | py | Python | test/geometry/transform/crop/test_crop2d.py | pworinger/kornia | a8bddbc5412694d778b1a7338e0d001910bb8024 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-06-25T18:10:22.000Z | 2021-06-25T18:10:22.000Z | test/geometry/transform/crop/test_crop2d.py | pworinger/kornia | a8bddbc5412694d778b1a7338e0d001910bb8024 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/geometry/transform/crop/test_crop2d.py | pworinger/kornia | a8bddbc5412694d778b1a7338e0d001910bb8024 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from typing import Tuple
import pytest
import kornia as kornia
import kornia.testing as utils # test utils
import torch
from torch.testing import assert_allclose
from torch.autograd import gradcheck
class TestBoundingBoxInferring:
    """Tests for kornia.geometry.transform.crop.infer_box_shape."""

    def test_bounding_boxes_dim_inferring(self, device, dtype):
        # One 3x2 box described by its four corners
        # (top-left, top-right, bottom-right, bottom-left).
        boxes = torch.tensor([[
            [1., 1.],
            [3., 1.],
            [3., 2.],
            [1., 2.],
        ]], device=device, dtype=dtype)

        h, w = kornia.geometry.transform.crop.infer_box_shape(boxes)
        assert (h, w) == (2, 3)

    def test_bounding_boxes_dim_inferring_batch(self, device, dtype):
        # Two boxes with identical 3x2 extents at different positions.
        boxes = torch.tensor([[
            [1., 1.],
            [3., 1.],
            [3., 2.],
            [1., 2.],
        ], [
            [2., 2.],
            [4., 2.],
            [4., 3.],
            [2., 3.],
        ]], device=device, dtype=dtype)
        h, w = kornia.geometry.transform.crop.infer_box_shape(boxes)
        assert (h.unique().item(), w.unique().item()) == (2, 3)

    def test_gradcheck(self, device, dtype):
        boxes = torch.tensor([[
            [1., 1.],
            [3., 1.],
            [3., 2.],
            [1., 2.],
        ]], device=device, dtype=dtype)
        boxes = utils.tensor_to_gradcheck_var(boxes)
        # BUGFIX: the module path was written as `kornia.kornia.geometry...`,
        # which raises AttributeError and makes this test fail unconditionally.
        assert gradcheck(kornia.geometry.transform.crop.infer_box_shape,
                         (boxes,), raise_exception=True)

    def test_jit(self, device, dtype):
        # Define script
        op = kornia.geometry.transform.crop.infer_box_shape
        op_script = torch.jit.script(op)
        # Define input
        boxes = torch.tensor([[
            [1., 1.],
            [3., 1.],
            [3., 2.],
            [1., 2.],
        ]], device=device, dtype=dtype)

        actual = op_script(boxes)
        expected = op(boxes)
        assert_allclose(actual, expected)
class TestCropAndResize:
    """Tests for kornia.crop_and_resize: corner-defined boxes cropped and
    bilinearly resampled to a requested output size."""

    def test_align_corners_true(self, device, dtype):
        inp = torch.tensor([[[
            [1., 2., 3., 4.],
            [5., 6., 7., 8.],
            [9., 10., 11., 12.],
            [13., 14., 15., 16.],
        ]]], device=device, dtype=dtype)

        height, width = 2, 3
        expected = torch.tensor(
            [[[[6.0000, 6.5000, 7.0000],
               [10.0000, 10.5000, 11.0000]]]], device=device, dtype=dtype)

        boxes = torch.tensor([[
            [1., 1.],
            [2., 1.],
            [2., 2.],
            [1., 2.],
        ]], device=device, dtype=dtype)  # 1x4x2

        # default should use align_corners True
        patches = kornia.crop_and_resize(inp, boxes, (height, width))
        assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)

    def test_align_corners_false(self, device, dtype):
        inp = torch.tensor([[[
            [1., 2., 3., 4.],
            [5., 6., 7., 8.],
            [9., 10., 11., 12.],
            [13., 14., 15., 16.],
        ]]], device=device, dtype=dtype)

        height, width = 2, 3
        # Same crop as above but sampled with align_corners=False, hence the
        # slightly shifted interpolated values.
        expected = torch.tensor(
            [[[[6.7222, 7.1667, 7.6111],
               [9.3889, 9.8333, 10.2778]]]], device=device, dtype=dtype)

        boxes = torch.tensor([[
            [1., 1.],
            [2., 1.],
            [2., 2.],
            [1., 2.],
        ]], device=device, dtype=dtype)  # 1x4x2

        patches = kornia.crop_and_resize(inp, boxes, (height, width), align_corners=False)
        assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)

    def test_crop_batch(self, device, dtype):
        # Two images, one distinct box per image.
        inp = torch.tensor([[[
            [1., 2., 3., 4.],
            [5., 6., 7., 8.],
            [9., 10., 11., 12.],
            [13., 14., 15., 16.],
        ]], [[
            [1., 5., 9., 13.],
            [2., 6., 10., 14.],
            [3., 7., 11., 15.],
            [4., 8., 12., 16.],
        ]]], device=device, dtype=dtype)

        expected = torch.tensor([[[
            [6., 7.],
            [10., 11.],
        ]], [[
            [7., 15.],
            [8., 16.],
        ]]], device=device, dtype=dtype)

        boxes = torch.tensor([[
            [1., 1.],
            [2., 1.],
            [2., 2.],
            [1., 2.],
        ], [
            [1., 2.],
            [3., 2.],
            [3., 3.],
            [1., 3.],
        ]], device=device, dtype=dtype)  # 2x4x2

        patches = kornia.crop_and_resize(inp, boxes, (2, 2))
        assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)

    def test_crop_batch_broadcast(self, device, dtype):
        # Two images, a single box broadcast over the batch.
        inp = torch.tensor([[[
            [1., 2., 3., 4.],
            [5., 6., 7., 8.],
            [9., 10., 11., 12.],
            [13., 14., 15., 16.],
        ]], [[
            [1., 5., 9., 13.],
            [2., 6., 10., 14.],
            [3., 7., 11., 15.],
            [4., 8., 12., 16.],
        ]]], device=device, dtype=dtype)

        expected = torch.tensor([[[
            [6., 7.],
            [10., 11.],
        ]], [[
            [6., 10.],
            [7., 11.],
        ]]], device=device, dtype=dtype)

        boxes = torch.tensor([[
            [1., 1.],
            [2., 1.],
            [2., 2.],
            [1., 2.],
        ]], device=device, dtype=dtype)  # 1x4x2

        patches = kornia.crop_and_resize(inp, boxes, (2, 2))
        assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)

    def test_gradcheck(self, device, dtype):
        img = torch.rand(1, 2, 5, 4, device=device, dtype=dtype)
        img = utils.tensor_to_gradcheck_var(img)  # to var

        boxes = torch.tensor([[
            [1., 1.],
            [2., 1.],
            [2., 2.],
            [1., 2.],
        ]], device=device, dtype=dtype)  # 1x4x2

        # Gradients flow through the image only; the boxes stay constant.
        boxes = utils.tensor_to_gradcheck_var(boxes, requires_grad=False)  # to var

        assert gradcheck(kornia.crop_and_resize,
                         (img, boxes, (4, 2),),
                         raise_exception=True)

    def test_jit(self, device, dtype):
        # Define script
        op = kornia.crop_and_resize
        op_script = torch.jit.script(op)
        # Define input
        img = torch.tensor([[[
            [1., 2., 3., 4.],
            [5., 6., 7., 8.],
            [9., 10., 11., 12.],
            [13., 14., 15., 16.],
        ]]], device=device, dtype=dtype)
        boxes = torch.tensor([[
            [1., 1.],
            [2., 1.],
            [2., 2.],
            [1., 2.],
        ]], device=device, dtype=dtype)  # 1x4x2

        crop_height, crop_width = 4, 2
        actual = op_script(img, boxes, (crop_height, crop_width))
        expected = op(img, boxes, (crop_height, crop_width))
        assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)
class TestCenterCrop:
    """Tests for kornia.center_crop: crop a centered window of a given size."""

    def test_center_crop_h2_w4(self, device, dtype):
        inp = torch.tensor([[[
            [1., 2., 3., 4.],
            [5., 6., 7., 8.],
            [9., 10., 11., 12.],
            [13., 14., 15., 16.],
        ]]], device=device, dtype=dtype)

        # Centered 2x4 window of a 4x4 image: the middle two rows.
        expected = torch.tensor([[[
            [5., 6., 7., 8.],
            [9., 10., 11., 12.],
        ]]], device=device, dtype=dtype)

        out_crop = kornia.center_crop(inp, (2, 4))
        assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)

    def test_center_crop_h4_w2(self, device, dtype):
        inp = torch.tensor([[[
            [1., 2., 3., 4.],
            [5., 6., 7., 8.],
            [9., 10., 11., 12.],
            [13., 14., 15., 16.],
        ]]], device=device, dtype=dtype)

        height, width = 4, 2
        # Centered 4x2 window: the middle two columns.
        expected = torch.tensor([[[
            [2., 3.],
            [6., 7.],
            [10., 11.],
            [14., 15.],
        ]]], device=device, dtype=dtype)

        out_crop = kornia.center_crop(inp, (height, width))
        assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)

    def test_center_crop_h4_w2_batch(self, device, dtype):
        inp = torch.tensor([
            [[[1., 2., 3., 4.],
              [5., 6., 7., 8.],
              [9., 10., 11., 12.],
              [13., 14., 15., 16.]]],
            [[[1., 5., 9., 13.],
              [2., 6., 10., 14.],
              [3., 7., 11., 15.],
              [4., 8., 12., 16.]]]
        ], device=device, dtype=dtype)

        expected = torch.tensor([[[
            [2., 3.],
            [6., 7.],
            [10., 11.],
            [14., 15.],
        ]], [[
            [5., 9.],
            [6., 10.],
            [7., 11.],
            [8., 12.],
        ]]], device=device, dtype=dtype)

        out_crop = kornia.center_crop(inp, (4, 2))
        assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)

    def test_gradcheck(self, device, dtype):
        img = torch.rand(1, 2, 5, 4, device=device, dtype=dtype)
        img = utils.tensor_to_gradcheck_var(img)  # to var

        assert gradcheck(kornia.center_crop, (img, (4, 2),), raise_exception=True)

    def test_jit(self, device, dtype):
        # Define script
        op = kornia.center_crop
        op_script = torch.jit.script(op)
        # Define input
        img = torch.ones(1, 2, 5, 4, device=device, dtype=dtype)

        actual = op_script(img, (4, 2))
        expected = op(img, (4, 2))
        assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)

    def test_jit_trace(self, device, dtype):
        # Define script
        op = kornia.center_crop
        op_script = torch.jit.script(op)
        # Define input; the crop size is passed as traced tensors here.
        img = torch.ones(2, 1, 6, 3, device=device, dtype=dtype)
        op_trace = torch.jit.trace(op_script, (img, (torch.tensor(2), torch.tensor(3))))
        img = torch.ones(2, 1, 6, 3, device=device, dtype=dtype)
        # Run
        actual = op_trace(img, (torch.tensor(2), torch.tensor(3)))
        expected = op(img, (2, 3))
        assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)
class TestCropByBoxes:
    """Tests for crop_by_boxes: warp the `src` quadrilateral onto the `dst`
    quadrilateral, cropping (and optionally resizing) the input image."""

    def test_crop_by_boxes_no_resizing(self, device, dtype):
        inp = torch.tensor([[[
            [1., 2., 3., 4.],
            [5., 6., 7., 8.],
            [9., 10., 11., 12.],
            [13., 14., 15., 16.],
        ]]], device=device, dtype=dtype)

        src = torch.tensor([[
            [1., 1.],
            [2., 1.],
            [2., 2.],
            [1., 2.],
        ]], device=device, dtype=dtype)  # 1x4x2

        # dst has the same 2x2 extent as src, so no resampling occurs.
        dst = torch.tensor([[
            [0., 0.],
            [1., 0.],
            [1., 1.],
            [0., 1.],
        ]], device=device, dtype=dtype)  # 1x4x2

        expected = torch.tensor([[[
            [6., 7.],
            [10., 11.],
        ]]], device=device, dtype=dtype)

        patches = kornia.geometry.transform.crop.crop_by_boxes(inp, src, dst)
        assert_allclose(patches, expected)

    def test_crop_by_boxes_resizing(self, device, dtype):
        inp = torch.tensor([[[
            [1., 2., 3., 4.],
            [5., 6., 7., 8.],
            [9., 10., 11., 12.],
            [13., 14., 15., 16.],
        ]]], device=device, dtype=dtype)

        src = torch.tensor([[
            [1., 1.],
            [2., 1.],
            [2., 2.],
            [1., 2.],
        ]], device=device, dtype=dtype)  # 1x4x2

        # dst is wider than src (3 vs 2 columns) -> horizontal interpolation.
        dst = torch.tensor([[
            [0., 0.],
            [2., 0.],
            [2., 1.],
            [0., 1.],
        ]], device=device, dtype=dtype)  # 1x4x2

        expected = torch.tensor([[[
            [6., 6.5, 7.],
            [10., 10.5, 11.],
        ]]], device=device, dtype=dtype)

        patches = kornia.geometry.transform.crop.crop_by_boxes(inp, src, dst)
        assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)

    def test_gradcheck(self, device, dtype):
        inp = torch.randn((1, 1, 3, 3), device=device, dtype=dtype)
        src = torch.tensor([[
            [1., 0.],
            [2., 0.],
            [2., 1.],
            [1., 1.]]], device=device, dtype=dtype)
        dst = torch.tensor([[
            [0., 0.],
            [1., 0.],
            [1., 1.],
            [0., 1.]]], device=device, dtype=dtype)

        inp = utils.tensor_to_gradcheck_var(inp, requires_grad=True)  # to var

        assert gradcheck(kornia.geometry.transform.crop.crop_by_boxes,
                         (inp, src, dst,),
                         raise_exception=True)
class TestCropByTransform:
    """Tests for crop_by_transform_mat: crop via an explicit 3x3 transform
    matrix and a target output size."""

    def test_crop_by_transform_no_resizing(self, device, dtype):
        inp = torch.tensor([[[
            [1., 2., 3., 4.],
            [5., 6., 7., 8.],
            [9., 10., 11., 12.],
            [13., 14., 15., 16.],
        ]]], device=device, dtype=dtype)

        # Pure translation by (-1, -1): selects the 2x2 window at (1, 1).
        transform = torch.tensor([[
            [1., 0., -1.],
            [0., 1., -1.],
            [0., 0., 1.],
        ]], device=device, dtype=dtype)  # 1x3x3

        expected = torch.tensor([[[
            [6., 7.],
            [10., 11.],
        ]]], device=device, dtype=dtype)

        patches = kornia.geometry.transform.crop.crop_by_transform_mat(inp, transform, (2, 2))
        assert_allclose(patches, expected)

    def test_crop_by_boxes_resizing(self, device, dtype):
        inp = torch.tensor([[[
            [1., 2., 3., 4.],
            [5., 6., 7., 8.],
            [9., 10., 11., 12.],
            [13., 14., 15., 16.],
        ]]], device=device, dtype=dtype)

        # Horizontal scale x2 plus translation -> interpolated columns.
        transform = torch.tensor([[
            [2., 0., -2.],
            [0., 1., -1.],
            [0., 0., 1.],
        ]], device=device, dtype=dtype)  # 1x3x3

        expected = torch.tensor([[[
            [6., 6.5, 7.],
            [10., 10.5, 11.],
        ]]], device=device, dtype=dtype)

        patches = kornia.geometry.transform.crop.crop_by_transform_mat(inp, transform, (2, 3))
        assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)

    def test_gradcheck(self, device, dtype):
        inp = torch.randn((1, 1, 3, 3), device=device, dtype=dtype)
        transform = torch.tensor([[
            [2., 0., -2.],
            [0., 1., -1.],
            [0., 0., 1.],
        ]], device=device, dtype=dtype)  # 1x3x3

        inp = utils.tensor_to_gradcheck_var(inp, requires_grad=True)  # to var

        assert gradcheck(kornia.geometry.transform.crop.crop_by_transform_mat,
                         (inp, transform, (2, 2),),
                         raise_exception=True)
| 30.849015 | 94 | 0.452901 | from typing import Tuple
import pytest
import kornia as kornia
import kornia.testing as utils
import torch
from torch.testing import assert_allclose
from torch.autograd import gradcheck
class TestBoundingBoxInferring:
def test_bounding_boxes_dim_inferring(self, device, dtype):
boxes = torch.tensor([[
[1., 1.],
[3., 1.],
[3., 2.],
[1., 2.],
]], device=device, dtype=dtype)
h, w = kornia.geometry.transform.crop.infer_box_shape(boxes)
assert (h, w) == (2, 3)
def test_bounding_boxes_dim_inferring_batch(self, device, dtype):
boxes = torch.tensor([[
[1., 1.],
[3., 1.],
[3., 2.],
[1., 2.],
], [
[2., 2.],
[4., 2.],
[4., 3.],
[2., 3.],
]], device=device, dtype=dtype)
h, w = kornia.geometry.transform.crop.infer_box_shape(boxes)
assert (h.unique().item(), w.unique().item()) == (2, 3)
def test_gradcheck(self, device, dtype):
boxes = torch.tensor([[
[1., 1.],
[3., 1.],
[3., 2.],
[1., 2.],
]], device=device, dtype=dtype)
boxes = utils.tensor_to_gradcheck_var(boxes)
assert gradcheck(kornia.kornia.geometry.transform.crop.infer_box_shape,
(boxes,), raise_exception=True)
def test_jit(self, device, dtype):
op = kornia.geometry.transform.crop.infer_box_shape
op_script = torch.jit.script(op)
boxes = torch.tensor([[
[1., 1.],
[3., 1.],
[3., 2.],
[1., 2.],
]], device=device, dtype=dtype)
actual = op_script(boxes)
expected = op(boxes)
assert_allclose(actual, expected)
class TestCropAndResize:
def test_align_corners_true(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
height, width = 2, 3
expected = torch.tensor(
[[[[6.0000, 6.5000, 7.0000],
[10.0000, 10.5000, 11.0000]]]], device=device, dtype=dtype)
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype)
patches = kornia.crop_and_resize(inp, boxes, (height, width))
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_align_corners_false(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
height, width = 2, 3
expected = torch.tensor(
[[[[6.7222, 7.1667, 7.6111],
[9.3889, 9.8333, 10.2778]]]], device=device, dtype=dtype)
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype)
patches = kornia.crop_and_resize(inp, boxes, (height, width), align_corners=False)
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_crop_batch(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]], [[
[1., 5., 9., 13.],
[2., 6., 10., 14.],
[3., 7., 11., 15.],
[4., 8., 12., 16.],
]]], device=device, dtype=dtype)
expected = torch.tensor([[[
[6., 7.],
[10., 11.],
]], [[
[7., 15.],
[8., 16.],
]]], device=device, dtype=dtype)
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
], [
[1., 2.],
[3., 2.],
[3., 3.],
[1., 3.],
]], device=device, dtype=dtype)
patches = kornia.crop_and_resize(inp, boxes, (2, 2))
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_crop_batch_broadcast(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]], [[
[1., 5., 9., 13.],
[2., 6., 10., 14.],
[3., 7., 11., 15.],
[4., 8., 12., 16.],
]]], device=device, dtype=dtype)
expected = torch.tensor([[[
[6., 7.],
[10., 11.],
]], [[
[6., 10.],
[7., 11.],
]]], device=device, dtype=dtype)
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype)
patches = kornia.crop_and_resize(inp, boxes, (2, 2))
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_gradcheck(self, device, dtype):
img = torch.rand(1, 2, 5, 4, device=device, dtype=dtype)
img = utils.tensor_to_gradcheck_var(img)
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype)
boxes = utils.tensor_to_gradcheck_var(boxes, requires_grad=False)
assert gradcheck(kornia.crop_and_resize,
(img, boxes, (4, 2),),
raise_exception=True)
def test_jit(self, device, dtype):
op = kornia.crop_and_resize
op_script = torch.jit.script(op)
img = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
boxes = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype)
crop_height, crop_width = 4, 2
actual = op_script(img, boxes, (crop_height, crop_width))
expected = op(img, boxes, (crop_height, crop_width))
assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)
class TestCenterCrop:
def test_center_crop_h2_w4(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
expected = torch.tensor([[[
[5., 6., 7., 8.],
[9., 10., 11., 12.],
]]], device=device, dtype=dtype)
out_crop = kornia.center_crop(inp, (2, 4))
assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)
def test_center_crop_h4_w2(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
height, width = 4, 2
expected = torch.tensor([[[
[2., 3.],
[6., 7.],
[10., 11.],
[14., 15.],
]]], device=device, dtype=dtype)
out_crop = kornia.center_crop(inp, (height, width))
assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)
def test_center_crop_h4_w2_batch(self, device, dtype):
inp = torch.tensor([
[[[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.]]],
[[[1., 5., 9., 13.],
[2., 6., 10., 14.],
[3., 7., 11., 15.],
[4., 8., 12., 16.]]]
], device=device, dtype=dtype)
expected = torch.tensor([[[
[2., 3.],
[6., 7.],
[10., 11.],
[14., 15.],
]], [[
[5., 9.],
[6., 10.],
[7., 11.],
[8., 12.],
]]], device=device, dtype=dtype)
out_crop = kornia.center_crop(inp, (4, 2))
assert_allclose(out_crop, expected, rtol=1e-4, atol=1e-4)
def test_gradcheck(self, device, dtype):
img = torch.rand(1, 2, 5, 4, device=device, dtype=dtype)
img = utils.tensor_to_gradcheck_var(img)
assert gradcheck(kornia.center_crop, (img, (4, 2),), raise_exception=True)
def test_jit(self, device, dtype):
op = kornia.center_crop
op_script = torch.jit.script(op)
img = torch.ones(1, 2, 5, 4, device=device, dtype=dtype)
actual = op_script(img, (4, 2))
expected = op(img, (4, 2))
assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)
def test_jit_trace(self, device, dtype):
op = kornia.center_crop
op_script = torch.jit.script(op)
img = torch.ones(2, 1, 6, 3, device=device, dtype=dtype)
op_trace = torch.jit.trace(op_script, (img, (torch.tensor(2), torch.tensor(3))))
img = torch.ones(2, 1, 6, 3, device=device, dtype=dtype)
actual = op_trace(img, (torch.tensor(2), torch.tensor(3)))
expected = op(img, (2, 3))
assert_allclose(actual, expected, rtol=1e-4, atol=1e-4)
class TestCropByBoxes:
def test_crop_by_boxes_no_resizing(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
src = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype)
dst = torch.tensor([[
[0., 0.],
[1., 0.],
[1., 1.],
[0., 1.],
]], device=device, dtype=dtype)
expected = torch.tensor([[[
[6., 7.],
[10., 11.],
]]], device=device, dtype=dtype)
patches = kornia.geometry.transform.crop.crop_by_boxes(inp, src, dst)
assert_allclose(patches, expected)
def test_crop_by_boxes_resizing(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
src = torch.tensor([[
[1., 1.],
[2., 1.],
[2., 2.],
[1., 2.],
]], device=device, dtype=dtype)
dst = torch.tensor([[
[0., 0.],
[2., 0.],
[2., 1.],
[0., 1.],
]], device=device, dtype=dtype)
expected = torch.tensor([[[
[6., 6.5, 7.],
[10., 10.5, 11.],
]]], device=device, dtype=dtype)
patches = kornia.geometry.transform.crop.crop_by_boxes(inp, src, dst)
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_gradcheck(self, device, dtype):
inp = torch.randn((1, 1, 3, 3), device=device, dtype=dtype)
src = torch.tensor([[
[1., 0.],
[2., 0.],
[2., 1.],
[1., 1.]]], device=device, dtype=dtype)
dst = torch.tensor([[
[0., 0.],
[1., 0.],
[1., 1.],
[0., 1.]]], device=device, dtype=dtype)
inp = utils.tensor_to_gradcheck_var(inp, requires_grad=True)
assert gradcheck(kornia.geometry.transform.crop.crop_by_boxes,
(inp, src, dst,),
raise_exception=True)
class TestCropByTransform:
def test_crop_by_transform_no_resizing(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
transform = torch.tensor([[
[1., 0., -1.],
[0., 1., -1.],
[0., 0., 1.],
]], device=device, dtype=dtype)
expected = torch.tensor([[[
[6., 7.],
[10., 11.],
]]], device=device, dtype=dtype)
patches = kornia.geometry.transform.crop.crop_by_transform_mat(inp, transform, (2, 2))
assert_allclose(patches, expected)
def test_crop_by_boxes_resizing(self, device, dtype):
inp = torch.tensor([[[
[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.],
[13., 14., 15., 16.],
]]], device=device, dtype=dtype)
transform = torch.tensor([[
[2., 0., -2.],
[0., 1., -1.],
[0., 0., 1.],
]], device=device, dtype=dtype)
expected = torch.tensor([[[
[6., 6.5, 7.],
[10., 10.5, 11.],
]]], device=device, dtype=dtype)
patches = kornia.geometry.transform.crop.crop_by_transform_mat(inp, transform, (2, 3))
assert_allclose(patches, expected, rtol=1e-4, atol=1e-4)
def test_gradcheck(self, device, dtype):
inp = torch.randn((1, 1, 3, 3), device=device, dtype=dtype)
transform = torch.tensor([[
[2., 0., -2.],
[0., 1., -1.],
[0., 0., 1.],
]], device=device, dtype=dtype)
inp = utils.tensor_to_gradcheck_var(inp, requires_grad=True)
assert gradcheck(kornia.geometry.transform.crop.crop_by_transform_mat,
(inp, transform, (2, 2),),
raise_exception=True)
| true | true |
f70f4bcb38a6805d80e29b9d93b8b14c0b0a2adf | 782 | py | Python | Dynamic Programming/213. House Robber II.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
] | 138 | 2020-02-08T05:25:26.000Z | 2021-11-04T11:59:28.000Z | Dynamic Programming/213. House Robber II.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
] | null | null | null | Dynamic Programming/213. House Robber II.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
] | 24 | 2021-01-02T07:18:43.000Z | 2022-03-20T08:17:54.000Z |
class Solution:
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if(len(nums)==1): return nums[0] # 1的时候不work 两个dp,一个从第一位开始,一个从倒数第二位结束
last, now = 0, 0
last1, now1 = 0, 0
for i, n in enumerate(nums):
if i<len(nums)-1:
last, now = now, max(n+last,now)
print(now, last)
if i>0:
last1, now1 = now1, max(n+last1,now1)
return max(now,now1)
class Solution:
def rob(self, nums):
def rob(nums):
now = prev = 0
for n in nums:
now, prev = max(now, prev + n), now
return now
return max(rob(nums[len(nums) != 1:]), rob(nums[:-1])) | 28.962963 | 78 | 0.446292 |
class Solution:
def rob(self, nums):
if(len(nums)==1): return nums[0]
last, now = 0, 0
last1, now1 = 0, 0
for i, n in enumerate(nums):
if i<len(nums)-1:
last, now = now, max(n+last,now)
print(now, last)
if i>0:
last1, now1 = now1, max(n+last1,now1)
return max(now,now1)
class Solution:
def rob(self, nums):
def rob(nums):
now = prev = 0
for n in nums:
now, prev = max(now, prev + n), now
return now
return max(rob(nums[len(nums) != 1:]), rob(nums[:-1])) | true | true |
f70f4bf8220077d2a182387cb01ae41173d9d9fa | 1,026 | py | Python | awacs/aps.py | mtrspringer/awacs | a5d2fe37c2a468a977536c4d6e66dda7da69717f | [
"BSD-2-Clause"
] | null | null | null | awacs/aps.py | mtrspringer/awacs | a5d2fe37c2a468a977536c4d6e66dda7da69717f | [
"BSD-2-Clause"
] | 19 | 2020-11-30T06:43:54.000Z | 2022-02-21T09:02:54.000Z | awacs/aps.py | mtrspringer/awacs | a5d2fe37c2a468a977536c4d6e66dda7da69717f | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action as BaseAction
from aws import BaseARN
# Human-readable service name and the IAM action namespace prefix used by
# the Action/ARN classes below.
service_name = 'Amazon Managed Service for Prometheus'
prefix = 'aps'
class Action(BaseAction):
    """IAM policy action in the 'aps' (Amazon Managed Service for Prometheus) namespace."""
    def __init__(self, action=None):
        sup = super(Action, self)
        sup.__init__(prefix, action)
class ARN(BaseARN):
    """ARN builder for Amazon Managed Service for Prometheus resources."""
    def __init__(self, resource='', region='', account=''):
        sup = super(ARN, self)
        sup.__init__(service=prefix, resource=resource, region=region,
                     account=account)
# IAM actions defined by the 'aps' service, one module-level constant each.
CreateWorkspace = Action('CreateWorkspace')
DeleteWorkspace = Action('DeleteWorkspace')
DescribeWorkspace = Action('DescribeWorkspace')
GetLabels = Action('GetLabels')
GetMetricMetadata = Action('GetMetricMetadata')
GetSeries = Action('GetSeries')
ListWorkspaces = Action('ListWorkspaces')
QueryMetrics = Action('QueryMetrics')
RemoteWrite = Action('RemoteWrite')
UpdateWorkspaceAlias = Action('UpdateWorkspaceAlias')
| 28.5 | 70 | 0.725146 |
from aws import Action as BaseAction
from aws import BaseARN
service_name = 'Amazon Managed Service for Prometheus'
prefix = 'aps'
class Action(BaseAction):
def __init__(self, action=None):
sup = super(Action, self)
sup.__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource='', region='', account=''):
sup = super(ARN, self)
sup.__init__(service=prefix, resource=resource, region=region,
account=account)
CreateWorkspace = Action('CreateWorkspace')
DeleteWorkspace = Action('DeleteWorkspace')
DescribeWorkspace = Action('DescribeWorkspace')
GetLabels = Action('GetLabels')
GetMetricMetadata = Action('GetMetricMetadata')
GetSeries = Action('GetSeries')
ListWorkspaces = Action('ListWorkspaces')
QueryMetrics = Action('QueryMetrics')
RemoteWrite = Action('RemoteWrite')
UpdateWorkspaceAlias = Action('UpdateWorkspaceAlias')
| true | true |
f70f4cea7dfe68ebcc562953c631258be61fcee2 | 7,789 | py | Python | kerasify.py | anmalara/kerasify | a32a2715d379ad58ef318b545acbaa6ea5983cda | [
"MIT"
] | 185 | 2016-11-08T11:45:35.000Z | 2022-03-30T12:53:30.000Z | kerasify.py | anmalara/kerasify | a32a2715d379ad58ef318b545acbaa6ea5983cda | [
"MIT"
] | 12 | 2016-12-25T00:25:52.000Z | 2021-05-07T04:02:26.000Z | kerasify.py | Krypton0001/KerasToCpp | 01924c857ee46d7c0380a3d3ceac9ff8de6c8bf4 | [
"MIT"
] | 61 | 2016-11-16T19:15:34.000Z | 2021-11-05T19:33:59.000Z | import numpy as np
import struct
# Layer type tags written by export_model() into the binary model file.
# NOTE(review): presumably these must stay in sync with the consumer that
# reads the exported file — confirm before renumbering.
LAYER_DENSE = 1
LAYER_CONVOLUTION2D = 2
LAYER_FLATTEN = 3
LAYER_ELU = 4
LAYER_ACTIVATION = 5
LAYER_MAXPOOLING2D = 6
LAYER_LSTM = 7
LAYER_EMBEDDING = 8

# Activation function tags written after each layer that has an activation.
ACTIVATION_LINEAR = 1
ACTIVATION_RELU = 2
ACTIVATION_SOFTPLUS = 3
ACTIVATION_SIGMOID = 4
ACTIVATION_TANH = 5
ACTIVATION_HARD_SIGMOID = 6
def write_floats(file, floats):
    '''
    Write a flat sequence of floats to `file` as packed native-order
    32-bit floats, in chunks of 1024 values.

    Chunking bounds the size of the temporary argument tuple handed to
    struct.pack(), preventing memory blow-up for very large arrays.
    `floats` may be any sliceable sequence with a length (list, numpy
    array, ...).
    '''
    step = 1024
    written = 0

    # Plain range() replaces the original np.arange(), which allocated an
    # index array just to drive the loop; behavior is identical.
    for i in range(0, len(floats), step):
        remaining = min(len(floats) - i, step)
        written += remaining
        file.write(struct.pack('=%sf' % remaining, *floats[i:i + remaining]))

    # Sanity check: every value was emitted exactly once.
    assert written == len(floats)
def export_model(model, filename):
    """Serialize a Keras 1.x `model` to `filename` as a flat binary stream.

    Format: an unsigned-int layer count, then per layer a type tag
    (LAYER_*), layer-specific shape ints, packed float32 weights, and an
    activation tag (ACTIVATION_*) where applicable. All ints are packed
    with struct format 'I' (native-order unsigned int). Dropout layers
    are inference no-ops and are skipped entirely.
    """
    with open(filename, 'wb') as f:

        def write_activation(activation):
            # Map a Keras activation name to its binary tag.
            if activation == 'linear':
                f.write(struct.pack('I', ACTIVATION_LINEAR))
            elif activation == 'relu':
                f.write(struct.pack('I', ACTIVATION_RELU))
            elif activation == 'softplus':
                f.write(struct.pack('I', ACTIVATION_SOFTPLUS))
            elif activation == 'tanh':
                f.write(struct.pack('I', ACTIVATION_TANH))
            elif activation == 'sigmoid':
                f.write(struct.pack('I', ACTIVATION_SIGMOID))
            elif activation == 'hard_sigmoid':
                f.write(struct.pack('I', ACTIVATION_HARD_SIGMOID))
            else:
                assert False, "Unsupported activation type: %s" % activation

        # Dropout has no effect at inference time, so it is not exported.
        model_layers = [l for l in model.layers if type(l).__name__ not in ['Dropout']]
        num_layers = len(model_layers)
        f.write(struct.pack('I', num_layers))

        for layer in model_layers:
            layer_type = type(layer).__name__

            if layer_type == 'Dense':
                # get_weights() -> [kernel, bias]
                weights = layer.get_weights()[0]
                biases = layer.get_weights()[1]
                activation = layer.get_config()['activation']

                f.write(struct.pack('I', LAYER_DENSE))
                f.write(struct.pack('I', weights.shape[0]))
                f.write(struct.pack('I', weights.shape[1]))
                f.write(struct.pack('I', biases.shape[0]))

                weights = weights.flatten()
                biases = biases.flatten()

                write_floats(f, weights)
                write_floats(f, biases)

                write_activation(activation)

            elif layer_type == 'Convolution2D':
                assert layer.border_mode == 'valid', "Only border_mode=valid is implemented"

                weights = layer.get_weights()[0]
                biases = layer.get_weights()[1]
                activation = layer.get_config()['activation']

                # The kernel is accessed in reverse order. To simplify the C side we'll
                # flip the weight matrix for each kernel.
                weights = weights[:,:,::-1,::-1]

                f.write(struct.pack('I', LAYER_CONVOLUTION2D))
                f.write(struct.pack('I', weights.shape[0]))
                f.write(struct.pack('I', weights.shape[1]))
                f.write(struct.pack('I', weights.shape[2]))
                f.write(struct.pack('I', weights.shape[3]))
                f.write(struct.pack('I', biases.shape[0]))

                weights = weights.flatten()
                biases = biases.flatten()

                write_floats(f, weights)
                write_floats(f, biases)

                write_activation(activation)

            elif layer_type == 'Flatten':
                # No parameters: the tag alone is sufficient.
                f.write(struct.pack('I', LAYER_FLATTEN))

            elif layer_type == 'ELU':
                f.write(struct.pack('I', LAYER_ELU))
                f.write(struct.pack('f', layer.alpha))

            elif layer_type == 'Activation':
                activation = layer.get_config()['activation']

                f.write(struct.pack('I', LAYER_ACTIVATION))
                write_activation(activation)

            elif layer_type == 'MaxPooling2D':
                assert layer.border_mode == 'valid', "Only border_mode=valid is implemented"

                pool_size = layer.get_config()['pool_size']

                f.write(struct.pack('I', LAYER_MAXPOOLING2D))
                f.write(struct.pack('I', pool_size[0]))
                f.write(struct.pack('I', pool_size[1]))

            elif layer_type == 'LSTM':
                inner_activation = layer.get_config()['inner_activation']
                activation = layer.get_config()['activation']
                return_sequences = int(layer.get_config()['return_sequences'])

                # Keras 1.x LSTM weight order: input (i), cell candidate (c),
                # forget (f), output (o) gates, each as (W, U, b).
                weights = layer.get_weights()
                W_i = weights[0]
                U_i = weights[1]
                b_i = weights[2]

                W_c = weights[3]
                U_c = weights[4]
                b_c = weights[5]

                W_f = weights[6]
                U_f = weights[7]
                b_f = weights[8]

                W_o = weights[9]
                U_o = weights[10]
                b_o = weights[11]

                f.write(struct.pack('I', LAYER_LSTM))
                # Shapes are written gate by gate in i, f, c, o order.
                f.write(struct.pack('I', W_i.shape[0]))
                f.write(struct.pack('I', W_i.shape[1]))
                f.write(struct.pack('I', U_i.shape[0]))
                f.write(struct.pack('I', U_i.shape[1]))
                f.write(struct.pack('I', b_i.shape[0]))

                f.write(struct.pack('I', W_f.shape[0]))
                f.write(struct.pack('I', W_f.shape[1]))
                f.write(struct.pack('I', U_f.shape[0]))
                f.write(struct.pack('I', U_f.shape[1]))
                f.write(struct.pack('I', b_f.shape[0]))

                f.write(struct.pack('I', W_c.shape[0]))
                f.write(struct.pack('I', W_c.shape[1]))
                f.write(struct.pack('I', U_c.shape[0]))
                f.write(struct.pack('I', U_c.shape[1]))
                f.write(struct.pack('I', b_c.shape[0]))

                f.write(struct.pack('I', W_o.shape[0]))
                f.write(struct.pack('I', W_o.shape[1]))
                f.write(struct.pack('I', U_o.shape[0]))
                f.write(struct.pack('I', U_o.shape[1]))
                f.write(struct.pack('I', b_o.shape[0]))

                W_i = W_i.flatten()
                U_i = U_i.flatten()
                b_i = b_i.flatten()
                W_f = W_f.flatten()
                U_f = U_f.flatten()
                b_f = b_f.flatten()
                W_c = W_c.flatten()
                U_c = U_c.flatten()
                b_c = b_c.flatten()
                W_o = W_o.flatten()
                U_o = U_o.flatten()
                b_o = b_o.flatten()

                # Weight payloads follow in the same i, f, c, o order.
                write_floats(f, W_i)
                write_floats(f, U_i)
                write_floats(f, b_i)
                write_floats(f, W_f)
                write_floats(f, U_f)
                write_floats(f, b_f)
                write_floats(f, W_c)
                write_floats(f, U_c)
                write_floats(f, b_c)
                write_floats(f, W_o)
                write_floats(f, U_o)
                write_floats(f, b_o)

                write_activation(inner_activation)
                write_activation(activation)
                f.write(struct.pack('I', return_sequences))

            elif layer_type == 'Embedding':
                weights = layer.get_weights()[0]

                f.write(struct.pack('I', LAYER_EMBEDDING))
                f.write(struct.pack('I', weights.shape[0]))
                f.write(struct.pack('I', weights.shape[1]))

                weights = weights.flatten()

                write_floats(f, weights)

            else:
                assert False, "Unsupported layer type: %s" % layer_type
| 35.894009 | 92 | 0.5147 | import numpy as np
import struct
LAYER_DENSE = 1
LAYER_CONVOLUTION2D = 2
LAYER_FLATTEN = 3
LAYER_ELU = 4
LAYER_ACTIVATION = 5
LAYER_MAXPOOLING2D = 6
LAYER_LSTM = 7
LAYER_EMBEDDING = 8
ACTIVATION_LINEAR = 1
ACTIVATION_RELU = 2
ACTIVATION_SOFTPLUS = 3
ACTIVATION_SIGMOID = 4
ACTIVATION_TANH = 5
ACTIVATION_HARD_SIGMOID = 6
def write_floats(file, floats):
    """Write *floats* to binary *file* as native-endian float32 values.

    Values are packed in chunks of at most 1024 per struct.pack call to
    bound the argument-list size; the total count written is asserted to
    equal len(floats) as a sanity check.
    """
    step = 1024  # floats per struct.pack call
    written = 0
    # Plain range() suffices here; the original np.arange produced numpy
    # integer indices for what is ordinary Python slicing.
    for start in range(0, len(floats), step):
        count = min(len(floats) - start, step)
        written += count
        file.write(struct.pack('=%df' % count, *floats[start:start + count]))
    assert written == len(floats)
def export_model(model, filename):
    """Serialize a Keras *model* to *filename* in a simple binary format.

    Layout: a uint32 layer count, then for each layer a uint32 type tag
    (LAYER_*) followed by that layer's dimensions (uint32s), flattened
    float32 weights, and uint32 activation codes (ACTIVATION_*).
    Dropout layers are skipped (identity at inference time); any other
    unsupported layer or activation aborts via assert.
    """
    with open(filename, 'wb') as f:
        # Map a Keras activation name to its uint32 code in the output file.
        def write_activation(activation):
            if activation == 'linear':
                f.write(struct.pack('I', ACTIVATION_LINEAR))
            elif activation == 'relu':
                f.write(struct.pack('I', ACTIVATION_RELU))
            elif activation == 'softplus':
                f.write(struct.pack('I', ACTIVATION_SOFTPLUS))
            elif activation == 'tanh':
                f.write(struct.pack('I', ACTIVATION_TANH))
            elif activation == 'sigmoid':
                f.write(struct.pack('I', ACTIVATION_SIGMOID))
            elif activation == 'hard_sigmoid':
                f.write(struct.pack('I', ACTIVATION_HARD_SIGMOID))
            else:
                assert False, "Unsupported activation type: %s" % activation
        # Dropout layers are dropped: they are a no-op at inference time.
        model_layers = [l for l in model.layers if type(l).__name__ not in ['Dropout']]
        num_layers = len(model_layers)
        f.write(struct.pack('I', num_layers))
        for layer in model_layers:
            layer_type = type(layer).__name__
            if layer_type == 'Dense':
                # Dense: tag, W rows, W cols, bias len, W floats, b floats, activation.
                weights = layer.get_weights()[0]
                biases = layer.get_weights()[1]
                activation = layer.get_config()['activation']
                f.write(struct.pack('I', LAYER_DENSE))
                f.write(struct.pack('I', weights.shape[0]))
                f.write(struct.pack('I', weights.shape[1]))
                f.write(struct.pack('I', biases.shape[0]))
                weights = weights.flatten()
                biases = biases.flatten()
                write_floats(f, weights)
                write_floats(f, biases)
                write_activation(activation)
            elif layer_type == 'Convolution2D':
                assert layer.border_mode == 'valid', "Only border_mode=valid is implemented"
                weights = layer.get_weights()[0]
                biases = layer.get_weights()[1]
                activation = layer.get_config()['activation']
                # flip the weight matrix for each kernel.
                # (The consumer presumably implements convolution rather than
                # correlation, so kernels are pre-flipped here — TODO confirm.)
                weights = weights[:,:,::-1,::-1]
                f.write(struct.pack('I', LAYER_CONVOLUTION2D))
                f.write(struct.pack('I', weights.shape[0]))
                f.write(struct.pack('I', weights.shape[1]))
                f.write(struct.pack('I', weights.shape[2]))
                f.write(struct.pack('I', weights.shape[3]))
                f.write(struct.pack('I', biases.shape[0]))
                weights = weights.flatten()
                biases = biases.flatten()
                write_floats(f, weights)
                write_floats(f, biases)
                write_activation(activation)
            elif layer_type == 'Flatten':
                # Flatten carries no parameters: tag only.
                f.write(struct.pack('I', LAYER_FLATTEN))
            elif layer_type == 'ELU':
                # ELU: tag plus its single float alpha parameter.
                f.write(struct.pack('I', LAYER_ELU))
                f.write(struct.pack('f', layer.alpha))
            elif layer_type == 'Activation':
                activation = layer.get_config()['activation']
                f.write(struct.pack('I', LAYER_ACTIVATION))
                write_activation(activation)
            elif layer_type == 'MaxPooling2D':
                assert layer.border_mode == 'valid', "Only border_mode=valid is implemented"
                pool_size = layer.get_config()['pool_size']
                f.write(struct.pack('I', LAYER_MAXPOOLING2D))
                f.write(struct.pack('I', pool_size[0]))
                f.write(struct.pack('I', pool_size[1]))
            elif layer_type == 'LSTM':
                inner_activation = layer.get_config()['inner_activation']
                activation = layer.get_config()['activation']
                return_sequences = int(layer.get_config()['return_sequences'])
                # Old-style Keras LSTM weight list: 12 arrays, ordered as
                # input/candidate/forget/output gate triples (W, U, b).
                weights = layer.get_weights()
                W_i = weights[0]
                U_i = weights[1]
                b_i = weights[2]
                W_c = weights[3]
                U_c = weights[4]
                b_c = weights[5]
                W_f = weights[6]
                U_f = weights[7]
                b_f = weights[8]
                W_o = weights[9]
                U_o = weights[10]
                b_o = weights[11]
                # Dimensions are emitted in i, f, c, o gate order (note: this
                # differs from the weights-list order above).
                f.write(struct.pack('I', LAYER_LSTM))
                f.write(struct.pack('I', W_i.shape[0]))
                f.write(struct.pack('I', W_i.shape[1]))
                f.write(struct.pack('I', U_i.shape[0]))
                f.write(struct.pack('I', U_i.shape[1]))
                f.write(struct.pack('I', b_i.shape[0]))
                f.write(struct.pack('I', W_f.shape[0]))
                f.write(struct.pack('I', W_f.shape[1]))
                f.write(struct.pack('I', U_f.shape[0]))
                f.write(struct.pack('I', U_f.shape[1]))
                f.write(struct.pack('I', b_f.shape[0]))
                f.write(struct.pack('I', W_c.shape[0]))
                f.write(struct.pack('I', W_c.shape[1]))
                f.write(struct.pack('I', U_c.shape[0]))
                f.write(struct.pack('I', U_c.shape[1]))
                f.write(struct.pack('I', b_c.shape[0]))
                f.write(struct.pack('I', W_o.shape[0]))
                f.write(struct.pack('I', W_o.shape[1]))
                f.write(struct.pack('I', U_o.shape[0]))
                f.write(struct.pack('I', U_o.shape[1]))
                f.write(struct.pack('I', b_o.shape[0]))
                W_i = W_i.flatten()
                U_i = U_i.flatten()
                b_i = b_i.flatten()
                W_f = W_f.flatten()
                U_f = U_f.flatten()
                b_f = b_f.flatten()
                W_c = W_c.flatten()
                U_c = U_c.flatten()
                b_c = b_c.flatten()
                W_o = W_o.flatten()
                U_o = U_o.flatten()
                b_o = b_o.flatten()
                # Weight payloads follow in i, f, c, o order to match the dims.
                write_floats(f, W_i)
                write_floats(f, U_i)
                write_floats(f, b_i)
                write_floats(f, W_f)
                write_floats(f, U_f)
                write_floats(f, b_f)
                write_floats(f, W_c)
                write_floats(f, U_c)
                write_floats(f, b_c)
                write_floats(f, W_o)
                write_floats(f, U_o)
                write_floats(f, b_o)
                write_activation(inner_activation)
                write_activation(activation)
                f.write(struct.pack('I', return_sequences))
            elif layer_type == 'Embedding':
                # Embedding: tag, vocab size, embedding dim, then the table.
                weights = layer.get_weights()[0]
                f.write(struct.pack('I', LAYER_EMBEDDING))
                f.write(struct.pack('I', weights.shape[0]))
                f.write(struct.pack('I', weights.shape[1]))
                weights = weights.flatten()
                write_floats(f, weights)
            else:
                assert False, "Unsupported layer type: %s" % layer_type
f70f4d074c2ce70c7edb9ce44b7c84467d1ab3f9 | 1,084 | py | Python | aiodata/db/connection.py | jan-mue/aiodata | f1016d98520c8785e5c0c3db0485f04fa34257ce | [
"MIT"
] | null | null | null | aiodata/db/connection.py | jan-mue/aiodata | f1016d98520c8785e5c0c3db0485f04fa34257ce | [
"MIT"
] | null | null | null | aiodata/db/connection.py | jan-mue/aiodata | f1016d98520c8785e5c0c3db0485f04fa34257ce | [
"MIT"
] | null | null | null | import sqlalchemy
from functools import partial
async def create_engine(*args, **kwargs):
    """Create a SQLAlchemy engine whose raw connections support ``async with``.

    An async pool matching the engine's DBAPI driver is created (asyncpg,
    aioodbc or aiomysql; the engine's own pool is the fallback), and the
    engine pool's connection creator is wrapped so every DBAPI connection
    object gains ``__aenter__``/``__aexit__`` methods that acquire and
    release a connection from that async pool.

    Arguments are forwarded unchanged to :func:`sqlalchemy.create_engine`.
    """
    engine = sqlalchemy.create_engine(*args, **kwargs)
    if engine.driver == "psycopg2":
        import asyncpg
        p = await asyncpg.create_pool(str(engine.url))
    elif engine.driver == "pyodbc":
        import aioodbc
        p = await aioodbc.create_pool(**engine.url.translate_connect_args())
    elif engine.driver == "mysqldb":
        import aiomysql
        # BUG FIX: aiomysql.create_pool returns a coroutine; it must be
        # awaited like the other drivers, otherwise p is never a pool.
        p = await aiomysql.create_pool(**engine.url.translate_connect_args())
    else:
        p = engine.pool
    old_creator = engine.pool._creator
    def creator(*a, **kw):
        # Wrap each newly-created DBAPI connection with async context methods.
        # NOTE(review): special methods are normally looked up on the type,
        # not the instance, so these attributes only work for callers that
        # invoke result.__aenter__()/__aexit__() explicitly — confirm usage.
        result = old_creator(*a, **kw)
        async def aenter(self):
            self._async_conn = p.acquire()
            return await self._async_conn.__aenter__()
        # BUG FIX: __aexit__ receives exception info from the async-with
        # protocol; accept it (defaulted for explicit calls) and forward it.
        async def aexit(self, exc_type=None, exc=None, tb=None):
            return await self._async_conn.__aexit__(exc_type, exc, tb)
        result.__aenter__ = partial(aenter, result)
        result.__aexit__ = partial(aexit, result)
        # BUG FIX: the wrapped connection was never returned, so the pool's
        # creator produced None for every connection.
        return result
    engine.pool._creator = creator
    return engine
| 28.526316 | 77 | 0.619926 | import sqlalchemy
from functools import partial
async def create_engine(*args, **kwargs):
engine = sqlalchemy.create_engine(*args, **kwargs)
if engine.driver == "psycopg2":
import asyncpg
p = await asyncpg.create_pool(str(engine.url))
elif engine.driver == "pyodbc":
import aioodbc
p = await aioodbc.create_pool(**engine.url.translate_connect_args())
elif engine.driver == "mysqldb":
import aiomysql
p = aiomysql.create_pool(**engine.url.translate_connect_args())
else:
p = engine.pool
old_creator = engine.pool._creator
def creator(*a, **kw):
result = old_creator(*a, **kw)
async def aenter(self):
self._async_conn = p.acquire()
return await self._async_conn.__aenter__()
async def aexit(self):
return await self._async_conn.__aexit__()
result.__aenter__ = partial(aenter, result)
result.__aexit__ = partial(aexit, result)
engine.pool._creator = creator
return engine
| true | true |
f70f4e13a5bf144e945fcaa58230dc35850c07e2 | 1,273 | py | Python | example-timeboard.py | hmain/cfn-datadog | 7c7bbcc63e6490e7c0d3ffcf07a546b71d222950 | [
"Apache-2.0"
] | 11 | 2018-01-15T15:00:53.000Z | 2019-08-07T01:44:59.000Z | example-timeboard.py | hmain/cfn-datadog | 7c7bbcc63e6490e7c0d3ffcf07a546b71d222950 | [
"Apache-2.0"
] | 3 | 2018-01-31T11:40:58.000Z | 2018-08-10T13:52:14.000Z | example-timeboard.py | hmain/cfn-datadog | 7c7bbcc63e6490e7c0d3ffcf07a546b71d222950 | [
"Apache-2.0"
] | 6 | 2018-01-16T12:38:11.000Z | 2019-04-18T11:33:09.000Z | from cfn_datadog import Timeboard, Graph, TemplateVariable, Definition, Request
from troposphere import Parameter, Template, Join, ImportValue, Sub
t = Template()
datadog_lambda_stackname = t.add_parameter(Parameter(
"DatadogLambdaStackname",
Type="String",
Description="Stack name of cfn-datadog"
))
time_board_arn = ImportValue(Sub("${DatadogLambdaStackname}-TimeboardDatadogLambdaArn"))
t.add_resource(Timeboard(
'ExampleTimeBoard',
ServiceToken=time_board_arn,
TimeboardTitle="Automated Datadog Test Board",
description="Automated Datadog timeboard created through Cloudformation",
graphs=[Graph(
GraphTitle="Example graph",
definition=Definition(
events=[],
requests=[Request(
q="avg:system.mem.free{*}"
)],
viz="timeseries"
)
), Graph(
GraphTitle="Example graph 2",
definition=Definition(
events=[],
requests=[Request(
q="avg:system.mem.free{*}"
)],
viz="timeseries"
)
),
],
template_variables=[TemplateVariable(
name="host1",
prefix="host",
default="host:my-host"
)],
read_only=True
))
print(t.to_json())
| 26.520833 | 88 | 0.616654 | from cfn_datadog import Timeboard, Graph, TemplateVariable, Definition, Request
from troposphere import Parameter, Template, Join, ImportValue, Sub
t = Template()
datadog_lambda_stackname = t.add_parameter(Parameter(
"DatadogLambdaStackname",
Type="String",
Description="Stack name of cfn-datadog"
))
time_board_arn = ImportValue(Sub("${DatadogLambdaStackname}-TimeboardDatadogLambdaArn"))
t.add_resource(Timeboard(
'ExampleTimeBoard',
ServiceToken=time_board_arn,
TimeboardTitle="Automated Datadog Test Board",
description="Automated Datadog timeboard created through Cloudformation",
graphs=[Graph(
GraphTitle="Example graph",
definition=Definition(
events=[],
requests=[Request(
q="avg:system.mem.free{*}"
)],
viz="timeseries"
)
), Graph(
GraphTitle="Example graph 2",
definition=Definition(
events=[],
requests=[Request(
q="avg:system.mem.free{*}"
)],
viz="timeseries"
)
),
],
template_variables=[TemplateVariable(
name="host1",
prefix="host",
default="host:my-host"
)],
read_only=True
))
print(t.to_json())
| true | true |
f70f4e7e24e09490ba7a9712796660d2ae2860e2 | 17,884 | py | Python | vivisect/qt/memory.py | bat-serjo/vivisect | f60934a2c8c51c7acdba52a65756e717a108a440 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | vivisect/qt/memory.py | bat-serjo/vivisect | f60934a2c8c51c7acdba52a65756e717a108a440 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | vivisect/qt/memory.py | bat-serjo/vivisect | f60934a2c8c51c7acdba52a65756e717a108a440 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import logging
from PyQt5.QtWidgets import *
import envi.qt.memory as e_mem_qt
import envi.qt.memcanvas as e_mem_canvas
import vqt.hotkeys as vq_hotkey
import vivisect.base as viv_base
import vivisect.renderers as viv_rend
import vivisect.qt.views as viv_q_views
import vivisect.qt.ctxmenu as viv_q_ctxmenu
from vqt.main import *
from vivisect.const import *
logger = logging.getLogger(__name__)
# FIXME HACK where do these really live?
qt_horizontal = 1
qt_vertical = 2
class VivCanvasBase(vq_hotkey.HotKeyMixin, e_mem_canvas.VQMemoryCanvas):
    """
    Base memory canvas for vivisect GUI windows.

    Combines the envi Qt memory canvas with the hotkey mixin and binds the
    standard workspace hotkeys (make code/function/string/..., navigation,
    comments, bookmarks).  Hotkey handlers act on self._canv_curva, the
    currently selected virtual address (None when nothing is selected).
    """
    def __init__(self, *args, **kwargs):
        e_mem_canvas.VQMemoryCanvas.__init__(self, *args, **kwargs)
        vq_hotkey.HotKeyMixin.__init__(self)
        # The canvas' memory object is the vivisect workspace itself.
        self.vw = self.mem
        # Most recently applied structure name, reused by "make struct again".
        self._last_sname = None
        self.addHotKey('c', 'viv:make:code')
        self.addHotKey('f', 'viv:make:function')
        self.addHotKey('s', 'viv:make:string')
        self.addHotKey('p', 'viv:make:pointer')
        self.addHotKey('u', 'viv:make:unicode')
        self.addHotKey('n', 'viv:setname')
        self.addHotKey('g', 'viv:getlocation')
        self.addHotKey(';', 'viv:comment')
        self.addHotKey('S', 'viv:make:struct')
        self.addHotKey('ctrl+S', 'viv:make:struct:again')
        self.addHotKey('ctrl+meta+S', 'viv:make:struct:multi')
        self.addHotKey('U', 'viv:undefine')
        self.addHotKey('ctrl+p', 'viv:preview:instr')
        self.addHotKey('B', 'viv:bookmark')
        # Number hotkeys: ctrl+<key> makes an N-byte number; note ctrl+6
        # maps to a 16-byte number, not 6.
        self.addHotKey('ctrl+1', 'viv:make:number:one')
        self.addHotKey('ctrl+2', 'viv:make:number:two')
        self.addHotKey('ctrl+4', 'viv:make:number:four')
        self.addHotKey('ctrl+6', 'viv:make:number:sixteen')
        self.addHotKey('ctrl+8', 'viv:make:number:eight')
        self.addHotKey('down', 'viv:nav:nextva')
        self.addHotKey('up', 'viv:nav:prevva')
        self.addHotKey('ctrl+down', 'viv:nav:nextundef')
        self.addHotKey('ctrl+up', 'viv:nav:prevundef')
        self.loadHotKeys(self.vw._viv_gui._vq_settings)
        # All extenders must implement vivColorMap
        vqtconnect(self.vivColorMap, 'viv:colormap')
    def vivColorMap(self, event, einfo):
        # einfo is a {va: color-string} map broadcast on the viv:colormap channel.
        self._applyColorMap(einfo)
    def _applyColorMap(self, cmap):
        # Inject CSS rules into the #cmapstyle node so each mapped va's
        # .envi-va-0x........ span gets the requested background color.
        page = self.page()
        inner = ''
        for va, color in cmap.items():
            inner += '.envi-va-0x%.8x { color: #000000; background-color: %s }\n' % (va, color)
        js = 'var node = document.querySelector("#cmapstyle"); node.innerHTML = `%s`;' % inner
        page.runJavaScript(js)
    @vq_hotkey.hotkey('viv:nav:nextva')
    def _hotkey_nav_nextva(self):
        # Move selection to the address just past the current location
        # (treat unmapped/unknown bytes as size-1 locations).
        if self._canv_curva is None:
            return
        loc = self.vw.getLocation(self._canv_curva)
        if loc is None:
            loc = (self._canv_curva, 1, None, None)
        nextva = loc[0] + loc[1]
        self._selectVa(nextva)
    @vq_hotkey.hotkey('viv:nav:prevva')
    def _hotkey_nav_prevva(self):
        # Move selection to the start of the previous location (or one byte
        # back when no previous location is known).
        if self._canv_curva is None:
            return
        loc = self.vw.getPrevLocation(self._canv_curva)
        if loc is None:
            loc = (self._canv_curva - 1, 1, None, None)
        self._selectVa(loc[0])
    @vq_hotkey.hotkey('viv:nav:nextundef')
    def _hotkey_nav_nextundef(self):
        # Jump forward to the next boundary between defined and undefined
        # bytes (from undef: next defined; from defined: next undefined).
        if self._canv_curva is None:
            return
        vw = self.vw
        va = self._canv_curva
        loc = vw.getLocation(va)
        if loc is None:
            # find next defined location
            while loc is None and vw.isValidPointer(va):
                va += 1
                loc = vw.getLocation(va)
            va -= 1
            lastloc = (va, 1, 0, 0)
        else:
            # find next undefined location
            while loc is not None:
                va = loc[0]
                lastloc = loc
                loc = vw.getLocation(va + loc[1])
        # if we didn't fall off the map
        if vw.isValidPointer(va+lastloc[1]):
            va += lastloc[1]
        self._navExpression(hex(va))
        self._selectVa(va)
    @vq_hotkey.hotkey('viv:nav:prevundef')
    def _hotkey_nav_prevundef(self):
        # Mirror of nextundef, walking backwards.
        # NOTE(review): _navExpression is provided by a subclass
        # (VQVivMemoryCanvas), not by this base class — confirm all
        # instantiated subclasses define it.
        if self._canv_curva is None:
            return
        vw = self.vw
        va = self._canv_curva
        loc = vw.getLocation(va)
        if loc is None:
            # find previous defined location
            while loc is None and vw.isValidPointer(va):
                va -= 1
                loc = vw.getLocation(va)
            if loc is not None:
                va = loc[0]
        else:
            # find previous undefined location
            while loc is not None:
                va = loc[0]
                loc = vw.getLocation(va-1)
            # if we fell off the end of a map
            if vw.isValidPointer(va-1):
                va -= 1
        self._navExpression(hex(va))
        self._selectVa(va)
    @vq_hotkey.hotkey('viv:make:code')
    def _hotkey_make_code(self):
        if self._canv_curva is not None:
            self.vw.makeCode(self._canv_curva)
    @vq_hotkey.hotkey('viv:make:function')
    def _hotkey_make_function(self):
        if self._canv_curva is not None:
            logger.debug('new function (manual): 0x%x', self._canv_curva)
            self.vw.makeFunction(self._canv_curva)
    @vq_hotkey.hotkey('viv:make:string')
    def _hotkey_make_string(self):
        if self._canv_curva is not None:
            self.vw.makeString(self._canv_curva)
    @vq_hotkey.hotkey('viv:make:pointer')
    def _hotkey_make_pointer(self):
        if self._canv_curva is not None:
            self.vw.makePointer(self._canv_curva)
    @vq_hotkey.hotkey('viv:make:unicode')
    def _hotkey_make_unicode(self):
        if self._canv_curva is not None:
            self.vw.makeUnicode(self._canv_curva)
    @vq_hotkey.hotkey('viv:undefine')
    def _hotkey_undefine(self):
        if self._canv_curva is not None:
            self.vw.delLocation(self._canv_curva)
    @vq_hotkey.hotkey('viv:getlocation')
    def _hotkey_getlocation(self):
        if self._canv_curva is not None:
            self.vw.getVivGui().getLocation(self._canv_curva)
    @vq_hotkey.hotkey('viv:setname')
    def _hotkey_setname(self):
        if self._canv_curva is not None:
            self.vw.getVivGui().setVaName(self._canv_curva, parent=self)
    @vq_hotkey.hotkey('viv:bookmark')
    def _hotkey_bookmark(self):
        if self._canv_curva is not None:
            self.vw.getVivGui().addBookmark(self._canv_curva, parent=self)
    @vq_hotkey.hotkey('viv:comment')
    def _hotkey_comment(self):
        if self._canv_curva is not None:
            self.vw.getVivGui().setVaComment(self._canv_curva, parent=self)
    @vq_hotkey.hotkey('viv:make:struct')
    def _hotkey_make_struct(self):
        # Prompt for a structure via the GUI; remember its name for the
        # "again"/"multi" variants below.
        if self._canv_curva is not None:
            sname = self.vw.getVivGui().makeStruct(self._canv_curva)
            if sname is not None:
                self._last_sname = sname
    @vq_hotkey.hotkey('viv:make:struct:again')
    def _hotkey_make_struct_again(self):
        # Re-apply the most recently used structure at the cursor.
        if self._canv_curva is not None:
            if self._last_sname is not None:
                self.vw.makeStructure(self._canv_curva, self._last_sname)
    @vq_hotkey.hotkey('viv:make:struct:multi')
    def _hotkey_make_struct_multi(self, parent=None):
        # Apply the last structure N consecutive times, prompting for N.
        if self._canv_curva is not None:
            if self._last_sname is not None:
                number, ok = QInputDialog.getText(parent, 'Make Multiple Consecutive Structs', 'Number of Structures')
                if ok:
                    curva = self._canv_curva
                    # base 0 allows hex/octal input as well as decimal
                    number = int(str(number), 0)
                    for count in range(number):
                        vs = self.vw.makeStructure(curva, self._last_sname)
                        curva += len(vs)
    def makeStructAgainMulti(self, va, parent=None):
        # NOTE(review): despite its name, this method edits the comment at
        # va (it duplicates the GUI's comment dialog) and never touches
        # structures — looks like a copy/paste leftover; confirm intent
        # before relying on it.
        if parent is None:
            parent = self
        curcomment = self.vw.getComment(va)
        if curcomment is None:
            curcomment = ''
        comment, ok = QInputDialog.getText(parent, 'Enter...', 'Comment', text=curcomment)
        if ok:
            self.vw.setComment(va, str(comment))
    @vq_hotkey.hotkey('viv:make:number:one')
    def _hotkey_make_number_one(self):
        if self._canv_curva is not None:
            self.vw.makeNumber(self._canv_curva, 1)
    @vq_hotkey.hotkey('viv:make:number:two')
    def _hotkey_make_number_two(self):
        if self._canv_curva is not None:
            self.vw.makeNumber(self._canv_curva, 2)
    @vq_hotkey.hotkey('viv:make:number:four')
    def _hotkey_make_number_four(self):
        if self._canv_curva is not None:
            self.vw.makeNumber(self._canv_curva, 4)
    @vq_hotkey.hotkey('viv:make:number:eight')
    def _hotkey_make_number_eight(self):
        if self._canv_curva is not None:
            self.vw.makeNumber(self._canv_curva, 8)
    @vq_hotkey.hotkey('viv:make:number:sixteen')
    def _hotkey_make_number_sixteen(self):
        if self._canv_curva is not None:
            self.vw.makeNumber(self._canv_curva, 16)
    @vq_hotkey.hotkey('viv:preview:instr')
    def _hotkey_preview_instr(self):
        if self._canv_curva is not None:
            self.vw.previewCode(self._canv_curva)
    def getVaTag(self, va):
        # Tag by the *base* of the containing location so clicks anywhere
        # inside a location resolve to the same tag.
        loc = self.mem.getLocation(va)
        if loc is not None:
            va = loc[L_VA]
        return e_mem_canvas.VQMemoryCanvas.getVaTag(self, va)
class VQVivMemoryCanvas(VivCanvasBase):
    """
    Concrete vivisect memory canvas: extends the rendered region as the
    user scrolls past either end, and wires the context menu / navigation
    callbacks into the enclosing memory window.
    """
    def _wheelEventCallback(self, data):
        '''
        Ugh. Yes. I know this sucks.
        But we have to do this because QtWebEngine does't natively let you get the max scroll size.
        You *have* to go through javascript to get those elements, and the only way to be sure of
        the function finishing (and being able to get a value outta js) is via this callback
        mechanism they set up.
        '''
        # data is [viewport height (used as min), current bottom, max scroll]
        # produced by the JS snippet in wheelEvent below.
        smin = data[0]
        spos = data[1]
        smax = data[2]
        if not len(self._canv_rendvas):
            pass
        elif spos >= smax:
            # Scrolled to the bottom: render up to 128 more bytes of the
            # memory map beyond the last rendered va.
            lastva, lastsize = self._canv_rendvas[-1]
            mapva, mapsize, mperm, mfname = self.vw.getMemoryMap(lastva)
            sizeremain = (mapva + mapsize) - (lastva + lastsize)
            if sizeremain:
                self.renderMemoryAppend(min(sizeremain, 128))
        elif spos == smin:
            # Scrolled to the top: prepend up to 128 bytes preceding the
            # first rendered va within its memory map.
            firstva, firstsize = self._canv_rendvas[0]
            mapva, mapsize, mperm, mfname = self.vw.getMemoryMap(firstva)
            sizeremain = firstva - mapva
            if sizeremain:
                self.renderMemoryPrepend(min(sizeremain, 128))
    def wheelEvent(self, event):
        # Ask the page (asynchronously) for its scroll extents, then let the
        # base class perform the actual scroll.
        page = self.page()
        page.runJavaScript('''
            var pcur = window.innerHeight + window.pageYOffset
            var scrollMaxY = Math.max(
                document.body.scrollHeight, document.documentElement.scrollHeight,
                document.body.offsetHeight, document.documentElement.offsetHeight,
                document.body.clientHeight, document.documentElement.clientHeight,
            );
            [window.innerHeight, pcur, scrollMaxY];
        ''', self._wheelEventCallback)
        return e_mem_canvas.VQMemoryCanvas.wheelEvent(self, event)
    def _clearColorMap(self):
        # Remove all injected color-map CSS rules (see _applyColorMap).
        page = self.page()
        page.runJavaScript('var node = document.querySelector("#cmapstyle"); node.innerHTML = "";')
    def _navExpression(self, expr):
        # Forward a navigation expression to the owning window, if wired up.
        if self._canv_navcallback:
            self._canv_navcallback(expr)
    def initMemWindowMenu(self, va, menu):
        nav = self.parent()  # our parent is always a VQVivMemoryWindow (nav target)
        viv_q_ctxmenu.buildContextMenu(self.vw, va=va, menu=menu, nav=nav)
    def _loc_helper(self, va):
        '''
        we assume we're being handed a valid va since renderMemory checks for valid MemoryMap

        Returns (base_va, offset) for the location containing va, or
        (va, 0) when no location is defined there.
        '''
        nloc = self.mem.getLocation(va)
        if nloc is None:
            return va, 0
        nva, nvsz, nvt, nvti = nloc
        return (nva, va-nva)
class VQVivMemoryView(e_mem_qt.VQMemoryWindow, viv_base.VivEventCore):
    """
    Vivisect memory window: an envi memory window wired into the vivisect
    event stream so the canvas re-renders on workspace changes, with
    collaborative "lead/follow" navigation when connected to a server.
    """
    def __init__(self, vw, vwqgui):
        self.vw = vw
        self.vwqgui = vwqgui
        # lead/follow state for shared-server navigation
        self._leading = False
        self._following = None
        self._follow_menu = None  # init'd in handler below
        e_mem_qt.VQMemoryWindow.__init__(self, vw, syms=vw, parent=vwqgui, mwname='viv')
        viv_base.VivEventCore.__init__(self, vw)
        vwqgui.addEventCore(self)
        # Keep the selected va vertically centered while navigating.
        self.mem_canvas._canv_rend_middle = True
        self.addHotKeyTarget('viv:xrefsto', self._viv_xrefsto)
        self.addHotKey('x', 'viv:xrefsto')
    def getRendToolsMenu(self):
        """Extend the base tools menu with lead/follow actions (server mode only)."""
        menu = e_mem_qt.VQMemoryWindow.getRendToolsMenu(self)
        if self.vw.server:
            leadact = QAction('lead', menu, checkable=True)
            def leadToggle():
                self._leading = not self._leading
                # We can only follow if not leading... (deep huh? ;) )
                self._follow_menu.setEnabled(not self._leading)
                if self._leading:
                    self._following = None
                    self.vw.iAmLeader(self.mwname)
                self.updateMemWindowTitle()
            def clearFollow():
                self._following = None
                self.updateMemWindowTitle()
            leadact.toggled.connect(leadToggle)
            menu.addAction(leadact)
            self._follow_menu = menu.addMenu('Follow..')
            self._follow_menu.addAction('(disable)', clearFollow)
        return menu
    def getExprTitle(self):
        """Build the window title from the current address expression,
        preferring the workspace name for the va and appending
        lead/follow status."""
        title = str(self.addr_entry.text())
        try:
            va = self.vw.parseExpression(title)
            name = self.vw.getName(va)
            if name is not None:
                title = name
        except Exception:
            title = 'expr error'
        if self._leading:
            title += ' (leading)'
        if self._following is not None:
            user, window = self._following
            title += ' (following %s %s)' % (user, window)
        return title
    def _getRenderVaSize(self):
        '''
        Vivisect steps in and attempts to map to locations when they exist.
        since we have a location database, let's use that to make sure we get a
        real location if it exists. otherwise, we end up in no-man's land,
        since we rely on labels, which only exist for the base of a location.
        '''
        addr, size = e_mem_qt.VQMemoryWindow._getRenderVaSize(self)
        if addr is None:
            return addr, size
        loc = self.vw.getLocation(addr)
        if loc is None:
            return addr, size
        return loc[L_VA], size
    def initMemoryCanvas(self, memobj, syms=None):
        # Use the vivisect-aware canvas rather than the generic envi one.
        return VQVivMemoryCanvas(memobj, syms=syms, parent=self)
    def _viv_xrefsto(self):
        """Pop up a floating xrefs-to view for the currently selected va."""
        if self.mem_canvas._canv_curva is not None:
            xrefs = self.vw.getXrefsTo(self.mem_canvas._canv_curva)
            if len(xrefs) == 0:
                self.vw.vprint('No xrefs found!')
                return
            title = 'Xrefs To: 0x%.8x' % self.mem_canvas._canv_curva
            view = viv_q_views.VQXrefView(self.vw, self.vwqgui, xrefs=xrefs, title=title)
            dock = self.vwqgui.vqDockWidget(view, floating=True)
            dock.resize(800, 600)
    def loadDefaultRenderers(self):
        import envi.memcanvas.renderers as e_render
        # FIXME check endianness
        self.mem_canvas.addRenderer("bytes", e_render.ByteRend())
        self.mem_canvas.addRenderer("u_int_16", e_render.ShortRend())
        self.mem_canvas.addRenderer("u_int_32", e_render.LongRend())
        self.mem_canvas.addRenderer("u_int_64", e_render.QuadRend())
        vivrend = viv_rend.WorkspaceRenderer(self.vw)
        self.mem_canvas.addRenderer('Viv', vivrend)
        self.mem_canvas.setRenderer('Viv')
    def _updateFunction(self, fva):
        # Re-render every code block of the function.
        for cbva, cbsize, cbfva in self.vw.getFunctionBlocks(fva):
            self.mem_canvas.renderMemoryUpdate(cbva, cbsize)
    # BUG FIX: a second, do-nothing VTE_IAMLEADER definition used to precede
    # VWE_SYMHINT here; it was dead code, silently shadowed by the
    # @idlethread VTE_IAMLEADER below, and has been removed.
    def VWE_SYMHINT(self, vw, event, einfo):
        va, idx, hint = einfo
        self.mem_canvas.renderMemoryUpdate(va, 1)
    def VWE_ADDLOCATION(self, vw, event, einfo):
        va, size, ltype, tinfo = einfo
        self.mem_canvas.renderMemoryUpdate(va, size)
    def VWE_DELLOCATION(self, vw, event, einfo):
        va, size, ltype, tinfo = einfo
        self.mem_canvas.renderMemoryUpdate(va, size)
    def VWE_ADDFUNCTION(self, vw, event, einfo):
        va, meta = einfo
        self.mem_canvas.renderMemoryUpdate(va, 1)
    def VWE_SETFUNCMETA(self, vw, event, einfo):
        fva, key, val = einfo
        self._updateFunction(fva)
    def VWE_SETFUNCARGS(self, vw, event, einfo):
        fva, fargs = einfo
        self._updateFunction(fva)
    def VWE_COMMENT(self, vw, event, einfo):
        va, cmnt = einfo
        self.mem_canvas.renderMemoryUpdate(va, 1)
    @idlethread
    def VWE_SETNAME(self, vw, event, einfo):
        # Renaming a va also refreshes every caller that displays the name.
        va, name = einfo
        self.mem_canvas.renderMemoryUpdate(va, 1)
        for fromva, tova, rtype, rflag in self.vw.getXrefsTo(va):
            self.mem_canvas.renderMemoryUpdate(fromva, 1)
    @idlethread
    def VTE_IAMLEADER(self, vw, event, einfo):
        # A remote user announced leadership: offer them in the Follow menu.
        user, fname = einfo
        def setFollow():
            self._following = einfo
            self.updateMemWindowTitle()
        self._follow_menu.addAction('%s - %s' % (user, fname), setFollow)
    @idlethread
    def VTE_FOLLOWME(self, vw, event, einfo):
        # Navigate along with the leader we are following (if any).
        user, fname, expr = einfo
        if self._following != (user, fname):
            return
        self.enviNavGoto(expr)
    @idlethread
    def enviNavGoto(self, expr, sizeexpr='256', rend=''):
        # When leading, broadcast our navigation to followers first.
        if self._leading:
            self.vw.followTheLeader(str(self.mwname), str(expr))
        return e_mem_qt.VQMemoryWindow.enviNavGoto(self, expr, sizeexpr=sizeexpr, rend=rend)
| 33.679849 | 118 | 0.61323 | import logging
from PyQt5.QtWidgets import *
import envi.qt.memory as e_mem_qt
import envi.qt.memcanvas as e_mem_canvas
import vqt.hotkeys as vq_hotkey
import vivisect.base as viv_base
import vivisect.renderers as viv_rend
import vivisect.qt.views as viv_q_views
import vivisect.qt.ctxmenu as viv_q_ctxmenu
from vqt.main import *
from vivisect.const import *
logger = logging.getLogger(__name__)
qt_horizontal = 1
qt_vertical = 2
class VivCanvasBase(vq_hotkey.HotKeyMixin, e_mem_canvas.VQMemoryCanvas):
def __init__(self, *args, **kwargs):
e_mem_canvas.VQMemoryCanvas.__init__(self, *args, **kwargs)
vq_hotkey.HotKeyMixin.__init__(self)
self.vw = self.mem
self._last_sname = None
self.addHotKey('c', 'viv:make:code')
self.addHotKey('f', 'viv:make:function')
self.addHotKey('s', 'viv:make:string')
self.addHotKey('p', 'viv:make:pointer')
self.addHotKey('u', 'viv:make:unicode')
self.addHotKey('n', 'viv:setname')
self.addHotKey('g', 'viv:getlocation')
self.addHotKey(';', 'viv:comment')
self.addHotKey('S', 'viv:make:struct')
self.addHotKey('ctrl+S', 'viv:make:struct:again')
self.addHotKey('ctrl+meta+S', 'viv:make:struct:multi')
self.addHotKey('U', 'viv:undefine')
self.addHotKey('ctrl+p', 'viv:preview:instr')
self.addHotKey('B', 'viv:bookmark')
self.addHotKey('ctrl+1', 'viv:make:number:one')
self.addHotKey('ctrl+2', 'viv:make:number:two')
self.addHotKey('ctrl+4', 'viv:make:number:four')
self.addHotKey('ctrl+6', 'viv:make:number:sixteen')
self.addHotKey('ctrl+8', 'viv:make:number:eight')
self.addHotKey('down', 'viv:nav:nextva')
self.addHotKey('up', 'viv:nav:prevva')
self.addHotKey('ctrl+down', 'viv:nav:nextundef')
self.addHotKey('ctrl+up', 'viv:nav:prevundef')
self.loadHotKeys(self.vw._viv_gui._vq_settings)
vqtconnect(self.vivColorMap, 'viv:colormap')
def vivColorMap(self, event, einfo):
self._applyColorMap(einfo)
def _applyColorMap(self, cmap):
page = self.page()
inner = ''
for va, color in cmap.items():
inner += '.envi-va-0x%.8x { color: #000000; background-color: %s }\n' % (va, color)
js = 'var node = document.querySelector("#cmapstyle"); node.innerHTML = `%s`;' % inner
page.runJavaScript(js)
@vq_hotkey.hotkey('viv:nav:nextva')
def _hotkey_nav_nextva(self):
if self._canv_curva is None:
return
loc = self.vw.getLocation(self._canv_curva)
if loc is None:
loc = (self._canv_curva, 1, None, None)
nextva = loc[0] + loc[1]
self._selectVa(nextva)
@vq_hotkey.hotkey('viv:nav:prevva')
def _hotkey_nav_prevva(self):
if self._canv_curva is None:
return
loc = self.vw.getPrevLocation(self._canv_curva)
if loc is None:
loc = (self._canv_curva - 1, 1, None, None)
self._selectVa(loc[0])
@vq_hotkey.hotkey('viv:nav:nextundef')
def _hotkey_nav_nextundef(self):
if self._canv_curva is None:
return
vw = self.vw
va = self._canv_curva
loc = vw.getLocation(va)
if loc is None:
while loc is None and vw.isValidPointer(va):
va += 1
loc = vw.getLocation(va)
va -= 1
lastloc = (va, 1, 0, 0)
else:
while loc is not None:
va = loc[0]
lastloc = loc
loc = vw.getLocation(va + loc[1])
if vw.isValidPointer(va+lastloc[1]):
va += lastloc[1]
self._navExpression(hex(va))
self._selectVa(va)
@vq_hotkey.hotkey('viv:nav:prevundef')
def _hotkey_nav_prevundef(self):
if self._canv_curva is None:
return
vw = self.vw
va = self._canv_curva
loc = vw.getLocation(va)
if loc is None:
# find previous defined location
while loc is None and vw.isValidPointer(va):
va -= 1
loc = vw.getLocation(va)
if loc is not None:
va = loc[0]
else:
# find previous undefined location
while loc is not None:
va = loc[0]
loc = vw.getLocation(va-1)
# if we fell off the end of a map
if vw.isValidPointer(va-1):
va -= 1
self._navExpression(hex(va))
self._selectVa(va)
@vq_hotkey.hotkey('viv:make:code')
def _hotkey_make_code(self):
if self._canv_curva is not None:
self.vw.makeCode(self._canv_curva)
@vq_hotkey.hotkey('viv:make:function')
def _hotkey_make_function(self):
if self._canv_curva is not None:
logger.debug('new function (manual): 0x%x', self._canv_curva)
self.vw.makeFunction(self._canv_curva)
@vq_hotkey.hotkey('viv:make:string')
def _hotkey_make_string(self):
if self._canv_curva is not None:
self.vw.makeString(self._canv_curva)
@vq_hotkey.hotkey('viv:make:pointer')
def _hotkey_make_pointer(self):
if self._canv_curva is not None:
self.vw.makePointer(self._canv_curva)
@vq_hotkey.hotkey('viv:make:unicode')
def _hotkey_make_unicode(self):
if self._canv_curva is not None:
self.vw.makeUnicode(self._canv_curva)
@vq_hotkey.hotkey('viv:undefine')
def _hotkey_undefine(self):
if self._canv_curva is not None:
self.vw.delLocation(self._canv_curva)
@vq_hotkey.hotkey('viv:getlocation')
def _hotkey_getlocation(self):
if self._canv_curva is not None:
self.vw.getVivGui().getLocation(self._canv_curva)
@vq_hotkey.hotkey('viv:setname')
def _hotkey_setname(self):
if self._canv_curva is not None:
self.vw.getVivGui().setVaName(self._canv_curva, parent=self)
@vq_hotkey.hotkey('viv:bookmark')
def _hotkey_bookmark(self):
if self._canv_curva is not None:
self.vw.getVivGui().addBookmark(self._canv_curva, parent=self)
@vq_hotkey.hotkey('viv:comment')
def _hotkey_comment(self):
if self._canv_curva is not None:
self.vw.getVivGui().setVaComment(self._canv_curva, parent=self)
@vq_hotkey.hotkey('viv:make:struct')
def _hotkey_make_struct(self):
if self._canv_curva is not None:
sname = self.vw.getVivGui().makeStruct(self._canv_curva)
if sname is not None:
self._last_sname = sname
@vq_hotkey.hotkey('viv:make:struct:again')
def _hotkey_make_struct_again(self):
if self._canv_curva is not None:
if self._last_sname is not None:
self.vw.makeStructure(self._canv_curva, self._last_sname)
@vq_hotkey.hotkey('viv:make:struct:multi')
def _hotkey_make_struct_multi(self, parent=None):
if self._canv_curva is not None:
if self._last_sname is not None:
number, ok = QInputDialog.getText(parent, 'Make Multiple Consecutive Structs', 'Number of Structures')
if ok:
curva = self._canv_curva
number = int(str(number), 0)
for count in range(number):
vs = self.vw.makeStructure(curva, self._last_sname)
curva += len(vs)
def makeStructAgainMulti(self, va, parent=None):
if parent is None:
parent = self
curcomment = self.vw.getComment(va)
if curcomment is None:
curcomment = ''
comment, ok = QInputDialog.getText(parent, 'Enter...', 'Comment', text=curcomment)
if ok:
self.vw.setComment(va, str(comment))
@vq_hotkey.hotkey('viv:make:number:one')
def _hotkey_make_number_one(self):
if self._canv_curva is not None:
self.vw.makeNumber(self._canv_curva, 1)
@vq_hotkey.hotkey('viv:make:number:two')
def _hotkey_make_number_two(self):
if self._canv_curva is not None:
self.vw.makeNumber(self._canv_curva, 2)
@vq_hotkey.hotkey('viv:make:number:four')
def _hotkey_make_number_four(self):
if self._canv_curva is not None:
self.vw.makeNumber(self._canv_curva, 4)
@vq_hotkey.hotkey('viv:make:number:eight')
def _hotkey_make_number_eight(self):
if self._canv_curva is not None:
self.vw.makeNumber(self._canv_curva, 8)
@vq_hotkey.hotkey('viv:make:number:sixteen')
def _hotkey_make_number_sixteen(self):
if self._canv_curva is not None:
self.vw.makeNumber(self._canv_curva, 16)
@vq_hotkey.hotkey('viv:preview:instr')
def _hotkey_preview_instr(self):
if self._canv_curva is not None:
self.vw.previewCode(self._canv_curva)
def getVaTag(self, va):
loc = self.mem.getLocation(va)
if loc is not None:
va = loc[L_VA]
return e_mem_canvas.VQMemoryCanvas.getVaTag(self, va)
class VQVivMemoryCanvas(VivCanvasBase):
    """Workspace memory canvas that grows its rendered range on scroll.

    On wheel events it asks the embedded page (via JavaScript) for the
    current scroll position; when the view hits the top/bottom it renders
    more of the containing memory map in that direction.
    """
    def _wheelEventCallback(self, data):
        # data is [viewport height, bottom-of-view offset, max scroll Y]
        # as produced by the JavaScript snippet in wheelEvent() below.
        smin = data[0]
        spos = data[1]
        smax = data[2]
        if not len(self._canv_rendvas):
            pass
        elif spos >= smax:
            # Scrolled to the bottom: append up to 128 more bytes, bounded
            # by what remains of the memory map holding the last rendered va.
            lastva, lastsize = self._canv_rendvas[-1]
            mapva, mapsize, mperm, mfname = self.vw.getMemoryMap(lastva)
            sizeremain = (mapva + mapsize) - (lastva + lastsize)
            if sizeremain:
                self.renderMemoryAppend(min(sizeremain, 128))
        elif spos == smin:
            # Scrolled to the top: prepend, bounded by the map start.
            firstva, firstsize = self._canv_rendvas[0]
            mapva, mapsize, mperm, mfname = self.vw.getMemoryMap(firstva)
            sizeremain = firstva - mapva
            if sizeremain:
                self.renderMemoryPrepend(min(sizeremain, 128))
    def wheelEvent(self, event):
        # Query the page's scroll state asynchronously; the result array is
        # delivered to _wheelEventCallback.
        page = self.page()
        page.runJavaScript('''
        var pcur = window.innerHeight + window.pageYOffset
        var scrollMaxY = Math.max(
            document.body.scrollHeight, document.documentElement.scrollHeight,
            document.body.offsetHeight, document.documentElement.offsetHeight,
            document.body.clientHeight, document.documentElement.clientHeight,
        );
        [window.innerHeight, pcur, scrollMaxY];
        ''', self._wheelEventCallback)
        return e_mem_canvas.VQMemoryCanvas.wheelEvent(self, event)
    def _clearColorMap(self):
        # Blank the injected #cmapstyle stylesheet, removing any coloring.
        page = self.page()
        page.runJavaScript('var node = document.querySelector("#cmapstyle"); node.innerHTML = "";')
    def _navExpression(self, expr):
        # Forward a navigation expression to the registered callback, if any.
        if self._canv_navcallback:
            self._canv_navcallback(expr)
    def initMemWindowMenu(self, va, menu):
        nav = self.parent() # our parent is always a VQVivMemoryWindow (nav target)
        viv_q_ctxmenu.buildContextMenu(self.vw, va=va, menu=menu, nav=nav)
    def _loc_helper(self, va):
        # Resolve va to (location start, offset into location); (va, 0) when
        # the workspace has no location there.
        nloc = self.mem.getLocation(va)
        if nloc is None:
            return va, 0
        nva, nvsz, nvt, nvti = nloc
        return (nva, va-nva)
class VQVivMemoryView(e_mem_qt.VQMemoryWindow, viv_base.VivEventCore):
    """Main workspace memory window.

    Combines the generic envi memory window with workspace event handling so
    the rendered view stays in sync with analysis changes (locations, names,
    comments, functions), and implements the shared-session leader/follower
    navigation feature when connected to a server.
    """
    def __init__(self, vw, vwqgui):
        self.vw = vw
        self.vwqgui = vwqgui
        # Leader/follower state for shared-session navigation.
        self._leading = False
        self._following = None
        self._follow_menu = None # init'd in handler below
        e_mem_qt.VQMemoryWindow.__init__(self, vw, syms=vw, parent=vwqgui, mwname='viv')
        viv_base.VivEventCore.__init__(self, vw)
        vwqgui.addEventCore(self)
        self.mem_canvas._canv_rend_middle = True
        self.addHotKeyTarget('viv:xrefsto', self._viv_xrefsto)
        self.addHotKey('x', 'viv:xrefsto')
    def getRendToolsMenu(self):
        # Extend the base render-tools menu with lead/follow actions when
        # the workspace is attached to a shared server.
        menu = e_mem_qt.VQMemoryWindow.getRendToolsMenu(self)
        if self.vw.server:
            leadact = QAction('lead', menu, checkable=True)
            def leadToggle():
                # Leading and following are mutually exclusive.
                self._leading = not self._leading
                self._follow_menu.setEnabled(not self._leading)
                if self._leading:
                    self._following = None
                    self.vw.iAmLeader(self.mwname)
                self.updateMemWindowTitle()
            def clearFollow():
                self._following = None
                self.updateMemWindowTitle()
            leadact.toggled.connect(leadToggle)
            menu.addAction(leadact)
            self._follow_menu = menu.addMenu('Follow..')
            self._follow_menu.addAction('(disable)', clearFollow)
        return menu
    def getExprTitle(self):
        # Window title: the resolved name for the address expression (when
        # it parses), annotated with the current lead/follow state.
        title = str(self.addr_entry.text())
        try:
            va = self.vw.parseExpression(title)
            name = self.vw.getName(va)
            if name is not None:
                title = name
        except Exception:
            title = 'expr error'
        if self._leading:
            title += ' (leading)'
        if self._following is not None:
            user, window = self._following
            title += ' (following %s %s)' % (user, window)
        return title
    def _getRenderVaSize(self):
        # Snap the render start address to the beginning of its location so
        # rendering never begins mid-location.
        addr, size = e_mem_qt.VQMemoryWindow._getRenderVaSize(self)
        if addr is None:
            return addr, size
        loc = self.vw.getLocation(addr)
        if loc is None:
            return addr, size
        return loc[L_VA], size
    def initMemoryCanvas(self, memobj, syms=None):
        return VQVivMemoryCanvas(memobj, syms=syms, parent=self)
    def _viv_xrefsto(self):
        # Hotkey handler: pop a floating xrefs-to view for the cursor address.
        if self.mem_canvas._canv_curva is not None:
            xrefs = self.vw.getXrefsTo(self.mem_canvas._canv_curva)
            if len(xrefs) == 0:
                self.vw.vprint('No xrefs found!')
                return
            title = 'Xrefs To: 0x%.8x' % self.mem_canvas._canv_curva
            view = viv_q_views.VQXrefView(self.vw, self.vwqgui, xrefs=xrefs, title=title)
            dock = self.vwqgui.vqDockWidget(view, floating=True)
            dock.resize(800, 600)
    def loadDefaultRenderers(self):
        import envi.memcanvas.renderers as e_render
        # Basic hex/integer renderers plus the workspace renderer (default).
        self.mem_canvas.addRenderer("bytes", e_render.ByteRend())
        self.mem_canvas.addRenderer("u_int_16", e_render.ShortRend())
        self.mem_canvas.addRenderer("u_int_32", e_render.LongRend())
        self.mem_canvas.addRenderer("u_int_64", e_render.QuadRend())
        vivrend = viv_rend.WorkspaceRenderer(self.vw)
        self.mem_canvas.addRenderer('Viv', vivrend)
        self.mem_canvas.setRenderer('Viv')
    def _updateFunction(self, fva):
        # Re-render every code block of the function.
        for cbva, cbsize, cbfva in self.vw.getFunctionBlocks(fva):
            self.mem_canvas.renderMemoryUpdate(cbva, cbsize)
    def VTE_IAMLEADER(self, vw, event, einfo):
        # NOTE(review): dead code -- this definition is shadowed by the
        # @idlethread VTE_IAMLEADER defined later in this class body.
        user, followname = einfo
    # The VWE_* handlers below re-render the affected bytes when the
    # workspace emits analysis-change events.
    def VWE_SYMHINT(self, vw, event, einfo):
        va, idx, hint = einfo
        self.mem_canvas.renderMemoryUpdate(va, 1)
    def VWE_ADDLOCATION(self, vw, event, einfo):
        va, size, ltype, tinfo = einfo
        self.mem_canvas.renderMemoryUpdate(va, size)
    def VWE_DELLOCATION(self, vw, event, einfo):
        va, size, ltype, tinfo = einfo
        self.mem_canvas.renderMemoryUpdate(va, size)
    def VWE_ADDFUNCTION(self, vw, event, einfo):
        va, meta = einfo
        self.mem_canvas.renderMemoryUpdate(va, 1)
    def VWE_SETFUNCMETA(self, vw, event, einfo):
        fva, key, val = einfo
        self._updateFunction(fva)
    def VWE_SETFUNCARGS(self, vw, event, einfo):
        fva, fargs = einfo
        self._updateFunction(fva)
    def VWE_COMMENT(self, vw, event, einfo):
        va, cmnt = einfo
        self.mem_canvas.renderMemoryUpdate(va, 1)
    @idlethread
    def VWE_SETNAME(self, vw, event, einfo):
        # A rename also refreshes every caller/referrer of the renamed va.
        va, name = einfo
        self.mem_canvas.renderMemoryUpdate(va, 1)
        for fromva, tova, rtype, rflag in self.vw.getXrefsTo(va):
            self.mem_canvas.renderMemoryUpdate(fromva, 1)
    @idlethread
    def VTE_IAMLEADER(self, vw, event, einfo):
        # A remote user announced themselves as a leader: offer a follow
        # action for their window in the Follow.. menu.
        user, fname = einfo
        def setFollow():
            self._following = einfo
            self.updateMemWindowTitle()
        self._follow_menu.addAction('%s - %s' % (user, fname), setFollow)
    @idlethread
    def VTE_FOLLOWME(self, vw, event, einfo):
        # Navigate only if we are currently following this exact leader window.
        user, fname, expr = einfo
        if self._following != (user, fname):
            return
        self.enviNavGoto(expr)
    @idlethread
    def enviNavGoto(self, expr, sizeexpr='256', rend=''):
        # When leading, broadcast our navigation to followers before moving.
        if self._leading:
            self.vw.followTheLeader(str(self.mwname), str(expr))
        return e_mem_qt.VQMemoryWindow.enviNavGoto(self, expr, sizeexpr=sizeexpr, rend=rend)
| true | true |
f70f4f9a2f8e77773f4d700170933be3c3cb6593 | 1,970 | py | Python | python_modules/dagster/dagster/core/execution/context/step.py | jake-billings/dagster | 7a1548a1f246c48189f3d8109e831b744bceb7d4 | [
"Apache-2.0"
] | 1 | 2019-07-15T17:34:04.000Z | 2019-07-15T17:34:04.000Z | python_modules/dagster/dagster/core/execution/context/step.py | jake-billings/dagster | 7a1548a1f246c48189f3d8109e831b744bceb7d4 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/execution/context/step.py | jake-billings/dagster | 7a1548a1f246c48189f3d8109e831b744bceb7d4 | [
"Apache-2.0"
] | null | null | null | from dagster import check
from .system import SystemStepExecutionContext
class StepExecutionContext(object):
    """User-facing wrapper around a ``SystemStepExecutionContext``.

    Exposes a read-only view of the system context through properties;
    framework authors can reach the underlying object via
    ``get_system_context``.
    """

    __slots__ = ['_system_step_execution_context', '_legacy_context']

    def __init__(self, system_step_execution_context):
        self._system_step_execution_context = check.inst_param(
            system_step_execution_context,
            'system_step_execution_context',
            SystemStepExecutionContext,
        )

    def get_system_context(self):
        '''
        This allows advanced users (e.g. framework authors) to punch through
        to the underlying system context.
        '''
        return self._system_step_execution_context

    @property
    def file_manager(self):
        """File manager of the underlying system context."""
        return self.get_system_context().file_manager

    @property
    def resources(self):
        """Resources available to this step."""
        return self.get_system_context().resources

    @property
    def run_id(self):
        """ID of the current pipeline run."""
        return self.get_system_context().run_id

    @property
    def environment_dict(self):
        """Environment configuration for the run."""
        return self.get_system_context().environment_dict

    @property
    def pipeline_def(self):
        """Definition of the pipeline being executed."""
        return self.get_system_context().pipeline_def

    @property
    def mode_def(self):
        """Mode definition the run was launched with."""
        return self.get_system_context().mode_def

    @property
    def log(self):
        """Logger for this step."""
        return self.get_system_context().log

    @property
    def solid_handle(self):
        """Handle of the solid being executed."""
        return self.get_system_context().solid_handle

    @property
    def solid(self):
        """The solid instance this step belongs to."""
        return self.pipeline_def.get_solid(self.solid_handle)

    @property
    def solid_def(self):
        """The definition of the solid this step belongs to."""
        return self.solid.definition

    def has_tag(self, key):
        """Whether the run carries the given tag."""
        return self.get_system_context().has_tag(key)

    def get_tag(self, key):
        """The value of the given run tag (or None)."""
        return self.get_system_context().get_tag(key)
| 28.142857 | 92 | 0.716751 | from dagster import check
from .system import SystemStepExecutionContext
class StepExecutionContext(object):
__slots__ = ['_system_step_execution_context', '_legacy_context']
def __init__(self, system_step_execution_context):
self._system_step_execution_context = check.inst_param(
system_step_execution_context,
'system_step_execution_context',
SystemStepExecutionContext,
)
@property
def file_manager(self):
return self._system_step_execution_context.file_manager
@property
def resources(self):
return self._system_step_execution_context.resources
@property
def run_id(self):
return self._system_step_execution_context.run_id
@property
def environment_dict(self):
return self._system_step_execution_context.environment_dict
@property
def pipeline_def(self):
return self._system_step_execution_context.pipeline_def
@property
def mode_def(self):
return self._system_step_execution_context.mode_def
@property
def log(self):
return self._system_step_execution_context.log
@property
def solid_handle(self):
return self._system_step_execution_context.solid_handle
@property
def solid(self):
return self._system_step_execution_context.pipeline_def.get_solid(self.solid_handle)
@property
def solid_def(self):
return self._system_step_execution_context.pipeline_def.get_solid(
self.solid_handle
).definition
def has_tag(self, key):
return self._system_step_execution_context.has_tag(key)
def get_tag(self, key):
return self._system_step_execution_context.get_tag(key)
def get_system_context(self):
return self._system_step_execution_context
| true | true |
f70f4fab1f7787b020a92ba4a182e8dd17836149 | 746 | py | Python | setup.py | yanoshi/yootto | 27b48777d922de53d98a05011864804bf6c74883 | [
"MIT"
] | 5 | 2021-01-11T10:59:39.000Z | 2021-03-22T07:07:45.000Z | setup.py | yanoshi/yootto | 27b48777d922de53d98a05011864804bf6c74883 | [
"MIT"
] | 4 | 2021-01-11T07:03:50.000Z | 2021-08-13T18:13:16.000Z | setup.py | yanoshi/yootto | 27b48777d922de53d98a05011864804bf6c74883 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
with open('requirements.txt') as requirements_file:
install_requirements = requirements_file.read().splitlines()
setup(
name="yootto",
version="0.1.5",
description="yootto(ヨーッと) is tiny YouTube Music unofficial uploader",
author="yanoshi",
author_email="",
url="https://github.com/yanoshi/yootto",
packages=find_packages(),
install_requires=install_requirements,
python_requires='>3.6',
entry_points={
"console_scripts": [
"yootto=yootto.core:main",
]
},
classifiers=[
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
)
| 29.84 | 73 | 0.647453 | from setuptools import setup, find_packages
with open('requirements.txt') as requirements_file:
install_requirements = requirements_file.read().splitlines()
setup(
name="yootto",
version="0.1.5",
description="yootto(ヨーッと) is tiny YouTube Music unofficial uploader",
author="yanoshi",
author_email="",
url="https://github.com/yanoshi/yootto",
packages=find_packages(),
install_requires=install_requirements,
python_requires='>3.6',
entry_points={
"console_scripts": [
"yootto=yootto.core:main",
]
},
classifiers=[
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
)
| true | true |
f70f5032fb844be87e596edfc4407fe50215388c | 14,322 | py | Python | synapse/events/utils.py | ricco386/synapse | 0d464935f1771d966ab388d803349c7a2bf45ad9 | [
"Apache-2.0"
] | null | null | null | synapse/events/utils.py | ricco386/synapse | 0d464935f1771d966ab388d803349c7a2bf45ad9 | [
"Apache-2.0"
] | null | null | null | synapse/events/utils.py | ricco386/synapse | 0d464935f1771d966ab388d803349c7a2bf45ad9 | [
"Apache-2.0"
] | 2 | 2020-03-03T18:34:52.000Z | 2022-03-31T11:06:18.000Z | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import re
from typing import Mapping, Union
from six import string_types
from frozendict import frozendict
from twisted.internet import defer
from synapse.api.constants import EventTypes, RelationTypes
from synapse.util.async_helpers import yieldable_gather_results
from . import EventBase
# Split strings on "." but not "\." -- the negative lookbehind assertion
# "(?<!\\)" makes the split skip any dot immediately preceded by a backslash.
# TODO: This is fast, but fails to handle "foo\\.bar" which should be treated
# as the literal fields "foo\" and "bar" but will instead be treated as
# "foo\\.bar"
SPLIT_FIELD_REGEX = re.compile(r"(?<!\\)\.")
def prune_event(event):
    """ Returns a pruned version of the given event, which removes all keys we
    don't know about or think could potentially be dodgy.
    This is used when we "redact" an event. We want to remove all fields that
    the user has specified, but we do want to keep necessary information like
    type, state_key etc.
    Args:
        event (FrozenEvent)
    Returns:
        FrozenEvent
    """
    pruned_event_dict = prune_event_dict(event.get_dict())
    from . import event_type_from_format_version
    # Rebuild an event of the same format version from the pruned dict,
    # carrying over the original event's internal metadata.
    pruned_event = event_type_from_format_version(event.format_version)(
        pruned_event_dict, event.internal_metadata.get_dict()
    )
    # Mark the event as redacted
    pruned_event.internal_metadata.redacted = True
    return pruned_event
def prune_event_dict(event_dict):
    """Redacts the event_dict in the same way as `prune_event`, except it
    operates on dicts rather than event objects
    Args:
        event_dict (dict)
    Returns:
        dict: A copy of the pruned event dict
    """
    # Top-level keys that survive redaction for every event type.
    allowed_keys = [
        "event_id",
        "sender",
        "room_id",
        "hashes",
        "signatures",
        "content",
        "type",
        "state_key",
        "depth",
        "prev_events",
        "prev_state",
        "auth_events",
        "origin",
        "origin_server_ts",
        "membership",
    ]
    # Content keys that additionally survive, keyed by event type.
    content_keys_by_type = {
        EventTypes.Member: ("membership",),
        EventTypes.Create: ("creator",),
        EventTypes.JoinRules: ("join_rule",),
        EventTypes.PowerLevels: (
            "users",
            "users_default",
            "events",
            "events_default",
            "state_default",
            "ban",
            "kick",
            "redact",
        ),
        EventTypes.Aliases: ("aliases",),
        EventTypes.RoomHistoryVisibility: ("history_visibility",),
    }
    old_content = event_dict["content"]
    keep = content_keys_by_type.get(event_dict["type"], ())
    new_content = {key: old_content[key] for key in keep if key in old_content}
    allowed_fields = {k: v for k, v in event_dict.items() if k in allowed_keys}
    allowed_fields["content"] = new_content
    # Only the timing/replacement bookkeeping survives in "unsigned".
    unsigned = {}
    allowed_fields["unsigned"] = unsigned
    event_unsigned = event_dict.get("unsigned", {})
    if "age_ts" in event_unsigned:
        unsigned["age_ts"] = event_unsigned["age_ts"]
    if "replaces_state" in event_unsigned:
        unsigned["replaces_state"] = event_unsigned["replaces_state"]
    return allowed_fields
def _copy_field(src, dst, field):
    """Copy the (possibly nested) field in 'src' to 'dst'.
    For example, if src={"foo":{"bar":5}} and dst={}, and field=["foo","bar"]
    then dst={"foo":{"bar":5}}. Missing paths are silently skipped.
    Args:
        src(dict): The dict to read from.
        dst(dict): The dict to modify.
        field(list<str>): List of keys to drill down to in 'src'.
    """
    if not field:  # this should be impossible
        return
    if len(field) == 1:  # common case e.g. 'origin_server_ts'
        key = field[0]
        if key in src:
            dst[key] = src[key]
        return
    # Nested field e.g. 'content.body': the last element is the key to copy,
    # everything before it is the path of containing dicts.
    key_to_move = field.pop(-1)
    node = src
    for part in field:  # e.g. part => "content"
        if part not in node or type(node[part]) not in (dict, frozendict):
            return
        node = node[part]
    if key_to_move not in node:
        return
    # Only now that the source value is known to exist do we build the nested
    # containers in dst, so no empty shells are ever left behind.
    out = dst
    for part in field:
        out = out.setdefault(part, {})
    out[key_to_move] = node[key_to_move]
def only_fields(dictionary, fields):
    """Return a new dict containing only the requested fields.
    If no fields are specified the input dictionary is returned unchanged.
    Entries may use '.' to name sub-fields (so 'content.body' selects the
    'body' field of the 'content' object); a literal '.' in a field name may
    be escaped with a backslash.
    Args:
        dictionary(dict): The dictionary to read from.
        fields(list<str>): A list of fields to copy over. Only shallow refs
            are taken.
    Returns:
        dict: A new dictionary with only the given fields. If fields was
        empty, the same dictionary is returned.
    """
    if not fields:
        return dictionary
    output = {}
    for spec in fields:
        # "content.body.thing\.with\.dots" -> ["content", "body", "thing.with.dots"]
        path = [part.replace(r"\.", r".") for part in SPLIT_FIELD_REGEX.split(spec)]
        _copy_field(dictionary, output, path)
    return output
def format_event_raw(d):
    """Event-format function that returns the event dict unmodified."""
    return d
def format_event_for_client_v1(d):
    """Apply the v2 client format, then restore the legacy v1 layout.
    v1 exposed the sender as "user_id" and hoisted several unsigned keys to
    the top level of the event dict. Mutates and returns d.
    """
    d = format_event_for_client_v2(d)
    sender = d.get("sender")
    if sender is not None:
        d["user_id"] = sender
    for key in (
        "age",
        "redacted_because",
        "replaces_state",
        "prev_content",
        "invite_room_state",
    ):
        if key in d["unsigned"]:
            d[key] = d["unsigned"][key]
    return d
def format_event_for_client_v2(d):
    """Strip federation-level keys that clients should not see.
    Mutates and returns the given event dict.
    """
    for key in (
        "auth_events",
        "prev_events",
        "hashes",
        "signatures",
        "depth",
        "origin",
        "prev_state",
    ):
        d.pop(key, None)
    return d
def format_event_for_client_v2_without_room_id(d):
    """As `format_event_for_client_v2`, but also drops the "room_id" key."""
    stripped = format_event_for_client_v2(d)
    stripped.pop("room_id", None)
    return stripped
def serialize_event(
    e,
    time_now_ms,
    as_client_event=True,
    event_format=format_event_for_client_v1,
    token_id=None,
    only_event_fields=None,
    is_invite=False,
):
    """Serialize event for clients
    Args:
        e (EventBase): the event to serialize; non-EventBase values (e.g.
            presence dicts) are returned unchanged.
        time_now_ms (int): current time, used to turn "age_ts" into "age".
        as_client_event (bool): whether to apply `event_format`.
        event_format: one of the format_event_* functions above.
        token_id: access token id; when it matches the event's token, the
            originating transaction_id is exposed in "unsigned".
        only_event_fields (list[str]|None): optional field filter.
        is_invite (bool): Whether this is an invite that is being sent to the
            invitee
    Returns:
        dict
    """
    # FIXME(erikj): To handle the case of presence events and the like
    if not isinstance(e, EventBase):
        return e
    time_now_ms = int(time_now_ms)
    # Should this strip out None's?
    d = {k: v for k, v in e.get_dict().items()}
    d["event_id"] = e.event_id
    # Convert the stored absolute timestamp into a relative age.
    if "age_ts" in d["unsigned"]:
        d["unsigned"]["age"] = time_now_ms - d["unsigned"]["age_ts"]
        del d["unsigned"]["age_ts"]
    # Recursively serialize the redaction event, if any.
    if "redacted_because" in e.unsigned:
        d["unsigned"]["redacted_because"] = serialize_event(
            e.unsigned["redacted_because"], time_now_ms, event_format=event_format
        )
    # Only the client that sent the event gets to see its transaction_id.
    if token_id is not None:
        if token_id == getattr(e.internal_metadata, "token_id", None):
            txn_id = getattr(e.internal_metadata, "txn_id", None)
            if txn_id is not None:
                d["unsigned"]["transaction_id"] = txn_id
    # If this is an invite for somebody else, then we don't care about the
    # invite_room_state as that's meant solely for the invitee. Other clients
    # will already have the state since they're in the room.
    if not is_invite:
        d["unsigned"].pop("invite_room_state", None)
    if as_client_event:
        d = event_format(d)
    if only_event_fields:
        if not isinstance(only_event_fields, list) or not all(
            isinstance(f, string_types) for f in only_event_fields
        ):
            raise TypeError("only_event_fields must be a list of strings")
        d = only_fields(d, only_event_fields)
    return d
class EventClientSerializer(object):
    """Serializes events that are to be sent to clients.
    This is used for bundling extra information with any events to be sent to
    clients.
    """
    def __init__(self, hs):
        self.store = hs.get_datastore()
        # MSC1849: bundling of relation aggregations into serialized events.
        self.experimental_msc1849_support_enabled = (
            hs.config.experimental_msc1849_support_enabled
        )
    @defer.inlineCallbacks
    def serialize_event(self, event, time_now, bundle_aggregations=True, **kwargs):
        """Serializes a single event.
        Args:
            event (EventBase)
            time_now (int): The current time in milliseconds
            bundle_aggregations (bool): Whether to bundle in related events
            **kwargs: Arguments to pass to `serialize_event`
        Returns:
            Deferred[dict]: The serialized event
        """
        # To handle the case of presence events and the like
        if not isinstance(event, EventBase):
            return event
        event_id = event.event_id
        serialized_event = serialize_event(event, time_now, **kwargs)
        # If MSC1849 is enabled then we need to look if there are any relations
        # we need to bundle in with the event.
        # Do not bundle relations if the event has been redacted
        if not event.internal_metadata.is_redacted() and (
            self.experimental_msc1849_support_enabled and bundle_aggregations
        ):
            annotations = yield self.store.get_aggregation_groups_for_event(event_id)
            references = yield self.store.get_relations_for_event(
                event_id, RelationTypes.REFERENCE, direction="f"
            )
            # Bundle annotation/reference aggregations under "unsigned".
            if annotations.chunk:
                r = serialized_event["unsigned"].setdefault("m.relations", {})
                r[RelationTypes.ANNOTATION] = annotations.to_dict()
            if references.chunk:
                r = serialized_event["unsigned"].setdefault("m.relations", {})
                r[RelationTypes.REFERENCE] = references.to_dict()
            edit = None
            if event.type == EventTypes.Message:
                edit = yield self.store.get_applicable_edit(event_id)
            if edit:
                # If there is an edit replace the content, preserving existing
                # relations.
                relations = event.content.get("m.relates_to")
                serialized_event["content"] = edit.content.get("m.new_content", {})
                if relations:
                    serialized_event["content"]["m.relates_to"] = relations
                else:
                    serialized_event["content"].pop("m.relates_to", None)
                # Record which edit was applied.
                r = serialized_event["unsigned"].setdefault("m.relations", {})
                r[RelationTypes.REPLACE] = {
                    "event_id": edit.event_id,
                    "origin_server_ts": edit.origin_server_ts,
                    "sender": edit.sender,
                }
        return serialized_event
    def serialize_events(self, events, time_now, **kwargs):
        """Serializes multiple events.
        Args:
            event (iter[EventBase])
            time_now (int): The current time in milliseconds
            **kwargs: Arguments to pass to `serialize_event`
        Returns:
            Deferred[list[dict]]: The list of serialized events
        """
        return yieldable_gather_results(
            self.serialize_event, events, time_now=time_now, **kwargs
        )
def copy_power_levels_contents(
    old_power_levels: Mapping[str, Union[int, Mapping[str, int]]]
):
    """Copy the content of a power_levels event, unfreezing frozendicts along the way
    Returns:
        dict: a plain-dict copy of the given content (one level of nesting deep)
    Raises:
        TypeError if the input does not look like a valid power levels event content
    """
    # BUGFIX: this used `collections.Mapping`, an alias deprecated since
    # Python 3.3 and removed in 3.10. Use the `typing.Mapping` already
    # imported by this module, which supports isinstance checks at runtime.
    if not isinstance(old_power_levels, Mapping):
        raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,))
    power_levels = {}
    for k, v in old_power_levels.items():
        if isinstance(v, int):
            power_levels[k] = v
            continue
        if isinstance(v, Mapping):
            # e.g. the "users"/"events" sub-dicts: copy into a fresh dict.
            power_levels[k] = h = {}
            for k1, v1 in v.items():
                # we should only have one level of nesting
                if not isinstance(v1, int):
                    raise TypeError(
                        "Invalid power_levels value for %s.%s: %r" % (k, k1, v1)
                    )
                h[k1] = v1
            continue
        raise TypeError("Invalid power_levels value for %s: %r" % (k, v))
    return power_levels
| 31.134783 | 86 | 0.624633 |
import collections
import re
from typing import Mapping, Union
from six import string_types
from frozendict import frozendict
from twisted.internet import defer
from synapse.api.constants import EventTypes, RelationTypes
from synapse.util.async_helpers import yieldable_gather_results
from . import EventBase
SPLIT_FIELD_REGEX = re.compile(r"(?<!\\)\.")
def prune_event(event):
pruned_event_dict = prune_event_dict(event.get_dict())
from . import event_type_from_format_version
pruned_event = event_type_from_format_version(event.format_version)(
pruned_event_dict, event.internal_metadata.get_dict()
)
pruned_event.internal_metadata.redacted = True
return pruned_event
def prune_event_dict(event_dict):
allowed_keys = [
"event_id",
"sender",
"room_id",
"hashes",
"signatures",
"content",
"type",
"state_key",
"depth",
"prev_events",
"prev_state",
"auth_events",
"origin",
"origin_server_ts",
"membership",
]
event_type = event_dict["type"]
new_content = {}
def add_fields(*fields):
for field in fields:
if field in event_dict["content"]:
new_content[field] = event_dict["content"][field]
if event_type == EventTypes.Member:
add_fields("membership")
elif event_type == EventTypes.Create:
add_fields("creator")
elif event_type == EventTypes.JoinRules:
add_fields("join_rule")
elif event_type == EventTypes.PowerLevels:
add_fields(
"users",
"users_default",
"events",
"events_default",
"state_default",
"ban",
"kick",
"redact",
)
elif event_type == EventTypes.Aliases:
add_fields("aliases")
elif event_type == EventTypes.RoomHistoryVisibility:
add_fields("history_visibility")
allowed_fields = {k: v for k, v in event_dict.items() if k in allowed_keys}
allowed_fields["content"] = new_content
unsigned = {}
allowed_fields["unsigned"] = unsigned
event_unsigned = event_dict.get("unsigned", {})
if "age_ts" in event_unsigned:
unsigned["age_ts"] = event_unsigned["age_ts"]
if "replaces_state" in event_unsigned:
unsigned["replaces_state"] = event_unsigned["replaces_state"]
return allowed_fields
def _copy_field(src, dst, field):
if len(field) == 0:
return
if len(field) == 1:
if field[0] in src:
dst[field[0]] = src[field[0]]
return
# parent dict in order to access the data. Drill down to the right dict.
key_to_move = field.pop(-1)
sub_dict = src
for sub_field in field: # e.g. sub_field => "content"
if sub_field in sub_dict and type(sub_dict[sub_field]) in [dict, frozendict]:
sub_dict = sub_dict[sub_field]
else:
return
if key_to_move not in sub_dict:
return
# Insert the key into the output dictionary, creating nested objects
# as required. We couldn't do this any earlier or else we'd need to delete
# the empty objects if the key didn't exist.
sub_out_dict = dst
for sub_field in field:
sub_out_dict = sub_out_dict.setdefault(sub_field, {})
sub_out_dict[key_to_move] = sub_dict[key_to_move]
def only_fields(dictionary, fields):
if len(fields) == 0:
return dictionary
split_fields = [SPLIT_FIELD_REGEX.split(f) for f in fields]
split_fields[:] = [
[f.replace(r"\.", r".") for f in field_array] for field_array in split_fields
]
output = {}
for field_array in split_fields:
_copy_field(dictionary, output, field_array)
return output
def format_event_raw(d):
return d
def format_event_for_client_v1(d):
d = format_event_for_client_v2(d)
sender = d.get("sender")
if sender is not None:
d["user_id"] = sender
copy_keys = (
"age",
"redacted_because",
"replaces_state",
"prev_content",
"invite_room_state",
)
for key in copy_keys:
if key in d["unsigned"]:
d[key] = d["unsigned"][key]
return d
def format_event_for_client_v2(d):
drop_keys = (
"auth_events",
"prev_events",
"hashes",
"signatures",
"depth",
"origin",
"prev_state",
)
for key in drop_keys:
d.pop(key, None)
return d
def format_event_for_client_v2_without_room_id(d):
d = format_event_for_client_v2(d)
d.pop("room_id", None)
return d
def serialize_event(
e,
time_now_ms,
as_client_event=True,
event_format=format_event_for_client_v1,
token_id=None,
only_event_fields=None,
is_invite=False,
):
if not isinstance(e, EventBase):
return e
time_now_ms = int(time_now_ms)
d = {k: v for k, v in e.get_dict().items()}
d["event_id"] = e.event_id
if "age_ts" in d["unsigned"]:
d["unsigned"]["age"] = time_now_ms - d["unsigned"]["age_ts"]
del d["unsigned"]["age_ts"]
if "redacted_because" in e.unsigned:
d["unsigned"]["redacted_because"] = serialize_event(
e.unsigned["redacted_because"], time_now_ms, event_format=event_format
)
if token_id is not None:
if token_id == getattr(e.internal_metadata, "token_id", None):
txn_id = getattr(e.internal_metadata, "txn_id", None)
if txn_id is not None:
d["unsigned"]["transaction_id"] = txn_id
# If this is an invite for somebody else, then we don't care about the
# will already have the state since they're in the room.
if not is_invite:
d["unsigned"].pop("invite_room_state", None)
if as_client_event:
d = event_format(d)
if only_event_fields:
if not isinstance(only_event_fields, list) or not all(
isinstance(f, string_types) for f in only_event_fields
):
raise TypeError("only_event_fields must be a list of strings")
d = only_fields(d, only_event_fields)
return d
class EventClientSerializer(object):
def __init__(self, hs):
self.store = hs.get_datastore()
self.experimental_msc1849_support_enabled = (
hs.config.experimental_msc1849_support_enabled
)
@defer.inlineCallbacks
def serialize_event(self, event, time_now, bundle_aggregations=True, **kwargs):
if not isinstance(event, EventBase):
return event
event_id = event.event_id
serialized_event = serialize_event(event, time_now, **kwargs)
if not event.internal_metadata.is_redacted() and (
self.experimental_msc1849_support_enabled and bundle_aggregations
):
annotations = yield self.store.get_aggregation_groups_for_event(event_id)
references = yield self.store.get_relations_for_event(
event_id, RelationTypes.REFERENCE, direction="f"
)
if annotations.chunk:
r = serialized_event["unsigned"].setdefault("m.relations", {})
r[RelationTypes.ANNOTATION] = annotations.to_dict()
if references.chunk:
r = serialized_event["unsigned"].setdefault("m.relations", {})
r[RelationTypes.REFERENCE] = references.to_dict()
edit = None
if event.type == EventTypes.Message:
edit = yield self.store.get_applicable_edit(event_id)
if edit:
relations = event.content.get("m.relates_to")
serialized_event["content"] = edit.content.get("m.new_content", {})
if relations:
serialized_event["content"]["m.relates_to"] = relations
else:
serialized_event["content"].pop("m.relates_to", None)
r = serialized_event["unsigned"].setdefault("m.relations", {})
r[RelationTypes.REPLACE] = {
"event_id": edit.event_id,
"origin_server_ts": edit.origin_server_ts,
"sender": edit.sender,
}
return serialized_event
def serialize_events(self, events, time_now, **kwargs):
return yieldable_gather_results(
self.serialize_event, events, time_now=time_now, **kwargs
)
def copy_power_levels_contents(
old_power_levels: Mapping[str, Union[int, Mapping[str, int]]]
):
if not isinstance(old_power_levels, collections.Mapping):
raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,))
power_levels = {}
for k, v in old_power_levels.items():
if isinstance(v, int):
power_levels[k] = v
continue
if isinstance(v, collections.Mapping):
power_levels[k] = h = {}
for k1, v1 in v.items():
if not isinstance(v1, int):
raise TypeError(
"Invalid power_levels value for %s.%s: %r" % (k, k1, v1)
)
h[k1] = v1
continue
raise TypeError("Invalid power_levels value for %s: %r" % (k, v))
return power_levels
| true | true |
f70f504cbb9797914e75e4bb397d591ff5e8ca28 | 3,224 | py | Python | src/sentry/tagstore/v2/models/grouptagvalue.py | apragacz/sf-sentry | 2fdd6c1195c29a1d401d1cd538c22ea68556699a | [
"BSD-3-Clause"
] | 1 | 2018-03-05T15:40:12.000Z | 2018-03-05T15:40:12.000Z | src/sentry/tagstore/v2/models/grouptagvalue.py | pkaminski/sentry | 27e948283e27d93ca5192ca7b580830e092c25c7 | [
"BSD-3-Clause"
] | 1 | 2018-08-22T16:49:48.000Z | 2018-08-22T16:49:48.000Z | src/sentry/tagstore/v2/models/grouptagvalue.py | pkaminski/sentry | 27e948283e27d93ca5192ca7b580830e092c25c7 | [
"BSD-3-Clause"
] | 1 | 2018-07-02T09:46:44.000Z | 2018-07-02T09:46:44.000Z | """
sentry.tagstore.v2.models.grouptagvalue
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2017 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
from django.db import models, router, transaction, DataError
from django.utils import timezone
from sentry.api.serializers import Serializer, register
from sentry.db.models import (
Model, BoundedPositiveIntegerField, BaseManager, FlexibleForeignKey, sane_repr
)
class GroupTagValue(Model):
    """
    Stores the total number of messages seen by a group matching
    the given filter.
    """
    __core__ = False
    # Plain integer ids (not FKs) so these rows can live in a separate store.
    project_id = BoundedPositiveIntegerField(db_index=True)
    group_id = BoundedPositiveIntegerField(db_index=True)
    # How many events in the group carried this key/value pair.
    times_seen = BoundedPositiveIntegerField(default=0)
    # FKs to the normalized tag key/value rows; exposed via the `key`/`value`
    # properties below.
    _key = FlexibleForeignKey('tagstore.TagKey', db_column='key_id')
    _value = FlexibleForeignKey('tagstore.TagValue', db_column='value_id')
    last_seen = models.DateTimeField(
        default=timezone.now, db_index=True, null=True)
    first_seen = models.DateTimeField(
        default=timezone.now, db_index=True, null=True)
    objects = BaseManager()
    class Meta:
        app_label = 'tagstore'
        unique_together = (('project_id', 'group_id', '_key', '_value'), )
        index_together = (('project_id', '_key', '_value', 'last_seen'), )
    __repr__ = sane_repr('project_id', 'group_id', '_key', '_value')
    @property
    def key(self):
        # The tag key string stored on the related TagKey row.
        return self._key.key
    @property
    def value(self):
        # The tag value string stored on the related TagValue row.
        return self._value.value
    def save(self, *args, **kwargs):
        # Default first_seen to last_seen so both are populated on creation.
        if not self.first_seen:
            self.first_seen = self.last_seen
        super(GroupTagValue, self).save(*args, **kwargs)
    def merge_counts(self, new_group):
        """Fold this row's counters into the matching row of *new_group*.
        ``objects.get`` may raise ``GroupTagValue.DoesNotExist`` if no matching
        destination row exists (that propagates); only ``DataError`` is
        swallowed here.
        """
        try:
            with transaction.atomic(using=router.db_for_write(GroupTagValue)):
                new_obj = GroupTagValue.objects.get(
                    group_id=new_group.id,
                    _key_id=self._key_id,
                    _value_id=self._value_id,
                )
                new_obj.update(
                    first_seen=min(new_obj.first_seen, self.first_seen),
                    last_seen=max(new_obj.last_seen, self.last_seen),
                    times_seen=new_obj.times_seen + self.times_seen,
                )
        except DataError:
            # it's possible to hit an out of range value for counters
            pass
@register(GroupTagValue)
class GroupTagValueSerializer(Serializer):
    """API serializer for ``GroupTagValue`` rows."""

    def get_attrs(self, item_list, user):
        from sentry import tagstore

        # Resolve the human-readable label for each key/value pair up front.
        return {
            item: {'name': tagstore.get_tag_value_label(item.key, item.value)}
            for item in item_list
        }

    def serialize(self, obj, attrs, user):
        from sentry import tagstore

        return {
            'id': six.text_type(obj.id),
            'name': attrs['name'],
            'key': tagstore.get_standardized_key(obj.key),
            'value': obj.value,
            'count': obj.times_seen,
            'lastSeen': obj.last_seen,
            'firstSeen': obj.first_seen,
        }
| 31.300971 | 82 | 0.623759 | from __future__ import absolute_import
import six
from django.db import models, router, transaction, DataError
from django.utils import timezone
from sentry.api.serializers import Serializer, register
from sentry.db.models import (
Model, BoundedPositiveIntegerField, BaseManager, FlexibleForeignKey, sane_repr
)
class GroupTagValue(Model):
    """
    Stores the total number of messages seen by a group matching
    the given filter.
    """
    __core__ = False
    # Plain integer ids (not FKs) so these rows can live in a separate store.
    project_id = BoundedPositiveIntegerField(db_index=True)
    group_id = BoundedPositiveIntegerField(db_index=True)
    # How many events in the group carried this key/value pair.
    times_seen = BoundedPositiveIntegerField(default=0)
    # FKs to the normalized tag key/value rows; exposed via the `key`/`value`
    # properties below.
    _key = FlexibleForeignKey('tagstore.TagKey', db_column='key_id')
    _value = FlexibleForeignKey('tagstore.TagValue', db_column='value_id')
    last_seen = models.DateTimeField(
        default=timezone.now, db_index=True, null=True)
    first_seen = models.DateTimeField(
        default=timezone.now, db_index=True, null=True)
    objects = BaseManager()
    class Meta:
        app_label = 'tagstore'
        unique_together = (('project_id', 'group_id', '_key', '_value'), )
        index_together = (('project_id', '_key', '_value', 'last_seen'), )
    __repr__ = sane_repr('project_id', 'group_id', '_key', '_value')
    @property
    def key(self):
        # The tag key string stored on the related TagKey row.
        return self._key.key
    @property
    def value(self):
        # The tag value string stored on the related TagValue row.
        return self._value.value
    def save(self, *args, **kwargs):
        # Default first_seen to last_seen so both are populated on creation.
        if not self.first_seen:
            self.first_seen = self.last_seen
        super(GroupTagValue, self).save(*args, **kwargs)
    def merge_counts(self, new_group):
        """Fold this row's counters into the matching row of *new_group*.
        ``objects.get`` may raise ``GroupTagValue.DoesNotExist`` if no matching
        destination row exists (that propagates); only ``DataError`` is
        swallowed here.
        """
        try:
            with transaction.atomic(using=router.db_for_write(GroupTagValue)):
                new_obj = GroupTagValue.objects.get(
                    group_id=new_group.id,
                    _key_id=self._key_id,
                    _value_id=self._value_id,
                )
                new_obj.update(
                    first_seen=min(new_obj.first_seen, self.first_seen),
                    last_seen=max(new_obj.last_seen, self.last_seen),
                    times_seen=new_obj.times_seen + self.times_seen,
                )
        except DataError:
            # it's possible to hit an out of range value for counters
            pass
@register(GroupTagValue)
class GroupTagValueSerializer(Serializer):
    """API serializer for ``GroupTagValue`` rows."""

    def get_attrs(self, item_list, user):
        from sentry import tagstore

        # Resolve the human-readable label for each key/value pair up front.
        return {
            item: {'name': tagstore.get_tag_value_label(item.key, item.value)}
            for item in item_list
        }

    def serialize(self, obj, attrs, user):
        from sentry import tagstore

        return {
            'id': six.text_type(obj.id),
            'name': attrs['name'],
            'key': tagstore.get_standardized_key(obj.key),
            'value': obj.value,
            'count': obj.times_seen,
            'lastSeen': obj.last_seen,
            'firstSeen': obj.first_seen,
        }
| true | true |
f70f507eef49a7c8fbcf5752023ba89d96e7f5af | 30,399 | py | Python | robotframework-ls/src/robotframework_ls/impl/libspec_manager.py | emanlove/robotframework-lsp | b0d8862d24e3bc1b72d8ce9412a671571520e7d9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | robotframework-ls/src/robotframework_ls/impl/libspec_manager.py | emanlove/robotframework-lsp | b0d8862d24e3bc1b72d8ce9412a671571520e7d9 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-09-30T15:40:29.000Z | 2021-09-30T15:40:29.000Z | robotframework-ls/src/robotframework_ls/impl/libspec_manager.py | emanlove/robotframework-lsp | b0d8862d24e3bc1b72d8ce9412a671571520e7d9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import os
import sys
from robotframework_ls.constants import NULL
from robocode_ls_core.robotframework_log import get_logger
log = get_logger(__name__)
def _normfile(filename):
return os.path.abspath(os.path.normpath(os.path.normcase(filename)))
def _get_libspec_mutex_name(libspec_filename):
    """Build the system-mutex name guarding reads/writes of a .libspec file."""
    from robocode_ls_core.system_mutex import generate_mutex_name

    normalized = _norm_filename(libspec_filename)
    # Prefix the mutex with the spec's stem (e.g. "BuiltIn_") for readability.
    stem = os.path.splitext(os.path.basename(normalized))[0]
    return generate_mutex_name(normalized, prefix="%s_" % (stem,))
def _get_additional_info_filename(spec_filename):
additional_info_filename = os.path.join(spec_filename + ".m")
return additional_info_filename
def _load_library_doc_and_mtime(spec_filename, obtain_mutex=True):
    """Parse a .libspec file and return ``(library_doc, mtime)``.

    :param obtain_mutex:
        Should be False if this is part of a bigger operation that already
        has the spec_filename mutex.
    :return: ``(library_doc, mtime)`` on success, or ``None`` if the spec
        could not be read/parsed (the error is logged).
    """
    from robotframework_ls.impl import robot_specbuilder
    from robocode_ls_core.system_mutex import timed_acquire_mutex

    # Hold the spec's mutex (unless the caller already has it) so we never
    # read a file that is concurrently being regenerated.
    if obtain_mutex:
        mutex_ctx = timed_acquire_mutex(_get_libspec_mutex_name(spec_filename))
    else:
        mutex_ctx = NULL

    with mutex_ctx:
        spec_builder = robot_specbuilder.SpecDocBuilder()
        try:
            modified_time = os.path.getmtime(spec_filename)
            library_doc = spec_builder.build(spec_filename)
        except Exception:
            log.exception("Error when loading spec info from: %s", spec_filename)
            return None
        return library_doc, modified_time
def _load_lib_info(spec_filename, can_regenerate):
    """Load a .libspec file into a ``_LibInfo`` (or ``None`` on failure)."""
    loaded = _load_library_doc_and_mtime(spec_filename)
    if loaded is None:
        return None
    return _LibInfo(loaded[0], loaded[1], spec_filename, can_regenerate)
# Keys of the JSON "additional info" sidecar file written next to each
# generated .libspec (see _dump_spec_filename_additional_info).
_IS_BUILTIN = "is_builtin"
_SOURCE_TO_MTIME = "source_to_mtime"
_UNABLE_TO_LOAD = "unable_to_load"
def _create_updated_source_to_mtime(library_doc):
    """Map each source file referenced by *library_doc* to its current mtime.

    Collects the library's own source plus every keyword's source and returns
    ``{normalized_source: mtime}``; sources whose mtime cannot be read are
    logged and skipped.
    """
    all_sources = {library_doc.source}
    all_sources.update(keyword.source for keyword in library_doc.keywords)
    all_sources.discard(None)

    source_to_mtime = {}
    for src in all_sources:
        try:
            src = _normfile(src)
            source_to_mtime[src] = os.path.getmtime(src)
        except Exception:
            log.exception("Unable to load source for file: %s", src)
    return source_to_mtime
def _create_additional_info(spec_filename, is_builtin, obtain_mutex=True):
    """Compute the "additional info" dict persisted next to a .libspec file.

    Contains ``_IS_BUILTIN`` and, for non-builtin libraries, either a
    ``_SOURCE_TO_MTIME`` mapping (used later to detect stale specs) or
    ``_UNABLE_TO_LOAD``. On any unexpected error an empty dict is returned
    (the error is logged).
    """
    try:
        info = {_IS_BUILTIN: is_builtin}
        if is_builtin:
            # Builtin mtimes aren't checked
            # (on a new version we update the folder).
            return info

        loaded = _load_library_doc_and_mtime(spec_filename, obtain_mutex=obtain_mutex)
        if loaded is None:
            info[_UNABLE_TO_LOAD] = True
            return info

        info[_SOURCE_TO_MTIME] = _create_updated_source_to_mtime(loaded[0])
        return info
    except:
        log.exception(
            "Error creating additional info for spec filename: %s", spec_filename
        )
        return {}
def _load_spec_filename_additional_info(spec_filename):
    """
    Loads additional information given a spec filename.

    :return dict: the sidecar JSON contents (keys such as ``_IS_BUILTIN`` /
        ``_SOURCE_TO_MTIME``), or ``{}`` if the sidecar cannot be read.
    """
    import json

    # Compute the name outside the try so the log line below can always use it.
    additional_info_filename = _get_additional_info_filename(spec_filename)
    try:
        with open(additional_info_filename, "r") as stream:
            return json.load(stream)
    except Exception:
        # Note: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit. A missing/garbled sidecar is expected
        # occasionally; log and fall back to "no info".
        log.exception("Unable to load source mtimes from: %s", additional_info_filename)
        return {}
def _dump_spec_filename_additional_info(spec_filename, is_builtin, obtain_mutex=True):
    """Write the JSON "additional info" sidecar for *spec_filename*.

    See ``_create_additional_info`` for the contents written.
    """
    import json

    info = _create_additional_info(spec_filename, is_builtin, obtain_mutex=obtain_mutex)
    target = _get_additional_info_filename(spec_filename)
    with open(target, "w") as stream:
        json.dump(info, stream, indent=2, sort_keys=True)
class _LibInfo(object):
__slots__ = [
"library_doc",
"mtime",
"_spec_filename",
"_additional_info",
"_invalid",
"_can_regenerate",
]
def __init__(self, library_doc, mtime, spec_filename, can_regenerate):
"""
:param library_doc:
:param mtime:
:param spec_filename:
:param bool can_regenerate:
False means that the information from this file can't really be
regenerated (i.e.: this is a spec file from a library or created
by the user).
"""
assert library_doc
assert mtime
assert spec_filename
self.library_doc = library_doc
self.mtime = mtime
self._can_regenerate = can_regenerate
self._spec_filename = spec_filename
self._additional_info = None
self._invalid = False
def verify_sources_sync(self):
"""
:return bool:
True if everything is ok and this library info can be used. Otherwise,
the spec file and the _LibInfo must be recreated.
"""
if not self._can_regenerate:
# This means that this info was generated by a library or the user
# himself, thus, we can't regenerate it.
return True
if self._invalid: # Once invalid, always invalid.
return False
additional_info = self._additional_info
if additional_info is None:
additional_info = _load_spec_filename_additional_info(self._spec_filename)
if additional_info.get(_IS_BUILTIN, False):
return True
source_to_mtime = additional_info.get(_SOURCE_TO_MTIME)
if source_to_mtime is None:
# Nothing to validate...
return True
updated_source_to_mtime = _create_updated_source_to_mtime(self.library_doc)
if source_to_mtime != updated_source_to_mtime:
log.info(
"Library %s is invalid. Current source to mtime:\n%s\nChanged from:\n%s"
% (self.library_doc.name, source_to_mtime, updated_source_to_mtime)
)
self._invalid = True
return False
return True
def _norm_filename(path):
return os.path.normcase(os.path.realpath(os.path.abspath(path)))
class _FolderInfo(object):
    """Tracks the .libspec files found in one folder (optionally recursive).
    ``libspec_filename_to_info`` maps normalized .libspec paths to a lazily
    loaded ``_LibInfo`` (``None`` means "found but not loaded yet"); the dict
    is replaced wholesale (copy-on-write) so readers never see partial state.
    """
    def __init__(self, folder_path, recursive):
        self.folder_path = folder_path
        self.recursive = recursive
        # filename -> _LibInfo or None (lazy-loaded elsewhere).
        self.libspec_filename_to_info = {}
        # NULL no-op object until start_watch() succeeds.
        self._watch = NULL
    def start_watch(self, observer, notifier):
        """Start watching the folder for .libspec changes (idempotent)."""
        if self._watch is NULL:
            if not os.path.isdir(self.folder_path):
                if not os.path.exists(self.folder_path):
                    log.info(
                        "Trying to track changes in path which does not exist: %s",
                        self.folder_path,
                    )
                else:
                    log.info(
                        "Trying to track changes in path which is not a folder: %s",
                        self.folder_path,
                    )
                return
            log.info("Tracking folder for changes: %s", self.folder_path)
            from robocode_ls_core.watchdog_wrapper import PathInfo
            folder_path = self.folder_path
            self._watch = observer.notify_on_extensions_change(
                [PathInfo(folder_path, recursive=self.recursive)],
                ["libspec"],
                notifier.on_change,
                (self._on_change_spec,),
            )
    def _on_change_spec(self, spec_file):
        # Filesystem-watch callback for a single .libspec file.
        spec_file = _norm_filename(spec_file)
        # Just add/remove that specific spec file from the tracked list.
        libspec_filename_to_info = self.libspec_filename_to_info.copy()
        if os.path.exists(spec_file):
            # Map to None so the info is lazily reloaded on next access.
            libspec_filename_to_info[spec_file] = None
        else:
            libspec_filename_to_info.pop(spec_file, None)
        self.libspec_filename_to_info = libspec_filename_to_info
    def synchronize(self):
        """Rescan the folder, reusing still-valid entries (errors are logged)."""
        try:
            self.libspec_filename_to_info = self._collect_libspec_info(
                [self.folder_path],
                self.libspec_filename_to_info,
                recursive=self.recursive,
            )
        except Exception:
            log.exception("Error when synchronizing: %s", self.folder_path)
    def dispose(self):
        """Stop watching and drop all tracked spec info."""
        watch = self._watch
        self._watch = NULL
        watch.stop_tracking()
        self.libspec_filename_to_info = {}
    def _collect_libspec_info(self, folders, old_libspec_filename_to_info, recursive):
        """Scan *folders* for .libspec files and return a fresh filename->info dict.
        Entries from *old_libspec_filename_to_info* are kept when the file's
        mtime is unchanged; changed or new files map to None (reload lazily).
        """
        seen_libspec_files = set()
        if recursive:
            for folder in folders:
                if os.path.isdir(folder):
                    for root, _dirs, files in os.walk(folder):
                        for filename in files:
                            if filename.lower().endswith(".libspec"):
                                seen_libspec_files.add(os.path.join(root, filename))
        else:
            for folder in folders:
                if os.path.isdir(folder):
                    for filename in os.listdir(folder):
                        if filename.lower().endswith(".libspec"):
                            seen_libspec_files.add(os.path.join(folder, filename))
        new_libspec_filename_to_info = {}
        for filename in seen_libspec_files:
            filename = _norm_filename(filename)
            info = old_libspec_filename_to_info.get(filename)
            if info is not None:
                try:
                    curr_mtime = os.path.getmtime(filename)
                except:
                    # it was deleted in the meanwhile...
                    continue
                else:
                    if info.mtime != curr_mtime:
                        # The spec filename mtime changed, so, set to None
                        # to reload it.
                        info = None
            new_libspec_filename_to_info[filename] = info
        return new_libspec_filename_to_info
class LibspecManager(object):
    """
    Used to manage the libspec files.
    .libspec files are searched in the following directories:
    - PYTHONPATH folders (not recursive)
    - Workspace folders (recursive -- notifications from the LSP)
    - ${user}.robotframework-ls/specs/${python_hash} (not recursive)
    It searches for .libspec files in the folders tracked and provides the
    keywords that are available from those (properly caching data as needed).
    """
    @classmethod
    def get_internal_libspec_dir(cls):
        """Directory for auto-generated specs, namespaced by a hash of the
        interpreter path and the Robot Framework version (so different
        interpreters/RF versions don't share caches)."""
        from robotframework_ls import robot_config
        home = robot_config.get_robotframework_ls_home()
        pyexe = sys.executable
        if not isinstance(pyexe, bytes):
            pyexe = pyexe.encode("utf-8")
        import hashlib
        digest = hashlib.sha256(pyexe).hexdigest()[:8]
        try:
            import robot
            v = str(robot.get_version())
        except:
            # Robot Framework not importable: still provide a usable dir.
            v = "unknown"
        # Note: _v1: information on the mtime of the libspec sources now available.
        return os.path.join(home, "specs", "%s_%s" % (digest, v))
    @classmethod
    def get_internal_builtins_libspec_dir(cls, internal_libspec_dir=None):
        """Subfolder of the internal specs dir holding builtin-library specs."""
        return os.path.join(
            internal_libspec_dir or cls.get_internal_libspec_dir(), "builtins"
        )
    def __init__(self, builtin_libspec_dir=None, user_libspec_dir=None):
        """
        :param builtin_libspec_dir:
        :param user_libspec_dir:
            Only to be used in tests (to regenerate the builtins)!
        """
        from robocode_ls_core import watchdog_wrapper
        from concurrent import futures
        from multiprocessing import cpu_count
        # NOTE(review): max_workers ends up a float here; ThreadPoolExecutor
        # tolerates it (only used in comparisons), but an int() cast would be
        # cleaner -- confirm intent.
        self._thread_pool = futures.ThreadPoolExecutor(
            max_workers=(cpu_count() * 1.2) + 1
        )
        self._observer = watchdog_wrapper.create_observer()
        # Debounced notifier: batches file-change events for 0.5s.
        self._spec_changes_notifier = watchdog_wrapper.create_notifier(
            self._on_spec_file_changed, timeout=0.5
        )
        self._libspec_dir = self.get_internal_libspec_dir()
        self._user_libspec_dir = user_libspec_dir or os.path.join(
            self._libspec_dir, "user"
        )
        self._builtins_libspec_dir = (
            builtin_libspec_dir
            or self.get_internal_builtins_libspec_dir(self._libspec_dir)
        )
        log.debug("User libspec dir: %s", self._user_libspec_dir)
        log.debug("Builtins libspec dir: %s", self._builtins_libspec_dir)
        try:
            os.makedirs(self._user_libspec_dir)
        except:
            # Ignore exception if it's already created.
            pass
        try:
            os.makedirs(self._builtins_libspec_dir)
        except:
            # Ignore exception if it's already created.
            pass
        # Spec info found in the workspace
        self._workspace_folder_uri_to_folder_info = {}
        self._additional_pythonpath_folder_to_folder_info = {}
        # Spec info found in the pythonpath
        pythonpath_folder_to_folder_info = {}
        for path in sys.path:
            if path and os.path.isdir(path):
                pythonpath_folder_to_folder_info[path] = _FolderInfo(
                    path, recursive=False
                )
        self._pythonpath_folder_to_folder_info = pythonpath_folder_to_folder_info
        # Spec info found in internal dirs (autogenerated)
        self._internal_folder_to_folder_info = {
            self._user_libspec_dir: _FolderInfo(
                self._user_libspec_dir, recursive=False
            ),
            self._builtins_libspec_dir: _FolderInfo(
                self._builtins_libspec_dir, recursive=False
            ),
        }
        # Must be set from the outside world when needed.
        self.config = None
        self._synchronize()
        self._gen_builtin_libraries()
    @property
    def config(self):
        # The language-server configuration last assigned (or None).
        return self._config
    @config.setter
    def config(self, config):
        """Store the config and reconcile the tracked additional PYTHONPATH
        folders with the OPTION_ROBOT_PYTHONPATH setting."""
        from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_PYTHONPATH
        self._config = config
        existing_entries = set(self._additional_pythonpath_folder_to_folder_info.keys())
        if config is not None:
            pythonpath_entries = set(
                config.get_setting(OPTION_ROBOT_PYTHONPATH, list, [])
            )
            # Add newly-configured folders; drop ones no longer configured.
            for new_pythonpath_entry in pythonpath_entries:
                if new_pythonpath_entry not in existing_entries:
                    self.add_additional_pythonpath_folder(new_pythonpath_entry)
            for old_entry in existing_entries:
                if old_entry not in pythonpath_entries:
                    self.remove_additional_pythonpath_folder(old_entry)
        self.synchronize_additional_pythonpath_folders()
    @property
    def user_libspec_dir(self):
        # Where specs generated on demand (non-builtin libraries) are written.
        return self._user_libspec_dir
    def _on_spec_file_changed(self, spec_file, target):
        # Notifier callback; `target` is the per-folder _on_change_spec handler
        # registered in _FolderInfo.start_watch.
        log.debug("File change detected: %s", spec_file)
        target(spec_file)
    def add_workspace_folder(self, folder_uri):
        """Start tracking .libspec files (recursively) under a workspace folder."""
        from robocode_ls_core import uris
        if folder_uri not in self._workspace_folder_uri_to_folder_info:
            log.debug("Added workspace folder: %s", folder_uri)
            # Copy-on-write so concurrent readers see a consistent dict.
            cp = self._workspace_folder_uri_to_folder_info.copy()
            folder_info = cp[folder_uri] = _FolderInfo(
                uris.to_fs_path(folder_uri), recursive=True
            )
            self._workspace_folder_uri_to_folder_info = cp
            folder_info.start_watch(self._observer, self._spec_changes_notifier)
            folder_info.synchronize()
        else:
            log.debug("Workspace folder already added: %s", folder_uri)
    def remove_workspace_folder(self, folder_uri):
        """Stop tracking a workspace folder (no-op if not tracked)."""
        if folder_uri in self._workspace_folder_uri_to_folder_info:
            log.debug("Removed workspace folder: %s", folder_uri)
            cp = self._workspace_folder_uri_to_folder_info.copy()
            folder_info = cp.pop(folder_uri, NULL)
            folder_info.dispose()
            self._workspace_folder_uri_to_folder_info = cp
        else:
            log.debug("Workspace folder already removed: %s", folder_uri)
    def add_additional_pythonpath_folder(self, folder_path):
        """Start tracking .libspec files (recursively) in an extra PYTHONPATH folder."""
        if folder_path not in self._additional_pythonpath_folder_to_folder_info:
            log.debug("Added additional pythonpath folder: %s", folder_path)
            cp = self._additional_pythonpath_folder_to_folder_info.copy()
            folder_info = cp[folder_path] = _FolderInfo(folder_path, recursive=True)
            self._additional_pythonpath_folder_to_folder_info = cp
            folder_info.start_watch(self._observer, self._spec_changes_notifier)
            folder_info.synchronize()
        else:
            log.debug("Additional pythonpath folder already added: %s", folder_path)
    def remove_additional_pythonpath_folder(self, folder_path):
        """Stop tracking an extra PYTHONPATH folder (no-op if not tracked)."""
        if folder_path in self._additional_pythonpath_folder_to_folder_info:
            log.debug("Removed additional pythonpath folder: %s", folder_path)
            cp = self._additional_pythonpath_folder_to_folder_info.copy()
            folder_info = cp.pop(folder_path, NULL)
            folder_info.dispose()
            self._additional_pythonpath_folder_to_folder_info = cp
        else:
            log.debug("Additional pythonpath folder already removed: %s", folder_path)
    def _gen_builtin_libraries(self):
        """
        Generates .libspec files for the libraries builtin (if needed).
        """
        import time
        try:
            from robotframework_ls.impl import robot_constants
            from robocode_ls_core.system_mutex import timed_acquire_mutex
            from robocode_ls_core.system_mutex import generate_mutex_name
            initial_time = time.time()
            wait_for = []
            # System-wide mutex so concurrent language servers don't generate
            # the same builtin specs at the same time.
            with timed_acquire_mutex(
                generate_mutex_name(
                    _norm_filename(self._builtins_libspec_dir), prefix="gen_builtins_"
                ),
                timeout=100,
            ):
                for libname in robot_constants.STDLIBS:
                    library_info = self.get_library_info(libname, create=False)
                    if library_info is None:
                        # Generate missing builtin specs in parallel.
                        wait_for.append(
                            self._thread_pool.submit(
                                self._create_libspec, libname, is_builtin=True
                            )
                        )
                for future in wait_for:
                    future.result()
            if wait_for:
                log.debug(
                    "Total time to generate builtins: %.2fs"
                    % (time.time() - initial_time)
                )
                self.synchronize_internal_libspec_folders()
        except:
            log.exception("Error creating builtin libraries.")
    def synchronize_workspace_folders(self):
        """Rescan workspace folders (starting their watches if needed)."""
        for folder_info in self._workspace_folder_uri_to_folder_info.values():
            folder_info.start_watch(self._observer, self._spec_changes_notifier)
            folder_info.synchronize()
    def synchronize_pythonpath_folders(self):
        """Rescan sys.path folders (starting their watches if needed)."""
        for folder_info in self._pythonpath_folder_to_folder_info.values():
            folder_info.start_watch(self._observer, self._spec_changes_notifier)
            folder_info.synchronize()
    def synchronize_additional_pythonpath_folders(self):
        """Rescan the configured extra PYTHONPATH folders."""
        for folder_info in self._additional_pythonpath_folder_to_folder_info.values():
            folder_info.start_watch(self._observer, self._spec_changes_notifier)
            folder_info.synchronize()
    def synchronize_internal_libspec_folders(self):
        """Rescan the auto-generated (user/builtins) spec folders."""
        for folder_info in self._internal_folder_to_folder_info.values():
            folder_info.start_watch(self._observer, self._spec_changes_notifier)
            folder_info.synchronize()
    def _synchronize(self):
        """
        Updates the internal caches related to the tracked .libspec files found.
        This can be a slow call as it may traverse the whole workspace folders
        hierarchy, so, it should be used only during startup to fill the initial
        info.
        """
        self.synchronize_workspace_folders()
        self.synchronize_pythonpath_folders()
        self.synchronize_additional_pythonpath_folders()
        self.synchronize_internal_libspec_folders()
    def _iter_lib_info(self):
        """
        :rtype: generator(_LibInfo)
        """
        # Note: the iteration order is important (first ones are visited earlier
        # and have higher priority).
        iter_in = []
        for (_uri, info) in self._workspace_folder_uri_to_folder_info.items():
            iter_in.append((info.libspec_filename_to_info, False))
        for (_uri, info) in self._pythonpath_folder_to_folder_info.items():
            iter_in.append((info.libspec_filename_to_info, False))
        for (_uri, info) in self._additional_pythonpath_folder_to_folder_info.items():
            iter_in.append((info.libspec_filename_to_info, False))
        for (_uri, info) in self._internal_folder_to_folder_info.items():
            # Only specs in the internal dirs can be regenerated by us.
            iter_in.append((info.libspec_filename_to_info, True))
        for filename_to_info, can_regenerate in iter_in:
            for spec_filename, info in list(filename_to_info.items()):
                if info is None:
                    # Lazy-load (and cache) the spec info on first access.
                    info = filename_to_info[spec_filename] = _load_lib_info(
                        spec_filename, can_regenerate
                    )
                # Note: we could end up yielding a library with the same name
                # multiple times due to its scope. It's up to the caller to
                # validate that.
                # Note: we also check if there are keywords available... in
                # some cases we may create libraries for namespace packages
                # (i.e.: empty folders) which don't really have anything -- in
                # this case, this isn't a valid library.
                if (
                    info is not None
                    and info.library_doc is not None
                    and info.library_doc.keywords
                ):
                    yield info
    def get_library_names(self):
        """Return the sorted, de-duplicated names of all known libraries."""
        return sorted(
            set(lib_info.library_doc.name for lib_info in self._iter_lib_info())
        )
    def _create_libspec(
        self,
        libname,
        env=None,
        log_time=True,
        cwd=None,
        additional_path=None,
        is_builtin=False,
    ):
        """
        Generates a .libspec for *libname* by running ``robot.libdoc`` in a
        subprocess (blocking).
        :param str libname:
        :return bool: True if the spec was (re)generated, False otherwise
            (errors are logged, not raised).
        """
        import time
        from robotframework_ls.impl import robot_constants
        from robocode_ls_core.subprocess_wrapper import subprocess
        from robocode_ls_core.system_mutex import timed_acquire_mutex
        curtime = time.time()
        try:
            try:
                call = [sys.executable]
                call.extend("-m robot.libdoc --format XML:HTML".split())
                if additional_path:
                    if os.path.exists(additional_path):
                        call.extend(["-P", additional_path])
                additional_pythonpath_entries = list(
                    self._additional_pythonpath_folder_to_folder_info.keys()
                )
                for entry in list(additional_pythonpath_entries):
                    if os.path.exists(entry):
                        call.extend(["-P", entry])
                call.append(libname)
                # Builtins go to the builtins dir; everything else to user dir.
                libspec_dir = self._user_libspec_dir
                if libname in robot_constants.STDLIBS:
                    libspec_dir = self._builtins_libspec_dir
                libspec_filename = os.path.join(libspec_dir, libname + ".libspec")
                with timed_acquire_mutex(
                    _get_libspec_mutex_name(libspec_filename)
                ):  # Could fail.
                    call.append(libspec_filename)
                    mtime = -1
                    try:
                        mtime = os.path.getmtime(libspec_filename)
                    except:
                        pass
                    log.debug(
                        "Generating libspec for: %s.\nCwd:%s\nCommand line:\n%s",
                        libname,
                        cwd,
                        " ".join(call),
                    )
                    try:
                        try:
                            # Note: stdout is always subprocess.PIPE in this call.
                            subprocess.check_output(
                                call,
                                stderr=subprocess.STDOUT,
                                stdin=subprocess.PIPE,
                                env=env,
                                cwd=cwd,
                            )
                        except OSError as e:
                            log.exception("Error calling: %s", call)
                            # We may have something as: Ignore OSError: [WinError 6] The handle is invalid,
                            # give the result based on whether the file changed on disk.
                            try:
                                if mtime != os.path.getmtime(libspec_filename):
                                    _dump_spec_filename_additional_info(
                                        libspec_filename,
                                        is_builtin=is_builtin,
                                        obtain_mutex=False,
                                    )
                                    return True
                            except:
                                pass
                            log.debug("Not retrying after OSError failure.")
                            return False
                    except subprocess.CalledProcessError as e:
                        log.exception(
                            "Error creating libspec: %s. Output:\n%s", libname, e.output
                        )
                        return False
                    # Success: persist the source-mtime sidecar alongside the spec.
                    _dump_spec_filename_additional_info(
                        libspec_filename, is_builtin=is_builtin, obtain_mutex=False
                    )
                    return True
            except Exception:
                log.exception("Error creating libspec: %s", libname)
                return False
        finally:
            if log_time:
                delta = time.time() - curtime
                log.debug("Took: %.2fs to generate info for: %s" % (delta, libname))
    def dispose(self):
        """Stop filesystem watching/notification.
        Note: self._thread_pool is not shut down here.
        """
        self._observer.dispose()
        self._spec_changes_notifier.dispose()
    def _do_create_libspec_on_get(self, libname, current_doc_uri):
        """Generate a libspec for *libname* (resolving relative paths against
        *current_doc_uri*) and, on success, refresh the internal spec folders.
        :return bool: True if the spec was generated.
        """
        from robocode_ls_core import uris
        additional_path = None
        abspath = None
        cwd = None
        if current_doc_uri is not None:
            cwd = os.path.dirname(uris.to_fs_path(current_doc_uri))
            if not cwd or not os.path.isdir(cwd):
                cwd = None
        if os.path.isabs(libname):
            abspath = libname
        elif current_doc_uri is not None:
            # relative path: let's make it absolute
            fs_path = os.path.dirname(uris.to_fs_path(current_doc_uri))
            abspath = os.path.abspath(os.path.join(fs_path, libname))
        if abspath:
            # Pass the containing dir to libdoc via -P and strip the extension.
            additional_path = os.path.dirname(abspath)
            libname = os.path.basename(libname)
            if libname.lower().endswith((".py", ".class", ".java")):
                libname = os.path.splitext(libname)[0]
        if self._create_libspec(libname, additional_path=additional_path, cwd=cwd):
            self.synchronize_internal_libspec_folders()
            return True
        return False
    def get_library_info(self, libname, create=True, current_doc_uri=None):
        """
        :param libname:
            It may be a library name, a relative path to a .py file or an
            absolute path to a .py file.
        :param create: if True, try to (re)generate a missing/stale spec.
        :param current_doc_uri: used to resolve relative library paths.
        :rtype: LibraryDoc
        """
        libname_lower = libname.lower()
        if libname_lower.endswith((".py", ".class", ".java")):
            # NOTE(review): splitext is applied to `libname` (not
            # `libname_lower`), so for mixed-case filenames the comparison
            # below is no longer case-insensitive -- confirm whether this
            # should be splitext(libname_lower).
            libname_lower = os.path.splitext(libname)[0]
        if "/" in libname_lower or "\\" in libname_lower:
            libname_lower = os.path.basename(libname_lower)
        for lib_info in self._iter_lib_info():
            library_doc = lib_info.library_doc
            if library_doc.name and library_doc.name.lower() == libname_lower:
                if not lib_info.verify_sources_sync():
                    if create:
                        # Found but it's not in sync. Try to regenerate (don't proceed
                        # because we don't want to match a lower priority item, so,
                        # regenerate and get from the cache without creating).
                        self._do_create_libspec_on_get(libname, current_doc_uri)
                        # Note: get even if it if was not created (we may match
                        # a lower priority library).
                        return self.get_library_info(
                            libname, create=False, current_doc_uri=current_doc_uri
                        )
                    else:
                        # Not in sync and it should not be created, just skip it.
                        continue
                else:
                    return library_doc
        if create:
            if self._do_create_libspec_on_get(libname, current_doc_uri):
                return self.get_library_info(
                    libname, create=False, current_doc_uri=current_doc_uri
                )
        log.debug("Unable to find library named: %s", libname)
        return None
| 37.437192 | 107 | 0.596467 | import os
import sys
from robotframework_ls.constants import NULL
from robocode_ls_core.robotframework_log import get_logger
log = get_logger(__name__)
def _normfile(filename):
return os.path.abspath(os.path.normpath(os.path.normcase(filename)))
def _get_libspec_mutex_name(libspec_filename):
    """Build the system-mutex name guarding reads/writes of a .libspec file."""
    from robocode_ls_core.system_mutex import generate_mutex_name

    normalized = _norm_filename(libspec_filename)
    # Prefix the mutex with the spec's stem (e.g. "BuiltIn_") for readability.
    stem = os.path.splitext(os.path.basename(normalized))[0]
    return generate_mutex_name(normalized, prefix="%s_" % (stem,))
def _get_additional_info_filename(spec_filename):
additional_info_filename = os.path.join(spec_filename + ".m")
return additional_info_filename
def _load_library_doc_and_mtime(spec_filename, obtain_mutex=True):
    """Parse a .libspec file and return ``(library_doc, mtime)``.

    :param obtain_mutex:
        Should be False if this is part of a bigger operation that already
        has the spec_filename mutex.
    :return: ``(library_doc, mtime)`` on success, or ``None`` if the spec
        could not be read/parsed (the error is logged).
    """
    from robotframework_ls.impl import robot_specbuilder
    from robocode_ls_core.system_mutex import timed_acquire_mutex

    # Hold the spec's mutex (unless the caller already has it) so we never
    # read a file that is concurrently being regenerated.
    if obtain_mutex:
        mutex_ctx = timed_acquire_mutex(_get_libspec_mutex_name(spec_filename))
    else:
        mutex_ctx = NULL

    with mutex_ctx:
        spec_builder = robot_specbuilder.SpecDocBuilder()
        try:
            modified_time = os.path.getmtime(spec_filename)
            library_doc = spec_builder.build(spec_filename)
        except Exception:
            log.exception("Error when loading spec info from: %s", spec_filename)
            return None
        return library_doc, modified_time
def _load_lib_info(spec_filename, can_regenerate):
    """Load a .libspec file into a ``_LibInfo`` (or ``None`` on failure)."""
    loaded = _load_library_doc_and_mtime(spec_filename)
    if loaded is None:
        return None
    return _LibInfo(loaded[0], loaded[1], spec_filename, can_regenerate)
# Keys of the JSON "additional info" sidecar file written next to each
# generated .libspec (see _dump_spec_filename_additional_info).
_IS_BUILTIN = "is_builtin"
_SOURCE_TO_MTIME = "source_to_mtime"
_UNABLE_TO_LOAD = "unable_to_load"
def _create_updated_source_to_mtime(library_doc):
    """Map each source file referenced by *library_doc* to its current mtime.

    Collects the library's own source plus every keyword's source and returns
    ``{normalized_source: mtime}``; sources whose mtime cannot be read are
    logged and skipped.
    """
    all_sources = {library_doc.source}
    all_sources.update(keyword.source for keyword in library_doc.keywords)
    all_sources.discard(None)

    source_to_mtime = {}
    for src in all_sources:
        try:
            src = _normfile(src)
            source_to_mtime[src] = os.path.getmtime(src)
        except Exception:
            log.exception("Unable to load source for file: %s", src)
    return source_to_mtime
def _create_additional_info(spec_filename, is_builtin, obtain_mutex=True):
    """Compute the "additional info" dict persisted next to a .libspec file.

    Contains ``_IS_BUILTIN`` and, for non-builtin libraries, either a
    ``_SOURCE_TO_MTIME`` mapping (used later to detect stale specs) or
    ``_UNABLE_TO_LOAD``. On any unexpected error an empty dict is returned
    (the error is logged).
    """
    try:
        info = {_IS_BUILTIN: is_builtin}
        if is_builtin:
            # Builtin mtimes aren't checked
            # (on a new version we update the folder).
            return info

        loaded = _load_library_doc_and_mtime(spec_filename, obtain_mutex=obtain_mutex)
        if loaded is None:
            info[_UNABLE_TO_LOAD] = True
            return info

        info[_SOURCE_TO_MTIME] = _create_updated_source_to_mtime(loaded[0])
        return info
    except:
        log.exception(
            "Error creating additional info for spec filename: %s", spec_filename
        )
        return {}
def _load_spec_filename_additional_info(spec_filename):
    """
    Loads additional information given a spec filename.

    :return dict: the sidecar JSON contents (keys such as ``_IS_BUILTIN`` /
        ``_SOURCE_TO_MTIME``), or ``{}`` if the sidecar cannot be read.
    """
    import json

    # Compute the name outside the try so the log line below can always use it.
    additional_info_filename = _get_additional_info_filename(spec_filename)
    try:
        with open(additional_info_filename, "r") as stream:
            return json.load(stream)
    except Exception:
        # Note: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit. A missing/garbled sidecar is expected
        # occasionally; log and fall back to "no info".
        log.exception("Unable to load source mtimes from: %s", additional_info_filename)
        return {}
def _dump_spec_filename_additional_info(spec_filename, is_builtin, obtain_mutex=True):
    """Write the JSON "additional info" sidecar for *spec_filename*.

    See ``_create_additional_info`` for the contents written.
    """
    import json

    info = _create_additional_info(spec_filename, is_builtin, obtain_mutex=obtain_mutex)
    target = _get_additional_info_filename(spec_filename)
    with open(target, "w") as stream:
        json.dump(info, stream, indent=2, sort_keys=True)
class _LibInfo(object):
    """Caches a loaded library doc and detects when its sources changed on disk."""

    __slots__ = [
        "library_doc",
        "mtime",
        "_spec_filename",
        "_additional_info",
        "_invalid",
        "_can_regenerate",
    ]
    def __init__(self, library_doc, mtime, spec_filename, can_regenerate):
        """
        :param library_doc: the parsed libdoc information (must be truthy).
        :param mtime: mtime of the .libspec file when it was loaded.
        :param spec_filename: path of the .libspec file.
        :param can_regenerate: False when the spec was provided by a library or
            the user (in which case it is never considered out of sync).
        """
        assert library_doc
        assert mtime
        assert spec_filename
        self.library_doc = library_doc
        self.mtime = mtime
        self._can_regenerate = can_regenerate
        self._spec_filename = spec_filename
        # Lazily-loaded persisted info (source -> mtime map, builtin flag).
        self._additional_info = None
        # Sticky flag: once detected out of sync, stays invalid.
        self._invalid = False
    def verify_sources_sync(self):
        """Return True if the cached library doc still matches its sources.

        Returns False when any source file's mtime changed since the spec was
        generated (the result is then cached in ``_invalid``).
        """
        if not self._can_regenerate:
            # This means that this info was generated by a library or the user
            # himself, thus, we can't regenerate it.
            return True
        if self._invalid:
            return False
        additional_info = self._additional_info
        if additional_info is None:
            additional_info = _load_spec_filename_additional_info(self._spec_filename)
            if additional_info.get(_IS_BUILTIN, False):
                # Builtins are never checked against source mtimes.
                return True
        source_to_mtime = additional_info.get(_SOURCE_TO_MTIME)
        if source_to_mtime is None:
            # No recorded mtimes: nothing to compare against, assume in sync.
            return True
        updated_source_to_mtime = _create_updated_source_to_mtime(self.library_doc)
        if source_to_mtime != updated_source_to_mtime:
            log.info(
                "Library %s is invalid. Current source to mtime:\n%s\nChanged from:\n%s"
                % (self.library_doc.name, source_to_mtime, updated_source_to_mtime)
            )
            self._invalid = True
            return False
        return True
def _norm_filename(path):
    """Return *path* made absolute, symlink-resolved and case-normalized."""
    absolute = os.path.abspath(path)
    resolved = os.path.realpath(absolute)
    return os.path.normcase(resolved)
class _FolderInfo(object):
    """Tracks the .libspec files found in one folder (optionally recursively)."""

    def __init__(self, folder_path, recursive):
        self.folder_path = folder_path
        self.recursive = recursive
        # Maps normalized .libspec filename -> _LibInfo (None = not loaded yet).
        self.libspec_filename_to_info = {}
        # ``NULL`` is a no-op sentinel object (defined elsewhere in the package).
        self._watch = NULL
    def start_watch(self, observer, notifier):
        """Start watching the folder for .libspec changes (idempotent)."""
        if self._watch is NULL:
            if not os.path.isdir(self.folder_path):
                if not os.path.exists(self.folder_path):
                    log.info(
                        "Trying to track changes in path which does not exist: %s",
                        self.folder_path,
                    )
                else:
                    log.info(
                        "Trying to track changes in path which is not a folder: %s",
                        self.folder_path,
                    )
                return
            log.info("Tracking folder for changes: %s", self.folder_path)
            from robocode_ls_core.watchdog_wrapper import PathInfo
            folder_path = self.folder_path
            self._watch = observer.notify_on_extensions_change(
                [PathInfo(folder_path, recursive=self.recursive)],
                ["libspec"],
                notifier.on_change,
                (self._on_change_spec,),
            )
    def _on_change_spec(self, spec_file):
        """Watcher callback: add/drop the cache entry for one changed spec file."""
        spec_file = _norm_filename(spec_file)
        # Copy-on-write so concurrent readers see a consistent dict.
        libspec_filename_to_info = self.libspec_filename_to_info.copy()
        if os.path.exists(spec_file):
            # Mark as not-loaded; it will be (re)loaded lazily.
            libspec_filename_to_info[spec_file] = None
        else:
            libspec_filename_to_info.pop(spec_file, None)
        self.libspec_filename_to_info = libspec_filename_to_info
    def synchronize(self):
        """Rescan the folder and refresh the libspec cache (best effort)."""
        try:
            self.libspec_filename_to_info = self._collect_libspec_info(
                [self.folder_path],
                self.libspec_filename_to_info,
                recursive=self.recursive,
            )
        except Exception:
            log.exception("Error when synchronizing: %s", self.folder_path)
    def dispose(self):
        """Stop watching the folder and drop all cached info."""
        watch = self._watch
        self._watch = NULL
        watch.stop_tracking()
        self.libspec_filename_to_info = {}
    def _collect_libspec_info(self, folders, old_libspec_filename_to_info, recursive):
        """Scan *folders* for .libspec files, reusing still-fresh cached entries."""
        seen_libspec_files = set()
        if recursive:
            for folder in folders:
                if os.path.isdir(folder):
                    for root, _dirs, files in os.walk(folder):
                        for filename in files:
                            if filename.lower().endswith(".libspec"):
                                seen_libspec_files.add(os.path.join(root, filename))
        else:
            for folder in folders:
                if os.path.isdir(folder):
                    for filename in os.listdir(folder):
                        if filename.lower().endswith(".libspec"):
                            seen_libspec_files.add(os.path.join(folder, filename))
        new_libspec_filename_to_info = {}
        for filename in seen_libspec_files:
            filename = _norm_filename(filename)
            info = old_libspec_filename_to_info.get(filename)
            if info is not None:
                try:
                    curr_mtime = os.path.getmtime(filename)
                except:
                    # File vanished between the scan and the stat: drop it.
                    continue
                else:
                    # Invalidate the cached entry if the file changed on disk.
                    if info.mtime != curr_mtime:
                        info = None
            new_libspec_filename_to_info[filename] = info
        return new_libspec_filename_to_info
class LibspecManager(object):
    """
    Manages the .libspec files used to provide Robot Framework library info.

    Specs are cached per folder (workspace folders, (additional) PYTHONPATH
    entries and the internal user/builtins dirs) and are (re)generated by
    running ``python -m robot.libdoc`` in a thread pool, guarded by
    inter-process mutexes so multiple language-server instances cooperate.
    """

    @classmethod
    def get_internal_libspec_dir(cls):
        """Return the internal specs dir, keyed by interpreter + RF version."""
        from robotframework_ls import robot_config
        home = robot_config.get_robotframework_ls_home()
        pyexe = sys.executable
        if not isinstance(pyexe, bytes):
            pyexe = pyexe.encode("utf-8")
        import hashlib
        # Short digest of the interpreter path keeps specs per-interpreter.
        digest = hashlib.sha256(pyexe).hexdigest()[:8]
        try:
            import robot
            v = str(robot.get_version())
        except:
            v = "unknown"
        return os.path.join(home, "specs", "%s_%s" % (digest, v))
    @classmethod
    def get_internal_builtins_libspec_dir(cls, internal_libspec_dir=None):
        """Return the dir holding specs for Robot Framework builtin libraries."""
        return os.path.join(
            internal_libspec_dir or cls.get_internal_libspec_dir(), "builtins"
        )
    def __init__(self, builtin_libspec_dir=None, user_libspec_dir=None):
        """
        :param builtin_libspec_dir: override for the builtins specs dir (tests).
        :param user_libspec_dir: override for the user specs dir (tests).
        """
        from robocode_ls_core import watchdog_wrapper
        from concurrent import futures
        from multiprocessing import cpu_count
        self._thread_pool = futures.ThreadPoolExecutor(
            max_workers=(cpu_count() * 1.2) + 1
        )
        self._observer = watchdog_wrapper.create_observer()
        self._spec_changes_notifier = watchdog_wrapper.create_notifier(
            self._on_spec_file_changed, timeout=0.5
        )
        self._libspec_dir = self.get_internal_libspec_dir()
        self._user_libspec_dir = user_libspec_dir or os.path.join(
            self._libspec_dir, "user"
        )
        self._builtins_libspec_dir = (
            builtin_libspec_dir
            or self.get_internal_builtins_libspec_dir(self._libspec_dir)
        )
        log.debug("User libspec dir: %s", self._user_libspec_dir)
        log.debug("Builtins libspec dir: %s", self._builtins_libspec_dir)
        try:
            os.makedirs(self._user_libspec_dir)
        except:
            # Ignore exception if it's already created.
            pass
        try:
            os.makedirs(self._builtins_libspec_dir)
        except:
            # Ignore exception if it's already created.
            pass
        # Each dict maps a location to its _FolderInfo libspec cache.
        self._workspace_folder_uri_to_folder_info = {}
        self._additional_pythonpath_folder_to_folder_info = {}
        pythonpath_folder_to_folder_info = {}
        for path in sys.path:
            if path and os.path.isdir(path):
                pythonpath_folder_to_folder_info[path] = _FolderInfo(
                    path, recursive=False
                )
        self._pythonpath_folder_to_folder_info = pythonpath_folder_to_folder_info
        self._internal_folder_to_folder_info = {
            self._user_libspec_dir: _FolderInfo(
                self._user_libspec_dir, recursive=False
            ),
            self._builtins_libspec_dir: _FolderInfo(
                self._builtins_libspec_dir, recursive=False
            ),
        }
        # Setting ``config`` also synchronizes the additional pythonpath dirs.
        self.config = None
        self._synchronize()
        self._gen_builtin_libraries()
    @property
    def config(self):
        return self._config
    @config.setter
    def config(self, config):
        """Apply the client config: diff the configured extra PYTHONPATH entries
        against the currently tracked ones and add/remove folders accordingly."""
        from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_PYTHONPATH
        self._config = config
        existing_entries = set(self._additional_pythonpath_folder_to_folder_info.keys())
        if config is not None:
            pythonpath_entries = set(
                config.get_setting(OPTION_ROBOT_PYTHONPATH, list, [])
            )
            for new_pythonpath_entry in pythonpath_entries:
                if new_pythonpath_entry not in existing_entries:
                    self.add_additional_pythonpath_folder(new_pythonpath_entry)
            for old_entry in existing_entries:
                if old_entry not in pythonpath_entries:
                    self.remove_additional_pythonpath_folder(old_entry)
        self.synchronize_additional_pythonpath_folders()
    @property
    def user_libspec_dir(self):
        return self._user_libspec_dir
    def _on_spec_file_changed(self, spec_file, target):
        # Dispatched by the notifier; ``target`` is the folder's callback.
        log.debug("File change detected: %s", spec_file)
        target(spec_file)
    def add_workspace_folder(self, folder_uri):
        """Start tracking .libspec files below a workspace folder (by URI)."""
        from robocode_ls_core import uris
        if folder_uri not in self._workspace_folder_uri_to_folder_info:
            log.debug("Added workspace folder: %s", folder_uri)
            # Copy-on-write so readers never see a half-updated dict.
            cp = self._workspace_folder_uri_to_folder_info.copy()
            folder_info = cp[folder_uri] = _FolderInfo(
                uris.to_fs_path(folder_uri), recursive=True
            )
            self._workspace_folder_uri_to_folder_info = cp
            folder_info.start_watch(self._observer, self._spec_changes_notifier)
            folder_info.synchronize()
        else:
            log.debug("Workspace folder already added: %s", folder_uri)
    def remove_workspace_folder(self, folder_uri):
        """Stop tracking a workspace folder (no-op when not tracked)."""
        if folder_uri in self._workspace_folder_uri_to_folder_info:
            log.debug("Removed workspace folder: %s", folder_uri)
            cp = self._workspace_folder_uri_to_folder_info.copy()
            folder_info = cp.pop(folder_uri, NULL)
            folder_info.dispose()
            self._workspace_folder_uri_to_folder_info = cp
        else:
            log.debug("Workspace folder already removed: %s", folder_uri)
    def add_additional_pythonpath_folder(self, folder_path):
        """Start tracking .libspec files below an extra PYTHONPATH folder."""
        if folder_path not in self._additional_pythonpath_folder_to_folder_info:
            log.debug("Added additional pythonpath folder: %s", folder_path)
            cp = self._additional_pythonpath_folder_to_folder_info.copy()
            folder_info = cp[folder_path] = _FolderInfo(folder_path, recursive=True)
            self._additional_pythonpath_folder_to_folder_info = cp
            folder_info.start_watch(self._observer, self._spec_changes_notifier)
            folder_info.synchronize()
        else:
            log.debug("Additional pythonpath folder already added: %s", folder_path)
    def remove_additional_pythonpath_folder(self, folder_path):
        """Stop tracking an extra PYTHONPATH folder (no-op when not tracked)."""
        if folder_path in self._additional_pythonpath_folder_to_folder_info:
            log.debug("Removed additional pythonpath folder: %s", folder_path)
            cp = self._additional_pythonpath_folder_to_folder_info.copy()
            folder_info = cp.pop(folder_path, NULL)
            folder_info.dispose()
            self._additional_pythonpath_folder_to_folder_info = cp
        else:
            log.debug("Additional pythonpath folder already removed: %s", folder_path)
    def _gen_builtin_libraries(self):
        """Generate specs for all Robot Framework builtin libraries still missing.

        Guarded by an inter-process mutex so only one process generates them;
        the actual generation is fanned out on the thread pool.
        """
        import time
        try:
            from robotframework_ls.impl import robot_constants
            from robocode_ls_core.system_mutex import timed_acquire_mutex
            from robocode_ls_core.system_mutex import generate_mutex_name
            initial_time = time.time()
            wait_for = []
            with timed_acquire_mutex(
                generate_mutex_name(
                    _norm_filename(self._builtins_libspec_dir), prefix="gen_builtins_"
                ),
                timeout=100,
            ):
                for libname in robot_constants.STDLIBS:
                    library_info = self.get_library_info(libname, create=False)
                    if library_info is None:
                        wait_for.append(
                            self._thread_pool.submit(
                                self._create_libspec, libname, is_builtin=True
                            )
                        )
                for future in wait_for:
                    future.result()
            if wait_for:
                log.debug(
                    "Total time to generate builtins: %.2fs"
                    % (time.time() - initial_time)
                )
                self.synchronize_internal_libspec_folders()
        except:
            log.exception("Error creating builtin libraries.")
    def synchronize_workspace_folders(self):
        for folder_info in self._workspace_folder_uri_to_folder_info.values():
            folder_info.start_watch(self._observer, self._spec_changes_notifier)
            folder_info.synchronize()
    def synchronize_pythonpath_folders(self):
        for folder_info in self._pythonpath_folder_to_folder_info.values():
            folder_info.start_watch(self._observer, self._spec_changes_notifier)
            folder_info.synchronize()
    def synchronize_additional_pythonpath_folders(self):
        for folder_info in self._additional_pythonpath_folder_to_folder_info.values():
            folder_info.start_watch(self._observer, self._spec_changes_notifier)
            folder_info.synchronize()
    def synchronize_internal_libspec_folders(self):
        for folder_info in self._internal_folder_to_folder_info.values():
            folder_info.start_watch(self._observer, self._spec_changes_notifier)
            folder_info.synchronize()
    def _synchronize(self):
        """Rescan every tracked folder category for .libspec changes."""
        self.synchronize_workspace_folders()
        self.synchronize_pythonpath_folders()
        self.synchronize_additional_pythonpath_folders()
        self.synchronize_internal_libspec_folders()
    def _iter_lib_info(self):
        """Yield the usable _LibInfo entries from all tracked folders.

        Only the internal (user/builtins) specs are marked as regenerable.
        Entries whose library doc failed to load or has no keywords are
        skipped (e.g. specs created for namespace packages / empty folders).
        """
        iter_in = []
        for (_uri, info) in self._workspace_folder_uri_to_folder_info.items():
            iter_in.append((info.libspec_filename_to_info, False))
        for (_uri, info) in self._pythonpath_folder_to_folder_info.items():
            iter_in.append((info.libspec_filename_to_info, False))
        for (_uri, info) in self._additional_pythonpath_folder_to_folder_info.items():
            iter_in.append((info.libspec_filename_to_info, False))
        for (_uri, info) in self._internal_folder_to_folder_info.items():
            iter_in.append((info.libspec_filename_to_info, True))
        for filename_to_info, can_regenerate in iter_in:
            for spec_filename, info in list(filename_to_info.items()):
                if info is None:
                    # Lazily load entries flagged as not-yet-loaded.
                    info = filename_to_info[spec_filename] = _load_lib_info(
                        spec_filename, can_regenerate
                    )
                # Note: we also check if there are keywords available... in
                # some cases we may create libraries for namespace packages
                # (i.e.: empty folders) which don't really have anything.
                if (
                    info is not None
                    and info.library_doc is not None
                    and info.library_doc.keywords
                ):
                    yield info
    def get_library_names(self):
        """Return the sorted, de-duplicated names of all known libraries."""
        return sorted(
            set(lib_info.library_doc.name for lib_info in self._iter_lib_info())
        )
    def _create_libspec(
        self,
        libname,
        env=None,
        log_time=True,
        cwd=None,
        additional_path=None,
        is_builtin=False,
    ):
        """Run ``python -m robot.libdoc`` to generate a .libspec for *libname*.

        :param additional_path: extra dir passed to libdoc's ``-P`` (used for
            libraries referenced relative to the current document).
        :return: True if the spec was (apparently) generated, False otherwise.
        """
        import time
        from robotframework_ls.impl import robot_constants
        from robocode_ls_core.subprocess_wrapper import subprocess
        from robocode_ls_core.system_mutex import timed_acquire_mutex
        curtime = time.time()
        try:
            try:
                call = [sys.executable]
                call.extend("-m robot.libdoc --format XML:HTML".split())
                if additional_path:
                    if os.path.exists(additional_path):
                        call.extend(["-P", additional_path])
                additional_pythonpath_entries = list(
                    self._additional_pythonpath_folder_to_folder_info.keys()
                )
                for entry in list(additional_pythonpath_entries):
                    if os.path.exists(entry):
                        call.extend(["-P", entry])
                call.append(libname)
                # Builtins go to the internal builtins dir, everything else to
                # the per-user dir.
                libspec_dir = self._user_libspec_dir
                if libname in robot_constants.STDLIBS:
                    libspec_dir = self._builtins_libspec_dir
                libspec_filename = os.path.join(libspec_dir, libname + ".libspec")
                with timed_acquire_mutex(
                    _get_libspec_mutex_name(libspec_filename)
                ): # Could fail.
                    call.append(libspec_filename)
                    # Remember the previous mtime so we can tell whether libdoc
                    # actually wrote the file even if the call itself errors.
                    mtime = -1
                    try:
                        mtime = os.path.getmtime(libspec_filename)
                    except:
                        pass
                    log.debug(
                        "Generating libspec for: %s.\nCwd:%s\nCommand line:\n%s",
                        libname,
                        cwd,
                        " ".join(call),
                    )
                    try:
                        try:
                            # Note: stdout is always subprocess.PIPE in this call.
                            subprocess.check_output(
                                call,
                                stderr=subprocess.STDOUT,
                                stdin=subprocess.PIPE,
                                env=env,
                                cwd=cwd,
                            )
                        except OSError as e:
                            log.exception("Error calling: %s", call)
                            # We may have something as: Ignore OSError: [WinError 6] The handle is invalid,
                            # give the result based on whether the file changed on disk.
                            try:
                                if mtime != os.path.getmtime(libspec_filename):
                                    _dump_spec_filename_additional_info(
                                        libspec_filename,
                                        is_builtin=is_builtin,
                                        obtain_mutex=False,
                                    )
                                    return True
                            except:
                                pass
                            log.debug("Not retrying after OSError failure.")
                            return False
                    except subprocess.CalledProcessError as e:
                        log.exception(
                            "Error creating libspec: %s. Output:\n%s", libname, e.output
                        )
                        return False
                    _dump_spec_filename_additional_info(
                        libspec_filename, is_builtin=is_builtin, obtain_mutex=False
                    )
                    return True
            except Exception:
                log.exception("Error creating libspec: %s", libname)
                return False
        finally:
            if log_time:
                delta = time.time() - curtime
                log.debug("Took: %.2fs to generate info for: %s" % (delta, libname))
    def dispose(self):
        """Stop the filesystem observer and change notifier."""
        self._observer.dispose()
        self._spec_changes_notifier.dispose()
    def _do_create_libspec_on_get(self, libname, current_doc_uri):
        """Generate a spec for *libname*, resolving paths relative to the doc.

        :return: True if a spec was generated (caches are then resynchronized).
        """
        from robocode_ls_core import uris
        additional_path = None
        abspath = None
        cwd = None
        if current_doc_uri is not None:
            cwd = os.path.dirname(uris.to_fs_path(current_doc_uri))
            if not cwd or not os.path.isdir(cwd):
                cwd = None
        if os.path.isabs(libname):
            abspath = libname
        elif current_doc_uri is not None:
            # relative path: let's make it absolute
            fs_path = os.path.dirname(uris.to_fs_path(current_doc_uri))
            abspath = os.path.abspath(os.path.join(fs_path, libname))
        if abspath:
            additional_path = os.path.dirname(abspath)
            libname = os.path.basename(libname)
            if libname.lower().endswith((".py", ".class", ".java")):
                libname = os.path.splitext(libname)[0]
        if self._create_libspec(libname, additional_path=additional_path, cwd=cwd):
            self.synchronize_internal_libspec_folders()
            return True
        return False
    def get_library_info(self, libname, create=True, current_doc_uri=None):
        """Return the library doc for *libname* (or None if unavailable).

        :param create: when True, try to (re)generate the .libspec if it is
            missing or out of sync with its sources.
        :param current_doc_uri: document used to resolve relative library paths.
        """
        libname_lower = libname.lower()
        if libname_lower.endswith((".py", ".class", ".java")):
            libname_lower = os.path.splitext(libname)[0]
        if "/" in libname_lower or "\\" in libname_lower:
            libname_lower = os.path.basename(libname_lower)
        for lib_info in self._iter_lib_info():
            library_doc = lib_info.library_doc
            if library_doc.name and library_doc.name.lower() == libname_lower:
                if not lib_info.verify_sources_sync():
                    if create:
                        # Out of sync: regenerate and then retry the lookup
                        # (regenerate and get from the cache without creating).
                        self._do_create_libspec_on_get(libname, current_doc_uri)
                        # Note: get even if it if was not created (we may match
                        # a lower priority library).
                        return self.get_library_info(
                            libname, create=False, current_doc_uri=current_doc_uri
                        )
                    else:
                        # Not in sync and it should not be created, just skip it.
                        continue
                else:
                    return library_doc
        if create:
            if self._do_create_libspec_on_get(libname, current_doc_uri):
                return self.get_library_info(
                    libname, create=False, current_doc_uri=current_doc_uri
                )
        log.debug("Unable to find library named: %s", libname)
        return None
| true | true |
f70f508210f2ed61f04a9924a27265ba20a3c8c0 | 965 | py | Python | 019_removeNthNodeFromListEnd.py | stuti-rastogi/leetcode-python-solutions | 73593fe642a06a83cde974ba5e6de3a7b396ec84 | [
"MIT"
] | 4 | 2018-07-24T08:36:42.000Z | 2019-08-25T17:48:47.000Z | 019_removeNthNodeFromListEnd.py | stuti-rastogi/leetcodesolutions | 73593fe642a06a83cde974ba5e6de3a7b396ec84 | [
"MIT"
] | null | null | null | 019_removeNthNodeFromListEnd.py | stuti-rastogi/leetcodesolutions | 73593fe642a06a83cde974ba5e6de3a7b396ec84 | [
"MIT"
] | null | null | null | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    """Remove the n-th node from the end of a singly linked list (LeetCode 19)."""

    def removeNthFromEnd(self, head: "ListNode", n: int) -> "ListNode":
        """Return the list head after deleting the n-th node from the end.

        Single pass with two pointers kept ``n + 1`` nodes apart.
        Annotations are quoted: ``ListNode`` only exists in a comment above,
        so an unquoted reference would raise NameError at import time.
        """
        if not head or not head.next:
            # Empty or single-node list: removing any node empties it.
            return None
        # Advance ``first`` up to n + 1 steps ahead of the head.
        first = head
        count = 0
        while first and count <= n:
            first = first.next
            count += 1
        # Move both pointers; when ``first`` runs off the end, ``second``
        # trails exactly one node behind the node to delete.
        second = head
        while first:
            first = first.next
            second = second.next
        # count != n + 1 means the first loop exhausted the list, i.e. the
        # node to remove is the head itself.
        if second == head and count != n + 1:
            head = second.next
        else:
            second.next = second.next.next
        return head
| 29.242424 | 97 | 0.554404 |
class Solution:
    """Remove the n-th node from the end of a singly linked list (LeetCode 19)."""

    # NOTE(review): annotations are quoted because ``ListNode`` is not defined
    # in this module (it only appears in a comment in the original problem
    # template); an unquoted name would raise NameError at import time.
    def removeNthFromEnd(self, head: "ListNode", n: int) -> "ListNode":
        """Delete the n-th node from the end; single pass, two pointers."""
        if not head or not head.next:
            # Empty or single-node list: removal empties the list.
            return None
        # Advance ``first`` up to n+1 nodes ahead (or to the end of the list).
        first = head
        count = 0
        while first and count <= n:
            first = first.next
            count += 1
        # Move both pointers in lockstep; when ``first`` falls off the end,
        # ``second`` sits just before the node to remove.
        second = head
        while first:
            first = first.next
            second = second.next
        # count == n+1 means the first loop exited naturally, so the target
        # node is not the head.
        if second == head and count != n+1:
            head = second.next
        else:
            second.next = second.next.next
        return head
| true | true |
f70f51695161b773142bb7af5d30320d1eec9d9a | 85 | py | Python | api/tacticalrmm/apiv3/apps.py | infinite8co/tacticalrmm | bd7ce5417ec672552ec3cba325318795ccde972e | [
"MIT"
] | 903 | 2019-10-22T22:56:42.000Z | 2022-03-18T14:15:54.000Z | api/tacticalrmm/apiv3/apps.py | infinite8co/tacticalrmm | bd7ce5417ec672552ec3cba325318795ccde972e | [
"MIT"
] | 720 | 2019-12-07T08:11:26.000Z | 2022-03-17T21:47:04.000Z | api/tacticalrmm/apiv3/apps.py | infinite8co/tacticalrmm | bd7ce5417ec672552ec3cba325318795ccde972e | [
"MIT"
] | 231 | 2020-02-11T14:14:21.000Z | 2022-03-16T21:23:10.000Z | from django.apps import AppConfig
class Apiv3Config(AppConfig):
    """Django application configuration for the ``apiv3`` app."""

    name = "apiv3"
| 14.166667 | 33 | 0.741176 | from django.apps import AppConfig
class Apiv3Config(AppConfig):
    """Django application configuration for the ``apiv3`` app."""

    name = "apiv3"
| true | true |
f70f520361f1a11ee403835f6c6ca6e6e2b12a75 | 1,452 | py | Python | samples/generated_samples/logging_v2_generated_config_service_v2_update_settings_sync.py | MtkN1/google-cloud-logging-patch | 05a2f747ad9b233fa3b9890c7369a50d86cabba6 | [
"Apache-2.0"
] | null | null | null | samples/generated_samples/logging_v2_generated_config_service_v2_update_settings_sync.py | MtkN1/google-cloud-logging-patch | 05a2f747ad9b233fa3b9890c7369a50d86cabba6 | [
"Apache-2.0"
] | null | null | null | samples/generated_samples/logging_v2_generated_config_service_v2_update_settings_sync.py | MtkN1/google-cloud-logging-patch | 05a2f747ad9b233fa3b9890c7369a50d86cabba6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateSettings
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-logging
# [START logging_v2_generated_ConfigServiceV2_UpdateSettings_sync]
from google.cloud import logging_v2
def sample_update_settings():
    """Generated sample: call ConfigServiceV2.UpdateSettings and print the result."""
    # Create a client
    client = logging_v2.ConfigServiceV2Client()
    # Initialize request argument(s)
    request = logging_v2.UpdateSettingsRequest(
        name="name_value",
    )
    # Make the request
    response = client.update_settings(request=request)
    # Handle the response
    print(response)
# [END logging_v2_generated_ConfigServiceV2_UpdateSettings_sync]
| 31.565217 | 85 | 0.759642 |
from google.cloud import logging_v2
def sample_update_settings():
    """Call ConfigServiceV2.UpdateSettings with a sample request and print it."""
    settings_request = logging_v2.UpdateSettingsRequest(name="name_value")
    config_client = logging_v2.ConfigServiceV2Client()
    result = config_client.update_settings(request=settings_request)
    print(result)
| true | true |
f70f5392d9696765d55f2f1e0e641681b6ef353f | 16,489 | py | Python | geoviews/operation/projection.py | GonzalezDiazJ/pyviz_geoviews_clone | cac9afd1bc0d25313c84ea617300bbe40207d044 | [
"BSD-3-Clause"
] | null | null | null | geoviews/operation/projection.py | GonzalezDiazJ/pyviz_geoviews_clone | cac9afd1bc0d25313c84ea617300bbe40207d044 | [
"BSD-3-Clause"
] | null | null | null | geoviews/operation/projection.py | GonzalezDiazJ/pyviz_geoviews_clone | cac9afd1bc0d25313c84ea617300bbe40207d044 | [
"BSD-3-Clause"
] | null | null | null | import param
import numpy as np
from cartopy import crs as ccrs
from cartopy.img_transform import warp_array, _determine_bounds
from holoviews.core.util import cartesian_product, get_param_values
from holoviews.operation import Operation
from shapely.geometry import Polygon, LineString, MultiPolygon, MultiLineString
from ..element import (Image, Shape, Polygons, Path, Points, Contours,
RGB, Graph, Nodes, EdgePaths, QuadMesh, VectorField,
HexTiles, Labels)
from ..util import (
project_extents, geom_to_array, wrap_path_data, is_multi_geometry,
polygon_to_geom, path_to_geom
)
class _project_operation(Operation):
    """
    Baseclass for projection operations, projecting elements from their
    source coordinate reference system to the supplied projection.
    """

    projection = param.ClassSelector(default=ccrs.GOOGLE_MERCATOR,
                                     class_=ccrs.Projection,
                                     instantiate=False, doc="""
        Projection the shape type is projected to.""")

    # Defines the types of elements supported by the operation
    supported_types = []

    def _process(self, element, key=None):
        # Apply the subclass's _process_element to every matching element
        # in a (possibly composite) object.
        return element.map(self._process_element, self.supported_types)
class project_path(_project_operation):
    """
    Projects Polygons and Path Elements from their source coordinate
    reference system to the supplied projection.
    """

    supported_types = [Polygons, Path, Contours, EdgePaths]

    def _project_path(self, element, path, data, boundary, geom_type, multi_type):
        """
        Handle case of continuously varying path (value dims vary per vertex).

        Returns a list with at most one dict of projected columns.
        """
        xdim, ydim = path.kdims[:2]
        xs, ys = (path.dimension_values(i) for i in range(2))
        if not len(xs):
            return []
        # Try the fast vertex transform first; it returns None when the
        # projection cannot handle the vertices directly.
        proj_arr = self.p.projection.quick_vertices_transform(
            np.column_stack([xs, ys]), element.crs)
        if proj_arr is None:
            vertices = np.column_stack([xs, ys])
            if hasattr(element.crs, '_bbox_and_offset'):
                # Wrap longitudes for CRSes with a bbox/offset (e.g. wrapping
                # around the antimeridian).
                vertices = wrap_path_data(vertices, element.crs, element.crs)
            path = geom_type(vertices)
            if boundary:
                # Clip to the projection's valid domain.
                path = path.intersection(boundary)
            if not path:
                return []
            proj = self.p.projection.project_geometry(path, element.crs)
            proj_arr = geom_to_array(proj)
        data[xdim.name] = proj_arr[:, 0]
        data[ydim.name] = proj_arr[:, 1]
        return [data]

    def _project_contour(self, element, contour, data, boundary, geom_type, multi_type):
        """
        Handle case of iso-contour (scalar value dims per path).

        Returns a list of dicts, one per projected sub-geometry.
        """
        xdim, ydim = contour.kdims[:2]
        # Scalar value dims: keep just the single value per column.
        data = {k: vals[0] for k, vals in data.items()}

        # Wrap longitudes
        vertices = contour.array([0, 1])
        if hasattr(element.crs, '_bbox_and_offset'):
            vertices = wrap_path_data(vertices, element.crs, element.crs)
        element = type(element)([vertices])
        # NOTE(review): ``element`` here is a holoviews element, never a
        # shapely ``Polygon``, so this check looks like it is always False and
        # ``path_to_geom`` is always chosen — presumably ``Polygons`` (the
        # element type) was intended; confirm before changing.
        to_geom = polygon_to_geom if isinstance(element, Polygon) else path_to_geom

        # Clip path to projection boundaries
        geoms = []
        for g in to_geom(element, multi=False, skip_invalid=False):
            if np.isinf(np.array(g.array_interface_base['data'])).sum():
                # Skip if infinity in path
                continue
            try:
                # Compute boundary intersections
                if boundary:
                    g = g.intersection(boundary)
            except:
                continue
            if is_multi_geometry(g):
                for p in g:
                    try:
                        geoms.append(geom_type(p))
                    except:
                        continue
            else:
                geoms.append(g)

        # Project geometry
        projected = []
        for g in geoms:
            proj = self.p.projection.project_geometry(g, contour.crs)
            # Normalize to an iterable of simple geometries.
            proj = proj if is_multi_geometry(proj) else [proj]
            for geom in proj:
                vertices = np.array(geom.array_interface_base['data']).reshape(-1, 2)
                xs, ys = vertices.T
                if len(xs):
                    projected.append(dict(data, **{xdim.name: xs, ydim.name: ys}))
        return projected

    def _project_geodataframe(self, element):
        """Project each geometry of a geopandas-backed element in place."""
        geoms = element.split(datatype='geom')
        projected = [self.p.projection.project_geometry(geom, element.crs)
                     for geom in geoms]
        new_data = element.data.copy()
        new_data['geometry'] = projected
        return element.clone(new_data, crs=self.p.projection)

    def _process_element(self, element):
        """Dispatch per-path projection and reassemble the element."""
        if not len(element):
            return element.clone(crs=self.p.projection)
        elif element.interface.datatype == 'geodataframe':
            return self._project_geodataframe(element)
        # Projection domain expressed in the element's own CRS, used to clip.
        boundary = element.crs.project_geometry(Polygon(self.p.projection.boundary),
                                                self.p.projection)
        if isinstance(element, Polygons):
            multi_type, geom_type = MultiPolygon, Polygon
        else:
            multi_type, geom_type = MultiLineString, LineString
        projected = []
        paths = element.split()
        for path in paths:
            data = {vd.name: path.dimension_values(vd, expanded=False) for vd in path.vdims}
            # Continuously varying paths have >1 value per value dimension;
            # iso-contours have exactly one.
            if any(len(vals) > 1 for vals in data.values()):
                projected += self._project_path(element, path, data, boundary, geom_type, multi_type)
            else:
                projected += self._project_contour(element, path, data, boundary, geom_type, multi_type)
        if len(paths) and len(projected) == 0:
            self.warning('While projecting a %s element from a %s coordinate '
                         'reference system (crs) to a %s projection none of '
                         'the projected paths were contained within the bounds '
                         'specified by the projection. Ensure you have specified '
                         'the correct coordinate system for your data.' %
                         (type(element).__name__, type(element.crs).__name__,
                          type(self.p.projection).__name__))
        return element.clone(projected, crs=self.p.projection)
class project_shape(_project_operation):
    """
    Projects Shape Element from the source coordinate reference system
    to the supplied projection.
    """

    supported_types = [Shape]

    def _process_element(self, element):
        # An empty shape only needs its crs swapped.
        if not len(element):
            return element.clone(crs=self.p.projection)
        source_geom = element.geom()
        vertices = geom_to_array(source_geom)
        # Wrap the vertices in the matching element type so project_path
        # can handle clipping and projection for us.
        is_poly = isinstance(source_geom, (MultiPolygon, Polygon))
        wrapper = Polygons([vertices]) if is_poly else Path([vertices])
        projected = project_path(wrapper, projection=self.p.projection)
        return element.clone(projected.geom(), crs=self.p.projection)
class project_points(_project_operation):
    """Projects point-like elements to the supplied projection, dropping
    points that project to non-finite coordinates."""

    supported_types = [Points, Nodes, VectorField, HexTiles, Labels]

    def _process_element(self, element):
        if not len(element):
            return element.clone(crs=self.p.projection)
        xdim, ydim = element.dimensions()[:2]
        xs, ys = (element.dimension_values(i) for i in range(2))
        coordinates = self.p.projection.transform_points(element.crs, xs, ys)
        # Keep only points whose projected x is finite (out-of-domain points
        # come back as nan/inf).
        mask = np.isfinite(coordinates[:, 0])
        new_data = {k: v[mask] for k, v in element.columns().items()}
        new_data[xdim.name] = coordinates[mask, 0]
        new_data[ydim.name] = coordinates[mask, 1]
        datatype = [element.interface.datatype]+element.datatype
        if len(new_data[xdim.name]) == 0:
            self.warning('While projecting a %s element from a %s coordinate '
                         'reference system (crs) to a %s projection none of '
                         'the projected paths were contained within the bounds '
                         'specified by the projection. Ensure you have specified '
                         'the correct coordinate system for your data.' %
                         (type(element).__name__, type(element.crs).__name__,
                          type(self.p.projection).__name__))
        return element.clone(new_data, crs=self.p.projection,
                             datatype=datatype)
class project_graph(_project_operation):
    """Projects a Graph's nodes (and its explicit edge paths, if present)."""

    supported_types = [Graph]

    def _process_element(self, element):
        projected_nodes = project_points(element.nodes, projection=self.projection)
        parts = [element.data, projected_nodes]
        if element._edgepaths:
            projected_edges = project_path(element.edgepaths, projection=self.projection)
            parts.append(projected_edges)
        return element.clone(tuple(parts), crs=self.projection)
class project_quadmesh(_project_operation):
    """Projects a QuadMesh to the supplied projection, masking quads that
    wrap around the x-axis of rectangular/wrapping projections."""

    supported_types = [QuadMesh]

    def _process_element(self, element):
        proj = self.p.projection
        irregular = any(element.interface.irregular(element, kd)
                        for kd in element.kdims)
        zs = element.dimension_values(2, flat=False)
        if irregular:
            X, Y = [np.asarray(element.interface.coords(element, kd, expanded=True))
                    for kd in element.kdims]
        else:
            X = element.dimension_values(0, expanded=True)
            Y = element.dimension_values(1, expanded=True)
            zs = zs.T
        coords = proj.transform_points(element.crs, X, Y)
        PX, PY = coords[..., 0], coords[..., 1]

        # Mask quads which are wrapping around the x-axis
        wrap_proj_types = (ccrs._RectangularProjection,
                           ccrs._WarpedRectangularProjection,
                           ccrs.InterruptedGoodeHomolosine,
                           ccrs.Mercator)
        if isinstance(proj, wrap_proj_types):
            with np.errstate(invalid='ignore'):
                edge_lengths = np.hypot(
                    np.diff(PX, axis=1),
                    np.diff(PY, axis=1)
                )
                to_mask = (
                    (edge_lengths >= abs(proj.x_limits[1] -
                                         proj.x_limits[0]) / 2) |
                    np.isnan(edge_lengths)
                )
            if np.any(to_mask):
                # Fix: the deprecated ``np.bool`` and ``np.NaN`` aliases were
                # removed in modern NumPy (1.24 / 2.0); use the builtin bool
                # dtype and ``np.nan`` instead.
                mask = np.zeros(zs.shape, dtype=bool)
                # Mask every quad adjacent to a wrapping edge.
                mask[:, 1:][to_mask] = True
                mask[:, 2:][to_mask[:, :-1]] = True
                mask[:, :-1][to_mask] = True
                mask[:, :-2][to_mask[:, 1:]] = True
                mask[1:, 1:][to_mask[:-1]] = True
                mask[1:, :-1][to_mask[:-1]] = True
                mask[:-1, 1:][to_mask[1:]] = True
                mask[:-1, :-1][to_mask[1:]] = True
                zs[mask] = np.nan

        params = get_param_values(element)
        if PX.ndim < 2:
            PX = PX.reshape(zs.shape)
        if PY.ndim < 2:
            PY = PY.reshape(zs.shape)
        return QuadMesh((PX, PY, zs), crs=self.projection, **params)
class project_image(_project_operation):
    """
    Projects an geoviews Image to the specified projection,
    returning a regular HoloViews Image type. Works by
    regridding the data along projected bounds. Only supports
    rectangular projections.
    """

    fast = param.Boolean(default=False, doc="""
        Whether to enable fast reprojection with (much) better
        performance but poorer handling in polar regions.""")

    width = param.Integer(default=None, doc="""
        Width of the reprojectd Image""")

    height = param.Integer(default=None, doc="""
        Height of the reprojected Image""")

    link_inputs = param.Boolean(default=True, doc="""
        By default, the link_inputs parameter is set to True so that
        when applying project_image, backends that support linked streams
        update RangeXY streams on the inputs of the operation.""")

    supported_types = [Image]

    def _process(self, img, key=None):
        """Reproject via cartopy's warp_array (accurate path)."""
        if self.p.fast:
            return self._fast_process(img, key)
        proj = self.p.projection
        if proj == img.crs:
            # Already in the target projection: nothing to do.
            return img
        x0, x1 = img.range(0)
        y0, y1 = img.range(1)
        xn, yn = img.interface.shape(img, gridded=True)[:2]
        px0, py0, px1, py1 = project_extents((x0, y0, x1, y1),
                                             img.crs, proj)
        src_ext, trgt_ext = (x0, x1, y0, y1), (px0, px1, py0, py1)
        # Warp each value dimension separately, then stack.
        arrays = []
        for vd in img.vdims:
            arr = img.dimension_values(vd, flat=False)
            if arr.size:
                projected, extents = warp_array(arr, proj, img.crs, (xn, yn),
                                                src_ext, trgt_ext)
            else:
                projected, extents = arr, trgt_ext
            arrays.append(projected)
        projected = np.dstack(arrays) if len(arrays) > 1 else arrays[0]
        data = np.flipud(projected)
        bounds = (extents[0], extents[2], extents[1], extents[3])
        return img.clone(data, bounds=bounds, kdims=img.kdims,
                         vdims=img.vdims, crs=proj, xdensity=None,
                         ydensity=None)

    def _fast_process(self, element, key=None):
        """Reproject by inverse-mapping target pixels to source indices.

        Faster than warp_array but less accurate near the poles.
        """
        # Project coordinates
        proj = self.p.projection
        if proj == element.crs:
            return element
        h, w = element.interface.shape(element, gridded=True)[:2]
        xs = element.dimension_values(0)
        ys = element.dimension_values(1)
        if isinstance(element, RGB):
            rgb = element.rgb
            array = np.dstack([np.flipud(rgb.dimension_values(d, flat=False))
                               for d in rgb.vdims])
        else:
            array = element.dimension_values(2, flat=False)
        (x0, y0, x1, y1) = element.bounds.lbrt()
        width = int(w) if self.p.width is None else self.p.width
        height = int(h) if self.p.height is None else self.p.height
        # The x-domain may be split into multiple spans (e.g. crossing the
        # antimeridian); each span is resampled separately.
        bounds = _determine_bounds(xs, ys, element.crs)
        yb = bounds['y']
        resampled = []
        xvalues = []
        for xb in bounds['x']:
            px0, py0, px1, py1 = project_extents((xb[0], yb[0], xb[1], yb[1]), element.crs, proj)
            if len(bounds['x']) > 1:
                # Give each span a width proportional to its x-extent.
                xfraction = (xb[1]-xb[0])/(x1-x0)
                fraction_width = int(width*xfraction)
            else:
                fraction_width = width
            xs = np.linspace(px0, px1, fraction_width)
            ys = np.linspace(py0, py1, height)
            cxs, cys = cartesian_product([xs, ys])
            # Inverse-transform target grid points back into the source CRS.
            pxs, pys, _ = element.crs.transform_points(proj, np.asarray(cxs), np.asarray(cys)).T
            # Convert source coordinates to integer pixel indices, clamped to
            # the valid array range.
            icxs = (((pxs-x0) / (x1-x0)) * w).astype(int)
            icys = (((pys-y0) / (y1-y0)) * h).astype(int)
            xvalues.append(xs)
            icxs[icxs<0] = 0
            icys[icys<0] = 0
            icxs[icxs>=w] = w-1
            icys[icys>=h] = h-1
            resampled_arr = array[icys, icxs]
            if isinstance(element, RGB):
                nvdims = len(element.vdims)
                resampled_arr = resampled_arr.reshape((fraction_width, height, nvdims)).transpose([1, 0, 2])
            else:
                resampled_arr = resampled_arr.reshape((fraction_width, height)).T
            resampled.append(resampled_arr)
        xs = np.concatenate(xvalues[::-1])
        resampled = np.hstack(resampled[::-1])
        datatypes = [element.interface.datatype, 'xarray', 'grid']
        data = (xs, ys)
        for i in range(len(element.vdims)):
            if resampled.ndim > 2:
                data = data + (resampled[::-1, :, i],)
            else:
                data = data + (resampled,)
        return element.clone(data, crs=proj, bounds=None, datatype=datatypes)
class project(Operation):
    """
    Dispatch operation projecting any supported GeoViews element type
    to the target coordinate reference system.
    """

    projection = param.ClassSelector(default=ccrs.GOOGLE_MERCATOR,
                                     class_=ccrs.Projection,
                                     instantiate=False, doc="""
        Projection the image type is projected to.""")

    # Specialized per-type operations; each only touches elements of its
    # own supported_types when mapped over the input.
    _operations = [project_path, project_image, project_shape,
                   project_graph, project_quadmesh, project_points]

    def _process(self, element, key=None):
        # Apply every specialized projection operation in sequence.
        projected = element
        for operation in self._operations:
            op_instance = operation.instance(projection=self.p.projection)
            projected = projected.map(op_instance, operation.supported_types)
        return projected
| 39.73253 | 108 | 0.572321 | import param
import numpy as np
from cartopy import crs as ccrs
from cartopy.img_transform import warp_array, _determine_bounds
from holoviews.core.util import cartesian_product, get_param_values
from holoviews.operation import Operation
from shapely.geometry import Polygon, LineString, MultiPolygon, MultiLineString
from ..element import (Image, Shape, Polygons, Path, Points, Contours,
RGB, Graph, Nodes, EdgePaths, QuadMesh, VectorField,
HexTiles, Labels)
from ..util import (
project_extents, geom_to_array, wrap_path_data, is_multi_geometry,
polygon_to_geom, path_to_geom
)
class _project_operation(Operation):
    """Base class for projection operations.

    Holds the target projection parameter and maps the subclass's
    ``_process_element`` over every element of a supported type.
    """

    projection = param.ClassSelector(default=ccrs.GOOGLE_MERCATOR,
                                     class_=ccrs.Projection,
                                     instantiate=False, doc="""
        Projection the shape type is projected to.""")

    # Subclasses list the element types they know how to project.
    supported_types = []

    def _process(self, element, key=None):
        return element.map(self._process_element, self.supported_types)
class project_path(_project_operation):
    """Projects path-like elements (Polygons, Path, Contours, EdgePaths)
    to the target coordinate reference system."""

    supported_types = [Polygons, Path, Contours, EdgePaths]

    def _project_path(self, element, path, data, boundary, geom_type, multi_type):
        # Project a sub-path that carries more than one value per dimension.
        xdim, ydim = path.kdims[:2]
        xs, ys = (path.dimension_values(i) for i in range(2))
        if not len(xs):
            return []
        # Fast path: quick_vertices_transform returns None when the simple
        # vertex transform is not applicable.
        proj_arr = self.p.projection.quick_vertices_transform(
            np.column_stack([xs, ys]), element.crs)
        if proj_arr is None:
            # Slow path: build a shapely geometry, clip it to the projection
            # boundary and project it fully (handles wrapping/clipping).
            vertices = np.column_stack([xs, ys])
            if hasattr(element.crs, '_bbox_and_offset'):
                vertices = wrap_path_data(vertices, element.crs, element.crs)
            path = geom_type(vertices)
            if boundary:
                path = path.intersection(boundary)
            if not path:
                return []
            proj = self.p.projection.project_geometry(path, element.crs)
            proj_arr = geom_to_array(proj)
        data[xdim.name] = proj_arr[:, 0]
        data[ydim.name] = proj_arr[:, 1]
        return [data]

    def _project_contour(self, element, contour, data, boundary, geom_type, multi_type):
        # Project a sub-path with scalar (per-path) values, e.g. a contour.
        xdim, ydim = contour.kdims[:2]
        # Collapse per-path value arrays to their scalar value.
        data = {k: vals[0] for k, vals in data.items()}

        vertices = contour.array([0, 1])
        if hasattr(element.crs, '_bbox_and_offset'):
            vertices = wrap_path_data(vertices, element.crs, element.crs)
        element = type(element)([vertices])
        # NOTE(review): isinstance check is against shapely's Polygon even
        # though `element` is a HoloViews element here -- confirm intended.
        to_geom = polygon_to_geom if isinstance(element, Polygon) else path_to_geom
        geoms = []
        for g in to_geom(element, multi=False, skip_invalid=False):
            # Skip geometries containing infinite coordinates.
            if np.isinf(np.array(g.array_interface_base['data'])).sum():
                continue
            try:
                # Clip to the projection boundary; intersection can raise on
                # invalid geometries -- deliberately skipped best-effort.
                if boundary:
                    g = g.intersection(boundary)
            except:
                continue
            if is_multi_geometry(g):
                for p in g:
                    try:
                        geoms.append(geom_type(p))
                    except:
                        continue
            else:
                geoms.append(g)
        projected = []
        for g in geoms:
            proj = self.p.projection.project_geometry(g, contour.crs)
            proj = proj if is_multi_geometry(proj) else [proj]
            for geom in proj:
                vertices = np.array(geom.array_interface_base['data']).reshape(-1, 2)
                xs, ys = vertices.T
                if len(xs):
                    projected.append(dict(data, **{xdim.name: xs, ydim.name: ys}))
        return projected

    def _project_geodataframe(self, element):
        # Geopandas-backed elements: project each geometry column entry.
        geoms = element.split(datatype='geom')
        projected = [self.p.projection.project_geometry(geom, element.crs)
                     for geom in geoms]
        new_data = element.data.copy()
        new_data['geometry'] = projected
        return element.clone(new_data, crs=self.p.projection)

    def _process_element(self, element):
        if not len(element):
            return element.clone(crs=self.p.projection)
        elif element.interface.datatype == 'geodataframe':
            return self._project_geodataframe(element)
        # Projection boundary expressed in the source crs, used for clipping.
        boundary = element.crs.project_geometry(Polygon(self.p.projection.boundary),
                                                self.p.projection)
        if isinstance(element, Polygons):
            multi_type, geom_type = MultiPolygon, Polygon
        else:
            multi_type, geom_type = MultiLineString, LineString
        projected = []
        paths = element.split()
        for path in paths:
            data = {vd.name: path.dimension_values(vd, expanded=False) for vd in path.vdims}
            # Paths with varying values are projected vertex-wise; scalar
            # (contour-like) paths go through the geometry-based route.
            if any(len(vals) > 1 for vals in data.values()):
                projected += self._project_path(element, path, data, boundary, geom_type, multi_type)
            else:
                projected += self._project_contour(element, path, data, boundary, geom_type, multi_type)
        if len(paths) and len(projected) == 0:
            self.warning('While projecting a %s element from a %s coordinate '
                         'reference system (crs) to a %s projection none of '
                         'the projected paths were contained within the bounds '
                         'specified by the projection. Ensure you have specified '
                         'the correct coordinate system for your data.' %
                         (type(element).__name__, type(element.crs).__name__,
                          type(self.p.projection).__name__))
        return element.clone(projected, crs=self.p.projection)
class project_shape(_project_operation):
    """Projects Shape elements by round-tripping their geometry through
    the corresponding Polygons/Path projection."""

    supported_types = [Shape]

    def _process_element(self, element):
        if not len(element):
            return element.clone(crs=self.p.projection)
        geom = element.geom()
        vertices = geom_to_array(geom)
        # Wrap the raw vertices in the matching path element type so that
        # project_path can handle clipping and wrapping.
        if isinstance(geom, (MultiPolygon, Polygon)):
            obj = Polygons([vertices])
        else:
            obj = Path([vertices])
        geom = project_path(obj, projection=self.p.projection).geom()
        return element.clone(geom, crs=self.p.projection)
class project_points(_project_operation):
    """Projects point-like elements to the target coordinate reference
    system, dropping points that project to non-finite coordinates."""

    supported_types = [Points, Nodes, VectorField, HexTiles, Labels]

    def _process_element(self, element):
        if not len(element):
            return element.clone(crs=self.p.projection)
        xdim, ydim = element.dimensions()[:2]
        xs, ys = (element.dimension_values(i) for i in range(2))
        coordinates = self.p.projection.transform_points(element.crs, xs, ys)
        # Keep only points whose projected x-coordinate is finite.
        mask = np.isfinite(coordinates[:, 0])
        new_data = {k: v[mask] for k, v in element.columns().items()}
        new_data[xdim.name] = coordinates[mask, 0]
        new_data[ydim.name] = coordinates[mask, 1]
        datatype = [element.interface.datatype]+element.datatype
        if len(new_data[xdim.name]) == 0:
            # Every point was dropped -- almost always a wrong source crs.
            self.warning('While projecting a %s element from a %s coordinate '
                         'reference system (crs) to a %s projection none of '
                         'the projected paths were contained within the bounds '
                         'specified by the projection. Ensure you have specified '
                         'the correct coordinate system for your data.' %
                         (type(element).__name__, type(element.crs).__name__,
                          type(self.p.projection).__name__))
        return element.clone(new_data, crs=self.p.projection,
                             datatype=datatype)
class project_graph(_project_operation):
    """Projects Graph elements by projecting their nodes and, when
    present, their edge paths."""

    supported_types = [Graph]

    def _process_element(self, element):
        nodes = project_points(element.nodes, projection=self.projection)
        data = (element.data, nodes)
        # Edge paths are optional; only project them when they exist.
        if element._edgepaths:
            data = data + (project_path(element.edgepaths, projection=self.projection),)
        return element.clone(data, crs=self.projection)
class project_quadmesh(_project_operation):
    """Projects QuadMesh elements to the target coordinate reference
    system, masking quads that get wrapped around the projection's
    x-limits (e.g. across the antimeridian)."""

    supported_types = [QuadMesh]

    def _process_element(self, element):
        proj = self.p.projection
        irregular = any(element.interface.irregular(element, kd)
                        for kd in element.kdims)
        zs = element.dimension_values(2, flat=False)
        if irregular:
            # Irregular (2D) coordinate arrays are used as-is.
            X, Y = [np.asarray(element.interface.coords(element, kd, expanded=True))
                    for kd in element.kdims]
        else:
            X = element.dimension_values(0, expanded=True)
            Y = element.dimension_values(1, expanded=True)
            zs = zs.T
        coords = proj.transform_points(element.crs, X, Y)
        PX, PY = coords[..., 0], coords[..., 1]

        # For rectangular-style projections, quads whose projected edge
        # spans more than half the x-range have wrapped around the globe;
        # mask the value array around those edges so they are not drawn.
        wrap_proj_types = (ccrs._RectangularProjection,
                           ccrs._WarpedRectangularProjection,
                           ccrs.InterruptedGoodeHomolosine,
                           ccrs.Mercator)
        if isinstance(proj, wrap_proj_types):
            with np.errstate(invalid='ignore'):
                edge_lengths = np.hypot(
                    np.diff(PX, axis=1),
                    np.diff(PY, axis=1)
                )
                to_mask = (
                    (edge_lengths >= abs(proj.x_limits[1] -
                                         proj.x_limits[0]) / 2) |
                    np.isnan(edge_lengths)
                )
            if np.any(to_mask):
                # Fix: the deprecated alias np.bool was removed in
                # NumPy 1.24; the builtin bool is the documented dtype.
                mask = np.zeros(zs.shape, dtype=bool)
                # Grow the mask to every cell adjacent to a wrapped edge.
                mask[:, 1:][to_mask] = True
                mask[:, 2:][to_mask[:, :-1]] = True
                mask[:, :-1][to_mask] = True
                mask[:, :-2][to_mask[:, 1:]] = True
                mask[1:, 1:][to_mask[:-1]] = True
                mask[1:, :-1][to_mask[:-1]] = True
                mask[:-1, 1:][to_mask[1:]] = True
                mask[:-1, :-1][to_mask[1:]] = True
                # Fix: np.NaN alias was removed in NumPy 2.0; use np.nan.
                zs[mask] = np.nan

        params = get_param_values(element)
        # transform_points may flatten 1D inputs; restore the grid shape.
        if PX.ndim < 2:
            PX = PX.reshape(zs.shape)
        if PY.ndim < 2:
            PY = PY.reshape(zs.shape)
        return QuadMesh((PX, PY, zs), crs=self.projection, **params)
class project_image(_project_operation):
fast = param.Boolean(default=False, doc="""
Whether to enable fast reprojection with (much) better
performance but poorer handling in polar regions.""")
width = param.Integer(default=None, doc="""
Width of the reprojectd Image""")
height = param.Integer(default=None, doc="""
Height of the reprojected Image""")
link_inputs = param.Boolean(default=True, doc="""
By default, the link_inputs parameter is set to True so that
when applying project_image, backends that support linked streams
update RangeXY streams on the inputs of the operation.""")
supported_types = [Image]
def _process(self, img, key=None):
if self.p.fast:
return self._fast_process(img, key)
proj = self.p.projection
if proj == img.crs:
return img
x0, x1 = img.range(0)
y0, y1 = img.range(1)
xn, yn = img.interface.shape(img, gridded=True)[:2]
px0, py0, px1, py1 = project_extents((x0, y0, x1, y1),
img.crs, proj)
src_ext, trgt_ext = (x0, x1, y0, y1), (px0, px1, py0, py1)
arrays = []
for vd in img.vdims:
arr = img.dimension_values(vd, flat=False)
if arr.size:
projected, extents = warp_array(arr, proj, img.crs, (xn, yn),
src_ext, trgt_ext)
else:
projected, extents = arr, trgt_ext
arrays.append(projected)
projected = np.dstack(arrays) if len(arrays) > 1 else arrays[0]
data = np.flipud(projected)
bounds = (extents[0], extents[2], extents[1], extents[3])
return img.clone(data, bounds=bounds, kdims=img.kdims,
vdims=img.vdims, crs=proj, xdensity=None,
ydensity=None)
def _fast_process(self, element, key=None):
proj = self.p.projection
if proj == element.crs:
return element
h, w = element.interface.shape(element, gridded=True)[:2]
xs = element.dimension_values(0)
ys = element.dimension_values(1)
if isinstance(element, RGB):
rgb = element.rgb
array = np.dstack([np.flipud(rgb.dimension_values(d, flat=False))
for d in rgb.vdims])
else:
array = element.dimension_values(2, flat=False)
(x0, y0, x1, y1) = element.bounds.lbrt()
width = int(w) if self.p.width is None else self.p.width
height = int(h) if self.p.height is None else self.p.height
bounds = _determine_bounds(xs, ys, element.crs)
yb = bounds['y']
resampled = []
xvalues = []
for xb in bounds['x']:
px0, py0, px1, py1 = project_extents((xb[0], yb[0], xb[1], yb[1]), element.crs, proj)
if len(bounds['x']) > 1:
xfraction = (xb[1]-xb[0])/(x1-x0)
fraction_width = int(width*xfraction)
else:
fraction_width = width
xs = np.linspace(px0, px1, fraction_width)
ys = np.linspace(py0, py1, height)
cxs, cys = cartesian_product([xs, ys])
pxs, pys, _ = element.crs.transform_points(proj, np.asarray(cxs), np.asarray(cys)).T
icxs = (((pxs-x0) / (x1-x0)) * w).astype(int)
icys = (((pys-y0) / (y1-y0)) * h).astype(int)
xvalues.append(xs)
icxs[icxs<0] = 0
icys[icys<0] = 0
icxs[icxs>=w] = w-1
icys[icys>=h] = h-1
resampled_arr = array[icys, icxs]
if isinstance(element, RGB):
nvdims = len(element.vdims)
resampled_arr = resampled_arr.reshape((fraction_width, height, nvdims)).transpose([1, 0, 2])
else:
resampled_arr = resampled_arr.reshape((fraction_width, height)).T
resampled.append(resampled_arr)
xs = np.concatenate(xvalues[::-1])
resampled = np.hstack(resampled[::-1])
datatypes = [element.interface.datatype, 'xarray', 'grid']
data = (xs, ys)
for i in range(len(element.vdims)):
if resampled.ndim > 2:
data = data + (resampled[::-1, :, i],)
else:
data = data + (resampled,)
return element.clone(data, crs=proj, bounds=None, datatype=datatypes)
class project(Operation):
    """Projects GeoViews Element types to the specified projection by
    dispatching to the specialized per-type projection operations."""

    projection = param.ClassSelector(default=ccrs.GOOGLE_MERCATOR,
                                     class_=ccrs.Projection,
                                     instantiate=False, doc="""
        Projection the image type is projected to.""")

    # Applied in sequence; each operation's map() only affects elements
    # of that operation's supported_types.
    _operations = [project_path, project_image, project_shape,
                   project_graph, project_quadmesh, project_points]

    def _process(self, element, key=None):
        for op in self._operations:
            element = element.map(op.instance(projection=self.p.projection),
                                  op.supported_types)
        return element
| true | true |
f70f53d11f6bad5ff3c69ec373295103fd57b32c | 1,832 | py | Python | end_to_end_tests/golden-record/my_test_api_client/api/default/get_common_parameters.py | kmray/openapi-python-client | 19dd9d8c2407e9f01d73bdb833d8dedf428d897c | [
"MIT"
] | 172 | 2020-02-15T20:14:16.000Z | 2021-06-09T07:09:15.000Z | end_to_end_tests/golden-record/my_test_api_client/api/default/get_common_parameters.py | kmray/openapi-python-client | 19dd9d8c2407e9f01d73bdb833d8dedf428d897c | [
"MIT"
] | 410 | 2020-02-15T19:39:29.000Z | 2021-06-09T19:28:57.000Z | end_to_end_tests/golden-record/my_test_api_client/api/default/get_common_parameters.py | kmray/openapi-python-client | 19dd9d8c2407e9f01d73bdb833d8dedf428d897c | [
"MIT"
] | 38 | 2020-04-12T09:36:27.000Z | 2021-06-11T08:57:07.000Z | from typing import Any, Dict, Union
import httpx
from ...client import Client
from ...types import UNSET, Response, Unset
def _get_kwargs(
    *,
    client: Client,
    common: Union[Unset, None, str] = UNSET,
) -> Dict[str, Any]:
    """Assemble the httpx keyword arguments for GET /common_parameters."""
    url = "{}/common_parameters".format(client.base_url)

    headers: Dict[str, Any] = client.get_headers()
    cookies: Dict[str, Any] = client.get_cookies()

    raw_params: Dict[str, Any] = {
        "common": common,
    }
    # Drop query parameters that were never supplied (UNSET) or are None.
    query_params = {
        key: value
        for key, value in raw_params.items()
        if value is not UNSET and value is not None
    }

    return {
        "url": url,
        "headers": headers,
        "cookies": cookies,
        "timeout": client.get_timeout(),
        "params": query_params,
    }
def _build_response(*, response: httpx.Response) -> Response[Any]:
    """Wrap a raw httpx response in the generated client's Response type."""
    wrapped: Response[Any] = Response(
        status_code=response.status_code,
        content=response.content,
        headers=response.headers,
        parsed=None,
    )
    return wrapped
def sync_detailed(
    *,
    client: Client,
    common: Union[Unset, None, str] = UNSET,
) -> Response[Any]:
    """Perform a blocking GET request against /common_parameters.

    Args:
        common (Union[Unset, None, str]):

    Returns:
        Response[Any]
    """
    request_kwargs = _get_kwargs(
        client=client,
        common=common,
    )

    raw_response = httpx.get(
        verify=client.verify_ssl,
        **request_kwargs,
    )

    return _build_response(response=raw_response)
async def asyncio_detailed(
    *,
    client: Client,
    common: Union[Unset, None, str] = UNSET,
) -> Response[Any]:
    """Perform an asynchronous GET request against /common_parameters.

    Args:
        common (Union[Unset, None, str]):

    Returns:
        Response[Any]
    """
    request_kwargs = _get_kwargs(
        client=client,
        common=common,
    )

    async with httpx.AsyncClient(verify=client.verify_ssl) as session:
        raw_response = await session.get(**request_kwargs)

    return _build_response(response=raw_response)
| 20.355556 | 82 | 0.593341 | from typing import Any, Dict, Union
import httpx
from ...client import Client
from ...types import UNSET, Response, Unset
def _get_kwargs(
*,
client: Client,
common: Union[Unset, None, str] = UNSET,
) -> Dict[str, Any]:
url = "{}/common_parameters".format(client.base_url)
headers: Dict[str, Any] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
params: Dict[str, Any] = {
"common": common,
}
params = {k: v for k, v in params.items() if v is not UNSET and v is not None}
return {
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
"params": params,
}
def _build_response(*, response: httpx.Response) -> Response[Any]:
return Response(
status_code=response.status_code,
content=response.content,
headers=response.headers,
parsed=None,
)
def sync_detailed(
*,
client: Client,
common: Union[Unset, None, str] = UNSET,
) -> Response[Any]:
kwargs = _get_kwargs(
client=client,
common=common,
)
response = httpx.get(
verify=client.verify_ssl,
**kwargs,
)
return _build_response(response=response)
async def asyncio_detailed(
*,
client: Client,
common: Union[Unset, None, str] = UNSET,
) -> Response[Any]:
kwargs = _get_kwargs(
client=client,
common=common,
)
async with httpx.AsyncClient(verify=client.verify_ssl) as _client:
response = await _client.get(**kwargs)
return _build_response(response=response)
| true | true |
f70f54a84673a8794196351079055e57ac2e5bdf | 3,370 | py | Python | tests/conftest.py | ilko222/test_python | e77c3979bd9cef425716b8f26d2cdd4491b7ae17 | [
"MIT"
] | null | null | null | tests/conftest.py | ilko222/test_python | e77c3979bd9cef425716b8f26d2cdd4491b7ae17 | [
"MIT"
] | null | null | null | tests/conftest.py | ilko222/test_python | e77c3979bd9cef425716b8f26d2cdd4491b7ae17 | [
"MIT"
] | null | null | null | import pytest
import allure
from _pytest.nodes import Item
from _pytest.runner import CallInfo
from selene.core.exceptions import TimeoutException
from selene.support.shared import browser
@pytest.fixture(scope='function', autouse=True)
def browser_management():
    """Per-test browser lifecycle fixture.

    Everything before ``yield`` runs as setup for every test function;
    everything after runs as teardown.  Snapshots for failed tests are
    attached by the ``pytest_runtest_makereport`` hook below, so no
    selene ``hook_wait_failure`` is installed here.
    """
    # --- setup: applied before each test function ---
    browser.config.timeout = 3

    yield

    # --- teardown: applied after each test function ---
    browser.quit()
# Snapshots observed at setup time of the current test (i.e. left over
# from a previous test); pytest_runtest_makereport compares against these
# to avoid attaching stale artifacts to a failure report.
prev_test_screenshot = None
prev_test_page_source = None


@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_setup(item):
    # Let the regular setup phase run first, then record the snapshots
    # that exist before the test body starts.
    yield
    global prev_test_screenshot
    prev_test_screenshot = browser.config.last_screenshot
    global prev_test_page_source
    prev_test_page_source = browser.config.last_page_source
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item: Item, call: CallInfo):
    """
    Attach snapshots (screenshot and page source) to the Allure report
    when the test's call phase failed.
    """
    # Run all other pytest_runtest_makereport non-wrapped hooks first.
    outcome = yield
    result = outcome.get_result()
    if result.when == "call" and result.failed:
        last_screenshot = browser.config.last_screenshot
        # Attach only if the snapshot differs from the one recorded during
        # setup (see pytest_runtest_setup), i.e. it belongs to this test.
        if last_screenshot and not last_screenshot == prev_test_screenshot:
            allure.attach.file(source=last_screenshot,
                               name='screenshot',
                               attachment_type=allure.attachment_type.PNG)
        last_page_source = browser.config.last_page_source
        if last_page_source and not last_page_source == prev_test_page_source:
            allure.attach.file(source=last_page_source,
                               name='page source',
                               attachment_type=allure.attachment_type.HTML)
| 34.040404 | 93 | 0.660831 | import pytest
import allure
from _pytest.nodes import Item
from _pytest.runner import CallInfo
from selene.core.exceptions import TimeoutException
from selene.support.shared import browser
@pytest.fixture(scope='function', autouse=True)
def browser_management():
# An example of selene hook_wait_failure that attaches snapshots to failed test step.
# It is actually not needed and optional,
# because in the pytest_runtest_makereport hook below
# we attach screenshots to the test body itself,
# that is more handy during analysis of test report
#
# but if you need it, you can enable it by uncommenting
# together with the following ``browser.config.hook_wait_failure =`` line;)
#
# otherwise, you can remove it
# """
browser.config.timeout = 3
yield
browser.quit()
prev_test_screenshot = None
prev_test_page_source = None
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_setup(item):
yield
global prev_test_screenshot
prev_test_screenshot = browser.config.last_screenshot
global prev_test_page_source
prev_test_page_source = browser.config.last_page_source
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item: Item, call: CallInfo):
outcome = yield
result = outcome.get_result()
if result.when == "call" and result.failed:
last_screenshot = browser.config.last_screenshot
if last_screenshot and not last_screenshot == prev_test_screenshot:
allure.attach.file(source=last_screenshot,
name='screenshot',
attachment_type=allure.attachment_type.PNG)
last_page_source = browser.config.last_page_source
if last_page_source and not last_page_source == prev_test_page_source:
allure.attach.file(source=last_page_source,
name='page source',
attachment_type=allure.attachment_type.HTML)
| true | true |
f70f54d4dd5967223a8d1a1372d9ffeaa51b56df | 5,539 | py | Python | public_talks/2016_02_26_columbia/get_tweets.py | kylepjohnson/ipython_notebooks | 7f77ec06a70169cc479a6f912b4888789bf28ac4 | [
"MIT"
] | 9 | 2016-08-10T09:03:09.000Z | 2021-01-06T21:34:20.000Z | public_talks/2016_02_26_columbia/get_tweets.py | kylepjohnson/ipython | 7f77ec06a70169cc479a6f912b4888789bf28ac4 | [
"MIT"
] | null | null | null | public_talks/2016_02_26_columbia/get_tweets.py | kylepjohnson/ipython | 7f77ec06a70169cc479a6f912b4888789bf28ac4 | [
"MIT"
] | 3 | 2018-10-07T01:56:22.000Z | 2021-01-06T21:33:28.000Z | #!/usr/bin/python
# -*- coding: utf_8 -*-
"""Access and query Twitter's API with the simplistic twitter package (`pip install twitter`).
"""
from __future__ import print_function
from __future__ import unicode_literals
import csv
import os
import time
from twitter import OAuth
from twitter import Twitter
def setup_twitter(config_file='config.py'):
    """Read API credentials from ``config_file`` and return an
    authenticated Twitter session.

    The config file is executed as Python and must define
    ``access_key``, ``access_secret``, ``consumer_key`` and
    ``consumer_secret``.
    """
    config = {}
    # Fix: execfile() is Python-2-only and left no explicit file handle
    # management; exec(open(...).read()) behaves identically and works on
    # both Python 2 and 3, with the context manager closing the file.
    with open(config_file) as handle:
        exec(handle.read(), config)
    twitter_obj = Twitter(auth=OAuth(config["access_key"],
                                     config["access_secret"],
                                     config["consumer_key"],
                                     config["consumer_secret"]))
    return twitter_obj
def search_twitter(twitter_session, query, count=100, status='popular'):
    """Run a tweet search against the Twitter API via the twitter package
    and return the raw response object."""
    allowed_statuses = ['mixed', 'recent', 'popular']
    assert status in allowed_statuses, "'status' must be in {}.".format(allowed_statuses)
    return twitter_session.search.tweets(q=query,
                                         lang='en',
                                         result=status,
                                         count=count,
                                         retry=True)
def parse_twitter_response(twitter_response, min_rts=500, strip_non_ascii=True):
    """Yield one dict per tweet in a Twitter API search response.

    Each yielded dict has the keys '_tweet_datetime', '_text' and
    '_rt_count'.  The text is normalized: non-ASCII characters are
    optionally replaced with spaces, a redundant leading 'RT ' marker is
    removed, and newlines become spaces so a tweet fits on one CSV row.

    Note: ``min_rts`` is accepted for API symmetry but is not applied
    here; retweet-count filtering happens in the caller.

    Available keys per tweet:
    [u'contributors', u'truncated', u'text', u'is_quote_status',
    u'in_reply_to_status_id', u'id', u'favorite_count', u'source',
    u'retweeted', u'coordinates', u'entities', u'in_reply_to_screen_name',
    u'in_reply_to_user_id', u'retweet_count', u'id_str', u'favorited',
    u'retweeted_status', u'user', u'geo', u'in_reply_to_user_id_str',
    u'possibly_sensitive', u'lang', u'created_at',
    u'in_reply_to_status_id_str', u'place', u'metadata']
    """
    for result in twitter_response['statuses']:
        tweet_datetime = result['created_at']
        # Bug fix: the old code encoded the text to bytes before the
        # per-character pass; on Python 3 iterating bytes yields ints, so
        # ord() and startswith('RT ') both raised TypeError.  Operating on
        # the text directly works on Python 2 (unicode) and Python 3 alike.
        text = result['text']
        if strip_non_ascii:
            text = ''.join([c if ord(c) < 128 else ' ' for c in text])
        # Strip 'RT ' from head of retweets, redundant
        if text.startswith('RT '):
            text = text[3:]
        # Change newlines to spaces
        text = ''.join([' ' if c == '\n' else c for c in text])
        rt_count = result['retweet_count']
        yield {'_tweet_datetime': tweet_datetime,
               '_text': text,
               '_rt_count': rt_count}
def search_parse_write_tweets(query_str,
                              total_to_fetch,
                              status,
                              minimum_rts,
                              low_rt_threshold):
    """Search Twitter, parse the response and append each tweet to one of
    two CSV files depending on its retweet count.

    Tweets with at least ``minimum_rts`` retweets are appended to
    tweets/tweets_popular.csv; tweets with at most ``low_rt_threshold``
    go to tweets/tweets_not_popular.csv; tweets in between are dropped.
    """
    twitter = setup_twitter()
    # Bug fix: use the function's own parameters.  The old code silently
    # ignored query_str/total_to_fetch and read the module-level globals
    # query_disjunction/TWEETS_TO_FETCH instead.
    query_response = search_twitter(twitter_session=twitter,
                                    query=query_str,
                                    count=total_to_fetch,
                                    status=status)
    print("Search complete ({} seconds)".format(query_response["search_metadata"]["completed_in"]))

    # Bug fix: materialize the generator so the fieldnames probe does not
    # consume (and silently drop) the first tweet.
    tweets_data = list(parse_twitter_response(query_response, min_rts=minimum_rts))
    if not tweets_data:
        return
    fieldnames = list(tweets_data[0].keys())
    fieldnames_len = len(fieldnames)

    def _open_writer(path):
        # Open `path` for append, emitting the header only on creation.
        write_header = not os.path.isfile(path)
        handle = open(path, 'ab')
        writer = csv.DictWriter(handle, delimiter=b'|', fieldnames=fieldnames)
        if write_header:
            writer.writeheader()
        return handle, writer

    popular_handle, popular_writer = _open_writer('tweets/tweets_popular.csv')
    not_popular_handle, not_popular_writer = _open_writer('tweets/tweets_not_popular.csv')
    try:
        for tweet_data in tweets_data:
            # Skip malformed rows that do not match the header.
            if len(tweet_data.keys()) != fieldnames_len:
                continue
            # Bug fix: the parser yields '_rt_count' (leading underscore);
            # the old 'rt_count' lookup raised KeyError on every row.
            if tweet_data['_rt_count'] >= minimum_rts:
                popular_writer.writerow(tweet_data)
            elif tweet_data['_rt_count'] <= low_rt_threshold:
                not_popular_writer.writerow(tweet_data)
    finally:
        # Bug fix: the old code never closed the CSV file handles.
        popular_handle.close()
        not_popular_handle.close()
if __name__ == '__main__':
    TWEETS_TO_FETCH = 1000
    # Broad query of common English words OR-ed together, to sample a
    # large cross-section of English-language tweets.
    query_string = 'the a u i me she you he they for rt at tweet'.split(' ')
    query_disjunction = ' OR '.join(query_string)
    #status = 'popular'  # ['mixed', 'recent', 'popular']
    minimum_rts = 500
    low_rt_threshold = 10
    # Poll forever; the sleep spaces out requests -- presumably to stay
    # within Twitter's API rate limits (TODO confirm interval suffices).
    while True:
        time.sleep(60)
        search_parse_write_tweets(query_str=query_disjunction,
                                  total_to_fetch=TWEETS_TO_FETCH,
                                  status='popular',
                                  minimum_rts=minimum_rts,
                                  low_rt_threshold=low_rt_threshold)
        search_parse_write_tweets(query_str=query_disjunction,
                                  total_to_fetch=TWEETS_TO_FETCH,
                                  status='mixed',
                                  minimum_rts=minimum_rts,
                                  low_rt_threshold=low_rt_threshold)
| 36.682119 | 104 | 0.596498 |
from __future__ import print_function
from __future__ import unicode_literals
import csv
import os
import time
from twitter import OAuth
from twitter import Twitter
def setup_twitter(config_file='config.py'):
config = {}
execfile(config_file, config)
twitter_obj = Twitter(auth=OAuth(config["access_key"],
config["access_secret"],
config["consumer_key"],
config["consumer_secret"]))
return twitter_obj
def search_twitter(twitter_session, query, count=100, status='popular'):
status_options = ['mixed', 'recent', 'popular']
assert status in status_options, "'status' must be in {}.".format(status_options)
query = twitter_session.search.tweets(q=query,
lang='en',
result=status,
count=count,
retry=True)
return query
def parse_twitter_response(twitter_response, min_rts=500, strip_non_ascii=True):
for result in twitter_response['statuses']:
tweet_datetime = result['created_at']
text = result['text'].encode('utf_8')
if strip_non_ascii:
text = ''.join([i if ord(i) < 128 else ' ' for i in text])
if text.startswith('RT '):
text = text[3:]
text = ''.join([' ' if c == '\n' else c for c in text])
rt_count = result['retweet_count']
yield {'_tweet_datetime': tweet_datetime,
'_text': text,
'_rt_count': rt_count}
def search_parse_write_tweets(query_str,
total_to_fetch,
status,
minimum_rts,
low_rt_threshold):
twitter = setup_twitter()
query_response = search_twitter(twitter_session=twitter,
query=query_disjunction,
count=TWEETS_TO_FETCH,
status=status)
print("Search complete ({} seconds)".format(query_response["search_metadata"]["completed_in"]))
tweets_data = parse_twitter_response(query_response, min_rts=minimum_rts)
fieldnames = []
if not fieldnames:
for row in tweets_data:
fieldnames = row.keys()
fieldnames_len = len(row.keys())
break
file1 = 'tweets/tweets_popular.csv'
f1_write_header = False
if not os.path.isfile(file1):
f1_write_header = True
csv_popular_open = open(file1, 'ab')
csv_popular_writer = csv.DictWriter(csv_popular_open, delimiter=b'|', fieldnames=fieldnames)
if f1_write_header:
csv_popular_writer.writeheader()
file2 = 'tweets/tweets_not_popular.csv'
f2_write_header = False
if not os.path.isfile(file2):
f2_write_header = True
csv_not_popular_open = open(file2, 'ab')
csv_not_popular_writer = csv.DictWriter(csv_not_popular_open, delimiter=b'|', fieldnames=fieldnames)
if f2_write_header:
csv_not_popular_writer.writeheader()
for tweet_data in tweets_data:
if tweet_data['rt_count'] >= minimum_rts:
if len(tweet_data.keys()) == fieldnames_len:
csv_popular_writer.writerow(tweet_data)
elif tweet_data['rt_count'] <= low_rt_threshold:
if len(tweet_data.keys()) == fieldnames_len:
csv_not_popular_writer.writerow(tweet_data)
if __name__ == '__main__':
TWEETS_TO_FETCH = 1000
query_string = 'the a u i me she you he they for rt at tweet'.split(' ')
query_disjunction = ' OR '.join(query_string)
t_threshold = 10
while True:
time.sleep(60)
search_parse_write_tweets(query_str=query_disjunction,
total_to_fetch=TWEETS_TO_FETCH,
status='popular',
minimum_rts=minimum_rts,
low_rt_threshold=low_rt_threshold)
search_parse_write_tweets(query_str=query_disjunction,
total_to_fetch=TWEETS_TO_FETCH,
status='mixed',
minimum_rts=minimum_rts,
low_rt_threshold=low_rt_threshold)
| true | true |
f70f54f0630586e54171415610e5bf23d7c87168 | 26,276 | py | Python | q2_feature_table/plugin_setup.py | cguccione/q2-feature-table | 90b75bb4848371bd640fe7c4baf14bc448d597c9 | [
"BSD-3-Clause"
] | null | null | null | q2_feature_table/plugin_setup.py | cguccione/q2-feature-table | 90b75bb4848371bd640fe7c4baf14bc448d597c9 | [
"BSD-3-Clause"
] | null | null | null | q2_feature_table/plugin_setup.py | cguccione/q2-feature-table | 90b75bb4848371bd640fe7c4baf14bc448d597c9 | [
"BSD-3-Clause"
] | null | null | null | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from qiime2.plugin import (Plugin, Int, Float, Range, Metadata, Str, Bool,
Choices, MetadataColumn, Categorical, List,
Citations, TypeMatch, TypeMap)
import q2_feature_table
from q2_types.feature_table import (
FeatureTable, Frequency, RelativeFrequency, PresenceAbsence, Composition)
from q2_types.feature_data import (
FeatureData, Sequence, Taxonomy, AlignedSequence)
from .examples import (feature_table_merge_example,
feature_table_merge_three_tables_example)
# Bibliography shared by all actions below; entries are referenced by key
# (e.g. citations['Weiss2017']) in individual registrations.
citations = Citations.load('citations.bib', package='q2_feature_table')
# Top-level plugin object. Every method/visualizer registered against it
# below is discovered by the QIIME 2 framework at load time.
plugin = Plugin(
    name='feature-table',
    version=q2_feature_table.__version__,
    website='https://github.com/qiime2/q2-feature-table',
    package='q2_feature_table',
    short_description=('Plugin for working with sample by feature tables.'),
    description=('This is a QIIME 2 plugin supporting operations on sample '
                 'by feature tables, such as filtering, merging, and '
                 'transforming tables.')
)
# Action: rarefy -- even subsampling of per-sample frequencies to a fixed
# depth. Fix: the sampling_depth help text contained the garbled phrase
# "will be not be included"; corrected to "will not be included".
plugin.methods.register_function(
    function=q2_feature_table.rarefy,
    inputs={'table': FeatureTable[Frequency]},
    parameters={'sampling_depth': Int % Range(1, None),
                'with_replacement': Bool},
    outputs=[('rarefied_table', FeatureTable[Frequency])],
    input_descriptions={'table': 'The feature table to be rarefied.'},
    parameter_descriptions={
        'sampling_depth': ('The total frequency that each sample should be '
                           'rarefied to. Samples where the sum of frequencies '
                           'is less than the sampling depth will not be '
                           'included in the resulting table unless '
                           'subsampling is performed with replacement.'),
        'with_replacement': ('Rarefy with replacement by sampling from the '
                             'multinomial distribution instead of rarefying '
                             'without replacement.')
    },
    output_descriptions={
        'rarefied_table': 'The resulting rarefied feature table.'
    },
    name='Rarefy table',
    description=("Subsample frequencies from all samples so that the sum of "
                 "frequencies in each sample is equal to sampling-depth."),
    citations=[citations['Weiss2017']]
)
# Action: subsample -- randomly retain whole samples or whole features
# (without replacement), unlike rarefy which subsamples counts.
plugin.methods.register_function(
    function=q2_feature_table.subsample,
    inputs={'table': FeatureTable[Frequency]},
    parameters={'subsampling_depth': Int % Range(1, None),
                'axis': Str % Choices(['sample', 'feature'])},
    outputs=[('sampled_table', FeatureTable[Frequency])],
    input_descriptions={'table': 'The feature table to be sampled.'},
    parameter_descriptions={
        'subsampling_depth': ('The total number of samples or features to be '
                              'randomly sampled. Samples or features that are '
                              'reduced to a zero sum will not be included in '
                              'the resulting table.'),
        'axis': ('The axis to sample over. If "sample" then samples will be '
                 'randomly selected to be retained. If "feature" then '
                 'a random set of features will be selected to be retained.')
    },
    output_descriptions={
        'sampled_table': 'The resulting subsampled feature table.'
    },
    name='Subsample table',
    description=("Randomly pick samples or features, without replacement, "
                 "from the table.")
)
# Action: presence-absence -- collapse (relative) frequencies to 0/1.
# Accepts either Frequency or RelativeFrequency tables.
plugin.methods.register_function(
    function=q2_feature_table.presence_absence,
    inputs={'table': FeatureTable[Frequency | RelativeFrequency]},
    parameters={},
    outputs=[('presence_absence_table', FeatureTable[PresenceAbsence])],
    input_descriptions={
        'table': ('The feature table to be converted into presence/absence '
                  'abundances.')
    },
    parameter_descriptions={},
    output_descriptions={
        'presence_absence_table': ('The resulting presence/absence feature '
                                   'table.')
    },
    name="Convert to presence/absence",
    description="Convert frequencies to binary values indicating presence or "
                "absence of a feature in a sample."
)
# Action: relative-frequency -- normalize each sample's counts so they sum
# to 1 (per-sample proportions).
plugin.methods.register_function(
    function=q2_feature_table.relative_frequency,
    inputs={'table': FeatureTable[Frequency]},
    parameters={},
    outputs=[
        ('relative_frequency_table',
         FeatureTable[RelativeFrequency])],
    input_descriptions={
        'table': 'The feature table to be converted into relative frequencies.'
    },
    parameter_descriptions={},
    output_descriptions={
        'relative_frequency_table': ('The resulting relative frequency '
                                     'feature table.')
    },
    name="Convert to relative frequencies",
    description="Convert frequencies to relative frequencies by dividing each "
                "frequency in a sample by the sum of frequencies in that "
                "sample."
)
# Action: transpose -- swap a table's sample and feature axes.
plugin.methods.register_function(
    function=q2_feature_table.transpose,
    inputs={'table': FeatureTable[Frequency]},
    parameters={},
    outputs=[('transposed_feature_table', FeatureTable[Frequency])],
    input_descriptions={'table': 'The feature table to be transposed.'},
    parameter_descriptions={},
    output_descriptions={
        'transposed_feature_table': 'The resulting transposed feature table.'
    },
    name='Transpose a feature table.',
    description=('Transpose the rows and columns '
                 '(typically samples and features) of a feature table.')
)
# Action: group -- collapse samples or features that share a metadata value
# into a single row/column, combining counts per the selected `mode`.
plugin.methods.register_function(
    function=q2_feature_table.group,
    inputs={'table': FeatureTable[Frequency]},
    parameters={
        'mode': Str % Choices({'sum', 'median-ceiling', 'mean-ceiling'}),
        'metadata': MetadataColumn[Categorical],
        'axis': Str % Choices({'sample', 'feature'})
    },
    outputs=[
        ('grouped_table', FeatureTable[Frequency])
    ],
    input_descriptions={
        'table': 'The table to group samples or features on.'
    },
    parameter_descriptions={
        'mode': 'How to combine samples or features within a group. `sum` '
                'will sum the frequencies across all samples or features '
                'within a group; `mean-ceiling` will take the ceiling of the '
                'mean of these frequencies; `median-ceiling` will take the '
                'ceiling of the median of these frequencies.',
        'metadata': 'A column defining the groups. Each unique value will '
                    'become a new ID for the table on the given `axis`.',
        'axis': 'Along which axis to group. Each ID in the given axis must '
                'exist in `metadata`.'
    },
    output_descriptions={
        'grouped_table': 'A table that has been grouped along the given '
                         '`axis`. IDs on that axis are replaced by values in '
                         'the `metadata` column.'
    },
    name="Group samples or features by a metadata column",
    description="Group samples or features in a feature table using metadata "
                "to define the mapping of IDs to a group."
)
# Type mapping for the merge action: the output table type mirrors the input
# table type, and the legal `overlap_method` choices depend on it.
# Summing relative frequencies would not yield valid proportions, so the
# 'sum' method is excluded for RelativeFrequency tables.
_freq_overlap_choices = sorted(q2_feature_table.overlap_methods())
_rel_freq_overlap_choices = sorted(
    q2_feature_table.overlap_methods() - {'sum'})
i_table, p_overlap_method, o_table = TypeMap({
    (FeatureTable[Frequency], Str % Choices(_freq_overlap_choices)):
        FeatureTable[Frequency],
    (FeatureTable[RelativeFrequency], Str % Choices(_rel_freq_overlap_choices)):
        FeatureTable[RelativeFrequency]
})
# Action: merge -- combine any number of feature tables; input/output types
# and the allowed overlap methods come from the TypeMap defined above it.
plugin.methods.register_function(
    function=q2_feature_table.merge,
    inputs={'tables': List[i_table]},
    parameters={
        'overlap_method': p_overlap_method
    },
    outputs=[
        ('merged_table', o_table)],
    input_descriptions={
        'tables': 'The collection of feature tables to be merged.',
    },
    parameter_descriptions={
        'overlap_method': 'Method for handling overlapping ids.',
    },
    output_descriptions={
        'merged_table': ('The resulting merged feature table.'),
    },
    name="Combine multiple tables",
    description="Combines feature tables using the `overlap_method` provided.",
    examples={'basic': feature_table_merge_example,
              'three_tables': feature_table_merge_three_tables_example},
)
# Action: merge-seqs -- union of feature sequence collections; on duplicate
# feature ids the first input's data wins.
plugin.methods.register_function(
    function=q2_feature_table.merge_seqs,
    inputs={'data': List[FeatureData[Sequence]]},
    parameters={},
    outputs=[
        ('merged_data', FeatureData[Sequence])],
    input_descriptions={
        'data': 'The collection of feature sequences to be merged.',
    },
    parameter_descriptions={},
    output_descriptions={
        'merged_data': ('The resulting collection of feature sequences '
                        'containing all feature sequences provided.')
    },
    name="Combine collections of feature sequences",
    description="Combines feature data objects which may or may not "
                "contain data for the same features. If different feature "
                "data is present for the same feature id in the inputs, "
                "the data from the first will be propagated to the result."
)
# Action: merge-taxa -- union of feature taxonomy collections; on duplicate
# feature ids the first input's data wins. Fix: the description said
# "Combines a pair of feature data objects", but the input is List[...] and
# accepts any number of collections; wording aligned with merge-seqs.
plugin.methods.register_function(
    function=q2_feature_table.merge_taxa,
    inputs={'data': List[FeatureData[Taxonomy]]},
    parameters={},
    outputs=[
        ('merged_data', FeatureData[Taxonomy])],
    input_descriptions={
        'data': 'The collection of feature taxonomies to be merged.',
    },
    parameter_descriptions={},
    output_descriptions={
        'merged_data': ('The resulting collection of feature taxonomies '
                        'containing all feature taxonomies provided.')
    },
    name="Combine collections of feature taxonomies",
    description="Combines feature data objects which may or may not "
                "contain data for the same features. If different feature "
                "data is present for the same feature id in the inputs, "
                "the data from the first will be propagated to the result."
)
# T1 makes the following actions polymorphic: the output FeatureTable
# variant always matches the input variant.
T1 = TypeMatch([Frequency, RelativeFrequency, PresenceAbsence, Composition])
# Action: rename-ids -- replace sample or feature ids using a metadata
# column as the old-id -> new-id mapping.
plugin.methods.register_function(
    function=q2_feature_table.rename_ids,
    inputs={
        'table': FeatureTable[T1],
    },
    parameters={
        'metadata': MetadataColumn[Categorical],
        'strict': Bool,
        'axis': Str % Choices({'sample', 'feature'})
    },
    outputs=[
        ('renamed_table', FeatureTable[T1])
    ],
    input_descriptions={
        'table': 'The table to be renamed',
    },
    parameter_descriptions={
        'metadata': 'A metadata column defining the new ids. Each original id '
                    'must map to a new unique id. If strict mode is used, '
                    'then every id in the original table must have a new id.',
        'strict': 'Whether the naming needs to be strict (each id in '
                  'the table must have a new id). Otherwise, only the '
                  'ids described in `metadata` will be renamed and '
                  'the others will keep their original id names.',
        'axis': 'Along which axis to rename the ids.',
    },
    output_descriptions={
        'renamed_table': 'A table which has new ids, where the ids are '
                         'replaced by values in the `metadata` column.',
    },
    name='Renames sample or feature ids in a table',
    description='Renames the sample or feature ids in a feature table using '
                'metadata to define the new ids.',
)
# TODO: constrain min/max frequency when optional is handled by typemap
# Action: filter-samples -- drop samples by frequency/feature-count bounds
# and/or metadata (optionally via a SQLite WHERE clause, or inverted with
# exclude_ids).
plugin.methods.register_function(
    function=q2_feature_table.filter_samples,
    inputs={'table': FeatureTable[T1]},
    parameters={'min_frequency': Int,
                'max_frequency': Int,
                'min_features': Int,
                'max_features': Int,
                'metadata': Metadata,
                'where': Str,
                'exclude_ids': Bool,
                'filter_empty_features': Bool},
    outputs=[('filtered_table', FeatureTable[T1])],
    input_descriptions={
        'table': 'The feature table from which samples should be filtered.'
    },
    parameter_descriptions={
        'min_frequency': ('The minimum total frequency that a sample must '
                          'have to be retained.'),
        'max_frequency': ('The maximum total frequency that a sample can '
                          'have to be retained. If no value is provided '
                          'this will default to infinity (i.e., no maximum '
                          'frequency filter will be applied).'),
        'min_features': ('The minimum number of features that a sample must '
                         'have to be retained.'),
        'max_features': ('The maximum number of features that a sample can '
                         'have to be retained. If no value is provided '
                         'this will default to infinity (i.e., no maximum '
                         'feature filter will be applied).'),
        'metadata': 'Sample metadata used with `where` parameter when '
                    'selecting samples to retain, or with `exclude_ids` '
                    'when selecting samples to discard.',
        'where': 'SQLite WHERE clause specifying sample metadata criteria '
                 'that must be met to be included in the filtered feature '
                 'table. If not provided, all samples in `metadata` that are '
                 'also in the feature table will be retained.',
        'exclude_ids': 'If true, the samples selected by `metadata` or '
                       '`where` parameters will be excluded from the filtered '
                       'table instead of being retained.',
        'filter_empty_features': 'If true, features which are not present in '
                                 'any retained samples are dropped.',
    },
    output_descriptions={
        'filtered_table': 'The resulting feature table filtered by sample.'
    },
    name="Filter samples from table",
    description="Filter samples from table based on frequency and/or "
                "metadata. Any features with a frequency of zero after sample "
                "filtering will also be removed. See the filtering tutorial "
                "on https://docs.qiime2.org for additional details."
)
# Action: filter-features-conditionally -- keep a feature only if its
# relative abundance reaches `abundance` in at least `prevalence` fraction
# of samples.
plugin.methods.register_function(
    function=q2_feature_table.filter_features_conditionally,
    inputs={'table': FeatureTable[T1]},
    parameters={'prevalence': Float % Range(0, 1),
                'abundance': Float % Range(0, 1)
                },
    outputs=[('filtered_table', FeatureTable[T1])],
    input_descriptions={
        'table': 'The feature table from which features should be filtered.'
    },
    parameter_descriptions={
        'abundance': ('The minimum relative abundance for a feature to be '
                      'retained.'),
        'prevalence': ('The minimum portion of samples that a feature '
                       'must have a relative abundance of at least '
                       '`abundance` to be retained.')
    },
    output_descriptions={
        'filtered_table': 'The resulting feature table filtered by feature.'
    },
    name="Filter features from a table based on abundance and prevalence",
    description=("Filter features based on the relative abundance in a "
                 "certain portion of samples (i.e., features must have a "
                 "relative abundance of at least `abundance` in at least "
                 "`prevalence` number of samples). Any samples with a "
                 "frequency of zero after feature filtering will also be "
                 "removed.")
)
# Action: filter-features -- mirror of filter-samples on the feature axis:
# drop features by frequency/sample-count bounds and/or metadata.
plugin.methods.register_function(
    function=q2_feature_table.filter_features,
    inputs={'table': FeatureTable[Frequency]},
    parameters={'min_frequency': Int,
                'max_frequency': Int,
                'min_samples': Int,
                'max_samples': Int,
                'metadata': Metadata,
                'where': Str,
                'exclude_ids': Bool,
                'filter_empty_samples': Bool},
    outputs=[('filtered_table', FeatureTable[Frequency])],
    input_descriptions={
        'table': 'The feature table from which features should be filtered.'
    },
    parameter_descriptions={
        'min_frequency': ('The minimum total frequency that a feature must '
                          'have to be retained.'),
        'max_frequency': ('The maximum total frequency that a feature can '
                          'have to be retained. If no value is provided '
                          'this will default to infinity (i.e., no maximum '
                          'frequency filter will be applied).'),
        'min_samples': ('The minimum number of samples that a feature must '
                        'be observed in to be retained.'),
        'max_samples': ('The maximum number of samples that a feature can '
                        'be observed in to be retained. If no value is '
                        'provided this will default to infinity (i.e., no '
                        'maximum sample filter will be applied).'),
        'metadata': 'Feature metadata used with `where` parameter when '
                    'selecting features to retain, or with `exclude_ids` '
                    'when selecting features to discard.',
        'where': 'SQLite WHERE clause specifying feature metadata criteria '
                 'that must be met to be included in the filtered feature '
                 'table. If not provided, all features in `metadata` that are '
                 'also in the feature table will be retained.',
        'exclude_ids': 'If true, the features selected by `metadata` or '
                       '`where` parameters will be excluded from the filtered '
                       'table instead of being retained.',
        'filter_empty_samples': 'If true, drop any samples where none of the '
                                'retained features are present.',
    },
    output_descriptions={
        'filtered_table': 'The resulting feature table filtered by feature.'
    },
    name="Filter features from table",
    description="Filter features from table based on frequency and/or "
                "metadata. Any samples with a frequency of zero after feature "
                "filtering will also be removed. See the filtering tutorial "
                "on https://docs.qiime2.org for additional details."
)
# T2: the output sequence artifact type matches the input (aligned or not).
T2 = TypeMatch([Sequence, AlignedSequence])
# Action: filter-seqs -- subset feature sequences by ids from a table OR
# from metadata (the two id sources are mutually exclusive per description).
plugin.methods.register_function(
    function=q2_feature_table.filter_seqs,
    inputs={
        'data': FeatureData[T2],
        'table': FeatureTable[Frequency],
    },
    parameters={
        'metadata': Metadata,
        'where': Str,
        'exclude_ids': Bool
    },
    outputs=[('filtered_data', FeatureData[T2])],
    input_descriptions={
        'data': 'The sequences from which features should be filtered.',
        'table': 'Table containing feature ids used for id-based filtering.'
    },
    parameter_descriptions={
        'metadata': 'Feature metadata used for id-based filtering, with '
                    '`where` parameter when selecting features to retain, or '
                    'with `exclude_ids` when selecting features to discard.',
        'where': 'SQLite WHERE clause specifying feature metadata criteria '
                 'that must be met to be included in the filtered feature '
                 'table. If not provided, all features in `metadata` that are '
                 'also in the sequences will be retained.',
        'exclude_ids': 'If true, the features selected by the `metadata` '
                       '(with or without the `where` parameter) or `table` '
                       'parameter will be excluded from the filtered '
                       'sequences instead of being retained.'
    },
    output_descriptions={
        'filtered_data': 'The resulting filtered sequences.'
    },
    name="Filter features from sequences",
    description="Filter features from sequences based on a feature table or "
                "metadata. See the filtering tutorial on "
                "https://docs.qiime2.org for additional details. This method "
                "can filter based on ids in a table or a metadata file, but "
                "not both (i.e., the table and metadata options are mutually "
                "exclusive)."
)
# Visualizer: summarize -- table overview (accepts any of the three
# frequency-like table variants); sample metadata is optional context.
plugin.visualizers.register_function(
    function=q2_feature_table.summarize,
    inputs={'table': FeatureTable[Frequency | RelativeFrequency |
                                  PresenceAbsence]},
    parameters={'sample_metadata': Metadata},
    input_descriptions={'table': 'The feature table to be summarized.'},
    parameter_descriptions={'sample_metadata': 'The sample metadata.'},
    name="Summarize table",
    description="Generate visual and tabular summaries of a feature table."
)
# Visualizer: tabulate-seqs -- per-feature sequence table with NCBI BLAST
# links (hence the NCBI citations).
plugin.visualizers.register_function(
    function=q2_feature_table.tabulate_seqs,
    inputs={'data': FeatureData[Sequence | AlignedSequence]},
    parameters={},
    input_descriptions={'data': 'The feature sequences to be tabulated.'},
    parameter_descriptions={},
    name='View sequence associated with each feature',
    description="Generate tabular view of feature identifier to sequence "
                "mapping, including links to BLAST each sequence against "
                "the NCBI nt database.",
    citations=[citations['NCBI'], citations['NCBI-BLAST']]
)
# Visualizer: core-features -- features present in at least a given fraction
# of samples, swept over `steps` fractions in [min_fraction, max_fraction].
plugin.visualizers.register_function(
    function=q2_feature_table.core_features,
    inputs={
        'table': FeatureTable[Frequency]
    },
    parameters={
        'min_fraction': Float % Range(0.0, 1.0, inclusive_start=False),
        'max_fraction': Float % Range(0.0, 1.0, inclusive_end=True),
        'steps': Int % Range(2, None)
    },
    name='Identify core features in table',
    description=('Identify "core" features, which are features observed in a '
                 'user-defined fraction of the samples. Since the core '
                 'features are a function of the fraction of samples that the '
                 'feature must be observed in to be considered core, this is '
                 'computed over a range of fractions defined by the '
                 '`min_fraction`, `max_fraction`, and `steps` parameters.'),
    input_descriptions={
        'table': 'The feature table to use in core features calculations.'
    },
    parameter_descriptions={
        'min_fraction': 'The minimum fraction of samples that a feature must '
                        'be observed in for that feature to be considered a '
                        'core feature.',
        'max_fraction': 'The maximum fraction of samples that a feature must '
                        'be observed in for that feature to be considered a '
                        'core feature.',
        'steps': 'The number of steps to take between `min_fraction` and '
                 '`max_fraction` for core features calculations. This '
                 'parameter has no effect if `min_fraction` and '
                 '`max_fraction` are the same value.'
    }
)
# Visualizer: heatmap -- clustered heatmap of a frequency table, with
# optional sample/feature annotations from metadata. Fix: user-facing help
# for `normalize` misspelled "psuedocount"; corrected to "pseudocount".
plugin.visualizers.register_function(
    function=q2_feature_table.heatmap,
    inputs={
        'table': FeatureTable[Frequency]
    },
    parameters={
        'sample_metadata': MetadataColumn[Categorical],
        'feature_metadata': MetadataColumn[Categorical],
        'normalize': Bool,
        'title': Str,
        'metric': Str % Choices(q2_feature_table.heatmap_choices['metric']),
        'method': Str % Choices(q2_feature_table.heatmap_choices['method']),
        'cluster': Str % Choices(q2_feature_table.heatmap_choices['cluster']),
        'color_scheme': Str % Choices(
            q2_feature_table.heatmap_choices['color_scheme']),
    },
    name='Generate a heatmap representation of a feature table',
    description='Generate a heatmap representation of a feature table with '
                'optional clustering on both the sample and feature axes.\n\n'
                'Tip: To generate a heatmap containing taxonomic annotations, '
                'use `qiime taxa collapse` to collapse the feature table at '
                'the desired taxonomic level.',
    input_descriptions={
        'table': 'The feature table to visualize.'
    },
    parameter_descriptions={
        'sample_metadata': 'Annotate the sample IDs with these sample '
                           'metadata values. When metadata is present and '
                           '`cluster`=\'feature\', samples will be sorted by '
                           'the metadata values.',
        'feature_metadata': 'Annotate the feature IDs with these feature '
                            'metadata values. When metadata is present and '
                            '`cluster`=\'sample\', features will be sorted by '
                            'the metadata values.',
        'normalize': 'Normalize the feature table by adding a pseudocount '
                     'of 1 and then taking the log10 of the table.',
        'title': 'Optional custom plot title.',
        'metric': 'Metrics exposed by seaborn (see http://seaborn.pydata.org/'
                  'generated/seaborn.clustermap.html#seaborn.clustermap for '
                  'more detail).',
        'method': 'Clustering methods exposed by seaborn (see http://seaborn.'
                  'pydata.org/generated/seaborn.clustermap.html#seaborn.clust'
                  'ermap for more detail).',
        'cluster': 'Specify which axes to cluster.',
        'color_scheme': 'The matplotlib colorscheme to generate the heatmap '
                        'with.',
    },
    citations=[citations['Hunter2007Matplotlib']]
)
| 45.460208 | 79 | 0.614439 |
from qiime2.plugin import (Plugin, Int, Float, Range, Metadata, Str, Bool,
Choices, MetadataColumn, Categorical, List,
Citations, TypeMatch, TypeMap)
import q2_feature_table
from q2_types.feature_table import (
FeatureTable, Frequency, RelativeFrequency, PresenceAbsence, Composition)
from q2_types.feature_data import (
FeatureData, Sequence, Taxonomy, AlignedSequence)
from .examples import (feature_table_merge_example,
feature_table_merge_three_tables_example)
citations = Citations.load('citations.bib', package='q2_feature_table')
plugin = Plugin(
name='feature-table',
version=q2_feature_table.__version__,
website='https://github.com/qiime2/q2-feature-table',
package='q2_feature_table',
short_description=('Plugin for working with sample by feature tables.'),
description=('This is a QIIME 2 plugin supporting operations on sample '
'by feature tables, such as filtering, merging, and '
'transforming tables.')
)
plugin.methods.register_function(
function=q2_feature_table.rarefy,
inputs={'table': FeatureTable[Frequency]},
parameters={'sampling_depth': Int % Range(1, None),
'with_replacement': Bool},
outputs=[('rarefied_table', FeatureTable[Frequency])],
input_descriptions={'table': 'The feature table to be rarefied.'},
parameter_descriptions={
'sampling_depth': ('The total frequency that each sample should be '
'rarefied to. Samples where the sum of frequencies '
'is less than the sampling depth will be not be '
'included in the resulting table unless '
'subsampling is performed with replacement.'),
'with_replacement': ('Rarefy with replacement by sampling from the '
'multinomial distribution instead of rarefying '
'without replacement.')
},
output_descriptions={
'rarefied_table': 'The resulting rarefied feature table.'
},
name='Rarefy table',
description=("Subsample frequencies from all samples so that the sum of "
"frequencies in each sample is equal to sampling-depth."),
citations=[citations['Weiss2017']]
)
plugin.methods.register_function(
function=q2_feature_table.subsample,
inputs={'table': FeatureTable[Frequency]},
parameters={'subsampling_depth': Int % Range(1, None),
'axis': Str % Choices(['sample', 'feature'])},
outputs=[('sampled_table', FeatureTable[Frequency])],
input_descriptions={'table': 'The feature table to be sampled.'},
parameter_descriptions={
'subsampling_depth': ('The total number of samples or features to be '
'randomly sampled. Samples or features that are '
'reduced to a zero sum will not be included in '
'the resulting table.'),
'axis': ('The axis to sample over. If "sample" then samples will be '
'randomly selected to be retained. If "feature" then '
'a random set of features will be selected to be retained.')
},
output_descriptions={
'sampled_table': 'The resulting subsampled feature table.'
},
name='Subsample table',
description=("Randomly pick samples or features, without replacement, "
"from the table.")
)
plugin.methods.register_function(
function=q2_feature_table.presence_absence,
inputs={'table': FeatureTable[Frequency | RelativeFrequency]},
parameters={},
outputs=[('presence_absence_table', FeatureTable[PresenceAbsence])],
input_descriptions={
'table': ('The feature table to be converted into presence/absence '
'abundances.')
},
parameter_descriptions={},
output_descriptions={
'presence_absence_table': ('The resulting presence/absence feature '
'table.')
},
name="Convert to presence/absence",
description="Convert frequencies to binary values indicating presence or "
"absence of a feature in a sample."
)
plugin.methods.register_function(
function=q2_feature_table.relative_frequency,
inputs={'table': FeatureTable[Frequency]},
parameters={},
outputs=[
('relative_frequency_table',
FeatureTable[RelativeFrequency])],
input_descriptions={
'table': 'The feature table to be converted into relative frequencies.'
},
parameter_descriptions={},
output_descriptions={
'relative_frequency_table': ('The resulting relative frequency '
'feature table.')
},
name="Convert to relative frequencies",
description="Convert frequencies to relative frequencies by dividing each "
"frequency in a sample by the sum of frequencies in that "
"sample."
)
plugin.methods.register_function(
function=q2_feature_table.transpose,
inputs={'table': FeatureTable[Frequency]},
parameters={},
outputs=[('transposed_feature_table',
FeatureTable[Frequency])],
input_descriptions={
'table': 'The feature table to be transposed.'
},
parameter_descriptions={},
output_descriptions={
'transposed_feature_table': ('The resulting transposed feature table.')
},
name='Transpose a feature table.',
description='Transpose the rows and columns '
'(typically samples and features) of a feature table.'
)
plugin.methods.register_function(
function=q2_feature_table.group,
inputs={'table': FeatureTable[Frequency]},
parameters={
'mode': Str % Choices({'sum', 'median-ceiling', 'mean-ceiling'}),
'metadata': MetadataColumn[Categorical],
'axis': Str % Choices({'sample', 'feature'})
},
outputs=[
('grouped_table', FeatureTable[Frequency])
],
input_descriptions={
'table': 'The table to group samples or features on.'
},
parameter_descriptions={
'mode': 'How to combine samples or features within a group. `sum` '
'will sum the frequencies across all samples or features '
'within a group; `mean-ceiling` will take the ceiling of the '
'mean of these frequencies; `median-ceiling` will take the '
'ceiling of the median of these frequencies.',
'metadata': 'A column defining the groups. Each unique value will '
'become a new ID for the table on the given `axis`.',
'axis': 'Along which axis to group. Each ID in the given axis must '
'exist in `metadata`.'
},
output_descriptions={
'grouped_table': 'A table that has been grouped along the given '
'`axis`. IDs on that axis are replaced by values in '
'the `metadata` column.'
},
name="Group samples or features by a metadata column",
description="Group samples or features in a feature table using metadata "
"to define the mapping of IDs to a group."
)
i_table, p_overlap_method, o_table = TypeMap({
(FeatureTable[Frequency],
Str % Choices(sorted(q2_feature_table.overlap_methods()))):
FeatureTable[Frequency],
(FeatureTable[RelativeFrequency],
# that option from the overlap methods
Str % Choices(sorted(q2_feature_table.overlap_methods() - {'sum'}))):
FeatureTable[RelativeFrequency]
})
plugin.methods.register_function(
function=q2_feature_table.merge,
inputs={'tables': List[i_table]},
parameters={
'overlap_method': p_overlap_method
},
outputs=[
('merged_table', o_table)],
input_descriptions={
'tables': 'The collection of feature tables to be merged.',
},
parameter_descriptions={
'overlap_method': 'Method for handling overlapping ids.',
},
output_descriptions={
'merged_table': ('The resulting merged feature table.'),
},
name="Combine multiple tables",
description="Combines feature tables using the `overlap_method` provided.",
examples={'basic': feature_table_merge_example,
'three_tables': feature_table_merge_three_tables_example},
)
plugin.methods.register_function(
function=q2_feature_table.merge_seqs,
inputs={'data': List[FeatureData[Sequence]]},
parameters={},
outputs=[
('merged_data', FeatureData[Sequence])],
input_descriptions={
'data': 'The collection of feature sequences to be merged.',
},
parameter_descriptions={},
output_descriptions={
'merged_data': ('The resulting collection of feature sequences '
'containing all feature sequences provided.')
},
name="Combine collections of feature sequences",
description="Combines feature data objects which may or may not "
"contain data for the same features. If different feature "
"data is present for the same feature id in the inputs, "
"the data from the first will be propagated to the result."
)
plugin.methods.register_function(
function=q2_feature_table.merge_taxa,
inputs={'data': List[FeatureData[Taxonomy]]},
parameters={},
outputs=[
('merged_data', FeatureData[Taxonomy])],
input_descriptions={
'data': 'The collection of feature taxonomies to be merged.',
},
parameter_descriptions={},
output_descriptions={
'merged_data': ('The resulting collection of feature taxonomies '
'containing all feature taxonomies provided.')
},
name="Combine collections of feature taxonomies",
description="Combines a pair of feature data objects which may or may not "
"contain data for the same features. If different feature "
"data is present for the same feature id in the inputs, "
"the data from the first will be propagated to the result."
)
T1 = TypeMatch([Frequency, RelativeFrequency, PresenceAbsence, Composition])
plugin.methods.register_function(
function=q2_feature_table.rename_ids,
inputs={
'table': FeatureTable[T1],
},
parameters={
'metadata': MetadataColumn[Categorical],
'strict': Bool,
'axis': Str % Choices({'sample', 'feature'})
},
outputs=[
('renamed_table', FeatureTable[T1])
],
input_descriptions={
'table': 'The table to be renamed',
},
parameter_descriptions={
'metadata': 'A metadata column defining the new ids. Each original id '
'must map to a new unique id. If strict mode is used, '
'then every id in the original table must have a new id.',
'strict': 'Whether the naming needs to be strict (each id in '
'the table must have a new id). Otherwise, only the '
'ids described in `metadata` will be renamed and '
'the others will keep their original id names.',
'axis': 'Along which axis to rename the ids.',
},
output_descriptions={
'renamed_table': 'A table which has new ids, where the ids are '
'replaced by values in the `metadata` column.',
},
name='Renames sample or feature ids in a table',
description='Renames the sample or feature ids in a feature table using '
'metadata to define the new ids.',
)
# TODO: constrain min/max frequency when optional is handled by typemap
# Register sample filtering: drop samples by total frequency, feature count,
# or sample-metadata criteria (SQLite WHERE / id lists).
plugin.methods.register_function(
    function=q2_feature_table.filter_samples,
    inputs={'table': FeatureTable[T1]},
    parameters={'min_frequency': Int,
                'max_frequency': Int,
                'min_features': Int,
                'max_features': Int,
                'metadata': Metadata,
                'where': Str,
                'exclude_ids': Bool,
                'filter_empty_features': Bool},
    outputs=[('filtered_table', FeatureTable[T1])],
    input_descriptions={
        'table': 'The feature table from which samples should be filtered.'
    },
    parameter_descriptions={
        'min_frequency': ('The minimum total frequency that a sample must '
                          'have to be retained.'),
        'max_frequency': ('The maximum total frequency that a sample can '
                          'have to be retained. If no value is provided '
                          'this will default to infinity (i.e., no maximum '
                          'frequency filter will be applied).'),
        'min_features': ('The minimum number of features that a sample must '
                         'have to be retained.'),
        'max_features': ('The maximum number of features that a sample can '
                         'have to be retained. If no value is provided '
                         'this will default to infinity (i.e., no maximum '
                         'feature filter will be applied).'),
        'metadata': 'Sample metadata used with `where` parameter when '
                    'selecting samples to retain, or with `exclude_ids` '
                    'when selecting samples to discard.',
        'where': 'SQLite WHERE clause specifying sample metadata criteria '
                 'that must be met to be included in the filtered feature '
                 'table. If not provided, all samples in `metadata` that are '
                 'also in the feature table will be retained.',
        'exclude_ids': 'If true, the samples selected by `metadata` or '
                       '`where` parameters will be excluded from the filtered '
                       'table instead of being retained.',
        'filter_empty_features': 'If true, features which are not present in '
                                 'any retained samples are dropped.',
    },
    output_descriptions={
        'filtered_table': 'The resulting feature table filtered by sample.'
    },
    name="Filter samples from table",
    description="Filter samples from table based on frequency and/or "
                "metadata. Any features with a frequency of zero after sample "
                "filtering will also be removed. See the filtering tutorial "
                "on https://docs.qiime2.org for additional details."
)
# Register conditional feature filtering: keep features meeting a minimum
# relative abundance in at least a minimum fraction of samples.
plugin.methods.register_function(
    function=q2_feature_table.filter_features_conditionally,
    inputs={'table': FeatureTable[T1]},
    parameters={'prevalence': Float % Range(0, 1),
                'abundance': Float % Range(0, 1)
                },
    outputs=[('filtered_table', FeatureTable[T1])],
    input_descriptions={
        'table': 'The feature table from which features should be filtered.'
    },
    parameter_descriptions={
        'abundance': ('The minimum relative abundance for a feature to be '
                      'retained.'),
        'prevalence': ('The minimum portion of samples that a feature '
                       'must have a relative abundance of at least '
                       '`abundance` to be retained.')
    },
    output_descriptions={
        'filtered_table': 'The resulting feature table filtered by feature.'
    },
    name="Filter features from a table based on abundance and prevalence",
    description=("Filter features based on the relative abundance in a "
                 "certain portion of samples (i.e., features must have a "
                 "relative abundance of at least `abundance` in at least "
                 "`prevalence` number of samples). Any samples with a "
                 "frequency of zero after feature filtering will also be "
                 "removed.")
)
# Register feature filtering: the feature-axis counterpart of filter_samples
# (frequency, sample-count, and feature-metadata criteria).
plugin.methods.register_function(
    function=q2_feature_table.filter_features,
    inputs={'table': FeatureTable[Frequency]},
    parameters={'min_frequency': Int,
                'max_frequency': Int,
                'min_samples': Int,
                'max_samples': Int,
                'metadata': Metadata,
                'where': Str,
                'exclude_ids': Bool,
                'filter_empty_samples': Bool},
    outputs=[('filtered_table', FeatureTable[Frequency])],
    input_descriptions={
        'table': 'The feature table from which features should be filtered.'
    },
    parameter_descriptions={
        'min_frequency': ('The minimum total frequency that a feature must '
                          'have to be retained.'),
        'max_frequency': ('The maximum total frequency that a feature can '
                          'have to be retained. If no value is provided '
                          'this will default to infinity (i.e., no maximum '
                          'frequency filter will be applied).'),
        'min_samples': ('The minimum number of samples that a feature must '
                        'be observed in to be retained.'),
        'max_samples': ('The maximum number of samples that a feature can '
                        'be observed in to be retained. If no value is '
                        'provided this will default to infinity (i.e., no '
                        'maximum sample filter will be applied).'),
        'metadata': 'Feature metadata used with `where` parameter when '
                    'selecting features to retain, or with `exclude_ids` '
                    'when selecting features to discard.',
        'where': 'SQLite WHERE clause specifying feature metadata criteria '
                 'that must be met to be included in the filtered feature '
                 'table. If not provided, all features in `metadata` that are '
                 'also in the feature table will be retained.',
        'exclude_ids': 'If true, the features selected by `metadata` or '
                       '`where` parameters will be excluded from the filtered '
                       'table instead of being retained.',
        'filter_empty_samples': 'If true, drop any samples where none of the '
                                'retained features are present.',
    },
    output_descriptions={
        'filtered_table': 'The resulting feature table filtered by feature.'
    },
    name="Filter features from table",
    description="Filter features from table based on frequency and/or "
                "metadata. Any samples with a frequency of zero after feature "
                "filtering will also be removed. See the filtering tutorial "
                "on https://docs.qiime2.org for additional details."
)
# T2 lets filter_seqs accept either unaligned or aligned sequence data and
# return the same variant it was given.
T2 = TypeMatch([Sequence, AlignedSequence])
# Register sequence filtering by feature-table ids OR feature metadata
# (mutually exclusive -- see the description below).
plugin.methods.register_function(
    function=q2_feature_table.filter_seqs,
    inputs={
        'data': FeatureData[T2],
        'table': FeatureTable[Frequency],
    },
    parameters={
        'metadata': Metadata,
        'where': Str,
        'exclude_ids': Bool
    },
    outputs=[('filtered_data', FeatureData[T2])],
    input_descriptions={
        'data': 'The sequences from which features should be filtered.',
        'table': 'Table containing feature ids used for id-based filtering.'
    },
    parameter_descriptions={
        'metadata': 'Feature metadata used for id-based filtering, with '
                    '`where` parameter when selecting features to retain, or '
                    'with `exclude_ids` when selecting features to discard.',
        'where': 'SQLite WHERE clause specifying feature metadata criteria '
                 'that must be met to be included in the filtered feature '
                 'table. If not provided, all features in `metadata` that are '
                 'also in the sequences will be retained.',
        'exclude_ids': 'If true, the features selected by the `metadata` '
                       '(with or without the `where` parameter) or `table` '
                       'parameter will be excluded from the filtered '
                       'sequences instead of being retained.'
    },
    output_descriptions={
        'filtered_data': 'The resulting filtered sequences.'
    },
    name="Filter features from sequences",
    description="Filter features from sequences based on a feature table or "
                "metadata. See the filtering tutorial on "
                "https://docs.qiime2.org for additional details. This method "
                "can filter based on ids in a table or a metadata file, but "
                "not both (i.e., the table and metadata options are mutually "
                "exclusive)."
)
# Register the feature-table summary visualizer.
plugin.visualizers.register_function(
    function=q2_feature_table.summarize,
    inputs={'table': FeatureTable[Frequency | RelativeFrequency |
                                  PresenceAbsence]},
    parameters={'sample_metadata': Metadata},
    input_descriptions={'table': 'The feature table to be summarized.'},
    parameter_descriptions={'sample_metadata': 'The sample metadata.'},
    name="Summarize table",
    description="Generate visual and tabular summaries of a feature table."
)
# Register the sequence tabulation visualizer (feature id -> sequence view
# with per-sequence NCBI BLAST links).
plugin.visualizers.register_function(
    function=q2_feature_table.tabulate_seqs,
    inputs={'data': FeatureData[Sequence | AlignedSequence]},
    parameters={},
    input_descriptions={'data': 'The feature sequences to be tabulated.'},
    parameter_descriptions={},
    name='View sequence associated with each feature',
    description="Generate tabular view of feature identifier to sequence "
                "mapping, including links to BLAST each sequence against "
                "the NCBI nt database.",
    citations=[citations['NCBI'], citations['NCBI-BLAST']]
)
# Register the core-features visualizer: features present in a user-defined
# fraction of samples, swept over [min_fraction, max_fraction] in `steps`.
plugin.visualizers.register_function(
    function=q2_feature_table.core_features,
    inputs={
        'table': FeatureTable[Frequency]
    },
    parameters={
        'min_fraction': Float % Range(0.0, 1.0, inclusive_start=False),
        'max_fraction': Float % Range(0.0, 1.0, inclusive_end=True),
        'steps': Int % Range(2, None)
    },
    name='Identify core features in table',
    description=('Identify "core" features, which are features observed in a '
                 'user-defined fraction of the samples. Since the core '
                 'features are a function of the fraction of samples that the '
                 'feature must be observed in to be considered core, this is '
                 'computed over a range of fractions defined by the '
                 '`min_fraction`, `max_fraction`, and `steps` parameters.'),
    input_descriptions={
        'table': 'The feature table to use in core features calculations.'
    },
    parameter_descriptions={
        'min_fraction': 'The minimum fraction of samples that a feature must '
                        'be observed in for that feature to be considered a '
                        'core feature.',
        'max_fraction': 'The maximum fraction of samples that a feature must '
                        'be observed in for that feature to be considered a '
                        'core feature.',
        'steps': 'The number of steps to take between `min_fraction` and '
                 '`max_fraction` for core features calculations. This '
                 'parameter has no effect if `min_fraction` and '
                 '`max_fraction` are the same value.'
    }
)
# Register the feature-table heatmap visualizer.
plugin.visualizers.register_function(
    function=q2_feature_table.heatmap,
    inputs={
        'table': FeatureTable[Frequency]
    },
    parameters={
        'sample_metadata': MetadataColumn[Categorical],
        'feature_metadata': MetadataColumn[Categorical],
        'normalize': Bool,
        'title': Str,
        'metric': Str % Choices(q2_feature_table.heatmap_choices['metric']),
        'method': Str % Choices(q2_feature_table.heatmap_choices['method']),
        'cluster': Str % Choices(q2_feature_table.heatmap_choices['cluster']),
        'color_scheme': Str % Choices(
            q2_feature_table.heatmap_choices['color_scheme']),
    },
    name='Generate a heatmap representation of a feature table',
    description='Generate a heatmap representation of a feature table with '
                'optional clustering on both the sample and feature axes.\n\n'
                'Tip: To generate a heatmap containing taxonomic annotations, '
                'use `qiime taxa collapse` to collapse the feature table at '
                'the desired taxonomic level.',
    input_descriptions={
        'table': 'The feature table to visualize.'
    },
    parameter_descriptions={
        'sample_metadata': 'Annotate the sample IDs with these sample '
                           'metadata values. When metadata is present and '
                           '`cluster`=\'feature\', samples will be sorted by '
                           'the metadata values.',
        'feature_metadata': 'Annotate the feature IDs with these feature '
                            'metadata values. When metadata is present and '
                            '`cluster`=\'sample\', features will be sorted by '
                            'the metadata values.',
        'normalize': 'Normalize the feature table by adding a psuedocount '
                     'of 1 and then taking the log10 of the table.',
        'title': 'Optional custom plot title.',
        # NOTE(review): the two URL strings below were left unterminated in
        # this copy of the file (everything from '#' to end-of-line had been
        # stripped, breaking the string literals); the '#seaborn.clustermap'
        # anchors are restored here so the module parses again.
        'metric': 'Metrics exposed by seaborn (see http://seaborn.pydata.org/'
                  'generated/seaborn.clustermap.html#seaborn.clustermap for '
                  'more detail).',
        'method': 'Clustering methods exposed by seaborn (see http://seaborn.'
                  'pydata.org/generated/seaborn.clustermap.html#seaborn.clust'
                  'ermap for more detail).',
        'cluster': 'Specify which axes to cluster.',
        'color_scheme': 'The matplotlib colorscheme to generate the heatmap '
                        'with.',
    },
    citations=[citations['Hunter2007Matplotlib']]
)
| true | true |
f70f557616fe80f5978c4f28c24fbbe3bf2fa1a3 | 2,795 | py | Python | resampling/foundations_for_inference/gender_discrimination.py | necromuralist/resampling | 0b48a51cb5f8e21a3f52508ecc74f12fa03d9b25 | [
"MIT"
] | null | null | null | resampling/foundations_for_inference/gender_discrimination.py | necromuralist/resampling | 0b48a51cb5f8e21a3f52508ecc74f12fa03d9b25 | [
"MIT"
] | null | null | null | resampling/foundations_for_inference/gender_discrimination.py | necromuralist/resampling | 0b48a51cb5f8e21a3f52508ecc74f12fa03d9b25 | [
"MIT"
] | null | null | null |
# python standard library
import sys
# third-party
import pandas
import matplotlib
import matplotlib.pyplot as plot
matplotlib.style.use('ggplot')
# Promotion counts from the gender-discrimination experiment:
# 24 files per gender; 21 of 24 "male" files promoted vs 14 of 24 "female".
GENDER_COUNT = 24
MALES_PROMOTED = 21
FEMALES_PROMOTED = 14
GENDER_DIFFERENCE = MALES_PROMOTED - FEMALES_PROMOTED
FEMALES_NOT_PROMOTED = GENDER_COUNT - FEMALES_PROMOTED
MALES_NOT_PROMOTED = GENDER_COUNT - MALES_PROMOTED
# 2x2 contingency table: promotion outcome (columns) by gender (rows).
experiment_data = pandas.DataFrame({"Promoted": [MALES_PROMOTED,
                                                 FEMALES_PROMOTED],
                                    "Not Promoted": [MALES_NOT_PROMOTED,
                                                     FEMALES_NOT_PROMOTED]},
                                   index='male female'.split(),
                                   columns=["Promoted", "Not Promoted"])
experiment_frame = experiment_data.copy()
# Add a per-row 'Total' column, then append a column-sum 'Total' row so the
# margins of the contingency table are available for the report below.
experiment_frame['Total'] = sum((experiment_frame[column] for column in
                                 experiment_frame.columns))
last_row = pandas.DataFrame(experiment_frame.sum()).transpose()
last_row.index = pandas.Index(['Total'])
experiment_frame = pandas.concat((experiment_frame, last_row))
class IndentOutput(object):
    """File-like stand-in that indents ``DataFrame.to_csv`` output.

    ``pandas.DataFrame.to_csv`` only requires a ``write`` method on the
    object it is given, so this class can be passed to ``to_csv`` to emit
    reStructuredText csv-table rows, indented, on standard output.
    """
    @classmethod
    def write(cls, line):
        """Echo ``line`` to stdout with three spaces prepended."""
        sys.stdout.write(f"   {line}")
# Emit the counts table as an RST csv-table directive.
print('.. csv-table:: Experiment Outcome')
print('   :header: ,{0}\n'.format(','.join(experiment_frame.columns)))
experiment_frame.to_csv(IndentOutput, header=False)
# Emit the same table as proportions (each cell divided by its group size).
print('.. csv-table:: Experiment proportions')
print('   :header: ,{0}\n'.format(','.join(experiment_frame.columns)))
totals = pandas.Series([GENDER_COUNT, GENDER_COUNT, GENDER_COUNT * 2],
                       index='male female Total'.split())
total_frame = pandas.DataFrame({'Promoted': totals,
                                "Not Promoted": totals,
                                "Total": totals})
proportions = experiment_frame/total_frame
proportions.to_csv(IndentOutput, header=False,
                   columns=['Promoted', 'Not Promoted', 'Total'],
                   float_format="%.3f")
# Bar chart of the raw counts, saved for the RST image directive below.
path = 'figures/gender_experiment_bar.svg'
figure = plot.figure()
axe = figure.gca()
experiment_data.plot(kind='bar', ax=axe)
figure.savefig(path)
print('.. image:: {0}'.format(path))
# LaTeX lines for the difference in promotion proportions between genders.
print("   \\frac{{{0}}}{{{2}}}- \\frac{{{1}}}{{{2}}}&=\\frac{{{3}}}{{{2}}}\\\\".format(MALES_PROMOTED,
                                                                                      FEMALES_PROMOTED,
                                                                                      GENDER_COUNT,
                                                                                      GENDER_DIFFERENCE))
print("   &\\approx {:.3f}\\\\".format(GENDER_DIFFERENCE/GENDER_COUNT))
import sys
import pandas
import matplotlib
import matplotlib.pyplot as plot
matplotlib.style.use('ggplot')
GENDER_COUNT = 24
MALES_PROMOTED = 21
FEMALES_PROMOTED = 14
GENDER_DIFFERENCE = MALES_PROMOTED - FEMALES_PROMOTED
FEMALES_NOT_PROMOTED = GENDER_COUNT - FEMALES_PROMOTED
MALES_NOT_PROMOTED = GENDER_COUNT - MALES_PROMOTED
experiment_data = pandas.DataFrame({"Promoted": [MALES_PROMOTED,
FEMALES_PROMOTED],
"Not Promoted": [MALES_NOT_PROMOTED,
FEMALES_NOT_PROMOTED]},
index='male female'.split(),
columns=["Promoted", "Not Promoted"])
experiment_frame = experiment_data.copy()
experiment_frame['Total'] = sum((experiment_frame[column] for column in
experiment_frame.columns))
last_row = pandas.DataFrame(experiment_frame.sum()).transpose()
last_row.index = pandas.Index(['Total'])
experiment_frame = pandas.concat((experiment_frame, last_row))
class IndentOutput(object):
@classmethod
def write(cls, line):
sys.stdout.write(" {0}".format(line))
print('.. csv-table:: Experiment Outcome')
print(' :header: ,{0}\n'.format(','.join(experiment_frame.columns)))
experiment_frame.to_csv(IndentOutput, header=False)
print('.. csv-table:: Experiment proportions')
print(' :header: ,{0}\n'.format(','.join(experiment_frame.columns)))
totals = pandas.Series([GENDER_COUNT, GENDER_COUNT, GENDER_COUNT * 2],
index='male female Total'.split())
total_frame = pandas.DataFrame({'Promoted': totals,
"Not Promoted": totals,
"Total": totals})
proportions = experiment_frame/total_frame
proportions.to_csv(IndentOutput, header=False,
columns=['Promoted', 'Not Promoted', 'Total'],
float_format="%.3f")
path = 'figures/gender_experiment_bar.svg'
figure = plot.figure()
axe = figure.gca()
experiment_data.plot(kind='bar', ax=axe)
figure.savefig(path)
print('.. image:: {0}'.format(path))
print(" \\frac{{{0}}}{{{2}}}- \\frac{{{1}}}{{{2}}}&=\\frac{{{3}}}{{{2}}}\\\\".format(MALES_PROMOTED,
FEMALES_PROMOTED,
GENDER_COUNT,
GENDER_DIFFERENCE))
print(" &\\approx {:.3f}\\\\".format(GENDER_DIFFERENCE/GENDER_COUNT))
| true | true |
f70f56d578d8a75e9fea3ab56d50aba5f5fd47bd | 10,462 | py | Python | utils/manipulator.py | hugeinteger/InterFaceGAN | 59e75c0b4dcdbcea693b31ff11cf239c39e14ed1 | [
"MIT"
] | null | null | null | utils/manipulator.py | hugeinteger/InterFaceGAN | 59e75c0b4dcdbcea693b31ff11cf239c39e14ed1 | [
"MIT"
] | null | null | null | utils/manipulator.py | hugeinteger/InterFaceGAN | 59e75c0b4dcdbcea693b31ff11cf239c39e14ed1 | [
"MIT"
] | null | null | null | # python3.7
"""Utility functions for latent codes manipulation."""
import numpy as np
from sklearn import svm
from .logger import setup_logger
__all__ = ['train_boundary', 'project_boundary', 'linear_interpolate']
def train_boundary(latent_codes,
                   scores,
                   chosen_num_or_ratio=0.02,
                   split_ratio=0.7,
                   invalid_value=None,
                   logger=None):
  """Trains boundary in latent space with offline predicted attribute scores.

  Given a collection of latent codes and the attribute scores predicted from the
  corresponding images, this function will train a linear SVM by treating it as
  a bi-classification problem. Basically, the samples with highest attribute
  scores are treated as positive samples, while those with lowest scores as
  negative. For now, the latent code can ONLY be with 1 dimension.

  NOTE: The returned boundary is with shape (1, latent_space_dim), and also
  normalized with unit norm.

  Args:
    latent_codes: Input latent codes as training data.
    scores: Input attribute scores used to generate training labels.
    chosen_num_or_ratio: How many samples will be chosen as positive (negative)
      samples. If this field lies in range (0, 0.5], `chosen_num_or_ratio *
      latent_codes_num` will be used. Otherwise, `min(chosen_num_or_ratio,
      0.5 * latent_codes_num)` will be used. (default: 0.02)
    split_ratio: Ratio to split training and validation sets. (default: 0.7)
    invalid_value: Samples whose score equals this value are dropped before
      training. (default: None)
    logger: Logger for recording log messages. If set as `None`, a default
      logger, which prints messages from all levels to screen, will be created.
      (default: None)

  Returns:
    A decision boundary with type `numpy.ndarray`.

  Raises:
    ValueError: If the input `latent_codes` or `scores` are with invalid format.
  """
  if not logger:
    logger = setup_logger(work_dir='', logger_name='train_boundary')

  if (not isinstance(latent_codes, np.ndarray) or
      not len(latent_codes.shape) == 2):
    raise ValueError(f'Input `latent_codes` should be with type'
                     f'`numpy.ndarray`, and shape [num_samples, '
                     f'latent_space_dim]!')
  num_samples = latent_codes.shape[0]
  latent_space_dim = latent_codes.shape[1]
  if (not isinstance(scores, np.ndarray) or not len(scores.shape) == 2 or
      not scores.shape[0] == num_samples or not scores.shape[1] == 1):
    raise ValueError(f'Input `scores` should be with type `numpy.ndarray`, and '
                     f'shape [num_samples, 1], where `num_samples` should be '
                     f'exactly same as that of input `latent_codes`!')
  if chosen_num_or_ratio <= 0:
    raise ValueError(f'Input `chosen_num_or_ratio` should be positive, '
                     f'but {chosen_num_or_ratio} received!')

  logger.info(f'Filtering training data.')
  if invalid_value is not None:
    # Filter with a 1-D mask so `latent_codes` keeps its [N, D] shape and
    # `scores` keeps its [N, 1] shape. (Indexing the [N, D] array with the
    # raw [N, 1] boolean mask would flatten the result and break the
    # axis-0 sorting below.)
    valid_mask = scores[:, 0] != invalid_value
    latent_codes = latent_codes[valid_mask]
    scores = scores[valid_mask]

  logger.info(f'Sorting scores to get positive and negative samples.')
  # Sort descending by score: positives at the front, negatives at the back.
  sorted_idx = np.argsort(scores, axis=0)[::-1, 0]
  latent_codes = latent_codes[sorted_idx]
  scores = scores[sorted_idx]
  num_samples = latent_codes.shape[0]
  if 0 < chosen_num_or_ratio <= 1:
    chosen_num = int(num_samples * chosen_num_or_ratio)
  else:
    chosen_num = int(chosen_num_or_ratio)
  chosen_num = min(chosen_num, num_samples // 2)

  logger.info(f'Splitting training and validation sets:')
  train_num = int(chosen_num * split_ratio)
  val_num = chosen_num - train_num
  # Positive samples.
  positive_idx = np.arange(chosen_num)
  np.random.shuffle(positive_idx)
  positive_train = latent_codes[:chosen_num][positive_idx[:train_num]]
  positive_val = latent_codes[:chosen_num][positive_idx[train_num:]]
  # Negative samples.
  negative_idx = np.arange(chosen_num)
  np.random.shuffle(negative_idx)
  negative_train = latent_codes[-chosen_num:][negative_idx[:train_num]]
  negative_val = latent_codes[-chosen_num:][negative_idx[train_num:]]
  # Training set. (`np.int` was removed in NumPy 1.24; use the builtin.)
  train_data = np.concatenate([positive_train, negative_train], axis=0)
  train_label = np.concatenate([np.ones(train_num, dtype=int),
                                np.zeros(train_num, dtype=int)], axis=0)
  logger.info(f'  Training: {train_num} positive, {train_num} negative.')
  # Validation set.
  val_data = np.concatenate([positive_val, negative_val], axis=0)
  val_label = np.concatenate([np.ones(val_num, dtype=int),
                              np.zeros(val_num, dtype=int)], axis=0)
  logger.info(f'  Validation: {val_num} positive, {val_num} negative.')
  # Remaining (middle) set, labeled by thresholding at the score midpoint.
  remaining_num = num_samples - chosen_num * 2
  remaining_data = latent_codes[chosen_num:-chosen_num]
  remaining_scores = scores[chosen_num:-chosen_num]
  decision_value = (scores[0] + scores[-1]) / 2
  remaining_label = np.ones(remaining_num, dtype=int)
  remaining_label[remaining_scores.ravel() < decision_value] = 0
  remaining_positive_num = np.sum(remaining_label == 1)
  remaining_negative_num = np.sum(remaining_label == 0)
  logger.info(f'  Remaining: {remaining_positive_num} positive, '
              f'{remaining_negative_num} negative.')

  logger.info(f'Training boundary.')
  clf = svm.SVC(kernel='linear')
  classifier = clf.fit(train_data, train_label)
  logger.info(f'Finish training.')

  if val_num:
    val_prediction = classifier.predict(val_data)
    correct_num = np.sum(val_label == val_prediction)
    logger.info(f'Accuracy for validation set: '
                f'{correct_num} / {val_num * 2} = '
                f'{correct_num / (val_num * 2):.6f}')
  if remaining_num:
    remaining_prediction = classifier.predict(remaining_data)
    correct_num = np.sum(remaining_label == remaining_prediction)
    logger.info(f'Accuracy for remaining set: '
                f'{correct_num} / {remaining_num} = '
                f'{correct_num / remaining_num:.6f}')

  # The SVM's separating hyperplane normal, normalized to unit norm.
  a = classifier.coef_.reshape(1, latent_space_dim).astype(np.float32)
  return a / np.linalg.norm(a)
def project_boundary(primal, *args):
  """Projects `primal` onto the orthogonal complement of condition boundaries.

  All boundaries are assumed to be unit-norm row vectors with shape
  [1, latent_space_dim]. With no conditions, `primal` is returned unchanged.
  With one or two conditions, the component of `primal` lying in the span of
  the condition boundaries is removed and the remainder is re-normalized to
  unit norm, yielding a boundary orthogonal to every condition.

  Args:
    primal: The primal boundary.
    *args: Up to two condition boundaries.

  Returns:
    The projected, unit-norm boundary.

  Raises:
    NotImplementedError: If more than two condition boundaries are given.
  """
  num_conditions = len(args)
  if num_conditions > 2:
    raise NotImplementedError(f'This function supports projecting with at most '
                              f'two conditions.')
  assert len(primal.shape) == 2 and primal.shape[0] == 1

  if num_conditions == 0:
    return primal

  for condition in args:
    assert (len(condition.shape) == 2 and condition.shape[0] == 1 and
            condition.shape[1] == primal.shape[1])

  if num_conditions == 1:
    condition = args[0]
    # Subtract the component of `primal` along the (unit-norm) condition.
    projected = primal - primal.dot(condition.T) * condition
    return projected / np.linalg.norm(projected)

  # Two conditions: solve for the coefficients of the (possibly
  # non-orthogonal) condition pair, then subtract both components. The small
  # epsilon guards against division by zero for near-parallel conditions.
  first, second = args
  dot_p1 = primal.dot(first.T)
  dot_p2 = primal.dot(second.T)
  dot_12 = first.dot(second.T)
  denominator = 1 - dot_12 ** 2 + 1e-8
  coeff_1 = (dot_p1 - dot_p2 * dot_12) / denominator
  coeff_2 = (dot_p2 - dot_p1 * dot_12) / denominator
  projected = primal - coeff_1 * first - coeff_2 * second
  return projected / np.linalg.norm(projected)
def linear_interpolate(latent_code,
                       boundary,
                       start_distance=-3.0,
                       end_distance=3.0,
                       steps=10):
  """Moves a latent code along a boundary's normal direction.

  Produces `steps` latent codes evenly spaced between signed distances
  `start_distance` and `end_distance` from `boundary`. For an input of shape
  [1, latent_space_dim] the output is [steps, latent_space_dim]; a
  [1, num_layers, latent_space_dim] input (W+ space in StyleGAN) yields
  [steps, num_layers, latent_space_dim] with every layer shifted identically.

  NOTE: Distance is sign sensitive.

  Args:
    latent_code: The input latent code for manipulation.
    boundary: The semantic boundary as reference (unit-norm, shape
      [1, latent_space_dim]).
    start_distance: The distance to the boundary where the manipulation
      starts. (default: -3.0)
    end_distance: The distance to the boundary where the manipulation ends.
      (default: 3.0)
    steps: Number of interpolation steps. (default: 10)
  """
  assert (latent_code.shape[0] == 1 and boundary.shape[0] == 1 and
          len(boundary.shape) == 2 and
          boundary.shape[1] == latent_code.shape[-1])

  distances = np.linspace(start_distance, end_distance, steps)
  num_dims = len(latent_code.shape)
  if num_dims == 2:
    # Offset by the code's current projection so distances are measured
    # from the boundary itself rather than from the origin.
    offsets = (distances - latent_code.dot(boundary.T)).reshape(-1, 1)
    return latent_code + offsets.astype(np.float32) * boundary
  if num_dims == 3:
    offsets = distances.reshape(-1, 1, 1).astype(np.float32)
    return latent_code + offsets * boundary.reshape(1, 1, -1)
  raise ValueError(f'Input `latent_code` should be with shape '
                   f'[1, latent_space_dim] or [1, N, latent_space_dim] for '
                   f'W+ space in Style GAN!\n'
                   f'But {latent_code.shape} is received.')
| 42.877049 | 80 | 0.688014 |
import numpy as np
from sklearn import svm
from .logger import setup_logger
__all__ = ['train_boundary', 'project_boundary', 'linear_interpolate']
def train_boundary(latent_codes,
scores,
chosen_num_or_ratio=0.02,
split_ratio=0.7,
invalid_value=None,
logger=None):
if not logger:
logger = setup_logger(work_dir='', logger_name='train_boundary')
if (not isinstance(latent_codes, np.ndarray) or
not len(latent_codes.shape) == 2):
raise ValueError(f'Input `latent_codes` should be with type'
f'`numpy.ndarray`, and shape [num_samples, '
f'latent_space_dim]!')
num_samples = latent_codes.shape[0]
latent_space_dim = latent_codes.shape[1]
if (not isinstance(scores, np.ndarray) or not len(scores.shape) == 2 or
not scores.shape[0] == num_samples or not scores.shape[1] == 1):
raise ValueError(f'Input `scores` should be with type `numpy.ndarray`, and '
f'shape [num_samples, 1], where `num_samples` should be '
f'exactly same as that of input `latent_codes`!')
if chosen_num_or_ratio <= 0:
raise ValueError(f'Input `chosen_num_or_ratio` should be positive, '
f'but {chosen_num_or_ratio} received!')
logger.info(f'Filtering training data.')
if invalid_value is not None:
latent_codes = latent_codes[scores != invalid_value]
scores = scores[scores != invalid_value]
logger.info(f'Sorting scores to get positive and negative samples.')
sorted_idx = np.argsort(scores, axis=0)[::-1, 0]
latent_codes = latent_codes[sorted_idx]
scores = scores[sorted_idx]
num_samples = latent_codes.shape[0]
if 0 < chosen_num_or_ratio <= 1:
chosen_num = int(num_samples * chosen_num_or_ratio)
else:
chosen_num = chosen_num_or_ratio
chosen_num = min(chosen_num, num_samples // 2)
logger.info(f'Spliting training and validation sets:')
train_num = int(chosen_num * split_ratio)
val_num = chosen_num - train_num
positive_idx = np.arange(chosen_num)
np.random.shuffle(positive_idx)
positive_train = latent_codes[:chosen_num][positive_idx[:train_num]]
positive_val = latent_codes[:chosen_num][positive_idx[train_num:]]
negative_idx = np.arange(chosen_num)
np.random.shuffle(negative_idx)
negative_train = latent_codes[-chosen_num:][negative_idx[:train_num]]
negative_val = latent_codes[-chosen_num:][negative_idx[train_num:]]
train_data = np.concatenate([positive_train, negative_train], axis=0)
train_label = np.concatenate([np.ones(train_num, dtype=np.int),
np.zeros(train_num, dtype=np.int)], axis=0)
logger.info(f' Training: {train_num} positive, {train_num} negative.')
val_data = np.concatenate([positive_val, negative_val], axis=0)
val_label = np.concatenate([np.ones(val_num, dtype=np.int),
np.zeros(val_num, dtype=np.int)], axis=0)
logger.info(f' Validation: {val_num} positive, {val_num} negative.')
remaining_num = num_samples - chosen_num * 2
remaining_data = latent_codes[chosen_num:-chosen_num]
remaining_scores = scores[chosen_num:-chosen_num]
decision_value = (scores[0] + scores[-1]) / 2
remaining_label = np.ones(remaining_num, dtype=np.int)
remaining_label[remaining_scores.ravel() < decision_value] = 0
remaining_positive_num = np.sum(remaining_label == 1)
remaining_negative_num = np.sum(remaining_label == 0)
logger.info(f' Remaining: {remaining_positive_num} positive, '
f'{remaining_negative_num} negative.')
logger.info(f'Training boundary.')
clf = svm.SVC(kernel='linear')
classifier = clf.fit(train_data, train_label)
logger.info(f'Finish training.')
if val_num:
val_prediction = classifier.predict(val_data)
correct_num = np.sum(val_label == val_prediction)
logger.info(f'Accuracy for validation set: '
f'{correct_num} / {val_num * 2} = '
f'{correct_num / (val_num * 2):.6f}')
if remaining_num:
remaining_prediction = classifier.predict(remaining_data)
correct_num = np.sum(remaining_label == remaining_prediction)
logger.info(f'Accuracy for remaining set: '
f'{correct_num} / {remaining_num} = '
f'{correct_num / remaining_num:.6f}')
a = classifier.coef_.reshape(1, latent_space_dim).astype(np.float32)
return a / np.linalg.norm(a)
def project_boundary(primal, *args):
if len(args) > 2:
raise NotImplementedError(f'This function supports projecting with at most '
f'two conditions.')
assert len(primal.shape) == 2 and primal.shape[0] == 1
if not args:
return primal
if len(args) == 1:
cond = args[0]
assert (len(cond.shape) == 2 and cond.shape[0] == 1 and
cond.shape[1] == primal.shape[1])
new = primal - primal.dot(cond.T) * cond
return new / np.linalg.norm(new)
if len(args) == 2:
cond_1 = args[0]
cond_2 = args[1]
assert (len(cond_1.shape) == 2 and cond_1.shape[0] == 1 and
cond_1.shape[1] == primal.shape[1])
assert (len(cond_2.shape) == 2 and cond_2.shape[0] == 1 and
cond_2.shape[1] == primal.shape[1])
primal_cond_1 = primal.dot(cond_1.T)
primal_cond_2 = primal.dot(cond_2.T)
cond_1_cond_2 = cond_1.dot(cond_2.T)
alpha = (primal_cond_1 - primal_cond_2 * cond_1_cond_2) / (
1 - cond_1_cond_2 ** 2 + 1e-8)
beta = (primal_cond_2 - primal_cond_1 * cond_1_cond_2) / (
1 - cond_1_cond_2 ** 2 + 1e-8)
new = primal - alpha * cond_1 - beta * cond_2
return new / np.linalg.norm(new)
raise NotImplementedError
def linear_interpolate(latent_code,
boundary,
start_distance=-3.0,
end_distance=3.0,
steps=10):
assert (latent_code.shape[0] == 1 and boundary.shape[0] == 1 and
len(boundary.shape) == 2 and
boundary.shape[1] == latent_code.shape[-1])
linspace = np.linspace(start_distance, end_distance, steps)
if len(latent_code.shape) == 2:
linspace = linspace - latent_code.dot(boundary.T)
linspace = linspace.reshape(-1, 1).astype(np.float32)
return latent_code + linspace * boundary
if len(latent_code.shape) == 3:
linspace = linspace.reshape(-1, 1, 1).astype(np.float32)
return latent_code + linspace * boundary.reshape(1, 1, -1)
raise ValueError(f'Input `latent_code` should be with shape '
f'[1, latent_space_dim] or [1, N, latent_space_dim] for '
f'W+ space in Style GAN!\n'
f'But {latent_code.shape} is received.')
| true | true |
f70f5789ebf11040a90ecc0c05c6851ad4bc8bd0 | 1,240 | py | Python | setup.py | sliderSun/scrapy-djangoitem | f68e60806d6d0175b88943adcac78e002f17a43c | [
"BSD-3-Clause"
] | 509 | 2015-08-11T23:04:42.000Z | 2022-02-16T01:33:55.000Z | setup.py | sliderSun/scrapy-djangoitem | f68e60806d6d0175b88943adcac78e002f17a43c | [
"BSD-3-Clause"
] | 19 | 2015-08-06T04:06:28.000Z | 2021-09-28T00:38:50.000Z | setup.py | sliderSun/scrapy-djangoitem | f68e60806d6d0175b88943adcac78e002f17a43c | [
"BSD-3-Clause"
] | 107 | 2015-09-03T20:21:56.000Z | 2021-09-28T14:20:49.000Z | from setuptools import setup, find_packages
# Package metadata for scrapy-djangoitem.
setup(
    name='scrapy-djangoitem',
    version='1.1.1',
    url='https://github.com/scrapy-plugins/scrapy-djangoitem',
    description='Scrapy extension to write scraped items using Django models',
    long_description=open('README.rst').read(),
    author='Scrapy developers',
    license='BSD',
    packages=find_packages(exclude=('tests', 'tests.*')),
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        # NOTE(review): 'Framework :: Scrapy' previously appeared twice in
        # this list; the duplicate entry has been removed.
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Utilities',
        'Framework :: Django',
        'Framework :: Scrapy',
    ],
    install_requires=['six'],
    requires=['scrapy (>=0.24.5)', 'django'],
)
| 34.444444 | 78 | 0.607258 | from setuptools import setup, find_packages
setup(
name='scrapy-djangoitem',
version='1.1.1',
url='https://github.com/scrapy-plugins/scrapy-djangoitem',
description='Scrapy extension to write scraped items using Django models',
long_description=open('README.rst').read(),
author='Scrapy developers',
license='BSD',
packages=find_packages(exclude=('tests', 'tests.*')),
include_package_data=True,
zip_safe=False,
classifiers=[
'Framework :: Scrapy',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities',
'Framework :: Django',
'Framework :: Scrapy',
],
install_requires=['six'],
requires=['scrapy (>=0.24.5)', 'django'],
)
| true | true |
f70f59f0cd48a2f126ee48d3a32435225abc914a | 1,310 | py | Python | tests/subtractions/test_utils.py | ReeceHoffmann/virtool | f9befad060fe16fa29fb80124e674ac5a9c4f538 | [
"MIT"
] | 39 | 2016-10-31T23:28:59.000Z | 2022-01-15T00:00:42.000Z | tests/subtractions/test_utils.py | ReeceHoffmann/virtool | f9befad060fe16fa29fb80124e674ac5a9c4f538 | [
"MIT"
] | 1,690 | 2017-02-07T23:39:48.000Z | 2022-03-31T22:30:44.000Z | tests/subtractions/test_utils.py | ReeceHoffmann/virtool | f9befad060fe16fa29fb80124e674ac5a9c4f538 | [
"MIT"
] | 25 | 2017-02-08T18:25:31.000Z | 2021-09-20T22:55:25.000Z | import os
import pytest
from virtool.subtractions.utils import (
check_subtraction_file_type,
get_subtraction_files,
join_subtraction_path,
rename_bowtie_files,
)
def test_join_subtraction_path(tmp_path, config):
assert join_subtraction_path(config, "bar") == tmp_path / "subtractions" / "bar"
async def test_get_subtraction_files(snapshot, pg, test_subtraction_files):
assert await get_subtraction_files(pg, "foo") == snapshot
def test_rename_bowtie_files(tmp_path):
test_dir = tmp_path / "subtractions"
test_dir.mkdir()
test_dir.joinpath("reference.1.bt2").write_text("Bowtie2 file")
test_dir.joinpath("reference.2.bt2").write_text("Bowtie2 file")
test_dir.joinpath("reference.3.bt2").write_text("Bowtie2 file")
rename_bowtie_files(test_dir)
assert set(os.listdir(test_dir)) == {
"subtraction.1.bt2",
"subtraction.2.bt2",
"subtraction.3.bt2",
}
@pytest.mark.parametrize("file_type", ["fasta", "bowtie2"])
def test_check_subtraction_file_type(file_type):
if file_type == "fasta":
result = check_subtraction_file_type("subtraction.fa.gz")
assert result == "fasta"
if file_type == "bowtie2":
result = check_subtraction_file_type("subtraction.1.bt2")
assert result == "bowtie2"
| 27.87234 | 84 | 0.710687 | import os
import pytest
from virtool.subtractions.utils import (
check_subtraction_file_type,
get_subtraction_files,
join_subtraction_path,
rename_bowtie_files,
)
def test_join_subtraction_path(tmp_path, config):
assert join_subtraction_path(config, "bar") == tmp_path / "subtractions" / "bar"
async def test_get_subtraction_files(snapshot, pg, test_subtraction_files):
assert await get_subtraction_files(pg, "foo") == snapshot
def test_rename_bowtie_files(tmp_path):
test_dir = tmp_path / "subtractions"
test_dir.mkdir()
test_dir.joinpath("reference.1.bt2").write_text("Bowtie2 file")
test_dir.joinpath("reference.2.bt2").write_text("Bowtie2 file")
test_dir.joinpath("reference.3.bt2").write_text("Bowtie2 file")
rename_bowtie_files(test_dir)
assert set(os.listdir(test_dir)) == {
"subtraction.1.bt2",
"subtraction.2.bt2",
"subtraction.3.bt2",
}
@pytest.mark.parametrize("file_type", ["fasta", "bowtie2"])
def test_check_subtraction_file_type(file_type):
if file_type == "fasta":
result = check_subtraction_file_type("subtraction.fa.gz")
assert result == "fasta"
if file_type == "bowtie2":
result = check_subtraction_file_type("subtraction.1.bt2")
assert result == "bowtie2"
| true | true |
f70f5aa44820a47f32419ad9f28bef8a4d2813ba | 23,039 | py | Python | reVX/least_cost_xmission/least_cost_xmission.py | NREL/reVX | 4d62eb2c003c3b53b959f7a58bdc342d18098884 | [
"BSD-3-Clause"
] | 7 | 2020-04-06T00:29:55.000Z | 2022-01-23T20:00:14.000Z | reVX/least_cost_xmission/least_cost_xmission.py | NREL/reVX | 4d62eb2c003c3b53b959f7a58bdc342d18098884 | [
"BSD-3-Clause"
] | 67 | 2020-02-28T20:15:35.000Z | 2022-03-31T21:34:52.000Z | reVX/least_cost_xmission/least_cost_xmission.py | NREL/reVX | 4d62eb2c003c3b53b959f7a58bdc342d18098884 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Module to compute least cost xmission paths, distances, and costs one or
more SC points
"""
from concurrent.futures import as_completed
import geopandas as gpd
import json
import logging
import numpy as np
import os
import pandas as pd
from pyproj.crs import CRS
import rasterio
from scipy.spatial import cKDTree
from shapely.geometry import Point
import time
from reV.handlers.exclusions import ExclusionLayers
from reV.supply_curve.points import SupplyCurveExtent
from rex.utilities.execution import SpawnProcessPool
from rex.utilities.loggers import log_mem
from reVX.least_cost_xmission.config import (TRANS_LINE_CAT, LOAD_CENTER_CAT,
SINK_CAT, SUBSTATION_CAT)
from reVX.least_cost_xmission.least_cost_paths import LeastCostPaths
from reVX.least_cost_xmission.trans_cap_costs import TransCapCosts
logger = logging.getLogger(__name__)
class LeastCostXmission(LeastCostPaths):
"""
Compute Least Cost tie-line paths and full transmission cap cost
for all possible connections to all supply curve points
-
"""
REQUIRED_LAYRES = ['transmission_barrier', 'ISO_regions']
def __init__(self, cost_fpath, features_fpath, resolution=128,
xmission_config=None):
"""
Parameters
----------
cost_fpath : str
Path to h5 file with cost rasters and other required layers
features_fpath : str
Path to geopackage with transmission features
resolution : int, optional
SC point resolution, by default 128
xmission_config : str | dict | XmissionConfig, optional
Path to Xmission config .json, dictionary of Xmission config
.jsons, or preloaded XmissionConfig objects, by default None
"""
self._check_layers(cost_fpath)
self._config = TransCapCosts._parse_config(
xmission_config=xmission_config)
(self._sc_points, self._features,
self._sub_lines_mapping, self._shape) =\
self._map_to_costs(cost_fpath, features_fpath,
resolution=resolution)
self._cost_fpath = cost_fpath
self._tree = None
self._sink_coords = None
self._min_line_len = (resolution * 0.09) / 2
logger.debug('{} initialized'.format(self))
def __repr__(self):
msg = ("{} to be computed for {} sc_points and {} features"
.format(self.__class__.__name__,
len(self.sc_points),
len(self.features)))
return msg
@property
def sc_points(self):
"""
Table of supply curve points
Returns
-------
gpd.GeoDataFrame
"""
return self._sc_points
@property
def features(self):
"""
Table of features to compute paths for
Returns
-------
pandas.DataFrame
"""
return self._features
@property
def sub_lines_mapping(self):
"""
Series mapping substations to the transmission lines connected
to each substation
Returns
-------
pandas.Series
"""
return self._sub_lines_mapping
@property
def sink_coords(self):
"""
Inf sink coordinates (row, col)
Returns
-------
ndarray
"""
if self._sink_coords is None:
mask = self.features['category'] == SINK_CAT
self._sink_coords = self.features.loc[mask, ['row', 'col']].values
return self._sink_coords
@property
def sink_tree(self):
"""
cKDTree for infinite sinks
Returns
-------
cKDTree
"""
if self._tree is None:
self._tree = cKDTree(self.sink_coords)
return self._tree
@staticmethod
def _load_trans_feats(features_fpath):
"""
Load existing transmission features from disk. Substations will be
loaded from cache file if it exists
Parameters
----------
features_fpath : str
Path to geopackage with trans features
Returns
-------
features : gpd.GeoDataFrame
DataFrame of transmission features
sub_line_map : pandas.Series
Mapping of sub-station trans_gid to connected tranmission line
trans_gids
"""
logger.debug('Loading transmission features')
features = gpd.read_file(features_fpath)
features = features.drop(columns=['bgid', 'egid', 'cap_left'],
errors='ignore')
mapping = {'gid': 'trans_gid', 'trans_gids': 'trans_line_gids'}
features = features.rename(columns=mapping)
features['min_volts'] = 0
features['max_volts'] = 0
# Transmission lines
mask = features['category'] == TRANS_LINE_CAT
voltage = features.loc[mask, 'voltage'].values
features.loc[mask, 'min_volts'] = voltage
features.loc[mask, 'max_volts'] = voltage
# Load Center and Sinks
mask = features['category'].isin([LOAD_CENTER_CAT, SINK_CAT])
features.loc[mask, 'min_volts'] = 1
features.loc[mask, 'max_volts'] = 9999
sub_lines_map = {}
mask = features['category'] == SUBSTATION_CAT
bad_subs = np.zeros(len(features), dtype=bool)
for idx, row in features.loc[mask].iterrows():
gid = row['trans_gid']
lines = row['trans_line_gids']
if isinstance(lines, str):
lines = json.loads(lines)
sub_lines_map[gid] = lines
lines_mask = features['trans_gid'].isin(lines)
voltage = features.loc[lines_mask, 'voltage'].values
if np.max(voltage) >= 69:
features.loc[idx, 'min_volts'] = np.min(voltage)
features.loc[idx, 'max_volts'] = np.max(voltage)
else:
bad_subs[idx] = True
if any(bad_subs):
msg = ("The following sub-stations do not have the minimum "
"required voltage of 69 kV and will be dropped:\n{}"
.format(features.loc[bad_subs, 'trans_gid']))
logger.warning(msg)
features = features.loc[~bad_subs].reset_index(drop=True)
return features, pd.Series(sub_lines_map)
@staticmethod
def _create_sc_points(cost_fpath, resolution=128):
"""
Load SC points, covert row/col to array wide, and determine x/y for
reV projection
Parameters
----------
cost_fpath : str
Path to h5 file with cost rasters and other required layers
resolution : int, optional
SC point resolution, by default 128
Returns
sc_points : gpd.GeoDataFrame
SC points
"""
logger.debug('Loading Supply Curve Points')
sce = SupplyCurveExtent(cost_fpath, resolution=resolution)
sc_points = sce.points.rename(columns={'row_ind': 'sc_row_ind',
'col_ind': 'sc_col_ind'})
shape = sce.excl_shape
sc_points['sc_point_gid'] = sc_points.index.values
row = np.round(sc_points['sc_row_ind'] * resolution + resolution / 2)
row = np.where(row >= shape[0], shape[0] - 1, row)
sc_points['row'] = row.astype(int)
col = np.round(sc_points['sc_col_ind'] * resolution + resolution / 2)
col = np.where(col >= shape[1], shape[1] - 1, col)
sc_points['col'] = col.astype(int)
return sc_points
@staticmethod
def _get_feature_cost_indices(features, crs, transform, shape):
"""
Map features to cost row, col indicies using rasterio transform
Parameters
----------
features : gpd.GeoDataFrame
GeoDataFrame of features to map to cost raster
crs : pyproj.crs.CRS
CRS of cost raster
transform : raster.Affine
Transform of cost raster
shape : tuple
Cost raster shape
Returns
-------
row : ndarray
Vector of row indicies for each feature
col : ndarray
Vector of col indicies for each features
mask : ndarray
Boolean mask of features with indicies outside of cost raster
"""
row, col, mask = super(LeastCostXmission,
LeastCostXmission)._get_feature_cost_indices(
features, crs, transform, shape)
t_lines = features['category'] == TRANS_LINE_CAT
mask |= t_lines
row[t_lines] = np.where(row[t_lines] >= 0, row[t_lines], 0)
row[t_lines] = np.where(row[t_lines] < shape[0], row[t_lines],
shape[0] - 1)
col[t_lines] = np.where(col[t_lines] >= 0, col[t_lines], 0)
col[t_lines] = np.where(col[t_lines] < shape[1], col[t_lines],
shape[1] - 1)
return row, col, mask
@classmethod
def _map_to_costs(cls, cost_fpath, features_fpath, resolution=128):
"""
Map supply curve points and transmission features to cost array pixel
indices
Parameters
----------
cost_fpath : str
Path to h5 file with cost rasters and other required layers
features_fpath : str
Path to geopackage with transmission features
resolution : int, optional
SC point resolution, by default 128
Returns
-------
sc_point : gpd.GeoDataFrame
Table of supply curve points to connect to tranmission
features : gpd.GeoDataFrame
Table of transmission features
sub_lines_map : pandas.Series
Series mapping substations to the transmission lines connected
to each substation
"""
with ExclusionLayers(cost_fpath) as f:
crs = CRS.from_string(f.crs)
transform = rasterio.Affine(*f.profile['transform'])
shape = f.shape
regions = f['ISO_regions']
features, sub_lines_map = cls._load_trans_feats(features_fpath)
row, col, mask = cls._get_feature_cost_indices(features, crs,
transform, shape)
if any(~mask):
msg = ("The following features are outside of the cost exclusion "
"domain and will be dropped:\n{}"
.format(features.loc[~mask, 'trans_gid']))
logger.warning(msg)
row = row[mask]
col = col[mask]
features = features.loc[mask].reset_index(drop=True)
features['row'] = row
features['col'] = col
features['region'] = regions[row, col]
logger.debug('Converting SC points to GeoDataFrame')
sc_points = cls._create_sc_points(cost_fpath, resolution=resolution)
x, y = rasterio.transform.xy(transform, sc_points['row'].values,
sc_points['col'].values)
geo = [Point(xy) for xy in zip(x, y)]
sc_points = gpd.GeoDataFrame(sc_points, crs=features.crs,
geometry=geo)
return sc_points, features, sub_lines_map, shape
def _clip_to_sc_point(self, sc_point, tie_line_voltage, nn_sinks=2,
clipping_buffer=1.05):
"""
Clip costs raster to AOI around SC point, and get substations,
load centers, and sinks within the clipped region.
Parameters
----------
sc_point : gpd.GeoSeries
SC point to clip raster around
nn_sinks : int, optional
Number of nearest neighbor sinks to clip to
clipping_buffer : float, optional
Buffer to increase clipping radius by, by default 1.05
Returns
-------
radius : int
Clipping radius in cost raster pixels
x_feats : pd.DataFrame
Substatations, load centers, sinks, and nearest points on t-lines
to SC point
"""
logger.debug('Clipping features to sc_point {}'.format(sc_point.name))
if len(self.sink_coords) > 2:
row, col = sc_point[['row', 'col']].values
_, pos = self.sink_tree.query([row, col], k=nn_sinks)
radius = np.abs(self.sink_coords[pos] - np.array([row, col])).max()
radius = int(np.ceil(radius * clipping_buffer))
logger.debug('Radius to {} nearest sink is: {}'
.format(nn_sinks, radius))
row_min = max(row - radius, 0)
row_max = min(row + radius, self._shape[0])
col_min = max(col - radius, 0)
col_max = min(col + radius, self._shape[1])
logger.debug('Extracting all transmission features in the row '
'slice {}:{} and column slice {}:{}'
.format(row_min, row_max, col_min, col_max))
# Clip transmission features
mask = self.features['row'] >= row_min
mask &= self.features['row'] < row_max
mask &= self.features['col'] >= col_min
mask &= self.features['col'] < col_max
sc_features = self.features.loc[mask].copy(deep=True)
logger.debug('{} transmission features found in clipped area with '
'radius {}'
.format(len(sc_features), radius))
else:
radius = None
sc_features = self.features.copy(deep=True)
mask = self.features['max_volts'] >= tie_line_voltage
sc_features = sc_features.loc[mask].copy(deep=True)
logger.debug('{} transmission features found in clipped area with '
'minimum max voltage of {}'
.format(len(sc_features), tie_line_voltage))
# Find t-lines connected to substations within clip
logger.debug('Collecting transmission lines connected to substations')
mask = sc_features['category'] == SUBSTATION_CAT
if mask.any():
trans_gids = sc_features.loc[mask, 'trans_gid'].values
trans_gids = \
np.concatenate(self.sub_lines_mapping.loc[trans_gids].values)
trans_gids = np.unique(trans_gids)
line_mask = self.features['trans_gid'].isin(trans_gids)
trans_lines = self.features.loc[line_mask].copy(deep=True)
line_mask = trans_lines['trans_gid'].isin(sc_features['trans_gid'])
trans_lines = trans_lines.loc[~line_mask]
logger.debug('Adding all {} transmission lines connected to '
'substations with minimum max voltage of {}'
.format(len(trans_lines), tie_line_voltage))
sc_features = sc_features.append(trans_lines)
return sc_features, radius
def process_sc_points(self, capacity_class, sc_point_gids=None, nn_sinks=2,
clipping_buffer=1.05, barrier_mult=100,
max_workers=None):
"""
Compute Least Cost Tranmission for desired sc_points
Parameters
----------
capacity_class : str | int
Capacity class of transmission features to connect supply curve
points to
sc_point_gids : list, optional
List of sc_point_gids to connect to, by default None
nn_sinks : int, optional
Number of nearest neighbor sinks to use for clipping radius
calculation, by default 2
clipping_buffer : float, optional
Buffer to expand clipping radius by, by default 1.05
barrier_mult : int, optional
Tranmission barrier multiplier, used when computing the least
cost tie-line path, by default 100
max_workers : int, optional
Number of workers to use for processing, if 1 run in serial,
if None use all available cores, by default None
Returns
-------
least_costs : pandas.DataFrame
Least cost connections between all supply curve points and the
transmission features with the given capacity class that are within
"nn_sink" nearest infinite sinks
"""
max_workers = os.cpu_count() if max_workers is None else max_workers
if sc_point_gids is None:
sc_point_gids = self.sc_points['sc_point_gid'].values
tie_line_voltage = self._config.capacity_to_kv(capacity_class)
least_costs = []
if max_workers > 1:
logger.info('Computing Least Cost Transmission for SC points in '
'parallel on {} workers'.format(max_workers))
loggers = [__name__, 'reV', 'reVX']
with SpawnProcessPool(max_workers=max_workers,
loggers=loggers) as exe:
futures = []
for _, sc_point in self.sc_points.iterrows():
gid = sc_point['sc_point_gid']
if gid in sc_point_gids:
sc_features, radius = self._clip_to_sc_point(
sc_point, tie_line_voltage, nn_sinks=nn_sinks,
clipping_buffer=clipping_buffer)
future = exe.submit(TransCapCosts.run,
self._cost_fpath,
sc_point.copy(deep=True),
sc_features, capacity_class,
radius=radius,
xmission_config=self._config,
barrier_mult=barrier_mult,
min_line_length=self._min_line_len)
futures.append(future)
for i, future in enumerate(as_completed(futures)):
sc_costs = future.result()
if sc_costs is not None:
least_costs.append(sc_costs)
logger.debug('SC point {} of {} complete!'
.format(i + 1, len(futures)))
log_mem(logger)
else:
logger.info('Computing Least Cost Transmission for SC points in '
'serial')
i = 1
for _, sc_point in self.sc_points.iterrows():
gid = sc_point['sc_point_gid']
if gid in sc_point_gids:
sc_features, radius = self._clip_to_sc_point(
sc_point, tie_line_voltage, nn_sinks=nn_sinks,
clipping_buffer=clipping_buffer)
sc_costs = TransCapCosts.run(
self._cost_fpath, sc_point.copy(deep=True),
sc_features, capacity_class,
radius=radius,
xmission_config=self._config,
barrier_mult=barrier_mult,
min_line_length=self._min_line_len)
if sc_costs is not None:
least_costs.append(sc_costs)
logger.debug('SC point {} of {} complete!'
.format(i, len(sc_point_gids)))
log_mem(logger)
i += 1
least_costs = pd.concat(least_costs).sort_values(['sc_point_gid',
'trans_gid'])
capacity_class = self._config._parse_cap_class(capacity_class)
least_costs['max_cap'] = self._config['power_classes'][capacity_class]
lcp_frac = (len(least_costs['sc_point_gid'].unique())
/ len(sc_point_gids) * 100)
logger.info('{:.4f}% of requested sc point gids were succesfully '
'mapped to transmission features'.format(lcp_frac))
return least_costs.reset_index(drop=True)
@classmethod
def run(cls, cost_fpath, features_fpath, capacity_class, resolution=128,
xmission_config=None, sc_point_gids=None, nn_sinks=2,
clipping_buffer=1.05, barrier_mult=100, max_workers=None):
"""
Find Least Cost Tranmission connections between desired sc_points to
given tranmission features for desired capacity class
Parameters
----------
cost_fpath : str
Path to h5 file with cost rasters and other required layers
features_fpath : str
Path to geopackage with transmission features
capacity_class : str | int
Capacity class of transmission features to connect supply curve
points to
resolution : int, optional
SC point resolution, by default 128
xmission_config : str | dict | XmissionConfig, optional
Path to Xmission config .json, dictionary of Xmission config
.jsons, or preloaded XmissionConfig objects, by default None
sc_point_gids : list, optional
List of sc_point_gids to connect to, by default None
nn_sinks : int, optional
Number of nearest neighbor sinks to use for clipping radius
calculation, by default 2
clipping_buffer : float, optional
Buffer to expand clipping radius by, by default 1.05
barrier_mult : int, optional
Tranmission barrier multiplier, used when computing the least
cost tie-line path, by default 100
max_workers : int, optional
Number of workers to use for processing, if 1 run in serial,
if None use all available cores, by default None
Returns
-------
least_costs : pandas.DataFrame
Least cost connections between all supply curve points and the
transmission features with the given capacity class that are within
"nn_sink" nearest infinite sinks
"""
ts = time.time()
lcx = cls(cost_fpath, features_fpath, resolution=resolution,
xmission_config=xmission_config)
least_costs = lcx.process_sc_points(capacity_class,
sc_point_gids=sc_point_gids,
nn_sinks=nn_sinks,
clipping_buffer=clipping_buffer,
barrier_mult=barrier_mult,
max_workers=max_workers)
logger.info('{} connections were made to {} SC points in {:.4f} '
'minutes'
.format(len(least_costs),
len(least_costs['sc_point_gid'].unique()),
(time.time() - ts) / 60))
return least_costs
| 39.11545 | 79 | 0.570164 |
from concurrent.futures import as_completed
import geopandas as gpd
import json
import logging
import numpy as np
import os
import pandas as pd
from pyproj.crs import CRS
import rasterio
from scipy.spatial import cKDTree
from shapely.geometry import Point
import time
from reV.handlers.exclusions import ExclusionLayers
from reV.supply_curve.points import SupplyCurveExtent
from rex.utilities.execution import SpawnProcessPool
from rex.utilities.loggers import log_mem
from reVX.least_cost_xmission.config import (TRANS_LINE_CAT, LOAD_CENTER_CAT,
SINK_CAT, SUBSTATION_CAT)
from reVX.least_cost_xmission.least_cost_paths import LeastCostPaths
from reVX.least_cost_xmission.trans_cap_costs import TransCapCosts
logger = logging.getLogger(__name__)
class LeastCostXmission(LeastCostPaths):
REQUIRED_LAYRES = ['transmission_barrier', 'ISO_regions']
def __init__(self, cost_fpath, features_fpath, resolution=128,
xmission_config=None):
self._check_layers(cost_fpath)
self._config = TransCapCosts._parse_config(
xmission_config=xmission_config)
(self._sc_points, self._features,
self._sub_lines_mapping, self._shape) =\
self._map_to_costs(cost_fpath, features_fpath,
resolution=resolution)
self._cost_fpath = cost_fpath
self._tree = None
self._sink_coords = None
self._min_line_len = (resolution * 0.09) / 2
logger.debug('{} initialized'.format(self))
def __repr__(self):
msg = ("{} to be computed for {} sc_points and {} features"
.format(self.__class__.__name__,
len(self.sc_points),
len(self.features)))
return msg
@property
def sc_points(self):
return self._sc_points
@property
def features(self):
return self._features
@property
def sub_lines_mapping(self):
return self._sub_lines_mapping
@property
def sink_coords(self):
if self._sink_coords is None:
mask = self.features['category'] == SINK_CAT
self._sink_coords = self.features.loc[mask, ['row', 'col']].values
return self._sink_coords
@property
def sink_tree(self):
if self._tree is None:
self._tree = cKDTree(self.sink_coords)
return self._tree
@staticmethod
def _load_trans_feats(features_fpath):
logger.debug('Loading transmission features')
features = gpd.read_file(features_fpath)
features = features.drop(columns=['bgid', 'egid', 'cap_left'],
errors='ignore')
mapping = {'gid': 'trans_gid', 'trans_gids': 'trans_line_gids'}
features = features.rename(columns=mapping)
features['min_volts'] = 0
features['max_volts'] = 0
mask = features['category'] == TRANS_LINE_CAT
voltage = features.loc[mask, 'voltage'].values
features.loc[mask, 'min_volts'] = voltage
features.loc[mask, 'max_volts'] = voltage
mask = features['category'].isin([LOAD_CENTER_CAT, SINK_CAT])
features.loc[mask, 'min_volts'] = 1
features.loc[mask, 'max_volts'] = 9999
sub_lines_map = {}
mask = features['category'] == SUBSTATION_CAT
bad_subs = np.zeros(len(features), dtype=bool)
for idx, row in features.loc[mask].iterrows():
gid = row['trans_gid']
lines = row['trans_line_gids']
if isinstance(lines, str):
lines = json.loads(lines)
sub_lines_map[gid] = lines
lines_mask = features['trans_gid'].isin(lines)
voltage = features.loc[lines_mask, 'voltage'].values
if np.max(voltage) >= 69:
features.loc[idx, 'min_volts'] = np.min(voltage)
features.loc[idx, 'max_volts'] = np.max(voltage)
else:
bad_subs[idx] = True
if any(bad_subs):
msg = ("The following sub-stations do not have the minimum "
"required voltage of 69 kV and will be dropped:\n{}"
.format(features.loc[bad_subs, 'trans_gid']))
logger.warning(msg)
features = features.loc[~bad_subs].reset_index(drop=True)
return features, pd.Series(sub_lines_map)
@staticmethod
def _create_sc_points(cost_fpath, resolution=128):
logger.debug('Loading Supply Curve Points')
sce = SupplyCurveExtent(cost_fpath, resolution=resolution)
sc_points = sce.points.rename(columns={'row_ind': 'sc_row_ind',
'col_ind': 'sc_col_ind'})
shape = sce.excl_shape
sc_points['sc_point_gid'] = sc_points.index.values
row = np.round(sc_points['sc_row_ind'] * resolution + resolution / 2)
row = np.where(row >= shape[0], shape[0] - 1, row)
sc_points['row'] = row.astype(int)
col = np.round(sc_points['sc_col_ind'] * resolution + resolution / 2)
col = np.where(col >= shape[1], shape[1] - 1, col)
sc_points['col'] = col.astype(int)
return sc_points
@staticmethod
def _get_feature_cost_indices(features, crs, transform, shape):
row, col, mask = super(LeastCostXmission,
LeastCostXmission)._get_feature_cost_indices(
features, crs, transform, shape)
t_lines = features['category'] == TRANS_LINE_CAT
mask |= t_lines
row[t_lines] = np.where(row[t_lines] >= 0, row[t_lines], 0)
row[t_lines] = np.where(row[t_lines] < shape[0], row[t_lines],
shape[0] - 1)
col[t_lines] = np.where(col[t_lines] >= 0, col[t_lines], 0)
col[t_lines] = np.where(col[t_lines] < shape[1], col[t_lines],
shape[1] - 1)
return row, col, mask
@classmethod
def _map_to_costs(cls, cost_fpath, features_fpath, resolution=128):
with ExclusionLayers(cost_fpath) as f:
crs = CRS.from_string(f.crs)
transform = rasterio.Affine(*f.profile['transform'])
shape = f.shape
regions = f['ISO_regions']
features, sub_lines_map = cls._load_trans_feats(features_fpath)
row, col, mask = cls._get_feature_cost_indices(features, crs,
transform, shape)
if any(~mask):
msg = ("The following features are outside of the cost exclusion "
"domain and will be dropped:\n{}"
.format(features.loc[~mask, 'trans_gid']))
logger.warning(msg)
row = row[mask]
col = col[mask]
features = features.loc[mask].reset_index(drop=True)
features['row'] = row
features['col'] = col
features['region'] = regions[row, col]
logger.debug('Converting SC points to GeoDataFrame')
sc_points = cls._create_sc_points(cost_fpath, resolution=resolution)
x, y = rasterio.transform.xy(transform, sc_points['row'].values,
sc_points['col'].values)
geo = [Point(xy) for xy in zip(x, y)]
sc_points = gpd.GeoDataFrame(sc_points, crs=features.crs,
geometry=geo)
return sc_points, features, sub_lines_map, shape
def _clip_to_sc_point(self, sc_point, tie_line_voltage, nn_sinks=2,
clipping_buffer=1.05):
logger.debug('Clipping features to sc_point {}'.format(sc_point.name))
if len(self.sink_coords) > 2:
row, col = sc_point[['row', 'col']].values
_, pos = self.sink_tree.query([row, col], k=nn_sinks)
radius = np.abs(self.sink_coords[pos] - np.array([row, col])).max()
radius = int(np.ceil(radius * clipping_buffer))
logger.debug('Radius to {} nearest sink is: {}'
.format(nn_sinks, radius))
row_min = max(row - radius, 0)
row_max = min(row + radius, self._shape[0])
col_min = max(col - radius, 0)
col_max = min(col + radius, self._shape[1])
logger.debug('Extracting all transmission features in the row '
'slice {}:{} and column slice {}:{}'
.format(row_min, row_max, col_min, col_max))
mask = self.features['row'] >= row_min
mask &= self.features['row'] < row_max
mask &= self.features['col'] >= col_min
mask &= self.features['col'] < col_max
sc_features = self.features.loc[mask].copy(deep=True)
logger.debug('{} transmission features found in clipped area with '
'radius {}'
.format(len(sc_features), radius))
else:
radius = None
sc_features = self.features.copy(deep=True)
mask = self.features['max_volts'] >= tie_line_voltage
sc_features = sc_features.loc[mask].copy(deep=True)
logger.debug('{} transmission features found in clipped area with '
'minimum max voltage of {}'
.format(len(sc_features), tie_line_voltage))
logger.debug('Collecting transmission lines connected to substations')
mask = sc_features['category'] == SUBSTATION_CAT
if mask.any():
trans_gids = sc_features.loc[mask, 'trans_gid'].values
trans_gids = \
np.concatenate(self.sub_lines_mapping.loc[trans_gids].values)
trans_gids = np.unique(trans_gids)
line_mask = self.features['trans_gid'].isin(trans_gids)
trans_lines = self.features.loc[line_mask].copy(deep=True)
line_mask = trans_lines['trans_gid'].isin(sc_features['trans_gid'])
trans_lines = trans_lines.loc[~line_mask]
logger.debug('Adding all {} transmission lines connected to '
'substations with minimum max voltage of {}'
.format(len(trans_lines), tie_line_voltage))
sc_features = sc_features.append(trans_lines)
return sc_features, radius
def process_sc_points(self, capacity_class, sc_point_gids=None, nn_sinks=2,
clipping_buffer=1.05, barrier_mult=100,
max_workers=None):
max_workers = os.cpu_count() if max_workers is None else max_workers
if sc_point_gids is None:
sc_point_gids = self.sc_points['sc_point_gid'].values
tie_line_voltage = self._config.capacity_to_kv(capacity_class)
least_costs = []
if max_workers > 1:
logger.info('Computing Least Cost Transmission for SC points in '
'parallel on {} workers'.format(max_workers))
loggers = [__name__, 'reV', 'reVX']
with SpawnProcessPool(max_workers=max_workers,
loggers=loggers) as exe:
futures = []
for _, sc_point in self.sc_points.iterrows():
gid = sc_point['sc_point_gid']
if gid in sc_point_gids:
sc_features, radius = self._clip_to_sc_point(
sc_point, tie_line_voltage, nn_sinks=nn_sinks,
clipping_buffer=clipping_buffer)
future = exe.submit(TransCapCosts.run,
self._cost_fpath,
sc_point.copy(deep=True),
sc_features, capacity_class,
radius=radius,
xmission_config=self._config,
barrier_mult=barrier_mult,
min_line_length=self._min_line_len)
futures.append(future)
for i, future in enumerate(as_completed(futures)):
sc_costs = future.result()
if sc_costs is not None:
least_costs.append(sc_costs)
logger.debug('SC point {} of {} complete!'
.format(i + 1, len(futures)))
log_mem(logger)
else:
logger.info('Computing Least Cost Transmission for SC points in '
'serial')
i = 1
for _, sc_point in self.sc_points.iterrows():
gid = sc_point['sc_point_gid']
if gid in sc_point_gids:
sc_features, radius = self._clip_to_sc_point(
sc_point, tie_line_voltage, nn_sinks=nn_sinks,
clipping_buffer=clipping_buffer)
sc_costs = TransCapCosts.run(
self._cost_fpath, sc_point.copy(deep=True),
sc_features, capacity_class,
radius=radius,
xmission_config=self._config,
barrier_mult=barrier_mult,
min_line_length=self._min_line_len)
if sc_costs is not None:
least_costs.append(sc_costs)
logger.debug('SC point {} of {} complete!'
.format(i, len(sc_point_gids)))
log_mem(logger)
i += 1
least_costs = pd.concat(least_costs).sort_values(['sc_point_gid',
'trans_gid'])
capacity_class = self._config._parse_cap_class(capacity_class)
least_costs['max_cap'] = self._config['power_classes'][capacity_class]
lcp_frac = (len(least_costs['sc_point_gid'].unique())
/ len(sc_point_gids) * 100)
logger.info('{:.4f}% of requested sc point gids were succesfully '
'mapped to transmission features'.format(lcp_frac))
return least_costs.reset_index(drop=True)
@classmethod
def run(cls, cost_fpath, features_fpath, capacity_class, resolution=128,
xmission_config=None, sc_point_gids=None, nn_sinks=2,
clipping_buffer=1.05, barrier_mult=100, max_workers=None):
ts = time.time()
lcx = cls(cost_fpath, features_fpath, resolution=resolution,
xmission_config=xmission_config)
least_costs = lcx.process_sc_points(capacity_class,
sc_point_gids=sc_point_gids,
nn_sinks=nn_sinks,
clipping_buffer=clipping_buffer,
barrier_mult=barrier_mult,
max_workers=max_workers)
logger.info('{} connections were made to {} SC points in {:.4f} '
'minutes'
.format(len(least_costs),
len(least_costs['sc_point_gid'].unique()),
(time.time() - ts) / 60))
return least_costs
| true | true |
f70f5b4ab3752fa1ced9abdd8789bbb663073dde | 5,427 | py | Python | site-packages/oslo_service/_options.py | hariza17/freezer_libraries | e0bd890eba5e7438976fb3b4d66c41c128bab790 | [
"PSF-2.0"
] | null | null | null | site-packages/oslo_service/_options.py | hariza17/freezer_libraries | e0bd890eba5e7438976fb3b4d66c41c128bab790 | [
"PSF-2.0"
] | null | null | null | site-packages/oslo_service/_options.py | hariza17/freezer_libraries | e0bd890eba5e7438976fb3b4d66c41c128bab790 | [
"PSF-2.0"
] | null | null | null | # Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# Shared help text for the backdoor_port option below; also interpolated
# into its description so the accepted port formats are documented once.
help_for_backdoor_port = (
    "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
    "in listening on a random tcp port number; <port> results in listening "
    "on the specified port number (and not enabling backdoor if that port "
    "is in use); and <start>:<end> results in listening on the smallest "
    "unused port number within the specified range of port numbers. The "
    "chosen port is displayed in the service's log file.")
# Options controlling the eventlet backdoor debugging shell (TCP port or
# unix-socket variants; the socket option overrides the port option).
eventlet_backdoor_opts = [
    cfg.StrOpt('backdoor_port',
               help="Enable eventlet backdoor. %s" % help_for_backdoor_port),
    cfg.StrOpt('backdoor_socket',
               help="Enable eventlet backdoor, using the provided path"
                    " as a unix socket that can receive connections. This"
                    " option is mutually exclusive with 'backdoor_port' in"
                    " that only one should be provided. If both are provided"
                    " then the existence of this option overrides the usage of"
                    " that option.")
]
# Option controlling whether externally-runnable periodic tasks execute
# in this process.
periodic_opts = [
    cfg.BoolOpt('run_external_periodic_tasks',
                default=True,
                help='Some periodic tasks can be run in a separate process. '
                     'Should we run them here?'),
]
# General service lifecycle options (startup logging, shutdown timeout).
service_opts = [
    cfg.BoolOpt('log_options',
                default=True,
                help='Enables or disables logging values of all registered '
                     'options when starting a service (at DEBUG level).'),
    cfg.IntOpt('graceful_shutdown_timeout',
               default=60,
               help='Specify a timeout after which a gracefully shutdown '
                    'server will exit. Zero value means endless wait.'),
]
# Options for the eventlet-based WSGI server (paste config, access-log
# format, socket tuning, greenthread pool sizing).
wsgi_opts = [
    cfg.StrOpt('api_paste_config',
               default="api-paste.ini",
               help='File name for the paste.deploy config for api service'),
    cfg.StrOpt('wsgi_log_format',
               default='%(client_ip)s "%(request_line)s" status: '
                       '%(status_code)s len: %(body_length)s time:'
                       ' %(wall_seconds).7f',
               help='A python format string that is used as the template to '
                    'generate log lines. The following values can be'
                    'formatted into it: client_ip, date_time, request_line, '
                    'status_code, body_length, wall_seconds.'),
    cfg.IntOpt('tcp_keepidle',
               default=600,
               help="Sets the value of TCP_KEEPIDLE in seconds for each "
                    "server socket. Not supported on OS X."),
    cfg.IntOpt('wsgi_default_pool_size',
               default=100,
               help="Size of the pool of greenthreads used by wsgi"),
    cfg.IntOpt('max_header_line',
               default=16384,
               help="Maximum line size of message headers to be accepted. "
                    "max_header_line may need to be increased when using "
                    "large tokens (typically those generated when keystone "
                    "is configured to use PKI tokens with big service "
                    "catalogs)."),
    cfg.BoolOpt('wsgi_keep_alive',
                default=True,
                help="If False, closes the client socket connection "
                     "explicitly."),
    cfg.IntOpt('client_socket_timeout', default=900,
               help="Timeout for client connections' socket operations. "
                    "If an incoming connection is idle for this number of "
                    "seconds it will be closed. A value of '0' means "
                    "wait forever."),
]
# TLS/SSL options for serving securely; several carry deprecated aliases
# from the DEFAULT group for backward compatibility.
ssl_opts = [
    cfg.StrOpt('ca_file',
               help="CA certificate file to use to verify "
                    "connecting clients.",
               deprecated_group='DEFAULT',
               deprecated_name='ssl_ca_file'),
    cfg.StrOpt('cert_file',
               help="Certificate file to use when starting "
                    "the server securely.",
               deprecated_group='DEFAULT',
               deprecated_name='ssl_cert_file'),
    cfg.StrOpt('key_file',
               help="Private key file to use when starting "
                    "the server securely.",
               deprecated_group='DEFAULT',
               deprecated_name='ssl_key_file'),
    cfg.StrOpt('version',
               help='SSL version to use (valid only if SSL enabled). '
                    'Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, '
                    'TLSv1_1, and TLSv1_2 may be available on some '
                    'distributions.'
               ),
    cfg.StrOpt('ciphers',
               help='Sets the list of available ciphers. value should be a '
                    'string in the OpenSSL cipher list format.'
               ),
]
| 45.605042 | 79 | 0.585775 |
from oslo_config import cfg
help_for_backdoor_port = (
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
"in listening on a random tcp port number; <port> results in listening "
"on the specified port number (and not enabling backdoor if that port "
"is in use); and <start>:<end> results in listening on the smallest "
"unused port number within the specified range of port numbers. The "
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
help="Enable eventlet backdoor. %s" % help_for_backdoor_port),
cfg.StrOpt('backdoor_socket',
help="Enable eventlet backdoor, using the provided path"
" as a unix socket that can receive connections. This"
" option is mutually exclusive with 'backdoor_port' in"
" that only one should be provided. If both are provided"
" then the existence of this option overrides the usage of"
" that option.")
]
periodic_opts = [
cfg.BoolOpt('run_external_periodic_tasks',
default=True,
help='Some periodic tasks can be run in a separate process. '
'Should we run them here?'),
]
service_opts = [
cfg.BoolOpt('log_options',
default=True,
help='Enables or disables logging values of all registered '
'options when starting a service (at DEBUG level).'),
cfg.IntOpt('graceful_shutdown_timeout',
default=60,
help='Specify a timeout after which a gracefully shutdown '
'server will exit. Zero value means endless wait.'),
]
wsgi_opts = [
cfg.StrOpt('api_paste_config',
default="api-paste.ini",
help='File name for the paste.deploy config for api service'),
cfg.StrOpt('wsgi_log_format',
default='%(client_ip)s "%(request_line)s" status: '
'%(status_code)s len: %(body_length)s time:'
' %(wall_seconds).7f',
help='A python format string that is used as the template to '
'generate log lines. The following values can be'
'formatted into it: client_ip, date_time, request_line, '
'status_code, body_length, wall_seconds.'),
cfg.IntOpt('tcp_keepidle',
default=600,
help="Sets the value of TCP_KEEPIDLE in seconds for each "
"server socket. Not supported on OS X."),
cfg.IntOpt('wsgi_default_pool_size',
default=100,
help="Size of the pool of greenthreads used by wsgi"),
cfg.IntOpt('max_header_line',
default=16384,
help="Maximum line size of message headers to be accepted. "
"max_header_line may need to be increased when using "
"large tokens (typically those generated when keystone "
"is configured to use PKI tokens with big service "
"catalogs)."),
cfg.BoolOpt('wsgi_keep_alive',
default=True,
help="If False, closes the client socket connection "
"explicitly."),
cfg.IntOpt('client_socket_timeout', default=900,
help="Timeout for client connections' socket operations. "
"If an incoming connection is idle for this number of "
"seconds it will be closed. A value of '0' means "
"wait forever."),
]
ssl_opts = [
cfg.StrOpt('ca_file',
help="CA certificate file to use to verify "
"connecting clients.",
deprecated_group='DEFAULT',
deprecated_name='ssl_ca_file'),
cfg.StrOpt('cert_file',
help="Certificate file to use when starting "
"the server securely.",
deprecated_group='DEFAULT',
deprecated_name='ssl_cert_file'),
cfg.StrOpt('key_file',
help="Private key file to use when starting "
"the server securely.",
deprecated_group='DEFAULT',
deprecated_name='ssl_key_file'),
cfg.StrOpt('version',
help='SSL version to use (valid only if SSL enabled). '
'Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, '
'TLSv1_1, and TLSv1_2 may be available on some '
'distributions.'
),
cfg.StrOpt('ciphers',
help='Sets the list of available ciphers. value should be a '
'string in the OpenSSL cipher list format.'
),
]
| true | true |
f70f5cd57a1fd153f57517e918891d2f039f7ddc | 425 | py | Python | interpriters/__init__.py | yakumo-saki/smart_to_zabbix | 04dd1debe0c831b4ec94962884543c989ad57730 | [
"MIT"
] | null | null | null | interpriters/__init__.py | yakumo-saki/smart_to_zabbix | 04dd1debe0c831b4ec94962884543c989ad57730 | [
"MIT"
] | 23 | 2021-08-30T14:59:27.000Z | 2021-11-05T16:51:08.000Z | interpriters/__init__.py | yakumo-saki/smart_to_zabbix | 04dd1debe0c831b4ec94962884543c989ad57730 | [
"MIT"
] | null | null | null | from interpriters.smart.IntelX25Interpriter import IntelX25Interpriter
from interpriters.smart.SmartBasicInterpriter import SmartBasicInterpriter
from interpriters.smart.SanDiskInterpriter import SmartSanDiskInterpriter
from interpriters.nvme.NvmeBasicInterpriter import NvmeBasicInterpriter
SPECIAL_INTERPRITERS = [SmartSanDiskInterpriter(), IntelX25Interpriter()]
BASIC = [SmartBasicInterpriter(), NvmeBasicInterpriter()] | 53.125 | 74 | 0.884706 | from interpriters.smart.IntelX25Interpriter import IntelX25Interpriter
from interpriters.smart.SmartBasicInterpriter import SmartBasicInterpriter
from interpriters.smart.SanDiskInterpriter import SmartSanDiskInterpriter
from interpriters.nvme.NvmeBasicInterpriter import NvmeBasicInterpriter
SPECIAL_INTERPRITERS = [SmartSanDiskInterpriter(), IntelX25Interpriter()]
BASIC = [SmartBasicInterpriter(), NvmeBasicInterpriter()] | true | true |
f70f5e7eb40489e986f62438a0124259082259d8 | 24,487 | py | Python | morgana/GUIs/fluo.py | Nikoula86/organoidSegment | b5d00256c15302ccd76b8b7a412852750476504b | [
"MIT"
] | 8 | 2021-09-08T10:49:53.000Z | 2022-02-25T13:28:03.000Z | morgana/GUIs/fluo.py | Nikoula86/organoidSegment | b5d00256c15302ccd76b8b7a412852750476504b | [
"MIT"
] | null | null | null | morgana/GUIs/fluo.py | Nikoula86/organoidSegment | b5d00256c15302ccd76b8b7a412852750476504b | [
"MIT"
] | 1 | 2021-11-24T08:10:41.000Z | 2021-11-24T08:10:41.000Z | from PyQt5.QtWidgets import (QApplication, QComboBox, QGridLayout, QGroupBox, QLabel, QPushButton,
QFileDialog, QMessageBox, QWidget, QSizePolicy, QCheckBox)
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import numpy as np
import warnings, os, time
from skimage.io import imsave
import scipy.ndimage as ndi
from matplotlib.figure import Figure
from scipy.interpolate import interp1d
import matplotlib as mpl
warnings.filterwarnings("ignore")
from matplotlib import rc
rc('font', size=12)
rc('font', family='Arial')
# rc('font', serif='Times')
rc('pdf', fonttype=42)
# rc('text', usetex=True)
class profileAP_condMode(QWidget):
    """Qt widget plotting anterior-posterior intensity profiles per condition.

    Shows a matplotlib canvas with mean +/- std profiles for each group in
    ``data_all``, plus controls for background subtraction, Y/X
    normalization, A-P orientation correction, alignment, and group
    selection.  ``data_all`` is a nested structure indexed as
    data_all[group][folder][key][gastruloid][channel] — inferred from the
    indexing in remakePlot; confirm against the caller.
    """
    def __init__(self, data_all, channel, colors, profileType='APprofile', parent=None, ylabel='Intensity (a.u.)'):
        """Store the dataset/plot parameters and build the UI via make()."""
        super(profileAP_condMode, self).__init__(parent)
        self.data_all = data_all
        self.channel = channel
        self.colors = colors
        self.profileType = profileType
        self.ylabel = ylabel
        self.make()
    def make(self):
        """Create the matplotlib canvas and all control widgets, lay them
        out in a grid, and draw the initial plot."""
        self.figure = Figure(figsize=(4, 2.5), dpi=100)
        self.canvas = FigureCanvas(self.figure)
        self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.figure.clear()
        ax = self.figure.add_subplot(111)
        self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
        ax.set_xlabel(self.profileType)
        ax.ticklabel_format(axis="x", style="sci", scilimits=(2,2))
        ax.set_ylabel(self.ylabel)
        ax.ticklabel_format(axis="y", style="sci", scilimits=(0,2))
        # ax.axis('off')
        self.canvas.draw()
        # Y-axis normalization choices.  NOTE(review): 'Manual' is offered
        # here but never handled in remakePlot — selecting it behaves like
        # 'No normalization'.
        self.YnormBtn = QComboBox()
        self.YnormBtn.addItem('No normalization')
        self.YnormBtn.addItem('Global percentile')
        self.YnormBtn.addItem('Group percentile')
        self.YnormBtn.addItem('Folder percentile')
        self.YnormBtn.addItem('Manual')
        # X-axis normalization toggle; when on, alignment is irrelevant and
        # the alignment combo is disabled (see onCheckingXnormBtn).
        self.XnormBtn = QCheckBox('')
        self.XnormBtn.setChecked(False)
        self.XnormBtn.stateChanged.connect(self.onCheckingXnormBtn)
        self.bckgBtn = QComboBox()
        self.bckgBtn.addItem('None')
        self.bckgBtn.addItem('Background')
        self.bckgBtn.addItem('Minimum')
        self.orientationBtn = QComboBox()
        self.orientationBtn.addItem('Signal based')
        self.orientationBtn.addItem('NO')
        self.alignmentBtn = QComboBox()
        self.alignmentBtn.addItem('Left')
        self.alignmentBtn.addItem('Right')
        self.alignmentBtn.addItem('Center')
        self.groupSelection = self.makeGroupSelectionBtns()
        # Settings only take effect when the user presses Apply.
        self.applyBtn = QPushButton('Apply Settings')
        self.applyBtn.clicked.connect(self.remakePlot)
        lay = QGridLayout(self)
        lay.setSpacing(10)
        lay.addWidget(NavigationToolbar(self.canvas, self),0,0,1,2)
        lay.addWidget(self.canvas,1,0,1,2)
        lay.addWidget(QLabel('Background subtraction type:'),2,0,1,1)
        lay.addWidget(self.bckgBtn,2,1,1,1)
        lay.addWidget(QLabel('Y axis normalization:'),4,0,1,1)
        lay.addWidget(self.YnormBtn,4,1,1,1)
        lay.addWidget(QLabel('X axis normalization:'),5,0,1,1)
        lay.addWidget(self.XnormBtn,5,1,1,1)
        lay.addWidget(QLabel('A-P orientation correction:'),6,0,1,1)
        lay.addWidget(self.orientationBtn,6,1,1,1)
        lay.addWidget(QLabel('Alignment:'),7,0,1,1)
        lay.addWidget(self.alignmentBtn,7,1,1,1)
        lay.addWidget(self.groupSelection,8,0,1,2)
        lay.addWidget(self.applyBtn,9,0,1,2)
        self.remakePlot()
        self.setWindowTitle('Channel')
        QApplication.setStyle('Fusion')
    def onCheckingXnormBtn(self):
        """Disable the alignment combo while X normalization is active
        (normalized profiles all share the same x-range)."""
        if self.XnormBtn.isChecked():
            self.alignmentBtn.setEnabled(False)
        else:
            self.alignmentBtn.setEnabled(True)
    def makeGroupSelectionBtns(self):
        """Build the 'Groups to plot' box: one checkbox per group plus
        legend and raw-data toggles.  Returns the QGroupBox."""
        group = QGroupBox("Groups to plot")
        self.groupPlotBtn = []
        for i in range(len(self.data_all)):
            self.groupPlotBtn.append(QCheckBox('Group '+str(i)))
            self.groupPlotBtn[-1].setChecked(True)
        self.legendBtn = QCheckBox('Legend')
        self.legendBtn.setChecked(False)
        self.rawBtn = QCheckBox('Plot raw data')
        self.rawBtn.setChecked(True)
        lay = QGridLayout()
        for i in range(len(self.data_all)):
            lay.addWidget(self.groupPlotBtn[i],i,0,1,1)
        lay.addWidget(self.legendBtn,0,1,1,1)
        lay.addWidget(self.rawBtn,1,1,1,1)
        group.setLayout(lay)
        return group
    def remakePlot(self):
        """Recompute and redraw the profile plot with the current widget
        settings: extract profiles, subtract background, normalize Y and
        optionally X, orient, pad/align, then plot per-group mean +/- std."""
        self.figure.clear()
        ax = self.figure.add_subplot(111)
        self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
        ax.set_xlabel(self.profileType)
        ax.ticklabel_format(axis="x", style="sci", scilimits=(2,2))
        ax.set_ylabel(self.ylabel)
        ax.ticklabel_format(axis="y", style="sci", scilimits=(0,2))
        # ax.axis('off')
        # Dataset dimensions: groups -> folders -> gastruloids.
        n_groups = len(self.data_all)
        n_folders = [len(self.data_all[group_idx]) for group_idx in range(n_groups)]
        n_gastr = [[len(self.data_all[group_idx][folder_idx]['input_file']) for folder_idx in range(n_folders[group_idx])] for group_idx in range(n_groups)]
        # rearrange dataset
        profiles_all = [[[0 for k in range(n_gastr[i][j])] for j in range(n_folders[i])] for i in range(n_groups)]
        for i in range(n_groups):
            for j in range(n_folders[i]):
                for k in range(n_gastr[i][j]):
                    profiles_all[i][j][k] = np.array(self.data_all[i][j][self.profileType][k][self.channel])
                    # subtract background or not
                    if self.bckgBtn.currentText() == 'Background':
                        profiles_all[i][j][k] -= self.data_all[i][j]['Background'][k][self.channel]
                    if self.bckgBtn.currentText() == 'Minimum':
                        profiles_all[i][j][k] -= np.min(profiles_all[i][j][k])
        # normalize fluorescence intensity accordingly
        # (clip to [0,1] using 0.3/99.7 percentiles of the chosen pool)
        if self.YnormBtn.currentText() == 'Global percentile':
            flat = []
            for i in range(n_groups):
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        for l in profiles_all[i][j][k]:
                            flat.append(l)
            percs = np.percentile(np.array(flat),(.3,99.7))
            for i in range(n_groups):
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        profile = np.array(profiles_all[i][j][k])
                        profiles_all[i][j][k] = np.clip((profile-percs[0])/(percs[1]-percs[0]),0,1.)
        elif self.YnormBtn.currentText() == 'Group percentile':
            flat = [[]for i in range(n_groups)]
            for i in range(n_groups):
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        for l in profiles_all[i][j][k]:
                            flat[i].append(l)
            percs = [np.percentile(np.array(f),(.3,99.7)) for f in flat]
            for i in range(n_groups):
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        # print(percs[i])
                        profile = np.array(profiles_all[i][j][k])
                        profiles_all[i][j][k] = np.clip((profile-percs[i][0])/(percs[i][1]-percs[i][0]),0,1.)
        elif self.YnormBtn.currentText() == 'Folder percentile':
            flat = [[[] for j in range(n_folders[i])] for i in range(n_groups)]
            for i in range(n_groups):
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        for l in profiles_all[i][j][k]:
                            flat[i][j].append(l)
            percs = [[np.percentile(np.array(f),(.3,99.7)) for f in ff] for ff in flat]
            for i in range(n_groups):
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        # print(percs[i][j])
                        profile = np.array(profiles_all[i][j][k])
                        profiles_all[i][j][k] = np.clip((profile-percs[i][j][0])/(percs[i][j][1]-percs[i][j][0]),0,1.)
        # normalize AP axis if necessary
        # (resample every profile onto 101 points in [0,1])
        if self.XnormBtn.isChecked():
            for i in range(n_groups):
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        profile = profiles_all[i][j][k]
                        x = np.linspace(0,1,len(profile))
                        fun = interp1d(x,profile)
                        new_x = np.linspace(0,1,101)
                        profiles_all[i][j][k] = fun(new_x)
        # compute length of longest gastruloid
        max_length = []
        for i in range(n_groups):
            for j in range(n_folders[i]):
                for k in range(n_gastr[i][j]):
                    max_length.append(len(profiles_all[i][j][k]))
        max_length = np.max(max_length)
        # orient plots according to setting
        # (flip so that the brighter half ends up on the right)
        if self.orientationBtn.currentText() == 'Signal based':
            for i in range(n_groups):
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        y = np.array(profiles_all[i][j][k])[~np.isnan(profiles_all[i][j][k])]
                        n_p = len(y)
                        if np.sum(y[:int(n_p/2)])>np.sum(y[int(n_p-n_p/2):]):
                            profiles_all[i][j][k] = profiles_all[i][j][k][::-1]
        # pad array to the right or left
        # NOTE(review): the 'Left' branch is an `if`, not part of the
        # `if/elif` chain below; behavior is still correct because the
        # other branches then test False.
        for i in range(n_groups):
            for j in range(n_folders[i]):
                for k in range(n_gastr[i][j]):
                    w = max_length-len(profiles_all[i][j][k])
                    if self.alignmentBtn.currentText() == 'Left':
                        pad_width = (0,w)
                    if self.alignmentBtn.currentText() == 'Right':
                        pad_width = (w,0)
                    elif self.alignmentBtn.currentText() == 'Center':
                        if 2*int(w/2)==w:
                            pad_width = (int(w/2),int(w/2))
                        else:
                            pad_width = (int(w/2)+1,int(w/2))
                    profiles_all[i][j][k] = np.pad(profiles_all[i][j][k],pad_width,mode='constant',constant_values=np.nan)
        ### make plot
        lines = []
        for i in range(n_groups):
            # plot this group only if the button is checked
            if self.groupPlotBtn[i].isChecked():
                ydata_group = []
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        ydata_group.append(profiles_all[i][j][k])
                        # plot the raw data if the button is checked
                        if self.rawBtn.isChecked():
                            ax.plot(ydata_group[-1],'-', lw=.5, c=self.colors[i], alpha = 0.2)
                # compute and plot mean and std
                max_length = np.max([len(d) for d in ydata_group])
                _mean = np.zeros(max_length)
                _std = np.zeros(max_length)
                for j in range(max_length):
                    datapoint = []
                    for data in ydata_group:
                        datapoint.append(data[j])
                    _mean[j] = np.nanmean(datapoint)
                    _std[j] = np.nanstd(datapoint)
                line = ax.plot(_mean,'-',lw=1,c=self.colors[i],label='Mean')[0]
                ax.fill_between(range(len(_mean)),_mean-_std,_mean+_std,facecolor=self.colors[i],alpha=.2, linewidth=0.,label='Std')
                lines.append(line)
        # adjust axes lims
        ax.set_ylim(0,None)
        ax.set_xlim(0,None)
        if self.XnormBtn.isChecked():
            ax.set_xlim(0,100)
        if self.YnormBtn.currentText() != 'No normalization':
            ax.set_ylim(0,1)
        # add legend
        if self.legendBtn.isChecked():
            l = ax.legend(lines,['Group '+str(i+1) for i in range(len(self.groupPlotBtn)) if self.groupPlotBtn[i].isChecked()])
            l.get_frame().set_linewidth(0.0)
        self.canvas.draw()
class profileAP_tlMode(QWidget):
    """Qt widget plotting A-P profiles over time as a kymograph image.

    For one selected group, averages profiles across folders/gastruloids at
    each timepoint and shows the result with ``imshow`` (x = position along
    the A-P axis, y = time).  Offers the same preprocessing controls as
    profileAP_condMode plus an aspect-ratio toggle and tif export.
    ``data_all`` is a nested structure indexed as
    data_all[group][folder][key][gastruloid][channel] — inferred from the
    indexing in remakePlot; confirm against the caller.
    """
    #############
    # NOTE: original author marked this class "TO BE IMPLEMENTED!!!" —
    # treat its behavior as provisional.
    #############
    def __init__(self, data_all, channel, colors, profileType='APprofile', parent=None):
        """Store the dataset/plot parameters and build the UI via make()."""
        super(profileAP_tlMode, self).__init__(parent)
        self.data_all = data_all
        self.n_groups = len(data_all)
        self.channel = channel
        self.colors = colors
        self.profileType = profileType
        self.make()
    def make(self):
        """Create the matplotlib canvas and control widgets, lay them out,
        and draw the initial plot."""
        self.figure = Figure(figsize=(4, 2.5), dpi=100)
        self.canvas = FigureCanvas(self.figure)
        self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.figure.clear()
        ax = self.figure.add_subplot(111)
        self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
        ax.set_xlabel(self.profileType)
        ax.ticklabel_format(axis="x", style="sci", scilimits=(2,2))
        ax.set_ylabel('Time')
        ax.ticklabel_format(axis="y", style="sci", scilimits=(0,2))
        # ax.axis('off')
        self.canvas.draw()
        ###############################################
        settings_group = QGroupBox('Plot settings')
        # Y-axis normalization choices.  NOTE(review): 'Manual' is offered
        # but never handled in remakePlot.
        self.YnormBtn = QComboBox()
        self.YnormBtn.addItem('No normalization')
        self.YnormBtn.addItem('Global percentile')
        self.YnormBtn.addItem('Group percentile')
        self.YnormBtn.addItem('Folder percentile')
        self.YnormBtn.addItem('Manual')
        self.XnormBtn = QCheckBox('')
        self.XnormBtn.setChecked(False)
        self.XnormBtn.stateChanged.connect(self.onCheckingXnormBtn)
        self.bckgBtn = QComboBox()
        self.bckgBtn.addItem('None')
        self.bckgBtn.addItem('Background')
        self.bckgBtn.addItem('Minimum')
        self.orientationBtn = QComboBox()
        self.orientationBtn.addItem('Signal based')
        self.orientationBtn.addItem('NO')
        self.alignmentBtn = QComboBox()
        self.alignmentBtn.addItem('Left')
        self.alignmentBtn.addItem('Right')
        self.alignmentBtn.addItem('Center')
        self.aspectRatioBtn = QCheckBox('')
        self.aspectRatioBtn.setChecked(True)
        self.groupPlotBtn = QComboBox()
        for i in range(len(self.data_all)):
            self.groupPlotBtn.addItem('Group '+str(i+1))
        # Fix: this layout belongs to settings_group, so it must be created
        # unparented (the original QGridLayout(self) installed it on the
        # main widget first, relying on setLayout() to reparent it).
        lay = QGridLayout()
        lay.addWidget(QLabel('Background subtraction:'),2,0,1,1)
        lay.addWidget(self.bckgBtn,2,1,1,1)
        lay.addWidget(QLabel('Y axis normalization:'),4,0,1,1)
        lay.addWidget(self.YnormBtn,4,1,1,1)
        lay.addWidget(QLabel('X axis normalization:'),5,0,1,1)
        lay.addWidget(self.XnormBtn,5,1,1,1)
        lay.addWidget(QLabel('A-P orientation correction:'),6,0,1,1)
        lay.addWidget(self.orientationBtn,6,1,1,1)
        lay.addWidget(QLabel('Alignment:'),7,0,1,1)
        lay.addWidget(self.alignmentBtn,7,1,1,1)
        lay.addWidget(QLabel('Set axes aspect ratio to equal:'),8,0,1,1)
        lay.addWidget(self.aspectRatioBtn,8,1,1,1)
        lay.addWidget(QLabel('Current group:'),9,0,1,1)
        lay.addWidget(self.groupPlotBtn,9,1,1,2)
        settings_group.setLayout(lay)
        #######################
        self.applyBtn = QPushButton('Apply Settings')
        self.applyBtn.clicked.connect(self.remakePlot)
        self.saveBtn = QPushButton('Save Tif image')
        self.saveBtn.clicked.connect(self.save_tif)
        lay = QGridLayout(self)
        lay.setSpacing(10)
        lay.addWidget(NavigationToolbar(self.canvas, self),0,0,1,2)
        lay.addWidget(self.canvas,1,0,1,2)
        lay.addWidget(settings_group,2,0,1,2)
        lay.addWidget(self.applyBtn,3,0,1,2)
        lay.addWidget(self.saveBtn,4,0,1,2)
        self.remakePlot()
        self.setWindowTitle('Channel')
        QApplication.setStyle('Macintosh')
    def onCheckingXnormBtn(self):
        """Disable the alignment combo while X normalization is active."""
        if self.XnormBtn.isChecked():
            self.alignmentBtn.setEnabled(False)
        else:
            self.alignmentBtn.setEnabled(True)
    def remakePlot(self):
        """Recompute and redraw the kymograph for the currently selected
        group with the current widget settings.  Also stores the plotted
        image in ``self.tif_data`` and the normalization percentiles in
        ``self.percs`` for later export by save_tif."""
        n_groups = len(self.data_all)
        n_folders = [len(self.data_all[group_idx]) for group_idx in range(n_groups)]
        n_gastr = [[len(self.data_all[group_idx][folder_idx]['input_file']) for folder_idx in range(n_folders[group_idx])] for group_idx in range(n_groups)]
        # rearrange dataset
        profiles_all = [[[0 for k in range(n_gastr[i][j])] for j in range(n_folders[i])] for i in range(n_groups)]
        for i in range(n_groups):
            for j in range(n_folders[i]):
                for k in range(n_gastr[i][j]):
                    profiles_all[i][j][k] = np.array(self.data_all[i][j][self.profileType][k][self.channel])
                    # subtract background or not
                    if self.bckgBtn.currentText() == 'Background':
                        profiles_all[i][j][k] -= self.data_all[i][j]['Background'][k][self.channel]
                    if self.bckgBtn.currentText() == 'Minimum':
                        profiles_all[i][j][k] -= np.min(profiles_all[i][j][k])
        # normalize fluorescence intensity accordingly
        # (clip to [0,1] using 0.3/99.7 percentiles of the chosen pool)
        percs = [None,None]
        if self.YnormBtn.currentText() == 'Global percentile':
            flat = []
            for i in range(n_groups):
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        for l in profiles_all[i][j][k]:
                            flat.append(l)
            percs = np.percentile(np.array(flat),(.3,99.7))
            for i in range(n_groups):
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        profile = np.array(profiles_all[i][j][k])
                        profiles_all[i][j][k] = np.clip((profile-percs[0])/(percs[1]-percs[0]),0,1.)
        elif self.YnormBtn.currentText() == 'Group percentile':
            flat = [[]for i in range(n_groups)]
            for i in range(n_groups):
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        for l in profiles_all[i][j][k]:
                            flat[i].append(l)
            percs = [np.percentile(np.array(f),(.3,99.7)) for f in flat]
            for i in range(n_groups):
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        profile = np.array(profiles_all[i][j][k])
                        profiles_all[i][j][k] = np.clip((profile-percs[i][0])/(percs[i][1]-percs[i][0]),0,1.)
        elif self.YnormBtn.currentText() == 'Folder percentile':
            flat = [[[] for j in range(n_folders[i])] for i in range(n_groups)]
            for i in range(n_groups):
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        for l in profiles_all[i][j][k]:
                            flat[i][j].append(l)
            percs = [[np.percentile(np.array(f),(.3,99.7)) for f in ff] for ff in flat]
            for i in range(n_groups):
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        profile = np.array(profiles_all[i][j][k])
                        profiles_all[i][j][k] = np.clip((profile-percs[i][j][0])/(percs[i][j][1]-percs[i][j][0]),0,1.)
        # Kept for save_tif: None pair (no normalization), a scalar pair
        # (global), or nested per-group/per-folder pairs (group/folder).
        self.percs = percs
        # normalize AP axis if necessary
        # (resample every profile onto 101 points in [0,1])
        if self.XnormBtn.isChecked():
            for i in range(n_groups):
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        profile = profiles_all[i][j][k]
                        x = np.linspace(0,1,len(profile))
                        fun = interp1d(x,profile)
                        new_x = np.linspace(0,1,101)
                        profiles_all[i][j][k] = fun(new_x)
        # compute length of longest gastruloid
        max_length = []
        for i in range(n_groups):
            for j in range(n_folders[i]):
                for k in range(n_gastr[i][j]):
                    max_length.append(len(profiles_all[i][j][k]))
        max_length = np.max(max_length)
        # orient plots according to setting
        # (flip so that the brighter half ends up on the right)
        if self.orientationBtn.currentText() == 'Signal based':
            for i in range(n_groups):
                for j in range(n_folders[i]):
                    for k in range(n_gastr[i][j]):
                        y = np.array(profiles_all[i][j][k])[~np.isnan(profiles_all[i][j][k])]
                        n_p = len(y)
                        if np.sum(y[:int(n_p/2)])>np.sum(y[int(n_p-n_p/2):]):
                            profiles_all[i][j][k] = profiles_all[i][j][k][::-1]
        # pad array to the right or left
        for i in range(n_groups):
            for j in range(n_folders[i]):
                for k in range(n_gastr[i][j]):
                    w = max_length-len(profiles_all[i][j][k])
                    if self.alignmentBtn.currentText() == 'Left':
                        pad_width = (0,w)
                    if self.alignmentBtn.currentText() == 'Right':
                        pad_width = (w,0)
                    elif self.alignmentBtn.currentText() == 'Center':
                        if 2*int(w/2)==w:
                            pad_width = (int(w/2),int(w/2))
                        else:
                            pad_width = (int(w/2)+1,int(w/2))
                    profiles_all[i][j][k] = np.pad(profiles_all[i][j][k],pad_width,mode='constant',constant_values=np.nan)
        ### make plot
        self.figure.clear()
        ax = self.figure.add_subplot(111)
        self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
        ax.set_xlabel(self.profileType)
        ax.ticklabel_format(axis="x", style="sci", scilimits=(2,2))
        ax.set_ylabel('Time')
        ax.ticklabel_format(axis="y", style="sci", scilimits=(0,2))
        # plot the selected group only
        i = self.groupPlotBtn.currentIndex()
        # prepare blank image: rows = timepoints, cols = A-P position;
        # accumulate sums and counts so zeros (padding) don't bias the mean
        max_t = np.max([n_gastr[i][j] for j in range(n_folders[i])])
        max_l = np.max([len(profiles_all[i][j][k]) for j in range(n_folders[i]) for k in range(n_gastr[i][j])])
        data_mean = np.zeros((max_t,max_l))
        data_count = np.zeros((max_t,max_l))
        for j in range(n_folders[i]):
            for k in range(n_gastr[i][j]):
                data = np.nan_to_num(profiles_all[i][j][k])
                data_mean[k,:] += data
                data_count[k,:] += data!=0
        # Fix: np.float was removed in NumPy 1.24 (deprecated since 1.20);
        # the builtin float is the documented replacement.
        data_mean = data_mean.astype(float)/data_count.astype(float)
        # positions with count 0 produce nan (0/0); map them back to 0
        data_mean = np.nan_to_num(data_mean)
        aspect = 'auto'
        if self.aspectRatioBtn.isChecked():
            aspect = 'equal'
        ax.imshow(data_mean, aspect=aspect)
        ax.set_title('Group '+str(i+1))
        self.tif_data = data_mean
        self.canvas.draw()
    def save_tif(self):
        """Ask the user for a destination and save ``self.tif_data`` as a
        16-bit tif, rescaling with the stored normalization percentiles
        when a single (global) percentile pair is available."""
        name,_ = QFileDialog.getSaveFileName(self, 'Save Overview File')
        if name != '':
            ### check file extension: allow to save in other formats, but bias towards tif
            if os.path.splitext(name)[-1]!='.tif':
                buttonReply = QMessageBox.question(self,'File format warning!','File format not recommended. Do you want to save the image as tif?')
                if buttonReply == QMessageBox.Yes:
                    name = os.path.splitext(name)[0]+'.tif'
            # convert the image into int16 with the right brightness and contrast.
            # Fix: the original `self.percs[0]!=None` raised ValueError when
            # percs holds per-group arrays (Group/Folder percentile modes);
            # only rescale when percs is a scalar pair (global mode).
            # NOTE(review): per-group/per-folder rescaling is not implemented
            # here — those modes save the data unscaled; TODO confirm intent.
            if self.percs[0] is not None and np.ndim(self.percs[0]) == 0:
                self.tif_data = (2**16-1)*(self.tif_data-self.percs[0])/(self.percs[1]-self.percs[0])
            imsave(name, self.tif_data.astype(np.uint16))
| 43.883513 | 156 | 0.548822 | from PyQt5.QtWidgets import (QApplication, QComboBox, QGridLayout, QGroupBox, QLabel, QPushButton,
QFileDialog, QMessageBox, QWidget, QSizePolicy, QCheckBox)
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import numpy as np
import warnings, os, time
from skimage.io import imsave
import scipy.ndimage as ndi
from matplotlib.figure import Figure
from scipy.interpolate import interp1d
import matplotlib as mpl
warnings.filterwarnings("ignore")
from matplotlib import rc
rc('font', size=12)
rc('font', family='Arial')
rc('pdf', fonttype=42)
class profileAP_condMode(QWidget):
def __init__(self, data_all, channel, colors, profileType='APprofile', parent=None, ylabel='Intensity (a.u.)'):
super(profileAP_condMode, self).__init__(parent)
self.data_all = data_all
self.channel = channel
self.colors = colors
self.profileType = profileType
self.ylabel = ylabel
self.make()
def make(self):
self.figure = Figure(figsize=(4, 2.5), dpi=100)
self.canvas = FigureCanvas(self.figure)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.figure.clear()
ax = self.figure.add_subplot(111)
self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
ax.set_xlabel(self.profileType)
ax.ticklabel_format(axis="x", style="sci", scilimits=(2,2))
ax.set_ylabel(self.ylabel)
ax.ticklabel_format(axis="y", style="sci", scilimits=(0,2))
self.canvas.draw()
self.YnormBtn = QComboBox()
self.YnormBtn.addItem('No normalization')
self.YnormBtn.addItem('Global percentile')
self.YnormBtn.addItem('Group percentile')
self.YnormBtn.addItem('Folder percentile')
self.YnormBtn.addItem('Manual')
self.XnormBtn = QCheckBox('')
self.XnormBtn.setChecked(False)
self.XnormBtn.stateChanged.connect(self.onCheckingXnormBtn)
self.bckgBtn = QComboBox()
self.bckgBtn.addItem('None')
self.bckgBtn.addItem('Background')
self.bckgBtn.addItem('Minimum')
self.orientationBtn = QComboBox()
self.orientationBtn.addItem('Signal based')
self.orientationBtn.addItem('NO')
self.alignmentBtn = QComboBox()
self.alignmentBtn.addItem('Left')
self.alignmentBtn.addItem('Right')
self.alignmentBtn.addItem('Center')
self.groupSelection = self.makeGroupSelectionBtns()
self.applyBtn = QPushButton('Apply Settings')
self.applyBtn.clicked.connect(self.remakePlot)
lay = QGridLayout(self)
lay.setSpacing(10)
lay.addWidget(NavigationToolbar(self.canvas, self),0,0,1,2)
lay.addWidget(self.canvas,1,0,1,2)
lay.addWidget(QLabel('Background subtraction type:'),2,0,1,1)
lay.addWidget(self.bckgBtn,2,1,1,1)
lay.addWidget(QLabel('Y axis normalization:'),4,0,1,1)
lay.addWidget(self.YnormBtn,4,1,1,1)
lay.addWidget(QLabel('X axis normalization:'),5,0,1,1)
lay.addWidget(self.XnormBtn,5,1,1,1)
lay.addWidget(QLabel('A-P orientation correction:'),6,0,1,1)
lay.addWidget(self.orientationBtn,6,1,1,1)
lay.addWidget(QLabel('Alignment:'),7,0,1,1)
lay.addWidget(self.alignmentBtn,7,1,1,1)
lay.addWidget(self.groupSelection,8,0,1,2)
lay.addWidget(self.applyBtn,9,0,1,2)
self.remakePlot()
self.setWindowTitle('Channel')
QApplication.setStyle('Fusion')
def onCheckingXnormBtn(self):
if self.XnormBtn.isChecked():
self.alignmentBtn.setEnabled(False)
else:
self.alignmentBtn.setEnabled(True)
def makeGroupSelectionBtns(self):
group = QGroupBox("Groups to plot")
self.groupPlotBtn = []
for i in range(len(self.data_all)):
self.groupPlotBtn.append(QCheckBox('Group '+str(i)))
self.groupPlotBtn[-1].setChecked(True)
self.legendBtn = QCheckBox('Legend')
self.legendBtn.setChecked(False)
self.rawBtn = QCheckBox('Plot raw data')
self.rawBtn.setChecked(True)
lay = QGridLayout()
for i in range(len(self.data_all)):
lay.addWidget(self.groupPlotBtn[i],i,0,1,1)
lay.addWidget(self.legendBtn,0,1,1,1)
lay.addWidget(self.rawBtn,1,1,1,1)
group.setLayout(lay)
return group
def remakePlot(self):
self.figure.clear()
ax = self.figure.add_subplot(111)
self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
ax.set_xlabel(self.profileType)
ax.ticklabel_format(axis="x", style="sci", scilimits=(2,2))
ax.set_ylabel(self.ylabel)
ax.ticklabel_format(axis="y", style="sci", scilimits=(0,2))
n_groups = len(self.data_all)
n_folders = [len(self.data_all[group_idx]) for group_idx in range(n_groups)]
n_gastr = [[len(self.data_all[group_idx][folder_idx]['input_file']) for folder_idx in range(n_folders[group_idx])] for group_idx in range(n_groups)]
profiles_all = [[[0 for k in range(n_gastr[i][j])] for j in range(n_folders[i])] for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profiles_all[i][j][k] = np.array(self.data_all[i][j][self.profileType][k][self.channel])
if self.bckgBtn.currentText() == 'Background':
profiles_all[i][j][k] -= self.data_all[i][j]['Background'][k][self.channel]
if self.bckgBtn.currentText() == 'Minimum':
profiles_all[i][j][k] -= np.min(profiles_all[i][j][k])
if self.YnormBtn.currentText() == 'Global percentile':
flat = []
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat.append(l)
percs = np.percentile(np.array(flat),(.3,99.7))
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[0])/(percs[1]-percs[0]),0,1.)
elif self.YnormBtn.currentText() == 'Group percentile':
flat = [[]for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat[i].append(l)
percs = [np.percentile(np.array(f),(.3,99.7)) for f in flat]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[i][0])/(percs[i][1]-percs[i][0]),0,1.)
elif self.YnormBtn.currentText() == 'Folder percentile':
flat = [[[] for j in range(n_folders[i])] for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat[i][j].append(l)
percs = [[np.percentile(np.array(f),(.3,99.7)) for f in ff] for ff in flat]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[i][j][0])/(percs[i][j][1]-percs[i][j][0]),0,1.)
if self.XnormBtn.isChecked():
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profile = profiles_all[i][j][k]
x = np.linspace(0,1,len(profile))
fun = interp1d(x,profile)
new_x = np.linspace(0,1,101)
profiles_all[i][j][k] = fun(new_x)
max_length = []
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
max_length.append(len(profiles_all[i][j][k]))
max_length = np.max(max_length)
if self.orientationBtn.currentText() == 'Signal based':
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
y = np.array(profiles_all[i][j][k])[~np.isnan(profiles_all[i][j][k])]
n_p = len(y)
if np.sum(y[:int(n_p/2)])>np.sum(y[int(n_p-n_p/2):]):
profiles_all[i][j][k] = profiles_all[i][j][k][::-1]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
w = max_length-len(profiles_all[i][j][k])
if self.alignmentBtn.currentText() == 'Left':
pad_width = (0,w)
if self.alignmentBtn.currentText() == 'Right':
pad_width = (w,0)
elif self.alignmentBtn.currentText() == 'Center':
if 2*int(w/2)==w:
pad_width = (int(w/2),int(w/2))
else:
pad_width = (int(w/2)+1,int(w/2))
profiles_all[i][j][k] = np.pad(profiles_all[i][j][k],pad_width,mode='constant',constant_values=np.nan)
for i in range(n_groups):
if self.groupPlotBtn[i].isChecked():
ydata_group = []
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
ydata_group.append(profiles_all[i][j][k])
if self.rawBtn.isChecked():
ax.plot(ydata_group[-1],'-', lw=.5, c=self.colors[i], alpha = 0.2)
max_length = np.max([len(d) for d in ydata_group])
_mean = np.zeros(max_length)
_std = np.zeros(max_length)
for j in range(max_length):
datapoint = []
for data in ydata_group:
datapoint.append(data[j])
_mean[j] = np.nanmean(datapoint)
_std[j] = np.nanstd(datapoint)
line = ax.plot(_mean,'-',lw=1,c=self.colors[i],label='Mean')[0]
ax.fill_between(range(len(_mean)),_mean-_std,_mean+_std,facecolor=self.colors[i],alpha=.2, linewidth=0.,label='Std')
lines.append(line)
ax.set_ylim(0,None)
ax.set_xlim(0,None)
if self.XnormBtn.isChecked():
ax.set_xlim(0,100)
if self.YnormBtn.currentText() != 'No normalization':
ax.set_ylim(0,1)
if self.legendBtn.isChecked():
l = ax.legend(lines,['Group '+str(i+1) for i in range(len(self.groupPlotBtn)) if self.groupPlotBtn[i].isChecked()])
l.get_frame().set_linewidth(0.0)
self.canvas.draw()
class profileAP_tlMode(QWidget):
self.data_all = data_all
self.n_groups = len(data_all)
self.channel = channel
self.colors = colors
self.profileType = profileType
self.make()
    def make(self):
        """Build the time-lapse profile window: figure, canvas, settings, plot.

        NOTE(review): this block appears truncated in the source at hand —
        several widgets referenced below (self.bckgBtn, self.YnormBtn,
        self.XnormBtn, self.orientationBtn, self.alignmentBtn,
        self.aspectRatioBtn, self.groupPlotBtn, self.applyBtn, self.saveBtn,
        and ``settings_group``) are never created here, and two grid layouts
        seem to have been merged into one ``lay``.  Restore the widget
        construction from the full source before relying on this method.
        """
        # Matplotlib figure embedded in a Qt canvas.
        self.figure = Figure(figsize=(4, 2.5), dpi=100)
        self.canvas = FigureCanvas(self.figure)
        self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.figure.clear()
        ax = self.figure.add_subplot(111)
        self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
        ax.set_xlabel(self.profileType)
        ax.ticklabel_format(axis="x", style="sci", scilimits=(2,2))
        ax.set_ylabel('Time')
        ax.ticklabel_format(axis="y", style="sci", scilimits=(0,2))
        self.canvas.draw()
        # One combo-box entry per group (1-based labels).
        for i in range(len(self.data_all)):
            self.groupPlotBtn.addItem('Group '+str(i+1))
        lay = QGridLayout(self)
        lay.addWidget(QLabel('Background subtraction:'),2,0,1,1)
        lay.addWidget(self.bckgBtn,2,1,1,1)
        lay.addWidget(QLabel('Y axis normalization:'),4,0,1,1)
        lay.addWidget(self.YnormBtn,4,1,1,1)
        lay.addWidget(QLabel('X axis normalization:'),5,0,1,1)
        lay.addWidget(self.XnormBtn,5,1,1,1)
        lay.addWidget(QLabel('A-P orientation correction:'),6,0,1,1)
        lay.addWidget(self.orientationBtn,6,1,1,1)
        lay.addWidget(QLabel('Alignment:'),7,0,1,1)
        lay.addWidget(self.alignmentBtn,7,1,1,1)
        lay.addWidget(QLabel('Set axes aspect ratio to equal:'),8,0,1,1)
        lay.addWidget(self.aspectRatioBtn,8,1,1,1)
        lay.addWidget(QLabel('Current group:'),9,0,1,1)
        lay.addWidget(self.groupPlotBtn,9,1,1,2)
        # NOTE(review): settings_group is undefined at this point in the
        # truncated source; in the full file it is presumably a QGroupBox.
        settings_group.setLayout(lay)
        lay.setSpacing(10)
        lay.addWidget(NavigationToolbar(self.canvas, self),0,0,1,2)
        lay.addWidget(self.canvas,1,0,1,2)
        lay.addWidget(settings_group,2,0,1,2)
        lay.addWidget(self.applyBtn,3,0,1,2)
        lay.addWidget(self.saveBtn,4,0,1,2)
        self.remakePlot()
        self.setWindowTitle('Channel')
        QApplication.setStyle('Macintosh')
def onCheckingXnormBtn(self):
if self.XnormBtn.isChecked():
self.alignmentBtn.setEnabled(False)
else:
self.alignmentBtn.setEnabled(True)
def remakePlot(self):
n_groups = len(self.data_all)
n_folders = [len(self.data_all[group_idx]) for group_idx in range(n_groups)]
n_gastr = [[len(self.data_all[group_idx][folder_idx]['input_file']) for folder_idx in range(n_folders[group_idx])] for group_idx in range(n_groups)]
profiles_all = [[[0 for k in range(n_gastr[i][j])] for j in range(n_folders[i])] for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profiles_all[i][j][k] = np.array(self.data_all[i][j][self.profileType][k][self.channel])
if self.bckgBtn.currentText() == 'Background':
profiles_all[i][j][k] -= self.data_all[i][j]['Background'][k][self.channel]
if self.bckgBtn.currentText() == 'Minimum':
profiles_all[i][j][k] -= np.min(profiles_all[i][j][k])
percs = [None,None]
if self.YnormBtn.currentText() == 'Global percentile':
flat = []
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat.append(l)
percs = np.percentile(np.array(flat),(.3,99.7))
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[0])/(percs[1]-percs[0]),0,1.)
elif self.YnormBtn.currentText() == 'Group percentile':
flat = [[]for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat[i].append(l)
percs = [np.percentile(np.array(f),(.3,99.7)) for f in flat]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[i][0])/(percs[i][1]-percs[i][0]),0,1.)
elif self.YnormBtn.currentText() == 'Folder percentile':
flat = [[[] for j in range(n_folders[i])] for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat[i][j].append(l)
percs = [[np.percentile(np.array(f),(.3,99.7)) for f in ff] for ff in flat]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[i][j][0])/(percs[i][j][1]-percs[i][j][0]),0,1.)
self.percs = percs
if self.XnormBtn.isChecked():
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profile = profiles_all[i][j][k]
x = np.linspace(0,1,len(profile))
fun = interp1d(x,profile)
new_x = np.linspace(0,1,101)
profiles_all[i][j][k] = fun(new_x)
max_length = []
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
max_length.append(len(profiles_all[i][j][k]))
max_length = np.max(max_length)
if self.orientationBtn.currentText() == 'Signal based':
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
y = np.array(profiles_all[i][j][k])[~np.isnan(profiles_all[i][j][k])]
n_p = len(y)
if np.sum(y[:int(n_p/2)])>np.sum(y[int(n_p-n_p/2):]):
profiles_all[i][j][k] = profiles_all[i][j][k][::-1]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
w = max_length-len(profiles_all[i][j][k])
if self.alignmentBtn.currentText() == 'Left':
pad_width = (0,w)
if self.alignmentBtn.currentText() == 'Right':
pad_width = (w,0)
elif self.alignmentBtn.currentText() == 'Center':
if 2*int(w/2)==w:
pad_width = (int(w/2),int(w/2))
else:
pad_width = (int(w/2)+1,int(w/2))
profiles_all[i][j][k] = np.pad(profiles_all[i][j][k],pad_width,mode='constant',constant_values=np.nan)
figure.clear()
ax = self.figure.add_subplot(111)
self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
ax.set_xlabel(self.profileType)
ax.ticklabel_format(axis="x", style="sci", scilimits=(2,2))
ax.set_ylabel('Time')
ax.ticklabel_format(axis="y", style="sci", scilimits=(0,2))
i = self.groupPlotBtn.currentIndex()
max_t = np.max([n_gastr[i][j] for j in range(n_folders[i])])
max_l = np.max([len(profiles_all[i][j][k]) for j in range(n_folders[i]) for k in range(n_gastr[i][j])])
data_mean = np.zeros((max_t,max_l))
data_count = np.zeros((max_t,max_l))
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
data = np.nan_to_num(profiles_all[i][j][k])
data_mean[k,:] += data
data_count[k,:] += data!=0
data_mean = data_mean.astype(np.float)/data_count.astype(np.float)
data_mean = np.nan_to_num(data_mean)
aspect = 'auto'
if self.aspectRatioBtn.isChecked():
aspect = 'equal'
ax.imshow(data_mean, aspect=aspect)
ax.set_title('Group '+str(i+1))
self.tif_data = data_mean
self.canvas.draw()
    def save_tif(self):
        """Export the kymograph computed by remakePlot() as a 16-bit TIFF.

        NOTE(review): this block is truncated in the source at hand — the
        statement assigning ``buttonReply`` (presumably a
        ``QMessageBox.question(...)`` confirmation dialog) is missing, and the
        next line begins mid-string ("mended. ...").  Restore it from the
        full source before use.
        """
        name,_ = QFileDialog.getSaveFileName(self, 'Save Overview File')
        if name != '':
            mended. Do you want to save the image as tif?')
            if buttonReply == QMessageBox.Yes:
                name = os.path.splitext(name)[0]+'.tif'
                # If percentile normalization was applied, map the [0,1] data
                # back onto the full uint16 range before saving.
                if self.percs[0]!=None:
                    self.tif_data = (2**16-1)*(self.tif_data-self.percs[0])/(self.percs[1]-self.percs[0])
                imsave(name+'', self.tif_data.astype(np.uint16))
| true | true |
f70f5ed7f55124b815811b8e5f3e9fd3b4c6d9bc | 28 | py | Python | rpxdock/rosetta/__init__.py | willsheffler/tcdock | c7b8614221f4a94750054bfe5dfb12298e8d05b8 | [
"Apache-2.0"
] | 6 | 2020-09-08T09:31:52.000Z | 2022-03-29T09:53:30.000Z | rpxdock/rosetta/__init__.py | willsheffler/tcdock | c7b8614221f4a94750054bfe5dfb12298e8d05b8 | [
"Apache-2.0"
] | 9 | 2020-04-13T21:11:50.000Z | 2022-03-09T00:57:37.000Z | rpxdock/rosetta/__init__.py | willsheffler/tcdock | c7b8614221f4a94750054bfe5dfb12298e8d05b8 | [
"Apache-2.0"
] | 3 | 2020-04-13T20:04:20.000Z | 2021-12-16T22:43:50.000Z | from .rosetta_util import *
| 14 | 27 | 0.785714 | from .rosetta_util import *
| true | true |
f70f5fe84e1700532cb04c37da25039ecfc01520 | 1,202 | py | Python | tests/test_graph.py | shawnbrown/toron | 3a89273b5afbda17baf973f654a0947f6667ef4e | [
"Apache-2.0"
] | 1 | 2015-08-15T18:28:16.000Z | 2015-08-15T18:28:16.000Z | tests/test_graph.py | shawnbrown/gpn | 70b9933277a66e884cced3f5fbc795e0e0cd2b9f | [
"Apache-2.0"
] | null | null | null | tests/test_graph.py | shawnbrown/gpn | 70b9933277a66e884cced3f5fbc795e0e0cd2b9f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # New stdlib location in 3.0
from . import _unittest as unittest
from .common import TempDirTestCase
from toron.graph import Graph
from toron._gpn_node import Node
from toron import IN_MEMORY
class TestInstantiation(TempDirTestCase):
    """Graph can be built from an explicit Node list or from files on disk."""

    def setUp(self):
        # Remove any temp files created by the test afterwards.
        self.addCleanup(self.cleanup_temp_files)

    def test_from_collection(self):
        nodes = [
            Node(mode=IN_MEMORY, name='old_boundary'),
            Node(mode=IN_MEMORY, name='new_boundary'),
        ]
        graph = Graph(nodes=nodes)  # Build graph directly from the list.
        self.assertSetEqual(set(['old_boundary', 'new_boundary']),
                            set(graph.nodes.keys()))

    def test_from_cwd(self):
        # Instantiating with a filename writes the .node file into the temp cwd.
        old_boundary = Node('old_boundary.node')
        new_boundary = Node('new_boundary.node')
        graph = Graph(path='.')  # Discover node files in the current directory.
        self.assertSetEqual(set(['old_boundary', 'new_boundary']),
                            set(graph.nodes.keys()))


if __name__ == '__main__':
    unittest.main()
| 30.820513 | 78 | 0.694676 |
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from . import _unittest as unittest
from .common import TempDirTestCase
from toron.graph import Graph
from toron._gpn_node import Node
from toron import IN_MEMORY
class TestInstantiation(TempDirTestCase):
def setUp(self):
self.addCleanup(self.cleanup_temp_files)
def test_from_collection(self):
old_boundary = Node(mode=IN_MEMORY, name='old_boundary')
new_boundary = Node(mode=IN_MEMORY, name='new_boundary')
collection = [old_boundary, new_boundary]
graph = Graph(nodes=collection)
node_names = set(graph.nodes.keys())
self.assertSetEqual(set(['old_boundary', 'new_boundary']), node_names)
def test_from_cwd(self):
old_boundary = Node('old_boundary.node')
new_boundary = Node('new_boundary.node')
graph = Graph(path='.')
node_names = set(graph.nodes.keys())
self.assertSetEqual(set(['old_boundary', 'new_boundary']), node_names)
if __name__ == '__main__':
unittest.main()
| true | true |
f70f60c0f5ef6d0d238412ab8c0bcbba577b180c | 3,261 | py | Python | tensorflow/python/kernel_tests/softplus_op_test.py | AlexChrisF/udacity | b7f85a74058fc63ccb7601c418450ab934ef5953 | [
"Apache-2.0"
] | 65 | 2016-09-26T01:30:40.000Z | 2021-08-11T17:00:41.000Z | tensorflow/python/kernel_tests/softplus_op_test.py | AlexChrisF/udacity | b7f85a74058fc63ccb7601c418450ab934ef5953 | [
"Apache-2.0"
] | 7 | 2017-07-13T09:40:59.000Z | 2019-04-08T22:46:51.000Z | tensorflow/python/kernel_tests/softplus_op_test.py | AlexChrisF/udacity | b7f85a74058fc63ccb7601c418450ab934ef5953 | [
"Apache-2.0"
] | 38 | 2017-04-28T04:15:48.000Z | 2019-09-28T05:11:46.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Softplus and SoftplusGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SoftplusTest(test.TestCase):
  """Tests tf.nn.softplus forward values and gradients against NumPy.

  Bug fix: in testNumbers the GPU probe list read ``log_eps + ten - log_eps``
  — a missing comma that silently collapsed two probe points into one
  expression, so the GPU path skipped values exercised by the CPU call.
  """

  def _npSoftplus(self, np_features):
    """NumPy reference: softplus(x) = log(1 + exp(x)), computed stably."""
    np_features = np.asarray(np_features)
    zero = np.asarray(0).astype(np_features.dtype)
    return np.logaddexp(zero, np_features)

  def _testSoftplus(self, np_features, use_gpu=False):
    """Compare nn_ops.softplus with the reference on one device."""
    np_softplus = self._npSoftplus(np_features)
    with self.test_session(use_gpu=use_gpu):
      softplus = nn_ops.softplus(np_features)
      tf_softplus = softplus.eval()
    self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
    # Softplus is strictly positive everywhere.
    self.assertTrue(np.all(tf_softplus > 0))
    self.assertShapeEqual(np_softplus, softplus)

  def testNumbers(self):
    for t in [np.float16, np.float32, np.float64]:
      self._testSoftplus(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
          use_gpu=False)
      self._testSoftplus(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
          use_gpu=True)
      # Probe values around log(eps), where the kernel switches to its
      # asymptotic approximations.
      log_eps = np.log(np.finfo(t).eps)
      one = t(1)
      ten = t(10)
      self._testSoftplus(
          [
              log_eps, log_eps - one, log_eps + one, log_eps - ten,
              log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
              -log_eps - ten, -log_eps + ten
          ],
          use_gpu=False)
      self._testSoftplus(
          [
              log_eps, log_eps - one, log_eps + one, log_eps - ten,
              # Fixed: was "log_eps + ten - log_eps" (missing comma), which
              # dropped two probe points present in the CPU call above.
              log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
              -log_eps - ten, -log_eps + ten
          ],
          use_gpu=True)

  def testGradient(self):
    with self.test_session():
      x = constant_op.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
          shape=[2, 5],
          name="x")
      y = nn_ops.softplus(x, name="softplus")
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      # Numerical-vs-analytic gradient check on the float32 path.
      err = gradient_checker.compute_gradient_error(
          x, [2, 5], y, [2, 5], x_init_value=x_init)
      print("softplus (float) gradient err = ", err)
      self.assertLess(err, 1e-4)


if __name__ == "__main__":
  test.main()
| 35.835165 | 80 | 0.623735 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad
from tensorflow.python.platform import test
class SoftplusTest(test.TestCase):
def _npSoftplus(self, np_features):
np_features = np.asarray(np_features)
zero = np.asarray(0).astype(np_features.dtype)
return np.logaddexp(zero, np_features)
def _testSoftplus(self, np_features, use_gpu=False):
np_softplus = self._npSoftplus(np_features)
with self.test_session(use_gpu=use_gpu):
softplus = nn_ops.softplus(np_features)
tf_softplus = softplus.eval()
self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
self.assertTrue(np.all(tf_softplus > 0))
self.assertShapeEqual(np_softplus, softplus)
def testNumbers(self):
for t in [np.float16, np.float32, np.float64]:
self._testSoftplus(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=False)
self._testSoftplus(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=True)
log_eps = np.log(np.finfo(t).eps)
one = t(1)
ten = t(10)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=False)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
log_eps + ten - log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=True)
def testGradient(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("softplus (float) gradient err = ", err)
self.assertLess(err, 1e-4)
if __name__ == "__main__":
test.main()
| true | true |
f70f620578b48f44b3909f01dd65ebfbea4e404e | 1,774 | py | Python | rezip.py | costerwi/rezip | c5386b96d8ed81a91b191b019ba48cf8377be578 | [
"Apache-2.0"
] | 35 | 2016-09-03T12:15:15.000Z | 2022-03-14T11:04:33.000Z | rezip.py | costerwi/rezip | c5386b96d8ed81a91b191b019ba48cf8377be578 | [
"Apache-2.0"
] | 4 | 2017-02-08T07:25:42.000Z | 2019-06-07T01:58:27.000Z | rezip.py | costerwi/rezip | c5386b96d8ed81a91b191b019ba48cf8377be578 | [
"Apache-2.0"
] | 7 | 2017-01-25T13:48:12.000Z | 2020-09-19T23:35:44.000Z | #!/usr/bin/env python
"""Read zip format file from stdin and write new zip to stdout.
With the --store option the output will be an uncompressed zip.
Uncompressed files are stored more efficiently in Git.
https://github.com/costerwi/rezip
"""
import sys
import io
from zipfile import *
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--store",
help="Store data to stdout zip without compression",
action="store_true")
parser.add_argument("-d", "--deterministic",
help="Remove any file metadata in order to obtain a deterministic "\
"zip file. This is usefull in order to make sure that e.g. the "\
"modification date of the zipped files is irrelevant.",
action="store_true")
args = parser.parse_args()
if args.store:
compression = ZIP_STORED
else:
compression = ZIP_DEFLATED
if not hasattr(sys.stdout, 'buffer'):
raise RuntimeError('Sorry, Python3 is required.')
# Use BytesIO objects as random access source and destination files
with io.BytesIO(sys.stdin.buffer.read()) as source, io.BytesIO() as dest:
# Read and re-zip the file in memory
with ZipFile(source, 'r') as source_zip, ZipFile(dest, 'w') as dest_zip:
for info in source_zip.infolist(): # Iterate over each file in zip
if args.deterministic:
newinfo = ZipInfo(info.filename)
newinfo.create_system = 0 # everything else is fixed
else:
newinfo = info
dest_zip.writestr(newinfo, source_zip.read(info), compression)
dest_zip.comment = source_zip.comment # Copy the comment if any
# Write the dest file as binary to stdout
dest.seek(0)
sys.stdout.buffer.write(dest.read())
| 35.48 | 78 | 0.683202 |
import sys
import io
from zipfile import *
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--store",
help="Store data to stdout zip without compression",
action="store_true")
parser.add_argument("-d", "--deterministic",
help="Remove any file metadata in order to obtain a deterministic "\
"zip file. This is usefull in order to make sure that e.g. the "\
"modification date of the zipped files is irrelevant.",
action="store_true")
args = parser.parse_args()
if args.store:
compression = ZIP_STORED
else:
compression = ZIP_DEFLATED
if not hasattr(sys.stdout, 'buffer'):
raise RuntimeError('Sorry, Python3 is required.')
with io.BytesIO(sys.stdin.buffer.read()) as source, io.BytesIO() as dest:
with ZipFile(source, 'r') as source_zip, ZipFile(dest, 'w') as dest_zip:
for info in source_zip.infolist():
if args.deterministic:
newinfo = ZipInfo(info.filename)
newinfo.create_system = 0
else:
newinfo = info
dest_zip.writestr(newinfo, source_zip.read(info), compression)
dest_zip.comment = source_zip.comment
dest.seek(0)
sys.stdout.buffer.write(dest.read())
| true | true |
f70f62b25660fe1a403bdc5ec0c24fed82b48c60 | 1,202 | py | Python | domonic/webapi/dragndrop.py | byteface/domonic | 971c2ae6ce2253e302873d40fd5b6e46a8f9ca95 | [
"MIT"
] | 94 | 2020-07-12T12:02:07.000Z | 2022-03-25T03:04:57.000Z | domonic/webapi/dragndrop.py | byteface/domonic | 971c2ae6ce2253e302873d40fd5b6e46a8f9ca95 | [
"MIT"
] | 41 | 2021-06-02T10:51:58.000Z | 2022-02-21T09:58:43.000Z | domonic/webapi/dragndrop.py | byteface/domonic | 971c2ae6ce2253e302873d40fd5b6e46a8f9ca95 | [
"MIT"
] | 17 | 2021-06-10T00:34:27.000Z | 2022-02-21T09:47:30.000Z | """
domonic.webapi.dragndrop
====================================
https://developer.mozilla.org/en-US/docs/Web/API/HTML_Drag_and_Drop_API
"""
from domonic.events import DragEvent
class DataTransfer:
    """Python stand-in for the DOM ``DataTransfer`` object (HTML drag & drop).

    Holds typed payload strings plus the files/items being dragged and the
    requested drop/allowed effects.  Lookup semantics follow the MDN
    DataTransfer API: unknown formats read back as "" and clearing is
    tolerant of missing formats instead of raising.
    """

    def __init__(self):
        self.data = {}      # format -> payload string
        self.types = []     # formats registered via setData, in order
        self.files = []     # dragged file entries, if any
        self.items = []     # elements added via addElement
        self.dropEffect = ""
        self.effectAllowed = ""

    def clearData(self, type=None):
        """Remove the payload for *type*, or all payloads when *type* is None.

        Fixed to match the spec: clearing an unregistered format is a no-op
        (previously raised), and omitting *type* clears everything.
        """
        if type is None:
            self.data = {}
            self.types = []
            return
        self.data.pop(type, None)
        if type in self.types:
            self.types.remove(type)

    def getData(self, type):
        """Return the payload stored for *type*, or "" if none was set.

        Fixed to match the spec: previously raised KeyError for unknown
        formats.
        """
        return self.data.get(type, "")

    def setData(self, type, data):
        """Store *data* under *type*, replacing any previous payload."""
        self.data[type] = data
        if type not in self.types:  # don't duplicate the format entry
            self.types.append(type)

    def setDragImage(self, image, x, y):
        """Accepted for API compatibility; custom drag images are not rendered."""
        pass

    def addElement(self, element):
        """Register *element* as part of the drag payload items."""
        self.items.append(element)
# def addFile(self, file):
# self.files.append(file)
# class DataTransferItem:
# def __init__(self, type, data):
# self.type = type
# self.data = data
# def getAsString(self):
# return self.data
# def getAsFile(self):
# return self.data
# def getAsFileSystemHandle(self):
# return self.data
# def webkitGetAsEntry(self):
# return self.data
| 20.724138 | 75 | 0.56406 |
from domonic.events import DragEvent
class DataTransfer:
def __init__(self):
self.data = {}
self.types = []
self.files = []
self.items = []
self.dropEffect = ""
self.effectAllowed = ""
def clearData(self, type):
self.data[type] = ""
self.types.remove(type)
def getData(self, type):
return self.data[type]
def setData(self, type, data):
self.data[type] = data
self.types.append(type)
def setDragImage(self, image, x, y):
pass
def addElement(self, element):
self.items.append(element)
| true | true |
f70f62fe541c2c1b6d4e273273e528bf2f968458 | 3,533 | py | Python | gfwanalysis/routes/api/v1/whrc_biomass_router.py | archelogos/gfw-umd-gee | b1f5492f798a775eee24517bccbc10da513663f6 | [
"MIT"
] | 5 | 2017-11-01T21:13:19.000Z | 2022-03-03T09:26:19.000Z | gfwanalysis/routes/api/v1/whrc_biomass_router.py | gfw-api/gfw-umd-gee | b1f5492f798a775eee24517bccbc10da513663f6 | [
"MIT"
] | 11 | 2018-01-22T09:05:23.000Z | 2019-09-20T11:55:57.000Z | gfwanalysis/routes/api/v1/whrc_biomass_router.py | gfw-api/gfw-umd-gee | b1f5492f798a775eee24517bccbc10da513663f6 | [
"MIT"
] | 3 | 2020-07-10T13:30:58.000Z | 2020-10-14T07:43:12.000Z | """API ROUTER"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from flask import jsonify, Blueprint
from gfwanalysis.errors import WHRCBiomassError
from gfwanalysis.middleware import get_geo_by_hash, get_geo_by_use, get_geo_by_wdpa, \
get_geo_by_national, get_geo_by_subnational, get_geo_by_regional
from gfwanalysis.routes.api import error, set_params
from gfwanalysis.serializers import serialize_whrc_biomass
from gfwanalysis.services.analysis.whrc_biomass_service import WHRCBiomassService
from gfwanalysis.validators import validate_geostore
whrc_biomass_endpoints_v1 = Blueprint('whrc_biomass', __name__)
def analyze(geojson, area_ha):
    """Run WHRC above-ground biomass analysis on *geojson* and jsonify it.

    Returns a Flask (response, status) pair: 400 when no geometry was given,
    500 on service failure, otherwise 200 with the serialized result,
    including the precomputed area and the derived biomass density.
    """
    logging.info('[ROUTER]: WHRC Getting biomass')
    if not geojson:
        return error(status=400, detail='A Geojson argument is required')

    threshold, start, end, table = set_params()
    logging.info(f'[ROUTER]: whrc biomass params {threshold}, {start}, {end}')

    try:
        data = WHRCBiomassService.analyze(geojson=geojson, threshold=threshold)
    except WHRCBiomassError as e:
        logging.error('[ROUTER]: ' + e.message)
        return error(status=500, detail=e.message)
    except Exception as e:
        logging.error('[ROUTER]: ' + str(e))
        return error(status=500, detail='Generic Error')

    data['area_ha'] = area_ha
    tree_cover = data['tree_cover']
    # Density is biomass per unit of tree cover; guard against empty cover.
    data['biomass_density'] = data['biomass'] / tree_cover if tree_cover > 0 else 0
    return jsonify(data=serialize_whrc_biomass(data, 'whrc-biomass')), 200
@whrc_biomass_endpoints_v1.route('/', strict_slashes=False, methods=['GET', 'POST'])
@validate_geostore
@get_geo_by_hash
def get_by_geostore(geojson, area_ha):
    """Analyze WHRC biomass for a geostore-referenced geometry.

    ``geojson`` and ``area_ha`` are injected by the get_geo_by_hash
    middleware after geostore validation.
    """
    logging.info('[ROUTER]: Getting biomass by geostore')
    return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/use/<name>/<id>', strict_slashes=False, methods=['GET'])
@get_geo_by_use
def get_by_use(name, id, geojson, area_ha):
    """Analyze WHRC biomass for a land-use area (``name``/``id``).

    ``geojson`` and ``area_ha`` are resolved by the get_geo_by_use middleware.
    """
    logging.info('[ROUTER]: Getting biomass by use')
    return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/wdpa/<id>', strict_slashes=False, methods=['GET'])
@get_geo_by_wdpa
def get_by_wdpa(id, geojson, area_ha):
    """Analyze WHRC biomass for a WDPA protected area by its id.

    ``geojson`` and ``area_ha`` are resolved by the get_geo_by_wdpa middleware.
    """
    logging.info('[ROUTER]: Getting biomass by wdpa')
    return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/admin/<iso>', strict_slashes=False, methods=['GET'])
@get_geo_by_national
def get_by_national(iso, geojson, area_ha):
    """Analyze WHRC biomass for a country (GADM level 0, ISO code).

    ``geojson`` and ``area_ha`` are resolved by the get_geo_by_national
    middleware.
    """
    logging.info('[ROUTER]: Getting biomass loss by iso')
    return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/admin/<iso>/<id1>', strict_slashes=False, methods=['GET'])
@get_geo_by_subnational
def get_by_subnational(iso, id1, geojson, area_ha):
    """Analyze WHRC biomass for a first-level admin region (GADM level 1).

    ``geojson`` and ``area_ha`` are resolved by the get_geo_by_subnational
    middleware.
    """
    logging.info('[ROUTER]: Getting biomass loss by admin1')
    return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/admin/<iso>/<id1>/<id2>', strict_slashes=False, methods=['GET'])
@get_geo_by_regional
def get_by_regional(iso, id1, id2, geojson, area_ha):
    """Analyze WHRC biomass for a second-level admin region (GADM level 2).

    ``geojson`` and ``area_ha`` are resolved by the get_geo_by_regional
    middleware.
    """
    logging.info('[ROUTER]: Getting biomass loss by admin2 ')
    return analyze(geojson, area_ha)
| 37.585106 | 115 | 0.731956 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from flask import jsonify, Blueprint
from gfwanalysis.errors import WHRCBiomassError
from gfwanalysis.middleware import get_geo_by_hash, get_geo_by_use, get_geo_by_wdpa, \
get_geo_by_national, get_geo_by_subnational, get_geo_by_regional
from gfwanalysis.routes.api import error, set_params
from gfwanalysis.serializers import serialize_whrc_biomass
from gfwanalysis.services.analysis.whrc_biomass_service import WHRCBiomassService
from gfwanalysis.validators import validate_geostore
whrc_biomass_endpoints_v1 = Blueprint('whrc_biomass', __name__)
def analyze(geojson, area_ha):
logging.info('[ROUTER]: WHRC Getting biomass')
if not geojson:
return error(status=400, detail='A Geojson argument is required')
threshold, start, end, table = set_params()
logging.info(f'[ROUTER]: whrc biomass params {threshold}, {start}, {end}')
try:
data = WHRCBiomassService.analyze(
geojson=geojson,
threshold=threshold)
except WHRCBiomassError as e:
logging.error('[ROUTER]: ' + e.message)
return error(status=500, detail=e.message)
except Exception as e:
logging.error('[ROUTER]: ' + str(e))
return error(status=500, detail='Generic Error')
data['area_ha'] = area_ha
data['biomass_density'] = data['biomass'] / data['tree_cover'] if data['tree_cover'] > 0 else 0
return jsonify(data=serialize_whrc_biomass(data, 'whrc-biomass')), 200
@whrc_biomass_endpoints_v1.route('/', strict_slashes=False, methods=['GET', 'POST'])
@validate_geostore
@get_geo_by_hash
def get_by_geostore(geojson, area_ha):
logging.info('[ROUTER]: Getting biomass by geostore')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/use/<name>/<id>', strict_slashes=False, methods=['GET'])
@get_geo_by_use
def get_by_use(name, id, geojson, area_ha):
logging.info('[ROUTER]: Getting biomass by use')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/wdpa/<id>', strict_slashes=False, methods=['GET'])
@get_geo_by_wdpa
def get_by_wdpa(id, geojson, area_ha):
logging.info('[ROUTER]: Getting biomass by wdpa')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/admin/<iso>', strict_slashes=False, methods=['GET'])
@get_geo_by_national
def get_by_national(iso, geojson, area_ha):
logging.info('[ROUTER]: Getting biomass loss by iso')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/admin/<iso>/<id1>', strict_slashes=False, methods=['GET'])
@get_geo_by_subnational
def get_by_subnational(iso, id1, geojson, area_ha):
logging.info('[ROUTER]: Getting biomass loss by admin1')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/admin/<iso>/<id1>/<id2>', strict_slashes=False, methods=['GET'])
@get_geo_by_regional
def get_by_regional(iso, id1, id2, geojson, area_ha):
logging.info('[ROUTER]: Getting biomass loss by admin2 ')
return analyze(geojson, area_ha)
| true | true |
f70f63a933620850da4b7b78a47ed2fd877b2fba | 9,951 | py | Python | BayCatEncoder/code.py | libinruan/hierarchical_bayesian_target_encoder | 7510028a8ad1dea308802c4ca3d3a05533a9c89b | [
"MIT"
] | 1 | 2021-11-28T19:05:55.000Z | 2021-11-28T19:05:55.000Z | BayCatEncoder/code.py | libinruan/hierarchical_bayesian_target_encoder | 7510028a8ad1dea308802c4ca3d3a05533a9c89b | [
"MIT"
] | null | null | null | BayCatEncoder/code.py | libinruan/hierarchical_bayesian_target_encoder | 7510028a8ad1dea308802c4ca3d3a05533a9c89b | [
"MIT"
] | 1 | 2021-11-28T19:07:01.000Z | 2021-11-28T19:07:01.000Z | #%%
import numpy as np
import pandas as pd
import time
from sklearn.base import BaseEstimator, TransformerMixin
from collections import defaultdict
from sklearn.model_selection import KFold, StratifiedKFold
class Timer:
def __enter__(self):
self.start=time.time()
return self
def __exit__(self, *args):
self.end=time.time()
self.hour, temp = divmod((self.end - self.start), 3600)
self.min, self.second = divmod(temp, 60)
self.hour, self.min, self.second = int(self.hour), int(self.min), round(self.second, 2)
return self
class BayCatEncoder(BaseEstimator, TransformerMixin):
def __init__(self,
group_cols,
target_col='target',
N_min=1, # the higher, the more regularization is introduced into the update.
CV=True,
n_fold=5,
verbosity=True,
delimiter='.',
drop_original=False,
drop_intermediate=False,
random_seed=2020):
self.group_cols = [group_cols] if isinstance(group_cols, str) else group_cols # List of column names combination: e.g. ['n1.n2.n4', 'n3.n4', 'n2'].
self.target_col = target_col # String: 'target' by default.
self.stats = defaultdict(dict) # key: column names combination; value: corresponding info about n, N, and computed code.
self.N_min = N_min # regularization control
self.drop_original = drop_original # toggle key for whether to drop original column name(s) or not.
self.CV = CV # Bool
self.n_fold = n_fold
self.drop_intermediate = drop_intermediate
self.delimiter = delimiter
self.verbosity = verbosity # Bool
self.seed = random_seed
self.set_original_col = set()
def fit(self, X, y):
self.col_subsets = self._generate_subsets(self.group_cols)
df = pd.concat([X.copy(), y.copy()], axis=1)
assert(isinstance(self.target_col, str))
df.columns = X.columns.tolist() + [self.target_col]
assert(self._check_col_consistency(X))
if not self.CV:
self._single_fit(df)
else:
self._cv_fit(df)
return self
def _single_fit(self, df):
size_col_subsets = len(self.col_subsets)
count_subset = 0
print(f'start bayesian target encoding on cross features in the following order: {self.col_subsets}')
for subset in self.col_subsets:
count_subset += 1
with Timer() as t:
if self.verbosity: print(f'{subset} - Order {count_subset}/{size_col_subsets}')
df_stat, stat, cross_features = self._update(df, subset)
features_encoded = cross_features + '_code'
self.stats[cross_features] = pd.merge(
stat,
df_stat.groupby(subset)[features_encoded].mean(),
left_index=True,
right_index=True)
if self.verbosity: print(f'time elapsed: {t.hour} hours {t.min} mins {t.second} seconds')
return self
def _cv_fit(self, df):
kf = StratifiedKFold(n_splits = self.n_fold, shuffle = True, random_state=self.seed)
size_col_subsets = len(self.col_subsets)
count_subset = 0
for subset in self.col_subsets:
count_subset += 1
with Timer() as t:
for i, (tr_idx, val_idx) in enumerate(kf.split(df.drop(self.target_col, axis=1), df[self.target_col])):
if self.verbosity: print(f'{subset} - Order {count_subset}/{size_col_subsets} - Round {i+1}/{self.n_fold}')
df_tr, df_val = df.iloc[tr_idx].copy(), df.iloc[val_idx].copy() # Vital for avoid "A value is trying to be set on a copy of a slice from a DataFrame." warning.
df_stat, stat, cross_features = self._update(df_tr, subset)
features_encoded = cross_features + '_code'
df.loc[df.index[val_idx], features_encoded] = pd.merge(
df_val[subset],
df_stat.groupby(subset)[features_encoded].mean(),
left_on=subset,
right_index=True,
how='left'
)[features_encoded].copy() \
.fillna(df[self.target_col].mean())
self.stats[cross_features] = df.groupby(subset)[features_encoded].mean().to_frame()
if self.verbosity: print(f'time elapsed: {t.hour} hours {t.min} mins {t.second} seconds')
return self
def _update(self, df, subset):
self.global_prior_mean = df[self.target_col].mean()
if len(subset) == 1:
self.set_original_col.add(*subset)
upper_level_cols = 'global'
if not upper_level_cols + '_prior_mean' in df.columns:
df.loc[:, upper_level_cols + '_prior_mean'] = self.global_prior_mean
else:
upper_level_cols = self.delimiter.join(subset[:-1]) # e.g. the n1.n2 subset's upper level feature is `n1`.
if not upper_level_cols + '_prior_mean' in df.columns:
df.loc[:, upper_level_cols + '_prior_mean'] = pd.merge(
df[subset[:-1]],
self.stats[upper_level_cols][upper_level_cols + '_code'],
left_on=subset[:-1],
right_index=True,
how='left'
)[upper_level_cols + '_code'].copy()
stat = df.groupby(subset).agg(
n=(self.target_col, 'sum'),
N=(self.target_col, 'count'),
prior_mean=(upper_level_cols + '_prior_mean', 'mean')
)
# Calculate posterior mean
df_stat = pd.merge(df[subset], stat, left_on=subset, right_index=True, how='left')
df_stat['n'].mask(df_stat['n'].isnull(), df_stat['prior_mean'], inplace=True)
df_stat['N'].fillna(1., inplace=True)
df_stat.loc[:, 'N_prior'] = df_stat['N'].map(lambda x: max(self.N_min - x, 0))
df_stat.loc[:, 'alpha_prior'] = df_stat['prior_mean'] * df_stat['N_prior']
df_stat.loc[:, 'beta_prior'] = (1. - df_stat['prior_mean']) * df_stat['N_prior'] # Large N -> zero N_prior -> zero alpha_prior and zero beta_prior -> if n is zero as well -> alpha prior, beta prior both zero -> alpha zero -> posterior mean = zero as well.
if len(subset) == 1:
cross_features = subset[0]
else:
cross_features = self.delimiter.join(subset)
df_stat.loc[:, cross_features + '_code'] = df_stat.apply(self._stat_mean, axis=1) # core # TEST set!!
return df_stat, stat, cross_features
def _generate_subsets(self, groups, delimiter='.'):
subsets = defaultdict(list)
for g in groups:
chain = g.split(delimiter)
for i in range(len(chain)):
if chain[i] and not chain[:i+1] in subsets[i]: subsets[i].append(chain[:i+1])
ret = []
for _, v in subsets.items():
if not v in ret: ret.extend(v)
return ret
def _stat_mean(self, X):
df = X.copy()
alpha = df['alpha_prior'] + df['n']
beta = df['beta_prior'] + df['N'] - df['n']
return alpha / (alpha + beta)
def _check_col_consistency(self, df):
"""Check whether columns specified in `self.group_cols` are all included in `df`.
"""
s = set()
for col_subset in self.col_subsets:
s |= set(col_subset)
for col in s:
if not col in df.columns: return False
return True
def transform(self, X):
assert(self._check_col_consistency(X))
for subset in self.col_subsets:
key = '.'.join(subset)
X = pd.merge(
X,
self.stats[key][key + '_code'],
left_on=subset,
right_index=True,
how='left')
if len(subset) == 1:
X[key + '_code'].fillna(self.global_prior_mean, inplace=True)
else:
parent_key = '.'.join(subset[:-1]) + '_code'
X[key + '_code'].fillna(X[parent_key].mask(X[parent_key] > self.global_prior_mean, self.global_prior_mean), inplace=True)
if self.drop_original:
for col in self.set_original_col:
X.drop(col, axis=1, inplace=True)
X.rename(columns={col+'_code': col}, inplace=True)
if self.drop_intermediate:
for col in X.columns:
if col.endswith('_code') and not col.strip('_code') in self.group_cols:
X.drop(col, axis=1, inplace=True)
return X
#%%
if __name__ == '__main__':
np.random.seed(1)
k = 15
n1 = np.random.choice(['a','b'], k)
n2 = np.random.choice(['c','d'], k)
n3 = np.random.choice(['e','f'], k)
target = np.random.randint(0, 2, size=k)
train = pd.DataFrame(
{'n1': n1, 'n2': n2, 'n3':n3, 'target': target},
columns=['n1', 'n2', 'n3', 'target']
)
train.columns = ['n1','n2','n3', 'target']
train
k = 6
n4 = np.random.choice(['a','b'], k)
n5 = np.random.choice(['c','d'], k)
n6 = np.random.choice(['e','f'], k)
test = pd.DataFrame({'n4': n4, 'n2': n5, 'n3':n6})
test.columns = ['n1','n2','n3']
test
te = BayCatEncoder(
'n1.n2.n3', #['n1.n2.n3', 'n2.n3', 'n3'],
target_col='target',
drop_original=False,
drop_intermediate=False,
CV=False
) \
.fit(train.drop('target', axis=1), train.target)
# te.transform(test)
te.transform(test)
# %%
| 44.226667 | 274 | 0.553613 |
import numpy as np
import pandas as pd
import time
from sklearn.base import BaseEstimator, TransformerMixin
from collections import defaultdict
from sklearn.model_selection import KFold, StratifiedKFold
class Timer:
def __enter__(self):
self.start=time.time()
return self
def __exit__(self, *args):
self.end=time.time()
self.hour, temp = divmod((self.end - self.start), 3600)
self.min, self.second = divmod(temp, 60)
self.hour, self.min, self.second = int(self.hour), int(self.min), round(self.second, 2)
return self
class BayCatEncoder(BaseEstimator, TransformerMixin):
def __init__(self,
group_cols,
target_col='target',
N_min=1,
CV=True,
n_fold=5,
verbosity=True,
delimiter='.',
drop_original=False,
drop_intermediate=False,
random_seed=2020):
self.group_cols = [group_cols] if isinstance(group_cols, str) else group_cols
self.target_col = target_col
self.stats = defaultdict(dict)
self.N_min = N_min
self.drop_original = drop_original
self.CV = CV
self.n_fold = n_fold
self.drop_intermediate = drop_intermediate
self.delimiter = delimiter
self.verbosity = verbosity
self.seed = random_seed
self.set_original_col = set()
def fit(self, X, y):
self.col_subsets = self._generate_subsets(self.group_cols)
df = pd.concat([X.copy(), y.copy()], axis=1)
assert(isinstance(self.target_col, str))
df.columns = X.columns.tolist() + [self.target_col]
assert(self._check_col_consistency(X))
if not self.CV:
self._single_fit(df)
else:
self._cv_fit(df)
return self
def _single_fit(self, df):
size_col_subsets = len(self.col_subsets)
count_subset = 0
print(f'start bayesian target encoding on cross features in the following order: {self.col_subsets}')
for subset in self.col_subsets:
count_subset += 1
with Timer() as t:
if self.verbosity: print(f'{subset} - Order {count_subset}/{size_col_subsets}')
df_stat, stat, cross_features = self._update(df, subset)
features_encoded = cross_features + '_code'
self.stats[cross_features] = pd.merge(
stat,
df_stat.groupby(subset)[features_encoded].mean(),
left_index=True,
right_index=True)
if self.verbosity: print(f'time elapsed: {t.hour} hours {t.min} mins {t.second} seconds')
return self
def _cv_fit(self, df):
kf = StratifiedKFold(n_splits = self.n_fold, shuffle = True, random_state=self.seed)
size_col_subsets = len(self.col_subsets)
count_subset = 0
for subset in self.col_subsets:
count_subset += 1
with Timer() as t:
for i, (tr_idx, val_idx) in enumerate(kf.split(df.drop(self.target_col, axis=1), df[self.target_col])):
if self.verbosity: print(f'{subset} - Order {count_subset}/{size_col_subsets} - Round {i+1}/{self.n_fold}')
df_tr, df_val = df.iloc[tr_idx].copy(), df.iloc[val_idx].copy()
df_stat, stat, cross_features = self._update(df_tr, subset)
features_encoded = cross_features + '_code'
df.loc[df.index[val_idx], features_encoded] = pd.merge(
df_val[subset],
df_stat.groupby(subset)[features_encoded].mean(),
left_on=subset,
right_index=True,
how='left'
)[features_encoded].copy() \
.fillna(df[self.target_col].mean())
self.stats[cross_features] = df.groupby(subset)[features_encoded].mean().to_frame()
if self.verbosity: print(f'time elapsed: {t.hour} hours {t.min} mins {t.second} seconds')
return self
def _update(self, df, subset):
self.global_prior_mean = df[self.target_col].mean()
if len(subset) == 1:
self.set_original_col.add(*subset)
upper_level_cols = 'global'
if not upper_level_cols + '_prior_mean' in df.columns:
df.loc[:, upper_level_cols + '_prior_mean'] = self.global_prior_mean
else:
upper_level_cols = self.delimiter.join(subset[:-1])
if not upper_level_cols + '_prior_mean' in df.columns:
df.loc[:, upper_level_cols + '_prior_mean'] = pd.merge(
df[subset[:-1]],
self.stats[upper_level_cols][upper_level_cols + '_code'],
left_on=subset[:-1],
right_index=True,
how='left'
)[upper_level_cols + '_code'].copy()
stat = df.groupby(subset).agg(
n=(self.target_col, 'sum'),
N=(self.target_col, 'count'),
prior_mean=(upper_level_cols + '_prior_mean', 'mean')
)
# Calculate posterior mean
df_stat = pd.merge(df[subset], stat, left_on=subset, right_index=True, how='left')
df_stat['n'].mask(df_stat['n'].isnull(), df_stat['prior_mean'], inplace=True)
df_stat['N'].fillna(1., inplace=True)
df_stat.loc[:, 'N_prior'] = df_stat['N'].map(lambda x: max(self.N_min - x, 0))
df_stat.loc[:, 'alpha_prior'] = df_stat['prior_mean'] * df_stat['N_prior']
df_stat.loc[:, 'beta_prior'] = (1. - df_stat['prior_mean']) * df_stat['N_prior'] # Large N -> zero N_prior -> zero alpha_prior and zero beta_prior -> if n is zero as well -> alpha prior, beta prior both zero -> alpha zero -> posterior mean = zero as well.
if len(subset) == 1:
cross_features = subset[0]
else:
cross_features = self.delimiter.join(subset)
df_stat.loc[:, cross_features + '_code'] = df_stat.apply(self._stat_mean, axis=1) # core # TEST set!!
return df_stat, stat, cross_features
def _generate_subsets(self, groups, delimiter='.'):
subsets = defaultdict(list)
for g in groups:
chain = g.split(delimiter)
for i in range(len(chain)):
if chain[i] and not chain[:i+1] in subsets[i]: subsets[i].append(chain[:i+1])
ret = []
for _, v in subsets.items():
if not v in ret: ret.extend(v)
return ret
def _stat_mean(self, X):
df = X.copy()
alpha = df['alpha_prior'] + df['n']
beta = df['beta_prior'] + df['N'] - df['n']
return alpha / (alpha + beta)
def _check_col_consistency(self, df):
s = set()
for col_subset in self.col_subsets:
s |= set(col_subset)
for col in s:
if not col in df.columns: return False
return True
def transform(self, X):
assert(self._check_col_consistency(X))
for subset in self.col_subsets:
key = '.'.join(subset)
X = pd.merge(
X,
self.stats[key][key + '_code'],
left_on=subset,
right_index=True,
how='left')
if len(subset) == 1:
X[key + '_code'].fillna(self.global_prior_mean, inplace=True)
else:
parent_key = '.'.join(subset[:-1]) + '_code'
X[key + '_code'].fillna(X[parent_key].mask(X[parent_key] > self.global_prior_mean, self.global_prior_mean), inplace=True)
if self.drop_original:
for col in self.set_original_col:
X.drop(col, axis=1, inplace=True)
X.rename(columns={col+'_code': col}, inplace=True)
if self.drop_intermediate:
for col in X.columns:
if col.endswith('_code') and not col.strip('_code') in self.group_cols:
X.drop(col, axis=1, inplace=True)
return X
#%%
if __name__ == '__main__':
np.random.seed(1)
k = 15
n1 = np.random.choice(['a','b'], k)
n2 = np.random.choice(['c','d'], k)
n3 = np.random.choice(['e','f'], k)
target = np.random.randint(0, 2, size=k)
train = pd.DataFrame(
{'n1': n1, 'n2': n2, 'n3':n3, 'target': target},
columns=['n1', 'n2', 'n3', 'target']
)
train.columns = ['n1','n2','n3', 'target']
train
k = 6
n4 = np.random.choice(['a','b'], k)
n5 = np.random.choice(['c','d'], k)
n6 = np.random.choice(['e','f'], k)
test = pd.DataFrame({'n4': n4, 'n2': n5, 'n3':n6})
test.columns = ['n1','n2','n3']
test
te = BayCatEncoder(
'n1.n2.n3', #['n1.n2.n3', 'n2.n3', 'n3'],
target_col='target',
drop_original=False,
drop_intermediate=False,
CV=False
) \
.fit(train.drop('target', axis=1), train.target)
# te.transform(test)
te.transform(test)
# %%
| true | true |
f70f64b1c0e459c2b611f15e5ff01ad74993ec1e | 1,318 | py | Python | commands/ports.py | retr0-13/routeros-scanner | 9a834d3459aa4621db33dab32a50d6c297bda059 | [
"MIT"
] | 1 | 2022-03-17T00:19:41.000Z | 2022-03-17T00:19:41.000Z | commands/ports.py | retr0-13/routeros-scanner | 9a834d3459aa4621db33dab32a50d6c297bda059 | [
"MIT"
] | null | null | null | commands/ports.py | retr0-13/routeros-scanner | 9a834d3459aa4621db33dab32a50d6c297bda059 | [
"MIT"
] | 1 | 2022-03-22T10:47:12.000Z | 2022-03-22T10:47:12.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from commands.basecommand import BaseCommand
class Ports(BaseCommand):
def __init__(self):
self.__name__ = 'Ports'
def run_ssh(self, sshc):
res = self._ssh_data_with_header(sshc, '/ip service print detail')
sus_dns, recommendation = self.check_results_ssh(res)
return {'raw_data': res,
'suspicious': sus_dns,
'recommendation': recommendation}
def check_results_ssh(self, res):
sus_ports = []
recommendation = []
def_ports = {'telnet': 23, 'ftp': 21, 'www': 80, 'ssh': 22, 'www-ssl': 443, 'api': 8728, 'winbox': 8291,
'api-ssl': 8729}
for item in res:
service = item['name']
if def_ports[service] != int(item['port']):
sus_ports.append(f'The port for {service}, has changed from {def_ports[service]} to {item["port"]} - '
f'severity: low')
if (service == 'ssh') and (int(item['port']) == 22):
recommendation.append('The port for ssh protocol is as ssh default port (22)- Mikrotik company '
'recommended to change it')
return sus_ports, recommendation
| 28.652174 | 118 | 0.560698 |
from commands.basecommand import BaseCommand
class Ports(BaseCommand):
def __init__(self):
self.__name__ = 'Ports'
def run_ssh(self, sshc):
res = self._ssh_data_with_header(sshc, '/ip service print detail')
sus_dns, recommendation = self.check_results_ssh(res)
return {'raw_data': res,
'suspicious': sus_dns,
'recommendation': recommendation}
def check_results_ssh(self, res):
sus_ports = []
recommendation = []
def_ports = {'telnet': 23, 'ftp': 21, 'www': 80, 'ssh': 22, 'www-ssl': 443, 'api': 8728, 'winbox': 8291,
'api-ssl': 8729}
for item in res:
service = item['name']
if def_ports[service] != int(item['port']):
sus_ports.append(f'The port for {service}, has changed from {def_ports[service]} to {item["port"]} - '
f'severity: low')
if (service == 'ssh') and (int(item['port']) == 22):
recommendation.append('The port for ssh protocol is as ssh default port (22)- Mikrotik company '
'recommended to change it')
return sus_ports, recommendation
| true | true |
f70f675e28b12cb449e04ec8328c32afe56f38dc | 1,876 | py | Python | exercicios_resolvidos3/exercicios3/capitulo 09/exercicio-09-35.py | tiagosm1/Python_Nilo_Ney | b5380dcc8fcf64e9c047ebc353585caba3d7b397 | [
"MIT"
] | null | null | null | exercicios_resolvidos3/exercicios3/capitulo 09/exercicio-09-35.py | tiagosm1/Python_Nilo_Ney | b5380dcc8fcf64e9c047ebc353585caba3d7b397 | [
"MIT"
] | null | null | null | exercicios_resolvidos3/exercicios3/capitulo 09/exercicio-09-35.py | tiagosm1/Python_Nilo_Ney | b5380dcc8fcf64e9c047ebc353585caba3d7b397 | [
"MIT"
] | null | null | null | ##############################################################################
# Parte do livro Introdução à Programação com Python
# Autor: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2020
# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8
# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3
# Terceira Edição - Janeiro/2019 - ISBN 978-85-7522-718-3
#
# Site: https://python.nilo.pro.br/
#
# Arquivo: exercicios3\capitulo 09\exercicio-09-35.py
##############################################################################
import sys
import os
import os.path
# este módulo ajuda com a conversão de nomes de arquivos para links
# válidos em HTML
import urllib.request
mascara_do_estilo = "'margin: 5px 0px 5px %dpx;'"
def gera_estilo(nível):
return mascara_do_estilo % (nível * 20)
def gera_listagem(página, diretório):
nraiz = os.path.abspath(diretório).count(os.sep)
for raiz, diretórios, arquivos in os.walk(diretório):
nível = raiz.count(os.sep) - nraiz
página.write(f"<p style={gera_estilo(nível)}>{raiz}</p>")
estilo = gera_estilo(nível+1)
for a in arquivos:
caminho_completo = os.path.join(raiz, a)
tamanho = os.path.getsize(caminho_completo)
link = urllib.request.pathname2url(caminho_completo)
página.write(f"<p style={estilo}><a href='{link}'>{a}</a> ({tamanho} bytes)</p>")
if len(sys.argv) < 2:
print("Digite o nome do diretório para coletar os arquivos!")
sys.exit(1)
diretório = sys.argv[1]
página = open("arquivos.html", "w", encoding="utf-8")
página.write("""
<!DOCTYPE html>
<html lang="pt-BR">
<head>
<meta charset="utf-8">
<title>Arquivos</title>
</head>
<body>
""")
página.write(f"Arquivos encontrados a partir do diretório: {diretório}")
gera_listagem(página, diretório)
página.write("""
</body>
</html>
""")
página.close()
| 29.3125 | 94 | 0.623134 | true | true | |
f70f67632a3e49c17b9d54c76405c03ad7b33836 | 2,341 | py | Python | bomber.py | SashaSarzh/InstagramDirectBomber | 050f601860d6d1bd094cd209fba86a8f02ffc2b0 | [
"Unlicense"
] | 5 | 2020-10-13T13:16:43.000Z | 2021-12-10T08:20:44.000Z | bomber.py | SashaSarzh/InstagramDirectBomber | 050f601860d6d1bd094cd209fba86a8f02ffc2b0 | [
"Unlicense"
] | 1 | 2021-12-10T09:05:56.000Z | 2021-12-10T09:05:56.000Z | bomber.py | SashaSarzh/InstagramDirectBomber | 050f601860d6d1bd094cd209fba86a8f02ffc2b0 | [
"Unlicense"
] | 3 | 2020-10-13T16:03:57.000Z | 2021-12-28T06:34:35.000Z | from InstagramAPI import InstagramAPI
from colorama import Fore, Back, Style
import getpass
import sys
import webbrowser
import time
import requests
import json
print(Fore.GREEN + """
░░███╗░░███╗░░██╗░██████╗████████╗░░██╗██╗██████╗░░█████╗░███╗░░░███╗██████╗░██████╗░██████╗░
░████║░░████╗░██║██╔════╝╚══██╔══╝░██╔╝██║██╔══██╗██╔══██╗████╗░████║██╔══██╗╚════██╗██╔══██╗
██╔██║░░██╔██╗██║╚█████╗░░░░██║░░░██╔╝░██║██████╦╝██║░░██║██╔████╔██║██████╦╝░█████╔╝██████╔╝
╚═╝██║░░██║╚████║░╚═══██╗░░░██║░░░███████║██╔══██╗██║░░██║██║╚██╔╝██║██╔══██╗░╚═══██╗██╔══██╗
███████╗██║░╚███║██████╔╝░░░██║░░░╚════██║██████╦╝╚█████╔╝██║░╚═╝░██║██████╦╝██████╔╝██║░░██║
╚══════╝╚═╝░░╚══╝╚═════╝░░░░╚═╝░░░░░░░░╚═╝╚═════╝░░╚════╝░╚═╝░░░░░╚═╝╚═════╝░╚═════╝░╚═╝░░╚═╝
Author: @SashaSarzh """ + Style.RESET_ALL)
nostop = 0
accounts = input("Input here list of your account(If haven't type Enter): ")
if not accounts:
username = input("Your Login: ")
password = getpass.getpass()
api = InstagramAPI(username, password)
api.login()
istimes = 0
else:
f = open(accounts, 'r')
NumberOfLine = 0
for line in f:
NumberOfLine += 1
username, password = line.split(':')
print ("Login found: ", username)
api = InstagramAPI(username, password)
api.login()
istimes = 0
user = input("Victims nickname: ")
url = "https://www.instagram.com/web/search/topsearch/?context=blended&query="+user+"&rank_token=0.3953592318270893&count=1"
response = requests.get(url)
respJSON = response.json()
user_id = str( respJSON['users'][0].get("user").get("pk") )
while True:
if user == "niggvard" or user == "jidkiypuk2":
print("No no no")
sys.exit()
else: break
message = input("Text of message: ")
if istimes == 0:
times = int(input("How many messages you want send: "))
elif istimes == 1:
times = NumberOfLine
print("You will use bomber ", times,"times ", user_id, "with message: ", message, ".")
ask = input("Do you want continue[y/n]: ")
if ask == 'y':
print('Starting..')
elif ask == 'n':
print('Stopping..')
sys.exit()
else:
print('Stopping')
sys.exit()
while times > nostop:
nostop = nostop + 1
api.sendMessage(user_id,message)
print(nostop, ">> Send", user, ": ", message)
| 28.204819 | 125 | 0.470739 | from InstagramAPI import InstagramAPI
from colorama import Fore, Back, Style
import getpass
import sys
import webbrowser
import time
import requests
import json
print(Fore.GREEN + """
░░███╗░░███╗░░██╗░██████╗████████╗░░██╗██╗██████╗░░█████╗░███╗░░░███╗██████╗░██████╗░██████╗░
░████║░░████╗░██║██╔════╝╚══██╔══╝░██╔╝██║██╔══██╗██╔══██╗████╗░████║██╔══██╗╚════██╗██╔══██╗
██╔██║░░██╔██╗██║╚█████╗░░░░██║░░░██╔╝░██║██████╦╝██║░░██║██╔████╔██║██████╦╝░█████╔╝██████╔╝
╚═╝██║░░██║╚████║░╚═══██╗░░░██║░░░███████║██╔══██╗██║░░██║██║╚██╔╝██║██╔══██╗░╚═══██╗██╔══██╗
███████╗██║░╚███║██████╔╝░░░██║░░░╚════██║██████╦╝╚█████╔╝██║░╚═╝░██║██████╦╝██████╔╝██║░░██║
╚══════╝╚═╝░░╚══╝╚═════╝░░░░╚═╝░░░░░░░░╚═╝╚═════╝░░╚════╝░╚═╝░░░░░╚═╝╚═════╝░╚═════╝░╚═╝░░╚═╝
Author: @SashaSarzh """ + Style.RESET_ALL)
nostop = 0
accounts = input("Input here list of your account(If haven't type Enter): ")
if not accounts:
username = input("Your Login: ")
password = getpass.getpass()
api = InstagramAPI(username, password)
api.login()
istimes = 0
else:
f = open(accounts, 'r')
NumberOfLine = 0
for line in f:
NumberOfLine += 1
username, password = line.split(':')
print ("Login found: ", username)
api = InstagramAPI(username, password)
api.login()
istimes = 0
user = input("Victims nickname: ")
url = "https://www.instagram.com/web/search/topsearch/?context=blended&query="+user+"&rank_token=0.3953592318270893&count=1"
response = requests.get(url)
respJSON = response.json()
user_id = str( respJSON['users'][0].get("user").get("pk") )
while True:
if user == "niggvard" or user == "jidkiypuk2":
print("No no no")
sys.exit()
else: break
message = input("Text of message: ")
if istimes == 0:
times = int(input("How many messages you want send: "))
elif istimes == 1:
times = NumberOfLine
print("You will use bomber ", times,"times ", user_id, "with message: ", message, ".")
ask = input("Do you want continue[y/n]: ")
if ask == 'y':
print('Starting..')
elif ask == 'n':
print('Stopping..')
sys.exit()
else:
print('Stopping')
sys.exit()
while times > nostop:
nostop = nostop + 1
api.sendMessage(user_id,message)
print(nostop, ">> Send", user, ": ", message)
| true | true |
f70f68ac77916fe59476135cbaa77a1fc88fd642 | 17,623 | py | Python | exports_and_stats.py | UB-Dortmund/mms | 39dbb4bee68ee951ee0a5324c684b86a76d0ab6d | [
"Unlicense"
] | null | null | null | exports_and_stats.py | UB-Dortmund/mms | 39dbb4bee68ee951ee0a5324c684b86a76d0ab6d | [
"Unlicense"
] | null | null | null | exports_and_stats.py | UB-Dortmund/mms | 39dbb4bee68ee951ee0a5324c684b86a76d0ab6d | [
"Unlicense"
] | null | null | null | # The MIT License
#
# Copyright 2015-2017 University Library Bochum <bibliogaphie-ub@rub.de> and UB Dortmund <api.ub@tu-dortmund.de>.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import (absolute_import, division, print_function, unicode_literals)
import logging
import re
from logging.handlers import RotatingFileHandler
from bs4 import BeautifulSoup
import requests
import simplejson as json
from flask import Flask, request, jsonify, url_for
from flask import make_response
from flask_cors import CORS
from flask_swagger import swagger
from flask_wtf.csrf import CSRFProtect
from forms.forms import *
import persistence
from utils.solr_handler import Solr
try:
import local_stats_secrets as secrets
except ImportError:
import stats_secrets as secrets
class ReverseProxied(object):
    """WSGI middleware honouring ``X-Script-Name`` / ``X-Scheme`` headers.

    A front-end reverse proxy serving this app under a sub-path sets these
    headers; the middleware rewrites the WSGI environ so that Flask's URL
    generation produces externally correct links.
    """

    def __init__(self, app):
        # the wrapped WSGI application
        self.app = app

    def __call__(self, environ, start_response):
        prefix = environ.get('HTTP_X_SCRIPT_NAME', '')
        if prefix:
            # expose the external mount point and strip it from the path
            environ['SCRIPT_NAME'] = prefix
            path = environ['PATH_INFO']
            if path.startswith(prefix):
                environ['PATH_INFO'] = path[len(prefix):]

        forwarded_scheme = environ.get('HTTP_X_SCHEME', '')
        if forwarded_scheme:
            # the proxy terminates TLS; report its scheme (http/https)
            environ['wsgi.url_scheme'] = forwarded_scheme

        return self.app(environ, start_response)
# ---------- Flask application & logging setup ----------

# Application object; CORS is enabled for all routes so the export
# endpoints can be consumed by browser clients on other origins.
app = Flask(__name__)
CORS(app)

# When served behind a reverse proxy under a sub-path, rewrite the WSGI
# environ so url_for() builds externally correct URLs.
if secrets.DIFFERENT_PROXY_PATH:
    app.wsgi_app = ReverseProxied(app.wsgi_app)

app.debug = secrets.DEBUG
app.secret_key = secrets.DEBUG_KEY

# Keep the debug toolbar from intercepting redirects.
# (Fixed: the key previously contained a trailing space,
# 'DEBUG_TB_INTERCEPT_REDIRECTS ', so the setting was stored under the
# wrong name and never honoured.)
app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False

csrf = CSRFProtect(app)

# Rotating file log shared by the app logger and werkzeug's request logger.
log_formatter = logging.Formatter("[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
handler = RotatingFileHandler(secrets.LOGFILE, maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
handler.setFormatter(log_formatter)
app.logger.addHandler(handler)

log = logging.getLogger('werkzeug')
log.setLevel(logging.DEBUG)
log.addHandler(handler)
# ---------- EXPORT ----------
@app.route('/export/openapc/<year>', methods=['GET'])
@csrf.exempt
def export_openapc(year=''):
    '''
    Export the open-access-fund publications of one year as OpenAPC-style CSV.

    The requesting institution is derived from the client address route; only
    records flagged for that institution (``tudo``/``rubi``) and the given
    publication year are exported.

    swagger_from_file: api_doc/export_openapc.yml
    '''
    # Resolve the requesting institution from the client address route.
    if theme(request.access_route) == 'dortmund':
        affiliation = 'tudo'
        affiliation_str = 'TU Dortmund'
    elif theme(request.access_route) == 'bochum':
        affiliation = 'rubi'
        affiliation_str = 'Ruhr-Universität Bochum'
    else:
        affiliation = ''
        affiliation_str = ''

    if not affiliation:
        return make_response('No affiliation parameter set. Please contact the administrator!', 400)

    csv = '"institution";"period";"euro";"doi";"is_hybrid";"publisher";"journal_full_title";"issn";"url";"local_id"\n'

    oa_solr = Solr(host=secrets.SOLR_HOST, port=secrets.SOLR_PORT,
                   application=secrets.SOLR_APP, core='hb2', handler='query',
                   query='oa_funds:true', facet='false', rows=100000,
                   fquery=['%s:true' % affiliation, 'fdate:%s' % year])
    oa_solr.request()
    results = oa_solr.results

    if not results:
        return make_response('No results', 404)

    for record in results:
        thedata = json.loads(record.get('wtf_json'))
        # 'doi' is a multivalued Solr field; guard against records without one
        # (the previous code raised TypeError on a missing field).
        doi = record.get('doi')[0] if record.get('doi') else ''
        is_hybrid = False
        if record.get('is_hybrid'):
            is_hybrid = record.get('is_hybrid')
        publisher = ''
        journal_title = ''
        issn = ''
        url = ''
        if not doi:
            # Without a DOI, OpenAPC needs journal metadata; resolve it from
            # the linked host record (journal/series), if one exists.
            if record.get('is_part_of_id') and record.get('is_part_of_id')[0]:
                host = persistence.get_work(record.get('is_part_of_id')[0])
                if host:
                    # NOTE: bind the host data to its own name. The previous
                    # code rebound the loop variable 'record' here, which made
                    # the 'local_id' column below emit the journal's id
                    # instead of the publication's id.
                    host_data = json.loads(host.get('wtf_json'))
                    journal_title = host_data.get('title')
                    if host_data.get('fsubseries'):
                        journal_title = host_data.get('fsubseries')
                    if host_data.get('publisher'):
                        publisher = host_data.get('publisher')
                    if host_data.get('ISSN'):
                        # first non-empty ISSN wins
                        for entry in host_data.get('ISSN'):
                            if entry:
                                issn = entry
                                break
                    # landing-page URI is only exported for DOI-less records
                    if thedata.get('uri'):
                        url = thedata.get('uri')[0]
        csv += '"%s";%s;%s;"%s";"%s";"%s";"%s";"%s";"%s";"%s"\n' % (
            affiliation_str,
            year,
            0.00,  # APC amount is not tracked here; the column is required by OpenAPC
            doi,
            is_hybrid,
            publisher,
            journal_title,
            issn,
            url,
            record.get('id')
        )

    resp = make_response(csv, 200)
    resp.headers['Content-Type'] = 'text/csv; charset=utf-8'
    return resp
@app.route('/export/oa_report/<year>', methods=['GET'])
@csrf.exempt
def export_oa_report(year=''):
    '''
    Export an open-access report for all publications of the given year as CSV.

    The institution is derived from the client IP chain (TU Dortmund vs.
    Ruhr-Universität Bochum); the optional query parameter ``pubtype``
    (default ``ArticleJournal``) restricts the publication type.

    swagger_from_file: api_doc/export_oa_report.yml
    '''
    pubtype = request.args.get('pubtype', 'ArticleJournal')
    # Pick the institution from the request's IP chain; an empty affiliation
    # means the client is from neither campus network.
    if theme(request.access_route) == 'dortmund':
        affiliation = 'tudo'
        affiliation_str = 'TU Dortmund'
    elif theme(request.access_route) == 'bochum':
        affiliation = 'rubi'
        affiliation_str = 'Ruhr-Universität Bochum'
    else:
        affiliation = ''
        affiliation_str = ''
    if affiliation:
        # Semicolon-separated CSV header row.
        csv = '"AU";"TI";"SO";"DT";"RP";"EM";"OI";"PU";"ISSN";"E-ISSN";"DOI";"OA";"RP TUDO";"Fak"\n'
        # TODO search for all publications of the given year
        oa_solr = Solr(host=secrets.SOLR_HOST, port=secrets.SOLR_PORT,
                       application=secrets.SOLR_APP, core='hb2', handler='query',
                       query='*:*', facet='false', rows=100000,
                       fquery=['%s:true' % affiliation, 'fdate:%s' % year, 'pubtype:%s' % pubtype])
        oa_solr.request()
        results = oa_solr.results
        if results:
            for record in results:
                thedata = json.loads(record.get('wtf_json'))
                author = ''
                corresponding_author = ''
                corresponding_affiliation = ''
                faks = ''
                # Collect authors; for persons with a GND id, resolve the
                # faculty label via the person and organisation records.
                for person in thedata.get('person'):
                    if 'aut' in person.get('role'):
                        author += person.get('name') + ';'
                    if person.get('corresponding_author'):
                        corresponding_author = person.get('name')
                    if person.get('tudo'):
                        corresponding_affiliation = True
                    if person.get('gnd'):
                        tudo = persistence.get_person(person.get('gnd'))
                        # print(person.get('gnd'))
                        if tudo:
                            if tudo.get('affiliation_id'):
                                faks = ''
                                for entry in tudo.get('affiliation_id'):
                                    affil = persistence.get_orga(entry)
                                    fak = ''
                                    if affil:
                                        has_parent = False
                                        fak = affil.get('pref_label')
                                        if affil.get('parent_id'):
                                            has_parent = True
                                            fak = '%s / %s' % (affil.get('parent_label'), affil.get('pref_label'))
                                        # Climb the organisation's parent chain;
                                        # each step overwrites fak with the current
                                        # level's "parent label / label" pair.
                                        while has_parent:
                                            affil = persistence.get_orga(affil.get('parent_id'))
                                            if affil.get('parent_id'):
                                                has_parent = True
                                                fak = '%s / %s' % (affil.get('parent_label'), affil.get('pref_label'))
                                            else:
                                                has_parent = False
                                    else:
                                        fak = 'LinkError: Person %s' % person.get('gnd')
                                    faks += fak + ';'
                                # Drop the trailing separator.
                                faks = faks[:-1]
                # Drop the trailing ';' after the last author name.
                author = author[:-1]
                publisher = ''
                journal_title = ''
                issn = ''
                journal_title = ''
                # Resolve the host record (the journal) for title, publisher
                # and the first non-empty ISSN. NOTE(review): this rebinds
                # the loop variable `record` to the host record.
                if record.get('is_part_of_id'):
                    if record.get('is_part_of_id')[0]:
                        host = persistence.get_work(record.get('is_part_of_id')[0])
                        if host:
                            record = json.loads(host.get('wtf_json'))
                            # print(json.dumps(record, indent=4))
                            journal_title = record.get('title')
                            if record.get('fsubseries'):
                                journal_title = record.get('fsubseries')
                            publisher = ''
                            if record.get('publisher'):
                                publisher = record.get('publisher')
                            issn = ''
                            if record.get('ISSN'):
                                for entry in record.get('ISSN'):
                                    if entry:
                                        issn = entry
                                        break
                # Append one CSV data row per publication record.
                csv += '"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s"\n' % (
                    author,
                    thedata.get('title'),
                    journal_title,
                    'article',
                    corresponding_author,
                    '',
                    '',
                    publisher,
                    issn,
                    '',
                    thedata.get('DOI')[0],
                    thedata.get('oa_funded'),
                    corresponding_affiliation,
                    faks,
                )
        resp = make_response(csv, 200)
        resp.headers['Content-Type'] = 'text/csv; charset=utf-8'
        return resp
    else:
        return make_response('No affiliation parameter set. Please contact the administrator!', 400)
# ---------- STATISTICS ----------
# ---------- REST ----------
@app.route("/export/spec")
def spec():
    """Serve the Swagger specification for this service as JSON."""
    spec_doc = swagger(app, from_file_keyword='swagger_from_file')
    # Fill in the deployment-specific metadata from the secrets module.
    info = spec_doc['info']
    info['version'] = secrets.SWAGGER_API_VERSION
    info['title'] = secrets.SWAGGER_TITLE
    info['description'] = secrets.SWAGGER_DESCRIPTION
    spec_doc['schemes'] = secrets.SWAGGER_SCHEMES
    spec_doc['host'] = secrets.SWAGGER_HOST
    spec_doc['basePath'] = secrets.SWAGGER_BASEPATH
    spec_doc['tags'] = [
        {'name': 'monitoring', 'description': 'Methods for monitoring the service'},
        {'name': 'export', 'description': 'Special data views as exports'},
        {'name': 'statistics', 'description': 'Statistics'},
    ]
    return jsonify(spec_doc)
@app.route('/export/_ping')
@csrf.exempt
def _ping():
    """
    Ping the service

    swagger_from_file: bibliography_doc/_ping.yml
    """
    # Any exception while collecting dependency health counts as unhealthy.
    try:
        healthy = 'failed' not in json.dumps(dependencies_health(), indent=4)
    except Exception:
        healthy = False
    if healthy:
        return make_response('pong', 200)
    return make_response('One or more dependencies unavailable!', 500)
@app.route('/export/_health')
@csrf.exempt
def _health():
    """
    Show the health of the service and its dependencies

    swagger_from_file: bibliography_doc/_health.yml
    """
    payload = json.dumps(
        {
            "name": "hb2_flask",
            "timestamp": timestamp(),
            "dependencies": dependencies_health(),
        },
        indent=4,
    )
    # Any dependency reporting 'failed' turns the whole response into a 500.
    status = 500 if 'failed' in payload else 200
    response = make_response(payload, status)
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Content-type'] = 'application/json'
    return response
def dependencies_health():
    """Collect the health status of all Solr cores this service depends on.

    Pings each core's admin handler and records its status string
    ('ok'/'failed', lower-cased) so that callers such as ``_health`` and
    ``_ping`` can scan the serialized result for the word 'failed'.

    Returns:
        list: one dict per dependency with the keys 'service', 'status',
            'description' and 'external'.
    """
    def core_status(core):
        # Ping a single Solr core. Any request failure (connection error,
        # timeout, HTTP error) or unparseable/shapeless response counts as
        # 'failed' instead of crashing the health endpoint — the original
        # code only caught ConnectionError.
        try:
            return requests.get(
                'http://%s:%s/%s/%s/admin/ping?wt=json' % (secrets.SOLR_HOST, secrets.SOLR_PORT, secrets.SOLR_APP, core),
                headers={'Accept': 'application/json'}).json().get('status').lower()
        except (requests.exceptions.RequestException, ValueError, AttributeError):
            return 'failed'

    # One entry per core, in the original reporting order. The labels for
    # 'group', 'organisation' and 'person' previously lacked the closing
    # quote ('Solr Core "group'); all are now formatted consistently.
    cores = [
        ('hb2', 'Storage for bibliographic data'),
        ('hb2_users', 'Storage for registered users'),
        ('group', 'Storage for working groups or projects data'),
        ('organisation', 'Storage for organisations data'),
        ('person', 'Storage for persons data'),
    ]
    return [
        {
            'service': 'Solr Core "%s"' % core,
            'status': core_status(core),
            'description': description,
            'external': False,
        }
        for core, description in cores
    ]
# ---------- MAIN ----------
def str2bool(v):
    """Interpret *v* as a boolean flag.

    Any value whose lower-cased string form is one of 'yes', 'true', 't'
    or '1' counts as True; everything else (including None) is False.

    Args:
        v: arbitrary value, typically a string from a request or config.

    Returns:
        bool: the interpreted truth value.
    """
    # The original membership list also contained "True", which is
    # unreachable after lower-casing; a set covers all spellings.
    return str(v).lower() in {'yes', 'true', 't', '1'}
def theme(ip):
    """Derive the site theme from a client IP chain.

    Inspects the second-to-last entry of *ip* (typically
    ``request.access_route``, where the last hop is the proxy) and maps the
    RUB network (134.147.*) to 'bochum' and the TU Dortmund network
    (129.217.*) to 'dortmund'.

    Args:
        ip (list): chain of client/proxy IP addresses as strings.

    Returns:
        str: 'bochum' or 'dortmund' (the default).
    """
    site = 'dortmund'
    # Bug fix: the original fallback assigned ``idx = ip[0]`` (an IP
    # *string*) on exception and an empty chain raised IndexError; both
    # now fall back to the default theme.
    if not ip:
        return site
    # Second-to-last hop; for a single-entry chain this wraps around to
    # index -1, i.e. the sole entry, exactly as before.
    idx = len(ip) - 2
    if ip[idx].startswith('134.147'):
        site = 'bochum'
    elif ip[idx].startswith('129.217'):
        site = 'dortmund'
    return site
def cleanhtml(raw_html):
    """Strip all HTML/XML tags from *raw_html* and return the bare text."""
    # Non-greedy match so adjacent tags are removed individually instead of
    # swallowing everything between the first '<' and the last '>'.
    return re.sub(r'<.*?>', '', raw_html)
if __name__ == '__main__':
    # Development entry point: run the Flask app on the configured port.
    app.run(port=secrets.APP_PORT)
| 35.74645 | 130 | 0.511037 |
from __future__ import (absolute_import, division, print_function, unicode_literals)
import logging
import re
from logging.handlers import RotatingFileHandler
from bs4 import BeautifulSoup
import requests
import simplejson as json
from flask import Flask, request, jsonify, url_for
from flask import make_response
from flask_cors import CORS
from flask_swagger import swagger
from flask_wtf.csrf import CSRFProtect
from forms.forms import *
import persistence
from utils.solr_handler import Solr
try:
import local_stats_secrets as secrets
except ImportError:
import stats_secrets as secrets
class ReverseProxied(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
app = Flask(__name__)
CORS(app)
if secrets.DIFFERENT_PROXY_PATH:
app.wsgi_app = ReverseProxied(app.wsgi_app)
app.debug = secrets.DEBUG
app.secret_key = secrets.DEBUG_KEY
app.config['DEBUG_TB_INTERCEPT_REDIRECTS '] = False
csrf = CSRFProtect(app)
log_formatter = logging.Formatter("[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
handler = RotatingFileHandler(secrets.LOGFILE, maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
handler.setFormatter(log_formatter)
app.logger.addHandler(handler)
log = logging.getLogger('werkzeug')
log.setLevel(logging.DEBUG)
log.addHandler(handler)
@app.route('/export/openapc/<year>', methods=['GET'])
@csrf.exempt
def export_openapc(year=''):
if theme(request.access_route) == 'dortmund':
affiliation = 'tudo'
affiliation_str = 'TU Dortmund'
elif theme(request.access_route) == 'bochum':
affiliation = 'rubi'
affiliation_str = 'Ruhr-Universität Bochum'
else:
affiliation = ''
affiliation_str = ''
if affiliation:
csv = '"institution";"period";"euro";"doi";"is_hybrid";"publisher";"journal_full_title";"issn";"url";"local_id"\n'
oa_solr = Solr(host=secrets.SOLR_HOST, port=secrets.SOLR_PORT,
application=secrets.SOLR_APP, core='hb2', handler='query',
query='oa_funds:true', facet='false', rows=100000,
fquery=['%s:true' % affiliation, 'fdate:%s' % year])
oa_solr.request()
results = oa_solr.results
if len(results) > 0:
for record in results:
thedata = json.loads(record.get('wtf_json'))
doi = record.get('doi')[0]
is_hybrid = False
if record.get('is_hybrid'):
is_hybrid = record.get('is_hybrid')
publisher = ''
journal_title = ''
issn = ''
url = ''
if not doi:
journal_title = ''
if record.get('is_part_of_id'):
if record.get('is_part_of_id')[0]:
host = persistence.get_work(record.get('is_part_of_id')[0])
if host:
record = json.loads(host.get('wtf_json'))
journal_title = record.get('title')
if record.get('fsubseries'):
journal_title = record.get('fsubseries')
publisher = ''
if record.get('publisher'):
publisher = record.get('publisher')
issn = ''
if record.get('ISSN'):
for entry in record.get('ISSN'):
if entry:
issn = entry
break
url = ''
if thedata.get('uri'):
for uri in thedata.get('uri'):
url = uri
break
csv += '"%s";%s;%s;"%s";"%s";"%s";"%s";"%s";"%s";"%s"\n' % (
affiliation_str,
year,
0.00,
doi,
is_hybrid,
publisher,
journal_title,
issn,
url,
record.get('id')
)
resp = make_response(csv, 200)
resp.headers['Content-Type'] = 'text/csv; charset=utf-8'
return resp
else:
return make_response('No results', 404)
else:
return make_response('No affiliation parameter set. Please contact the administrator!', 400)
@app.route('/export/oa_report/<year>', methods=['GET'])
@csrf.exempt
def export_oa_report(year=''):
pubtype = request.args.get('pubtype', 'ArticleJournal')
if theme(request.access_route) == 'dortmund':
affiliation = 'tudo'
affiliation_str = 'TU Dortmund'
elif theme(request.access_route) == 'bochum':
affiliation = 'rubi'
affiliation_str = 'Ruhr-Universität Bochum'
else:
affiliation = ''
affiliation_str = ''
if affiliation:
csv = '"AU";"TI";"SO";"DT";"RP";"EM";"OI";"PU";"ISSN";"E-ISSN";"DOI";"OA";"RP TUDO";"Fak"\n'
oa_solr = Solr(host=secrets.SOLR_HOST, port=secrets.SOLR_PORT,
application=secrets.SOLR_APP, core='hb2', handler='query',
query='*:*', facet='false', rows=100000,
fquery=['%s:true' % affiliation, 'fdate:%s' % year, 'pubtype:%s' % pubtype])
oa_solr.request()
results = oa_solr.results
if results:
for record in results:
thedata = json.loads(record.get('wtf_json'))
author = ''
corresponding_author = ''
corresponding_affiliation = ''
faks = ''
for person in thedata.get('person'):
if 'aut' in person.get('role'):
author += person.get('name') + ';'
if person.get('corresponding_author'):
corresponding_author = person.get('name')
if person.get('tudo'):
corresponding_affiliation = True
if person.get('gnd'):
tudo = persistence.get_person(person.get('gnd'))
if tudo:
if tudo.get('affiliation_id'):
faks = ''
for entry in tudo.get('affiliation_id'):
affil = persistence.get_orga(entry)
fak = ''
if affil:
has_parent = False
fak = affil.get('pref_label')
if affil.get('parent_id'):
has_parent = True
fak = '%s / %s' % (affil.get('parent_label'), affil.get('pref_label'))
while has_parent:
affil = persistence.get_orga(affil.get('parent_id'))
if affil.get('parent_id'):
has_parent = True
fak = '%s / %s' % (affil.get('parent_label'), affil.get('pref_label'))
else:
has_parent = False
else:
fak = 'LinkError: Person %s' % person.get('gnd')
faks += fak + ';'
faks = faks[:-1]
author = author[:-1]
publisher = ''
journal_title = ''
issn = ''
journal_title = ''
if record.get('is_part_of_id'):
if record.get('is_part_of_id')[0]:
host = persistence.get_work(record.get('is_part_of_id')[0])
if host:
record = json.loads(host.get('wtf_json'))
journal_title = record.get('title')
if record.get('fsubseries'):
journal_title = record.get('fsubseries')
publisher = ''
if record.get('publisher'):
publisher = record.get('publisher')
issn = ''
if record.get('ISSN'):
for entry in record.get('ISSN'):
if entry:
issn = entry
break
csv += '"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s"\n' % (
author,
thedata.get('title'),
journal_title,
'article',
corresponding_author,
'',
'',
publisher,
issn,
'',
thedata.get('DOI')[0],
thedata.get('oa_funded'),
corresponding_affiliation,
faks,
)
resp = make_response(csv, 200)
resp.headers['Content-Type'] = 'text/csv; charset=utf-8'
return resp
else:
return make_response('No affiliation parameter set. Please contact the administrator!', 400)
@app.route("/export/spec")
def spec():
swag = swagger(app, from_file_keyword='swagger_from_file')
swag['info']['version'] = secrets.SWAGGER_API_VERSION
swag['info']['title'] = secrets.SWAGGER_TITLE
swag['info']['description'] = secrets.SWAGGER_DESCRIPTION
swag['schemes'] = secrets.SWAGGER_SCHEMES
swag['host'] = secrets.SWAGGER_HOST
swag['basePath'] = secrets.SWAGGER_BASEPATH
swag['tags'] = [
{
'name': 'monitoring',
'description': 'Methods for monitoring the service'
},
{
'name': 'export',
'description': 'Special data views as exports'
},
{
'name': 'statistics',
'description': 'Statistics'
},
]
return jsonify(swag)
@app.route('/export/_ping')
@csrf.exempt
def _ping():
try:
if 'failed' in json.dumps(dependencies_health(), indent=4):
return make_response('One or more dependencies unavailable!', 500)
else:
return make_response('pong', 200)
except Exception:
return make_response('One or more dependencies unavailable!', 500)
@app.route('/export/_health')
@csrf.exempt
def _health():
health_json = {
"name": "hb2_flask",
"timestamp": timestamp(),
"dependencies": dependencies_health()
}
json_string = json.dumps(health_json, indent=4)
status = 200
if 'failed' in json_string:
status = 500
response = make_response(json_string, status)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Content-type'] = 'application/json'
return response
def dependencies_health():
dependencies = []
try:
status = requests.get(
'http://%s:%s/%s/hb2/admin/ping?wt=json' % (secrets.SOLR_HOST, secrets.SOLR_PORT, secrets.SOLR_APP),
headers={'Accept': 'application/json'}).json().get('status').lower()
except requests.exceptions.ConnectionError:
status = 'failed'
dependencies.append({
'service': 'Solr Core "hb2"',
'status': status,
'description': 'Storage for bibliographic data',
'external': False
})
try:
status = requests.get(
'http://%s:%s/%s/hb2_users/admin/ping?wt=json' % (secrets.SOLR_HOST, secrets.SOLR_PORT, secrets.SOLR_APP),
headers={'Accept': 'application/json'}).json().get('status').lower()
except requests.exceptions.ConnectionError:
status = 'failed'
dependencies.append({
'service': 'Solr Core "hb2_users"',
'status': status,
'description': 'Storage for registered users',
'external': False
})
try:
status = requests.get(
'http://%s:%s/%s/group/admin/ping?wt=json' % (secrets.SOLR_HOST, secrets.SOLR_PORT, secrets.SOLR_APP),
headers={'Accept': 'application/json'}).json().get('status').lower()
except requests.exceptions.ConnectionError:
status = 'failed'
dependencies.append({
'service': 'Solr Core "group',
'status': status,
'description': 'Storage for working groups or projects data',
'external': False
})
try:
status = requests.get(
'http://%s:%s/%s/organisation/admin/ping?wt=json' % (secrets.SOLR_HOST, secrets.SOLR_PORT, secrets.SOLR_APP),
headers={'Accept': 'application/json'}).json().get('status').lower()
except requests.exceptions.ConnectionError:
status = 'failed'
dependencies.append({
'service': 'Solr Core "organisation',
'status': status,
'description': 'Storage for organisations data',
'external': False
})
try:
status = requests.get(
'http://%s:%s/%s/person/admin/ping?wt=json' % (secrets.SOLR_HOST, secrets.SOLR_PORT, secrets.SOLR_APP),
headers={'Accept': 'application/json'}).json().get('status').lower()
except requests.exceptions.ConnectionError:
status = 'failed'
dependencies.append({
'service': 'Solr Core "person',
'status': status,
'description': 'Storage for persons data',
'external': False
})
return dependencies
# ---------- MAIN ----------
def str2bool(v):
if str(v).lower() in ("yes", "true", "True", "t", "1"):
return True
else:
return False
def theme(ip):
# logging.info('IPs: %s' % len(ip))
# logging.info('IPs: %s' % ip)
site = 'dortmund'
try:
idx = len(ip)-2
except Exception:
idx = ip[0]
if ip[idx].startswith('134.147'):
site = 'bochum'
elif ip[idx].startswith('129.217'):
site = 'dortmund'
return site
def cleanhtml(raw_html):
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', raw_html)
return cleantext
if __name__ == '__main__':
app.run(port=secrets.APP_PORT)
| true | true |
f70f69db064aa00e8a22d18234f4e4bfb925c067 | 5,053 | py | Python | tools/harness-automation/autothreadharness/harness_controller.py | ltaoti/openthread | b24192267d56c9a175739d8b2a285bc4b701deaf | [
"BSD-3-Clause"
] | 3 | 2018-06-20T11:13:33.000Z | 2020-12-08T15:15:10.000Z | tools/harness-automation/autothreadharness/harness_controller.py | ltaoti/openthread | b24192267d56c9a175739d8b2a285bc4b701deaf | [
"BSD-3-Clause"
] | 2 | 2017-03-23T07:47:54.000Z | 2017-08-21T03:12:31.000Z | tools/harness-automation/autothreadharness/harness_controller.py | ltaoti/openthread | b24192267d56c9a175739d8b2a285bc4b701deaf | [
"BSD-3-Clause"
] | 3 | 2017-08-29T01:31:57.000Z | 2020-05-07T22:56:52.000Z | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ConfigParser
import logging
import os
import subprocess
import time
from autothreadharness import settings
logger = logging.getLogger(__name__)
HARNESS_SVN_VERSION_R44 = 1471
"""int: this is the first published release that miniweb was removed from Harness"""
def _try_kill(proc):
    """Kill *proc* (a subprocess.Popen), retrying up to three times.

    Raises SystemExit when the process is still alive after the third kill.
    """
    logger.info('Try kill process')
    attempt = 0
    while proc.poll() is None:
        attempt += 1
        proc.kill()
        # Give the process time to die before re-checking.
        time.sleep(5)
        if proc.poll() is not None:
            logger.info('Process has been killed')
            break
        logger.info('Trial %d failed', attempt)
        if attempt >= 3:
            raise SystemExit()
class HarnessController(object):
    """Harness service control

    This controls harness service, including the harness back-end and front-end.
    """
    harness = None
    """harness back-end"""
    miniweb = None
    """harness front-end"""
    def __init__(self, result_dir=None):
        """Read the installed Harness version and remember the log directory.

        :param result_dir: directory that receives the harness/miniweb log files
        """
        self.result_dir = result_dir
        self.harness_file = ''
        harness_info = ConfigParser.ConfigParser()
        harness_info.read('%s\\info.ini' % settings.HARNESS_HOME)
        # SVN revision of the installed Harness; releases >= R44 no longer
        # ship the miniweb front-end, so start/stop skip it there.
        self.version = harness_info.getint('Thread_Harness_Info', 'SVN')
    def start(self):
        """Start the harness back-end and, for pre-R44 releases, miniweb."""
        logger.info('Starting harness service')
        if self.harness:
            logger.warning('Harness already started')
        else:
            # Run.py needs the Harness packages on PYTHONPATH of the bundled
            # Python 2.7 interpreter.
            env = dict(os.environ, PYTHONPATH='%s\\Thread_Harness;%s\\ThirdParty\\hsdk-python\\src'
                                              % (settings.HARNESS_HOME, settings.HARNESS_HOME))
            self.harness_file = '%s\\harness-%s.log' % (self.result_dir, time.strftime('%Y%m%d%H%M%S'))
            with open(self.harness_file, 'w') as harness_out:
                # Both stdout and stderr go into the timestamped log file.
                self.harness = subprocess.Popen([settings.HARNESS_HOME + '\\Python27\\python.exe',
                                                 settings.HARNESS_HOME + '\\Thread_Harness\\Run.py'],
                                                cwd=settings.HARNESS_HOME,
                                                stdout=harness_out,
                                                stderr=harness_out,
                                                env=env)
            time.sleep(2)
        if self.version >= HARNESS_SVN_VERSION_R44:
            # Miniweb was removed from the Harness in R44; nothing to start.
            return
        if self.miniweb:
            logger.warning('Miniweb already started')
        else:
            with open('%s\\miniweb-%s.log' % (self.result_dir, time.strftime('%Y%m%d%H%M%S')), 'w') as miniweb_out:
                self.miniweb = subprocess.Popen([settings.HARNESS_HOME + '\\MiniWeb\\miniweb.exe'],
                                                stdout=miniweb_out,
                                                stderr=miniweb_out,
                                                cwd=settings.HARNESS_HOME + '\\MiniWeb')
    def stop(self):
        """Kill the harness back-end and, for pre-R44 releases, miniweb."""
        logger.info('Stopping harness service')
        if self.harness:
            _try_kill(self.harness)
            self.harness = None
        else:
            logger.warning('Harness not started yet')
        if self.version >= HARNESS_SVN_VERSION_R44:
            return
        if self.miniweb:
            _try_kill(self.miniweb)
            self.miniweb = None
        else:
            logger.warning('Miniweb not started yet')
    def tail(self):
        """Return roughly the last 100 bytes of the harness log file."""
        with open(self.harness_file) as harness_out:
            # Seek to 100 bytes before end-of-file (whence=2).
            harness_out.seek(-100, 2)
            return ''.join(harness_out.readlines())
    def __del__(self):
        # Make sure child processes do not outlive the controller object.
        self.stop()
| 36.883212 | 115 | 0.622798 |
import ConfigParser
import logging
import os
import subprocess
import time
from autothreadharness import settings
logger = logging.getLogger(__name__)
HARNESS_SVN_VERSION_R44 = 1471
def _try_kill(proc):
logger.info('Try kill process')
times = 1
while proc.poll() is None:
proc.kill()
time.sleep(5)
if proc.poll() is not None:
logger.info('Process has been killed')
break
logger.info('Trial %d failed', times)
times += 1
if times > 3:
raise SystemExit()
class HarnessController(object):
harness = None
miniweb = None
def __init__(self, result_dir=None):
self.result_dir = result_dir
self.harness_file = ''
harness_info = ConfigParser.ConfigParser()
harness_info.read('%s\\info.ini' % settings.HARNESS_HOME)
self.version = harness_info.getint('Thread_Harness_Info', 'SVN')
def start(self):
logger.info('Starting harness service')
if self.harness:
logger.warning('Harness already started')
else:
env = dict(os.environ, PYTHONPATH='%s\\Thread_Harness;%s\\ThirdParty\\hsdk-python\\src'
% (settings.HARNESS_HOME, settings.HARNESS_HOME))
self.harness_file = '%s\\harness-%s.log' % (self.result_dir, time.strftime('%Y%m%d%H%M%S'))
with open(self.harness_file, 'w') as harness_out:
self.harness = subprocess.Popen([settings.HARNESS_HOME + '\\Python27\\python.exe',
settings.HARNESS_HOME + '\\Thread_Harness\\Run.py'],
cwd=settings.HARNESS_HOME,
stdout=harness_out,
stderr=harness_out,
env=env)
time.sleep(2)
if self.version >= HARNESS_SVN_VERSION_R44:
return
if self.miniweb:
logger.warning('Miniweb already started')
else:
with open('%s\\miniweb-%s.log' % (self.result_dir, time.strftime('%Y%m%d%H%M%S')), 'w') as miniweb_out:
self.miniweb = subprocess.Popen([settings.HARNESS_HOME + '\\MiniWeb\\miniweb.exe'],
stdout=miniweb_out,
stderr=miniweb_out,
cwd=settings.HARNESS_HOME + '\\MiniWeb')
def stop(self):
logger.info('Stopping harness service')
if self.harness:
_try_kill(self.harness)
self.harness = None
else:
logger.warning('Harness not started yet')
if self.version >= HARNESS_SVN_VERSION_R44:
return
if self.miniweb:
_try_kill(self.miniweb)
self.miniweb = None
else:
logger.warning('Miniweb not started yet')
def tail(self):
with open(self.harness_file) as harness_out:
harness_out.seek(-100, 2)
return ''.join(harness_out.readlines())
def __del__(self):
self.stop()
| true | true |
f70f6bcb10f8446af8069e8b93a6ae0bef8dac89 | 4,415 | py | Python | dragonchain/lib/dao/smart_contract_dao.py | cheeseandcereal/dragonchain | 34d34e344b887c2a0eeb591ede2015cc2506a323 | [
"Apache-2.0"
] | null | null | null | dragonchain/lib/dao/smart_contract_dao.py | cheeseandcereal/dragonchain | 34d34e344b887c2a0eeb591ede2015cc2506a323 | [
"Apache-2.0"
] | null | null | null | dragonchain/lib/dao/smart_contract_dao.py | cheeseandcereal/dragonchain | 34d34e344b887c2a0eeb591ede2015cc2506a323 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Dragonchain, Inc.
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
# You may obtain a copy of the Apache License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
from typing import List, Optional, Dict
from dragonchain import logger
from dragonchain import exceptions
from dragonchain.lib.dto import smart_contract_model
from dragonchain.lib.interfaces import storage
from dragonchain.lib.database import redisearch
from dragonchain.lib import faas
# Constants
FOLDER = "SMARTCONTRACT"
_log = logger.get_logger()
def get_contract_id_by_txn_type(txn_type: str) -> str:
    """Look up the id of the smart contract indexed under *txn_type*.

    Raises:
        exceptions.NotFound: when no contract is indexed for the type.
    """
    escaped = redisearch.get_escaped_redisearch_string(txn_type)
    docs = redisearch.search(
        index=redisearch.Indexes.smartcontract.value, query_str=f"@sc_name:{{{escaped}}}", only_id=True
    ).docs
    if not docs:
        raise exceptions.NotFound(f"Smart contract {txn_type} could not be found.")
    return docs[0].id
def get_contract_by_txn_type(txn_type: str) -> smart_contract_model.SmartContractModel:
    """Fetch the smart contract registered for *txn_type* from storage."""
    contract_id = get_contract_id_by_txn_type(txn_type)
    at_rest = storage.get_json_from_object(f"{FOLDER}/{contract_id}/metadata.json")
    return smart_contract_model.new_from_at_rest(at_rest)
def list_all_contract_ids() -> List[str]:
    """Return the ids of every smart contract in the redisearch index.

    Fetches only document ids (no fields) and is capped at 10000 entries.
    """
    query_result = redisearch.search(index=redisearch.Indexes.smartcontract.value, query_str="*", limit=10000, only_id=True)
    # Comprehension instead of the original manual append loop.
    return [doc.id for doc in query_result.docs]
def get_serial_contracts() -> List[smart_contract_model.SmartContractModel]:
    """
    Searches for serial contracts

    Please note this function fetches all smart contract metadata from storage each time it is run, so should be used sparingly
    """
    # Clean out dangling index entries first so get_contract_by_id below
    # cannot fail on a contract whose metadata is gone.
    remove_bad_contracts()
    models = (get_contract_by_id(contract_id) for contract_id in list_all_contract_ids())
    return [model for model in models if model.execution_order == "serial"]
def remove_bad_contracts() -> None:
    """Remove contract(s) from the index if its metadata doesn't exist"""
    for sc_id in list_all_contract_ids():
        try:
            # A NotFound from the metadata lookup marks a dangling index
            # entry; drop it from the redisearch index.
            get_contract_by_id(sc_id)
        except exceptions.NotFound:
            redisearch.delete_document(index=redisearch.Indexes.smartcontract.value, doc_name=sc_id)
def add_smart_contract_index(contract: smart_contract_model.SmartContractModel) -> None:
    """Add the index for a smart contract"""
    # Upsert so re-registering an existing contract refreshes its entry.
    redisearch.put_document(redisearch.Indexes.smartcontract.value, contract.id, {"sc_name": contract.txn_type}, upsert=True)
def remove_smart_contract_index(contract_id: str) -> None:
    """Remove the index for a smart contract"""
    # After deletion the contract is no longer returned by index searches.
    redisearch.delete_document(redisearch.Indexes.smartcontract.value, contract_id)
def get_contract_by_id(contract_id: str) -> smart_contract_model.SmartContractModel:
    """Load a smart contract's metadata from storage by its id."""
    metadata = storage.get_json_from_object(f"{FOLDER}/{contract_id}/metadata.json")
    return smart_contract_model.new_from_at_rest(metadata)
def contract_does_exist(contract_id: str) -> bool:
    """Return True when metadata for *contract_id* is present in storage."""
    metadata_key = f"{FOLDER}/{contract_id}/metadata.json"
    return storage.does_object_exist(metadata_key)
def get_contract_logs(contract_id: str, since: Optional[str], tail: Optional[int]) -> List[Dict[str, str]]:
    """Fetch a smart contract's logs from OpenFaaS.

    Args:
        contract_id: id of the contract whose function logs are requested.
        since: if given, only return log entries after this timestamp.
        tail: if given, maximum number of log entries to return.

    Returns:
        A list of log entry dicts as provided by the faas client.
    """
    # Consistency fix: contract_id was the only parameter in this module
    # without a type annotation.
    return faas.get_logs(contract_id, since, tail)
| 43.284314 | 146 | 0.755379 |
from typing import List, Optional, Dict
from dragonchain import logger
from dragonchain import exceptions
from dragonchain.lib.dto import smart_contract_model
from dragonchain.lib.interfaces import storage
from dragonchain.lib.database import redisearch
from dragonchain.lib import faas
FOLDER = "SMARTCONTRACT"
_log = logger.get_logger()
def get_contract_id_by_txn_type(txn_type: str) -> str:
results = redisearch.search(
index=redisearch.Indexes.smartcontract.value, query_str=f"@sc_name:{{{redisearch.get_escaped_redisearch_string(txn_type)}}}", only_id=True
).docs
if results:
return results[0].id
raise exceptions.NotFound(f"Smart contract {txn_type} could not be found.")
def get_contract_by_txn_type(txn_type: str) -> smart_contract_model.SmartContractModel:
return smart_contract_model.new_from_at_rest(storage.get_json_from_object(f"{FOLDER}/{get_contract_id_by_txn_type(txn_type)}/metadata.json"))
def list_all_contract_ids() -> List[str]:
query_result = redisearch.search(index=redisearch.Indexes.smartcontract.value, query_str="*", limit=10000, only_id=True)
contract_ids = []
for index in query_result.docs:
contract_ids.append(index.id)
return contract_ids
def get_serial_contracts() -> List[smart_contract_model.SmartContractModel]:
remove_bad_contracts()
serial_contracts = []
for sc_id in list_all_contract_ids():
sc_model = get_contract_by_id(sc_id)
if sc_model.execution_order == "serial":
serial_contracts.append(sc_model)
return serial_contracts
def remove_bad_contracts() -> None:
for sc_id in list_all_contract_ids():
try:
get_contract_by_id(sc_id)
except exceptions.NotFound:
redisearch.delete_document(index=redisearch.Indexes.smartcontract.value, doc_name=sc_id)
def add_smart_contract_index(contract: smart_contract_model.SmartContractModel) -> None:
redisearch.put_document(redisearch.Indexes.smartcontract.value, contract.id, {"sc_name": contract.txn_type}, upsert=True)
def remove_smart_contract_index(contract_id: str) -> None:
redisearch.delete_document(redisearch.Indexes.smartcontract.value, contract_id)
def get_contract_by_id(contract_id: str) -> smart_contract_model.SmartContractModel:
return smart_contract_model.new_from_at_rest(storage.get_json_from_object(f"{FOLDER}/{contract_id}/metadata.json"))
def contract_does_exist(contract_id: str) -> bool:
    """Check whether metadata for the given contract id exists in storage."""
    metadata_key = f"{FOLDER}/{contract_id}/metadata.json"
    return storage.does_object_exist(metadata_key)
def get_contract_logs(contract_id: str, since: Optional[str], tail: Optional[int]) -> List[Dict[str, str]]:
    """Fetch runtime logs for a smart contract from the FaaS layer.

    Args:
        contract_id: Id of the contract whose logs to fetch.
        since: Cutoff passed straight through to faas.get_logs — presumably a
            timestamp string; verify the expected format against that function.
        tail: Number of trailing log lines to return, or None for the backend default.

    Returns:
        A list of log-entry dictionaries as returned by the FaaS backend.
    """
    return faas.get_logs(contract_id, since, tail)
| true | true |
f70f6c1aeecb37a118fbcfe765ce6106f98b5a2d | 3,643 | py | Python | paddle_quantum/QAOA/example/main.py | xinwang1/Quantum | 0f56e36e9e6111547547ae1b6cd5df307b41c1ac | [
"Apache-2.0"
] | null | null | null | paddle_quantum/QAOA/example/main.py | xinwang1/Quantum | 0f56e36e9e6111547547ae1b6cd5df307b41c1ac | [
"Apache-2.0"
] | null | null | null | paddle_quantum/QAOA/example/main.py | xinwang1/Quantum | 0f56e36e9e6111547547ae1b6cd5df307b41c1ac | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 Institute for Quantum Computing, Baidu Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
main
"""
from paddle import fluid
import os
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
from paddle_quantum.utils import pauli_str_to_matrix
from paddle_quantum.QAOA.Paddle_QAOA import Paddle_QAOA
from paddle_quantum.QAOA.QAOA_Prefunc import generate_graph, H_generator
def main(N=4):
    """Run the QAOA MaxCut demo end to end.

    Builds an N-node graph, benchmarks the problem Hamiltonian's extreme
    eigenvalues, trains the QAOA circuit, plots the training loss against the
    ground-energy benchmark, measures the optimized circuit, and draws the
    resulting cut.

    Args:
        N: Number of qubits / number of nodes in the problem graph.
           (Fix: the original body immediately re-assigned ``N = 4`` and also
           hard-coded ``N=4`` in the Paddle_QAOA call, silently ignoring this
           parameter; it is now honored throughout.)
    """
    classical_graph, classical_graph_adjacency = generate_graph(N, GRAPHMETHOD=1)
    print(classical_graph_adjacency)

    # Convert the Hamiltonian's list form to matrix form to benchmark its spectrum
    H_matrix = pauli_str_to_matrix(H_generator(N, classical_graph_adjacency), N)

    H_diag = np.diag(H_matrix).real
    H_max = np.max(H_diag)
    H_min = np.min(H_diag)

    print(H_diag)
    print('H_max:', H_max, ' H_min:', H_min)

    # Draw the problem graph
    pos = nx.circular_layout(classical_graph)
    nx.draw(classical_graph, pos, width=4, with_labels=True, font_weight='bold')
    plt.show()

    # Re-generate the graph and run the QAOA optimization (P circuit layers)
    classical_graph, classical_graph_adjacency = generate_graph(N, 1)
    opt_cir = Paddle_QAOA(classical_graph_adjacency, N=N, P=4, METHOD=1, ITR=120, LR=0.1)

    # Load the training summary written by Paddle_QAOA
    x1 = np.load('./output/summary_data.npz')

    # Broadcast the benchmark ground energy across every iteration for plotting
    H_min = np.ones([len(x1['iter'])]) * H_min

    # Plot loss vs. the ground-energy benchmark
    loss_QAOA, = plt.plot(x1['iter'], x1['energy'], alpha=0.7, marker='', linestyle="--", linewidth=2, color='m')
    benchmark, = plt.plot(x1['iter'], H_min, alpha=0.7, marker='', linestyle=":", linewidth=2, color='b')
    plt.xlabel('Number of iteration')
    plt.ylabel('Performance of the loss function for QAOA')

    plt.legend(handles=[
        loss_QAOA,
        benchmark
    ],
        labels=[
            r'Loss function $\left\langle {\psi \left( {\bf{\theta }} \right)} '
            r'\right|H\left| {\psi \left( {\bf{\theta }} \right)} \right\rangle $',
            'The benchmark result',
        ], loc='best')

    # Show the plot
    plt.show()

    with fluid.dygraph.guard():
        # Measure the output state of the QAOA circuit for 1024 shots by default
        prob_measure = opt_cir.measure(plot=True)

        # Find the max value in measured probability of bitstrings
        max_prob = max(prob_measure.values())
        # Find the bitstring with max probability
        solution_list = [result[0] for result in prob_measure.items() if result[1] == max_prob]
        print("The output bitstring:", solution_list)

        # Draw the graph representing the first bitstring in the solution_list
        # to the MaxCut-like problem: nodes colored by bit value, cut edges dashed
        head_bitstring = solution_list[0]

        node_cut = ["blue" if head_bitstring[node] == "1" else "red" for node in classical_graph]

        edge_cut = [
            "solid" if head_bitstring[node_row] == head_bitstring[node_col] else "dashed"
            for node_row, node_col in classical_graph.edges()
        ]
        nx.draw(
            classical_graph,
            pos,
            node_color=node_cut,
            style=edge_cut,
            width=4,
            with_labels=True,
            font_weight="bold",
        )
        plt.show()


if __name__ == "__main__":
    main()
| 32.526786 | 113 | 0.681581 |
from paddle import fluid
import os
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
from paddle_quantum.utils import pauli_str_to_matrix
from paddle_quantum.QAOA.Paddle_QAOA import Paddle_QAOA
from paddle_quantum.QAOA.QAOA_Prefunc import generate_graph, H_generator
def main(N=4):
    """Run the QAOA MaxCut demo end to end.

    Builds an N-node graph, benchmarks the problem Hamiltonian's extreme
    eigenvalues, trains the QAOA circuit, plots the training loss against the
    ground-energy benchmark, measures the optimized circuit, and draws the
    resulting cut.

    Args:
        N: Number of qubits / number of nodes in the problem graph.
           (Fix: the original body immediately re-assigned ``N = 4`` and also
           hard-coded ``N=4`` in the Paddle_QAOA call, silently ignoring this
           parameter; it is now honored throughout.)
    """
    classical_graph, classical_graph_adjacency = generate_graph(N, GRAPHMETHOD=1)
    print(classical_graph_adjacency)

    # Convert the Hamiltonian's list form to matrix form to benchmark its spectrum
    H_matrix = pauli_str_to_matrix(H_generator(N, classical_graph_adjacency), N)

    H_diag = np.diag(H_matrix).real
    H_max = np.max(H_diag)
    H_min = np.min(H_diag)

    print(H_diag)
    print('H_max:', H_max, ' H_min:', H_min)

    # Draw the problem graph
    pos = nx.circular_layout(classical_graph)
    nx.draw(classical_graph, pos, width=4, with_labels=True, font_weight='bold')
    plt.show()

    # Re-generate the graph and run the QAOA optimization (P circuit layers)
    classical_graph, classical_graph_adjacency = generate_graph(N, 1)
    opt_cir = Paddle_QAOA(classical_graph_adjacency, N=N, P=4, METHOD=1, ITR=120, LR=0.1)

    # Load the training summary written by Paddle_QAOA
    x1 = np.load('./output/summary_data.npz')

    # Broadcast the benchmark ground energy across every iteration for plotting
    H_min = np.ones([len(x1['iter'])]) * H_min

    # Plot loss vs. the ground-energy benchmark
    loss_QAOA, = plt.plot(x1['iter'], x1['energy'], alpha=0.7, marker='', linestyle="--", linewidth=2, color='m')
    benchmark, = plt.plot(x1['iter'], H_min, alpha=0.7, marker='', linestyle=":", linewidth=2, color='b')
    plt.xlabel('Number of iteration')
    plt.ylabel('Performance of the loss function for QAOA')

    plt.legend(handles=[
        loss_QAOA,
        benchmark
    ],
        labels=[
            r'Loss function $\left\langle {\psi \left( {\bf{\theta }} \right)} '
            r'\right|H\left| {\psi \left( {\bf{\theta }} \right)} \right\rangle $',
            'The benchmark result',
        ], loc='best')

    # Show the plot
    plt.show()

    with fluid.dygraph.guard():
        # Measure the output state of the QAOA circuit for 1024 shots by default
        prob_measure = opt_cir.measure(plot=True)

        # Find the max value in measured probability of bitstrings
        max_prob = max(prob_measure.values())
        # Find the bitstring with max probability
        solution_list = [result[0] for result in prob_measure.items() if result[1] == max_prob]
        print("The output bitstring:", solution_list)

        # Draw the graph representing the first bitstring in the solution_list
        # to the MaxCut-like problem: nodes colored by bit value, cut edges dashed
        head_bitstring = solution_list[0]

        node_cut = ["blue" if head_bitstring[node] == "1" else "red" for node in classical_graph]

        edge_cut = [
            "solid" if head_bitstring[node_row] == head_bitstring[node_col] else "dashed"
            for node_row, node_col in classical_graph.edges()
        ]
        nx.draw(
            classical_graph,
            pos,
            node_color=node_cut,
            style=edge_cut,
            width=4,
            with_labels=True,
            font_weight="bold",
        )
        plt.show()


if __name__ == "__main__":
    main()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.