| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4–721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 5–91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 – 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| github_id | int64 | 426 – 681M |
| star_events_count | int64 | 101 – 243k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns], nullable (⌀) | 2012-06-28 18:51:49 – 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns], nullable (⌀) | 2008-02-11 22:55:26 – 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 – 10.2M |
| extension | string | 115 classes |
| filename | string | length 3–113 |
| content | string | length 6 – 10.2M |
51f8dd0fcc33da09eebeff0ed99acc61cb6159a6
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/azurestack/azure-mgmt-azurestack/azure/mgmt/azurestack/aio/operations/_products_operations.py
|
55db1d24bfe14b1d0da005187a83aa6d8037329c
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 39,045
|
py
|
_products_operations.py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._products_operations import (
build_get_product_request,
build_get_products_request,
build_get_request,
build_list_details_request,
build_list_products_request,
build_list_request,
build_upload_log_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
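# ClsType: optional `cls` callback that post-processes a response; it is
# passed the raw pipeline response, the deserialized result, and the response
# headers, and its return value is returned to the caller instead.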
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ProductsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.azurestack.aio.AzureStackManagementClient`'s
:attr:`products` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
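        # The management client injects the pipeline client, configuration,
        # serializer, and deserializer, positionally or by keyword.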
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, resource_group: str, registration_name: str, **kwargs: Any) -> AsyncIterable["_models.Product"]:
"""Returns a list of products.
:param resource_group: Name of the resource group. Required.
:type resource_group: str
:param registration_name: Name of the Azure Stack registration. Required.
:type registration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either Product or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.azurestack.models.Product]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ProductList] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group=resource_group,
registration_name=registration_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ProductList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.AzureStack/registrations/{registrationName}/products"
}
@distributed_trace_async
async def get(
self, resource_group: str, registration_name: str, product_name: str, **kwargs: Any
) -> _models.Product:
"""Returns the specified product.
:param resource_group: Name of the resource group. Required.
:type resource_group: str
:param registration_name: Name of the Azure Stack registration. Required.
:type registration_name: str
:param product_name: Name of the product. Required.
:type product_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Product or the result of cls(response)
:rtype: ~azure.mgmt.azurestack.models.Product
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.Product] = kwargs.pop("cls", None)
request = build_get_request(
resource_group=resource_group,
registration_name=registration_name,
product_name=product_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("Product", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.AzureStack/registrations/{registrationName}/products/{productName}"
}
@distributed_trace_async
async def list_details(
self, resource_group: str, registration_name: str, product_name: str, **kwargs: Any
) -> _models.ExtendedProduct:
"""Returns the extended properties of a product.
:param resource_group: Name of the resource group. Required.
:type resource_group: str
:param registration_name: Name of the Azure Stack registration. Required.
:type registration_name: str
:param product_name: Name of the product. Required.
:type product_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExtendedProduct or the result of cls(response)
:rtype: ~azure.mgmt.azurestack.models.ExtendedProduct
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ExtendedProduct] = kwargs.pop("cls", None)
request = build_list_details_request(
resource_group=resource_group,
registration_name=registration_name,
product_name=product_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_details.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ExtendedProduct", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_details.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.AzureStack/registrations/{registrationName}/products/{productName}/listDetails"
}
@overload
async def list_products(
self,
resource_group: str,
registration_name: str,
product_name: str,
device_configuration: Optional[_models.DeviceConfiguration] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ProductList:
"""Returns a list of products.
:param resource_group: Name of the resource group. Required.
:type resource_group: str
:param registration_name: Name of the Azure Stack registration. Required.
:type registration_name: str
:param product_name: Name of the product. Required.
:type product_name: str
:param device_configuration: Device configuration. Default value is None.
:type device_configuration: ~azure.mgmt.azurestack.models.DeviceConfiguration
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProductList or the result of cls(response)
:rtype: ~azure.mgmt.azurestack.models.ProductList
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def list_products(
self,
resource_group: str,
registration_name: str,
product_name: str,
device_configuration: Optional[IO] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ProductList:
"""Returns a list of products.
:param resource_group: Name of the resource group. Required.
:type resource_group: str
:param registration_name: Name of the Azure Stack registration. Required.
:type registration_name: str
:param product_name: Name of the product. Required.
:type product_name: str
:param device_configuration: Device configuration. Default value is None.
:type device_configuration: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProductList or the result of cls(response)
:rtype: ~azure.mgmt.azurestack.models.ProductList
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def list_products(
self,
resource_group: str,
registration_name: str,
product_name: str,
device_configuration: Optional[Union[_models.DeviceConfiguration, IO]] = None,
**kwargs: Any
) -> _models.ProductList:
"""Returns a list of products.
:param resource_group: Name of the resource group. Required.
:type resource_group: str
:param registration_name: Name of the Azure Stack registration. Required.
:type registration_name: str
:param product_name: Name of the product. Required.
:type product_name: str
        :param device_configuration: Device configuration. Is either a model type or an IO type. Default
value is None.
:type device_configuration: ~azure.mgmt.azurestack.models.DeviceConfiguration or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProductList or the result of cls(response)
:rtype: ~azure.mgmt.azurestack.models.ProductList
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ProductList] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(device_configuration, (IO, bytes)):
_content = device_configuration
else:
if device_configuration is not None:
_json = self._serialize.body(device_configuration, "DeviceConfiguration")
else:
_json = None
request = build_list_products_request(
resource_group=resource_group,
registration_name=registration_name,
product_name=product_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.list_products.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ProductList", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_products.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.AzureStack/registrations/{registrationName}/products/{productName}/listProducts"
}
@overload
async def get_products(
self,
resource_group: str,
registration_name: str,
product_name: str,
device_configuration: Optional[_models.DeviceConfiguration] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ProductList:
"""Returns a list of products.
:param resource_group: Name of the resource group. Required.
:type resource_group: str
:param registration_name: Name of the Azure Stack registration. Required.
:type registration_name: str
:param product_name: Name of the product. Required.
:type product_name: str
:param device_configuration: Device configuration. Default value is None.
:type device_configuration: ~azure.mgmt.azurestack.models.DeviceConfiguration
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProductList or the result of cls(response)
:rtype: ~azure.mgmt.azurestack.models.ProductList
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def get_products(
self,
resource_group: str,
registration_name: str,
product_name: str,
device_configuration: Optional[IO] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ProductList:
"""Returns a list of products.
:param resource_group: Name of the resource group. Required.
:type resource_group: str
:param registration_name: Name of the Azure Stack registration. Required.
:type registration_name: str
:param product_name: Name of the product. Required.
:type product_name: str
:param device_configuration: Device configuration. Default value is None.
:type device_configuration: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProductList or the result of cls(response)
:rtype: ~azure.mgmt.azurestack.models.ProductList
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def get_products(
self,
resource_group: str,
registration_name: str,
product_name: str,
device_configuration: Optional[Union[_models.DeviceConfiguration, IO]] = None,
**kwargs: Any
) -> _models.ProductList:
"""Returns a list of products.
:param resource_group: Name of the resource group. Required.
:type resource_group: str
:param registration_name: Name of the Azure Stack registration. Required.
:type registration_name: str
:param product_name: Name of the product. Required.
:type product_name: str
        :param device_configuration: Device configuration. Is either a model type or an IO type. Default
value is None.
:type device_configuration: ~azure.mgmt.azurestack.models.DeviceConfiguration or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProductList or the result of cls(response)
:rtype: ~azure.mgmt.azurestack.models.ProductList
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ProductList] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(device_configuration, (IO, bytes)):
_content = device_configuration
else:
if device_configuration is not None:
_json = self._serialize.body(device_configuration, "DeviceConfiguration")
else:
_json = None
request = build_get_products_request(
resource_group=resource_group,
registration_name=registration_name,
product_name=product_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.get_products.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ProductList", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_products.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.AzureStack/registrations/{registrationName}/products/{productName}/getProducts"
}
@overload
async def get_product(
self,
resource_group: str,
registration_name: str,
product_name: str,
device_configuration: Optional[_models.DeviceConfiguration] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.Product:
"""Returns the specified product.
:param resource_group: Name of the resource group. Required.
:type resource_group: str
:param registration_name: Name of the Azure Stack registration. Required.
:type registration_name: str
:param product_name: Name of the product. Required.
:type product_name: str
:param device_configuration: Device configuration. Default value is None.
:type device_configuration: ~azure.mgmt.azurestack.models.DeviceConfiguration
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Product or the result of cls(response)
:rtype: ~azure.mgmt.azurestack.models.Product
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def get_product(
self,
resource_group: str,
registration_name: str,
product_name: str,
device_configuration: Optional[IO] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.Product:
"""Returns the specified product.
:param resource_group: Name of the resource group. Required.
:type resource_group: str
:param registration_name: Name of the Azure Stack registration. Required.
:type registration_name: str
:param product_name: Name of the product. Required.
:type product_name: str
:param device_configuration: Device configuration. Default value is None.
:type device_configuration: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Product or the result of cls(response)
:rtype: ~azure.mgmt.azurestack.models.Product
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def get_product(
self,
resource_group: str,
registration_name: str,
product_name: str,
device_configuration: Optional[Union[_models.DeviceConfiguration, IO]] = None,
**kwargs: Any
) -> _models.Product:
"""Returns the specified product.
:param resource_group: Name of the resource group. Required.
:type resource_group: str
:param registration_name: Name of the Azure Stack registration. Required.
:type registration_name: str
:param product_name: Name of the product. Required.
:type product_name: str
        :param device_configuration: Device configuration. Is either a model type or an IO type. Default
value is None.
:type device_configuration: ~azure.mgmt.azurestack.models.DeviceConfiguration or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Product or the result of cls(response)
:rtype: ~azure.mgmt.azurestack.models.Product
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.Product] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(device_configuration, (IO, bytes)):
_content = device_configuration
else:
if device_configuration is not None:
_json = self._serialize.body(device_configuration, "DeviceConfiguration")
else:
_json = None
request = build_get_product_request(
resource_group=resource_group,
registration_name=registration_name,
product_name=product_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.get_product.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("Product", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_product.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.AzureStack/registrations/{registrationName}/products/{productName}/getProduct"
}
@overload
async def upload_log(
self,
resource_group: str,
registration_name: str,
product_name: str,
marketplace_product_log_update: Optional[_models.MarketplaceProductLogUpdate] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ProductLog:
"""Returns the specified product.
:param resource_group: Name of the resource group. Required.
:type resource_group: str
:param registration_name: Name of the Azure Stack registration. Required.
:type registration_name: str
:param product_name: Name of the product. Required.
:type product_name: str
:param marketplace_product_log_update: Update details for product log. Default value is None.
:type marketplace_product_log_update: ~azure.mgmt.azurestack.models.MarketplaceProductLogUpdate
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProductLog or the result of cls(response)
:rtype: ~azure.mgmt.azurestack.models.ProductLog
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def upload_log(
self,
resource_group: str,
registration_name: str,
product_name: str,
marketplace_product_log_update: Optional[IO] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ProductLog:
"""Returns the specified product.
:param resource_group: Name of the resource group. Required.
:type resource_group: str
:param registration_name: Name of the Azure Stack registration. Required.
:type registration_name: str
:param product_name: Name of the product. Required.
:type product_name: str
:param marketplace_product_log_update: Update details for product log. Default value is None.
:type marketplace_product_log_update: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProductLog or the result of cls(response)
:rtype: ~azure.mgmt.azurestack.models.ProductLog
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def upload_log(
self,
resource_group: str,
registration_name: str,
product_name: str,
marketplace_product_log_update: Optional[Union[_models.MarketplaceProductLogUpdate, IO]] = None,
**kwargs: Any
) -> _models.ProductLog:
"""Returns the specified product.
:param resource_group: Name of the resource group. Required.
:type resource_group: str
:param registration_name: Name of the Azure Stack registration. Required.
:type registration_name: str
:param product_name: Name of the product. Required.
:type product_name: str
:param marketplace_product_log_update: Update details for product log. Is either a model type
         or an IO type. Default value is None.
:type marketplace_product_log_update: ~azure.mgmt.azurestack.models.MarketplaceProductLogUpdate
or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProductLog or the result of cls(response)
:rtype: ~azure.mgmt.azurestack.models.ProductLog
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-06-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ProductLog] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(marketplace_product_log_update, (IO, bytes)):
_content = marketplace_product_log_update
else:
if marketplace_product_log_update is not None:
_json = self._serialize.body(marketplace_product_log_update, "MarketplaceProductLogUpdate")
else:
_json = None
request = build_upload_log_request(
resource_group=resource_group,
registration_name=registration_name,
product_name=product_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.upload_log.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ProductLog", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
upload_log.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.AzureStack/registrations/{registrationName}/products/{productName}/uploadProductLog"
}
|
19db7b320da4046498a26badff23e9a7401d6a00
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/sa/profiles/CData/xPON/get_arp.py
|
9f39e0f564a8d18ef0276691e26d6f8ece1cdba4
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,267
|
py
|
get_arp.py
|
# ---------------------------------------------------------------------
# CData.xPON.get_arp
# ---------------------------------------------------------------------
# Copyright (C) 2007-2022 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetarp import IGetARP
class Script(BaseScript):
name = "CData.xPON.get_arp"
interface = IGetARP
rx_line = re.compile(
r"^(?P<ip>\S+)\s+(?P<mac>\S+)\s+\d+\s+(?P<interface>[xgpl]\S+\d)\s+\S+\s+\d+\s*\n",
re.MULTILINE,
)
rx_line2 = re.compile(
r"^(?P<ip>\S+)\s+(?P<mac>\S+)\s+\d+\s+s\s+\S+\s+\d+\s+(?P<interface>[xgpl]\S+\d)\s+.+\n",
re.MULTILINE,
)
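    # Try "show arp all" first; devices that reject it with a CLI syntax
    # error are queried with "show arp entry all" (alternate table layout).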
def execute_cli(self):
r = []
with self.configure():
try:
v = self.cli("show arp all")
for match in self.rx_line.finditer(v):
r += [match.groupdict()]
except self.CLISyntaxError:
v = self.cli("show arp entry all")
for match in self.rx_line2.finditer(v):
r += [match.groupdict()]
return r
|
00466cdcf26a7f4441642f195e233771343a21ea
|
d540a0d7c5e1c3676f7d765a04f119a6b270abcf
|
/homework/02/test.py
|
99ac2458b721c71ff3540ffea0edb77b6ee42bf4
|
[
"MIT"
] |
permissive
|
mtrempoltsev/msu_cpp_lectures
|
91e93b83be5d896c580a5233272a0bd687e832b7
|
81ea1003d4d365b33391696f56a93c578e6e599d
|
refs/heads/master
| 2022-09-21T04:02:31.902777
| 2022-08-29T18:16:51
| 2022-08-29T18:16:51
| 150,443,779
| 165
| 120
|
MIT
| 2022-09-19T20:12:14
| 2018-09-26T14:54:58
|
C++
|
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
test.py
|
import subprocess
import time
def run(command):
    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
out = process.stdout.readlines()
code = process.wait()
return code, out
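# Run the command, then compare its exit code and each line of output against
# the expected values; prints 'ok' only when everything matches.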
def test(command, expected_code, expected_value):
    print(command)
code, out = run(command)
if code != expected_code:
        print('return value', expected_code, '(expected) !=', code)
return
i = 0
for line in out:
try:
if line.rstrip() != expected_value[i]:
                print(expected_value[i], '(expected) !=', line.rstrip())
return
i += 1
except ValueError:
            print('invalid output')
return
except IndexError:
            print('invalid output')
return
if i != len(expected_value):
        print('empty output')
return
    print('ok')
test('./test "2"', 0, [ '2' ])
test('./test "-2"', 0, [ '-2' ])
test('./test "2 + 2"', 0, [ '4' ])
test('./test "2 + 2 "', 0, [ '4' ])
test('./test "2 +- 2"', 0, [ '0' ])
test('./test " 2+-4"', 0, [ '-2' ])
test('./test "- 4- -4"', 0, [ '0' ])
test('./test "2-3*4+-5/2"', 0, [ '-12' ])
test('./test "2-3*4*2+1--2+-5/2"', 0, [ '-21' ])
test('./test', 1, [ 'error' ])
test('./test 2 + 3', 1, [ 'error' ])
test('./test "2/0"', 1, [ 'error' ])
test('./test "2/"', 1, [ 'error' ])
test('./test "3 + a"', 1, [ 'error' ])
|
2ea2a3e6f52fb4b6d549ef5089f9caed4cf4352b
|
edca7e5d1f5247fd6807840e5f9caf2700839787
|
/tests/cli/simple_mp4_parser.py
|
3fd6ae5241137a0a9f8355f60fa7fd8b2c53438e
|
[
"BSD-2-Clause"
] |
permissive
|
mapillary/mapillary_tools
|
e0e91e458e4287220e7786747c306c8f8ac8360b
|
338bffd28bf501caeb29ebebbd953124922b0401
|
refs/heads/main
| 2023-08-31T01:16:33.130949
| 2023-08-25T07:46:13
| 2023-08-25T07:46:13
| 18,833,475
| 236
| 153
|
BSD-2-Clause
| 2023-09-13T12:53:04
| 2014-04-16T09:05:12
|
Python
|
UTF-8
|
Python
| false
| false
| 7,815
|
py
|
simple_mp4_parser.py
|
import argparse
import io
import logging
import pathlib
import sys
import typing as T
from mapillary_tools import utils
from mapillary_tools.geotag import (
construct_mp4_parser as cparser,
mp4_sample_parser as sample_parser,
simple_mp4_parser as parser,
)
LOG = logging.getLogger(__name__)
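# MP4 container box types whose payload is a list of child boxes; the
# recursive parser descends into these instead of dumping their raw bytes.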
box_list_types = {
b"dinf",
b"edts",
b"gmhd",
b"mdia",
b"minf",
b"moof",
b"moov",
b"mvex",
b"schi",
b"stbl",
b"traf",
b"trak",
b"udta",
}
def _validate_samples(
path: pathlib.Path, filters: T.Optional[T.Container[bytes]] = None
):
samples: T.List[sample_parser.RawSample] = []
with open(path, "rb") as fp:
for h, s in parser.parse_path(
fp, [b"moov", b"trak", b"mdia", b"minf", b"stbl"]
):
(
descriptions,
raw_samples,
) = sample_parser.parse_raw_samples_from_stbl(s, maxsize=h.maxsize)
samples.extend(
sample
for sample in raw_samples
if filters is None
or descriptions[sample.description_idx]["format"] in filters
)
samples.sort(key=lambda s: s.offset)
if not samples:
return
last_sample = None
last_read = samples[0].offset
for sample in samples:
if sample.offset < last_read:
LOG.warning(f"overlap found:\n{last_sample}\n{sample}")
elif sample.offset == last_read:
pass
else:
LOG.warning(f"gap found:\n{last_sample}\n{sample}")
last_read = sample.offset + sample.size
last_sample = sample
def _parse_structs(fp: T.BinaryIO):
for h, d, s in parser.parse_boxes_recursive(fp, box_list_types=box_list_types):
margin = "\t" * d
if h.size32 == 0:
header = f"{str(h.type)} {h.box_size} (open-ended):"
elif h.size32 == 1:
header = f"{str(h.type)} {h.box_size} (extended):"
else:
header = f"{str(h.type)} {h.box_size}:"
if h.type in box_list_types:
print(margin, header)
else:
if h.maxsize == -1:
data = s.read(32)
else:
data = s.read(min(h.maxsize, 32))
print(margin, header, data)
def _dump_box_data_at(fp: T.BinaryIO, box_type_path: T.List[bytes]):
for h, s in parser.parse_path(fp, box_type_path):
max_chunk_size = 1024
read = 0
while read < h.maxsize or h.maxsize == -1:
data = s.read(
max_chunk_size
if h.maxsize == -1
else min((h.maxsize - read), max_chunk_size)
)
if not data:
break
sys.stdout.buffer.write(data)
read += len(data)
break
def _parse_samples(fp: T.BinaryIO, filters: T.Optional[T.Container[bytes]] = None):
for h, s in parser.parse_path(fp, [b"moov", b"trak"]):
offset = s.tell()
for h1, s1 in parser.parse_path(s, [b"mdia", b"mdhd"], maxsize=h.maxsize):
box = cparser.MediaHeaderBox.parse(s1.read(h.maxsize))
LOG.info(box)
LOG.info(sample_parser.to_datetime(box.creation_time))
LOG.info(box.duration / box.timescale)
s.seek(offset, io.SEEK_SET)
for sample in sample_parser.parse_samples_from_trak(s, maxsize=h.maxsize):
if filters is None or sample.description["format"] in filters:
print(sample)
def _dump_samples(fp: T.BinaryIO, filters: T.Optional[T.Container[bytes]] = None):
for h, s in parser.parse_path(fp, [b"moov", b"trak"]):
for sample in sample_parser.parse_samples_from_trak(s, maxsize=h.maxsize):
if filters is None or sample.description["format"] in filters:
fp.seek(sample.offset, io.SEEK_SET)
data = fp.read(sample.size)
sys.stdout.buffer.write(data)
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--samples", action="store_true", default=False, help="show sample structs"
)
parser.add_argument(
"--filter_samples",
help="filter sample by types",
)
parser.add_argument(
"--validate_samples",
action="store_true",
default=False,
help="validate samples",
)
parser.add_argument(
"--dump",
action="store_true",
default=False,
help="dump as bytes or not",
)
parser.add_argument(
"--full",
action="store_true",
default=False,
help="parse MP4 with the full parser or not, otherwise parse with the quick parser",
)
parser.add_argument(
"--simple",
action="store_true",
default=False,
help="parse MP4 with the simple parser or not, otherwise parse with the quick parser",
)
parser.add_argument(
"--box_path",
required=False,
help="show box data at path like this: moov/trak/minf",
)
parser.add_argument("path", nargs="+")
return parser.parse_args()
def _process_path(parsed_args, path: pathlib.Path):
if parsed_args.filter_samples is None:
filter_samples = None
else:
filter_samples = parsed_args.filter_samples.encode("utf8").split(b",")
if parsed_args.validate_samples:
LOG.info(f"validating samples {path}")
_validate_samples(path, filter_samples)
if parsed_args.samples:
if parsed_args.dump:
with open(path, "rb") as fp:
_dump_samples(fp, filter_samples)
else:
LOG.info(f"sampling {path}")
with open(path, "rb") as fp:
_parse_samples(fp, filter_samples)
else:
if parsed_args.box_path is None:
box_path = None
else:
box_path = parsed_args.box_path.encode("utf8").split(b"/")
if parsed_args.dump:
LOG.info(f"dumping {path}")
assert box_path is not None, "must specify --box_path"
with open(path, "rb") as fp:
_dump_box_data_at(fp, box_path)
else:
LOG.info(f"parsing {path}")
with open(path, "rb") as fp:
if parsed_args.simple:
if box_path is None:
_parse_structs(fp)
else:
data = parser.parse_mp4_data_firstx(fp, box_path)
_parse_structs(io.BytesIO(data))
elif parsed_args.full:
if box_path is None:
boxes = cparser.MP4ParserConstruct.BoxList.parse_stream(fp)
else:
data = parser.parse_mp4_data_firstx(fp, box_path)
boxes = cparser.MP4ParserConstruct.BoxList.parse_stream(
io.BytesIO(data)
)
print(boxes)
else:
if box_path is None:
boxes = (
cparser.MP4WithoutSTBLParserConstruct.BoxList.parse_stream(
fp
)
)
else:
data = parser.parse_mp4_data_firstx(fp, box_path)
boxes = (
cparser.MP4WithoutSTBLParserConstruct.BoxList.parse_stream(
io.BytesIO(data)
)
)
print(boxes)
def main():
parsed_args = _parse_args()
for p in utils.find_videos([pathlib.Path(p) for p in parsed_args.path]):
_process_path(parsed_args, p)
if __name__ == "__main__":
main()
|
ca4323c702e5ce28c7f7fe679888f0794fe76c78
|
3f70e754981a941dbc3a24d15edb0a5abe3d4788
|
/yotta/init.py
|
3d9f78dd21edc47f1a592f9528b534794b70f3fc
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ARMmbed/yotta
|
66cfa634f03a25594311a569ea369a916cff70bf
|
82d854b43d391abb5a006b05e7beffe7d0d6ffbf
|
refs/heads/master
| 2023-03-16T11:57:12.852163
| 2021-01-15T13:49:47
| 2021-01-15T13:49:47
| 16,579,440
| 184
| 87
|
Apache-2.0
| 2021-01-15T13:46:43
| 2014-02-06T13:03:45
|
Python
|
UTF-8
|
Python
| false
| false
| 7,752
|
py
|
init.py
|
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
from __future__ import print_function
import os
import logging
import re
# Component, , represents an installed component, internal
from yotta.lib import component
# version, , represent versions and specifications, internal
from yotta.lib import version
# validate, , validate various things, internal
from yotta.lib import validate
Known_Licenses = {
'isc': 'ISC',
'apache-2.0': 'Apache-2.0',
'mit': 'MIT',
'bsd-3-clause': 'BSD-3-Clause'
}
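# Patterns used by repoObject() below to classify repository URLs by VCS type.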
Git_Repo_RE = re.compile("^(git[+a-zA-Z-]*:.*|.*\.git|.*git@.*github\.com.*)$")
HG_Repo_RE = re.compile("^(hg[+a-zA-Z-]*:.*|.*\.hg)$")
SVN_Repo_RE = re.compile("^svn[+a-zA-Z-]*:.*$")
def getUserInput(question, default=None, type_class=str):
# python 2 + 3 compatibility
try:
global input
input = raw_input
except NameError:
pass
while True:
default_descr = ''
if default is not None:
default_descr = ' <%s> ' % str(default)
value = input(question + default_descr)
if default is not None and not value:
if type_class:
return type_class(default)
else:
return default
try:
typed_value = type_class(value)
break
except:
allowed_message = ''
if hasattr(type_class, '__allowed_message'):
allowed_message = type_class.__allowed_message
print('"%s" isn\'t a valid "%s" value.%s' % (value, type_class.__name__, allowed_message))
return typed_value
def yesNo(string):
if string.strip().lower() in ('yes', 'y'):
return True
elif string.strip().lower() in ('no', 'n'):
return False
else:
raise ValueError()
yesNo.__name__ = "Yes/No"
yesNo.__allowed_message = ' Please reply "Yes", or "No".'
def isBannedName(name):
return name in ('test', 'source', 'include', 'yotta_modules', 'yotta_targets')
def notBannedName(s):
if isBannedName(s):
        raise ValueError('invalid name')
else:
return s
notBannedName.__name__ = 'module name'
notBannedName.__allowed_message = ' Names must be lowercase, start with a letter, use only a-z0-9 and -, and not be a reserved name.'
def repoObject(string):
string = string.strip()
if not string:
return None
elif Git_Repo_RE.match(string):
repo_type = 'git'
url = Git_Repo_RE.match(string).group(0)
elif HG_Repo_RE.match(string):
repo_type = 'hg'
url = HG_Repo_RE.match(string).group(0)
elif SVN_Repo_RE.match(string):
repo_type = 'svn'
url = SVN_Repo_RE.match(string).group(0)
else:
raise ValueError()
return {'type':repo_type, 'url':url}
def listOfWords(string):
if isinstance(string, list):
return string
else:
return list(filter(bool, re.split(",|\\s", string)))
def addOptions(parser):
pass
def execCommand(args, following_args):
c = component.Component(os.getcwd())
if c:
        logging.info('The current directory already contains a module: existing description will be modified')
elif os.path.isfile(c.getDescriptionFile()):
logging.error('A module description exists but could not be loaded:')
logging.error(c.error)
return 1
if args.interactive:
return initInteractive(args, c)
else:
return initNonInteractive(args, c)
def createFolders(c, moduletype='library'):
# default set of folders
folders_to_create = ["./source", "./test"]
if moduletype == 'library':
folders_to_create.append("./" + c.getName())
for folder_name in folders_to_create:
if not os.path.exists(folder_name):
os.mkdir(folder_name)
def defaultDescription():
return 'A short description of what your module does goes here.'
def defaultAuthor():
return 'Your Name <youremail@yourdomain.com>'
def defaultLicense():
return 'Apache-2.0'
def initNonInteractive(args, c):
if not 'name' in c.description:
c.description['name'] = validate.componentNameCoerced(os.path.split(os.getcwd())[1])
if not 'version' in c.description:
c.setVersion("0.0.0")
if not 'description' in c.description:
c.description['description'] = defaultDescription()
if not 'keywords' in c.description:
c.description['keywords'] = []
if not 'author' in c.description:
c.description['author'] = defaultAuthor()
if not 'repository' in c.description:
c.description['repository'] = repoObject('git@github.com:yourName/%s' % c.description['name'])
if not 'homepage' in c.description:
c.description['homepage'] = '%s-module-homepage.com' % c.description['name']
if not 'license' in c.description and not 'licenses' in c.description:
c.description['license'] = defaultLicense()
if not 'dependencies' in c.description:
c.description['dependencies'] = {}
createFolders(c)
c.writeDescription()
def initInteractive(args, c):
def current(x):
return c.description[x] if x in c.description else None
default_name = c.getName()
if not default_name:
default_name = validate.componentNameCoerced(os.path.split(os.getcwd())[1])
if isBannedName(default_name):
default_name = 'unnamed'
c.setName(getUserInput("Enter the module name:", default_name, notBannedName))
c.setVersion(getUserInput("Enter the initial version:", str(c.getVersion() or "0.0.0"), version.Version))
default_isexe = 'no'
if current('bin'):
default_isexe = 'yes'
isexe = getUserInput("Is this an executable (instead of a re-usable library module)?", default_isexe, yesNo)
if isexe:
c.description['bin'] = './source'
# set exe modules to private by default, to prevent publishing
# applications
if current('private') is None:
c.description['private'] = True
description = getUserInput("Short description: ", current('description'))
if len(description):
c.description['description'] = description
elif 'description' in c.description:
del c.description['description']
if not isexe:
c.description['keywords'] = getUserInput("Keywords: ", ' '.join(current('keywords') or []), listOfWords)
c.description['author'] = getUserInput("Author: ", current('author'))
if not isexe:
current_repo_url = current('repository')
if isinstance(current_repo_url, dict):
current_repo_url = current_repo_url['url']
new_repo_url = getUserInput("Repository url (where people can submit bugfixes): ", current_repo_url, repoObject)
if new_repo_url:
c.description['repository'] = new_repo_url
new_homepage = getUserInput("Homepage: ", current('homepage'))
if (not len(new_homepage.strip())) and 'homepage' in c.description:
del c.description['homepage']
elif len(new_homepage.strip()):
c.description['homepage'] = new_homepage
if not current('licenses') or current('license'):
license = getUserInput('What is the license for this project (Apache-2.0, ISC, MIT etc.)? ', 'Apache-2.0')
if license.lower().strip() in Known_Licenses:
c.description['license'] = Known_Licenses[license.lower().strip()]
else:
c.description['license'] = license
c.description['dependencies'] = current('dependencies') or {}
if isexe:
createFolders(c, 'executable')
else:
createFolders(c, 'library')
c.writeDescription()
|
bcf3cedd1ed18b7b11b941b70301e2c2c2d88052
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/checkov/terraform/checks/resource/aws/StateMachineLoggingExecutionHistory.py
|
5083984ce7333cf72cba363aa7dc409f168af37d
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 706
|
py
|
StateMachineLoggingExecutionHistory.py
|
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
from checkov.common.models.enums import CheckCategories
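# Flags aws_sfn_state_machine resources whose logging_configuration does not
# enable include_execution_data, i.e. execution history is not being logged.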
class StateMachineLoggingExecutionHistory(BaseResourceValueCheck):
def __init__(self):
name = "Ensure State Machine has execution history logging enabled"
id = "CKV_AWS_285"
supported_resources = ['aws_sfn_state_machine']
categories = [CheckCategories.LOGGING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
return "logging_configuration/[0]/include_execution_data"
check = StateMachineLoggingExecutionHistory()
|
3830cf161feebdfee907c840c906d1f6ed31270d
|
ec7b8378698ed9dfc5e62b94c20524bf3aefc3c3
|
/tests/models/test_hovernet.py
|
bf77b46ba523979149e03b45274694cffb883516
|
[
"BSD-3-Clause"
] |
permissive
|
TissueImageAnalytics/tiatoolbox
|
52fe15704b396a055d9b4fccc678787ef489aed8
|
f26387f46f675a7b9a8a48c95dad26e819229f2f
|
refs/heads/develop
| 2023-08-16T15:47:19.282604
| 2023-08-14T16:50:45
| 2023-08-14T16:50:45
| 267,705,904
| 222
| 44
|
NOASSERTION
| 2023-09-14T16:57:15
| 2020-05-28T22:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,892
|
py
|
test_hovernet.py
|
"""Unit test package for HoVerNet."""
from typing import Callable
import numpy as np
import pytest
import torch
from torch import nn
from tiatoolbox.models import HoVerNet
from tiatoolbox.models.architecture import fetch_pretrained_weights
from tiatoolbox.models.architecture.hovernet import (
DenseBlock,
ResidualBlock,
TFSamepaddingLayer,
)
from tiatoolbox.wsicore.wsireader import WSIReader
def test_functionality(remote_sample: Callable) -> None:
"""Functionality test."""
sample_wsi = str(remote_sample("wsi1_2k_2k_svs"))
reader = WSIReader.open(sample_wsi)
# * test fast mode (architecture used in PanNuke paper)
patch = reader.read_bounds(
(0, 0, 256, 256),
resolution=0.25,
units="mpp",
coord_space="resolution",
)
batch = torch.from_numpy(patch)[None]
model = HoVerNet(num_types=6, mode="fast")
weights_path = fetch_pretrained_weights("hovernet_fast-pannuke")
pretrained = torch.load(weights_path)
model.load_state_dict(pretrained)
output = model.infer_batch(model, batch, on_gpu=False)
output = [v[0] for v in output]
output = model.postproc(output)
assert len(output[1]) > 0, "Must have some nuclei."
# * test fast mode (architecture used for MoNuSAC data)
patch = reader.read_bounds(
(0, 0, 256, 256),
resolution=0.25,
units="mpp",
coord_space="resolution",
)
batch = torch.from_numpy(patch)[None]
model = HoVerNet(num_types=5, mode="fast")
weights_path = fetch_pretrained_weights("hovernet_fast-monusac")
pretrained = torch.load(weights_path)
model.load_state_dict(pretrained)
output = model.infer_batch(model, batch, on_gpu=False)
output = [v[0] for v in output]
output = model.postproc(output)
assert len(output[1]) > 0, "Must have some nuclei."
# * test original mode on CoNSeP dataset (architecture used in HoVerNet paper)
patch = reader.read_bounds(
(0, 0, 270, 270),
resolution=0.25,
units="mpp",
coord_space="resolution",
)
batch = torch.from_numpy(patch)[None]
model = HoVerNet(num_types=5, mode="original")
weights_path = fetch_pretrained_weights("hovernet_original-consep")
pretrained = torch.load(weights_path)
model.load_state_dict(pretrained)
output = model.infer_batch(model, batch, on_gpu=False)
output = [v[0] for v in output]
output = model.postproc(output)
assert len(output[1]) > 0, "Must have some nuclei."
# * test original mode on Kumar dataset (architecture used in HoVerNet paper)
patch = reader.read_bounds(
(0, 0, 270, 270),
resolution=0.25,
units="mpp",
coord_space="resolution",
)
batch = torch.from_numpy(patch)[None]
model = HoVerNet(num_types=None, mode="original")
weights_path = fetch_pretrained_weights("hovernet_original-kumar")
pretrained = torch.load(weights_path)
model.load_state_dict(pretrained)
output = model.infer_batch(model, batch, on_gpu=False)
output = [v[0] for v in output]
output = model.postproc(output)
assert len(output[1]) > 0, "Must have some nuclei."
# test crash when providing exotic mode
with pytest.raises(ValueError, match=r".*Invalid mode.*"):
model = HoVerNet(num_types=None, mode="super")
def test_unit_blocks() -> None:
"""Test for blocks within HoVerNet."""
# padding
model = nn.Sequential(TFSamepaddingLayer(7, 1), nn.Conv2d(3, 3, 7, 1, padding=0))
sample = torch.rand((1, 3, 14, 14), dtype=torch.float32)
output = model(sample)
assert np.sum(output.shape - np.array([1, 3, 14, 14])) == 0, f"{output.shape}"
# padding with stride and odd shape
model = nn.Sequential(TFSamepaddingLayer(7, 2), nn.Conv2d(3, 3, 7, 2, padding=0))
sample = torch.rand((1, 3, 15, 15), dtype=torch.float32)
output = model(sample)
assert np.sum(output.shape - np.array([1, 3, 8, 8])) == 0, f"{output.shape}"
# *
sample = torch.rand((1, 16, 15, 15), dtype=torch.float32)
block = ResidualBlock(16, [1, 3, 1], [16, 16, 16], 3)
assert block.shortcut is None
output = block(sample)
assert np.sum(output.shape - np.array([1, 16, 15, 15])) == 0, f"{output.shape}"
block = ResidualBlock(16, [1, 3, 1], [16, 16, 32], 3)
assert block.shortcut is not None
output = block(sample)
assert np.sum(output.shape - np.array([1, 32, 15, 15])) == 0, f"{output.shape}"
#
block = DenseBlock(16, [1, 3], [16, 16], 3)
output = block(sample)
assert output.shape[1] == 16 * 4, f"{output.shape}"
# test crash when providing exotic mode
with pytest.raises(ValueError, match=r".*Unbalance Unit Info.*"):
_ = DenseBlock(16, [1, 3, 1], [16, 16], 3)
with pytest.raises(ValueError, match=r".*Unbalance Unit Info.*"):
_ = ResidualBlock(16, [1, 3, 1], [16, 16], 3)
|
987687fad5e07af7df82893b5bb2f7e6e1c2aa64
|
a41e1498e3c080f47abd8e8e57157548df3ebbf1
|
/pandas/io/sas/sas_constants.py
|
62c17bd03927e5f852af708e6b9ef6cf7e74d57c
|
[
"BSD-3-Clause"
] |
permissive
|
pandas-dev/pandas
|
e7e639454a298bebc272622e66faa9829ea393bb
|
c7325d7e7e77ecb4a4e57b48bc25265277c75712
|
refs/heads/main
| 2023-09-01T12:42:07.927176
| 2023-09-01T11:14:10
| 2023-09-01T11:14:10
| 858,127
| 36,166
| 18,728
|
BSD-3-Clause
| 2023-09-14T21:18:41
| 2010-08-24T01:37:33
|
Python
|
UTF-8
|
Python
| false
| false
| 8,719
|
py
|
sas_constants.py
|
from __future__ import annotations
from typing import Final
magic: Final = (
b"\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\xc2\xea\x81\x60"
b"\xb3\x14\x11\xcf\xbd\x92\x08\x00"
b"\x09\xc7\x31\x8c\x18\x1f\x10\x11"
)
align_1_checker_value: Final = b"3"
align_1_offset: Final = 32
align_1_length: Final = 1
align_1_value: Final = 4
u64_byte_checker_value: Final = b"3"
align_2_offset: Final = 35
align_2_length: Final = 1
align_2_value: Final = 4
endianness_offset: Final = 37
endianness_length: Final = 1
platform_offset: Final = 39
platform_length: Final = 1
encoding_offset: Final = 70
encoding_length: Final = 1
dataset_offset: Final = 92
dataset_length: Final = 64
file_type_offset: Final = 156
file_type_length: Final = 8
date_created_offset: Final = 164
date_created_length: Final = 8
date_modified_offset: Final = 172
date_modified_length: Final = 8
header_size_offset: Final = 196
header_size_length: Final = 4
page_size_offset: Final = 200
page_size_length: Final = 4
page_count_offset: Final = 204
page_count_length: Final = 4
sas_release_offset: Final = 216
sas_release_length: Final = 8
sas_server_type_offset: Final = 224
sas_server_type_length: Final = 16
os_version_number_offset: Final = 240
os_version_number_length: Final = 16
os_maker_offset: Final = 256
os_maker_length: Final = 16
os_name_offset: Final = 272
os_name_length: Final = 16
page_bit_offset_x86: Final = 16
page_bit_offset_x64: Final = 32
subheader_pointer_length_x86: Final = 12
subheader_pointer_length_x64: Final = 24
page_type_offset: Final = 0
page_type_length: Final = 2
block_count_offset: Final = 2
block_count_length: Final = 2
subheader_count_offset: Final = 4
subheader_count_length: Final = 2
page_type_mask: Final = 0x0F00
# Keep "page_comp_type" bits
page_type_mask2: Final = 0xF000 | page_type_mask
page_meta_type: Final = 0x0000
page_data_type: Final = 0x0100
page_mix_type: Final = 0x0200
page_amd_type: Final = 0x0400
page_meta2_type: Final = 0x4000
page_comp_type: Final = 0x9000
page_meta_types: Final = [page_meta_type, page_meta2_type]
subheader_pointers_offset: Final = 8
truncated_subheader_id: Final = 1
compressed_subheader_id: Final = 4
compressed_subheader_type: Final = 1
text_block_size_length: Final = 2
row_length_offset_multiplier: Final = 5
row_count_offset_multiplier: Final = 6
col_count_p1_multiplier: Final = 9
col_count_p2_multiplier: Final = 10
row_count_on_mix_page_offset_multiplier: Final = 15
column_name_pointer_length: Final = 8
column_name_text_subheader_offset: Final = 0
column_name_text_subheader_length: Final = 2
column_name_offset_offset: Final = 2
column_name_offset_length: Final = 2
column_name_length_offset: Final = 4
column_name_length_length: Final = 2
column_data_offset_offset: Final = 8
column_data_length_offset: Final = 8
column_data_length_length: Final = 4
column_type_offset: Final = 14
column_type_length: Final = 1
column_format_text_subheader_index_offset: Final = 22
column_format_text_subheader_index_length: Final = 2
column_format_offset_offset: Final = 24
column_format_offset_length: Final = 2
column_format_length_offset: Final = 26
column_format_length_length: Final = 2
column_label_text_subheader_index_offset: Final = 28
column_label_text_subheader_index_length: Final = 2
column_label_offset_offset: Final = 30
column_label_offset_length: Final = 2
column_label_length_offset: Final = 32
column_label_length_length: Final = 2
rle_compression: Final = b"SASYZCRL"
rdc_compression: Final = b"SASYZCR2"
compression_literals: Final = [rle_compression, rdc_compression]
# Incomplete list of encodings, using SAS nomenclature:
# https://support.sas.com/documentation/onlinedoc/dfdmstudio/2.6/dmpdmsug/Content/dfU_Encodings_SAS.html
# corresponding to the Python documentation of standard encodings
# https://docs.python.org/3/library/codecs.html#standard-encodings
encoding_names: Final = {
20: "utf-8",
29: "latin1",
30: "latin2",
31: "latin3",
32: "latin4",
33: "cyrillic",
34: "arabic",
35: "greek",
36: "hebrew",
37: "latin5",
38: "latin6",
39: "cp874",
40: "latin9",
41: "cp437",
42: "cp850",
43: "cp852",
44: "cp857",
45: "cp858",
46: "cp862",
47: "cp864",
48: "cp865",
49: "cp866",
50: "cp869",
51: "cp874",
# 52: "", # not found
# 53: "", # not found
# 54: "", # not found
55: "cp720",
56: "cp737",
57: "cp775",
58: "cp860",
59: "cp863",
60: "cp1250",
61: "cp1251",
62: "cp1252",
63: "cp1253",
64: "cp1254",
65: "cp1255",
66: "cp1256",
67: "cp1257",
68: "cp1258",
118: "cp950",
# 119: "", # not found
123: "big5",
125: "gb2312",
126: "cp936",
134: "euc_jp",
136: "cp932",
138: "shift_jis",
140: "euc-kr",
141: "cp949",
227: "latin8",
# 228: "", # not found
# 229: "" # not found
}
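# A minimal sketch (not part of the original constants) of how a reader might
# use encoding_names: look up the single encoding byte from the header and
# fall back to latin-1 when the code is unknown. `header` is an assumed bytes
# buffer covering at least the offsets defined above.
def _example_decode_header_field(header: bytes, offset: int, length: int) -> str:
    codec = encoding_names.get(header[encoding_offset], "latin-1")
    return header[offset : offset + length].decode(codec).rstrip("\x00 ")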
class SASIndex:
row_size_index: Final = 0
column_size_index: Final = 1
subheader_counts_index: Final = 2
column_text_index: Final = 3
column_name_index: Final = 4
column_attributes_index: Final = 5
format_and_label_index: Final = 6
column_list_index: Final = 7
data_subheader_index: Final = 8
subheader_signature_to_index: Final = {
b"\xF7\xF7\xF7\xF7": SASIndex.row_size_index,
b"\x00\x00\x00\x00\xF7\xF7\xF7\xF7": SASIndex.row_size_index,
b"\xF7\xF7\xF7\xF7\x00\x00\x00\x00": SASIndex.row_size_index,
b"\xF7\xF7\xF7\xF7\xFF\xFF\xFB\xFE": SASIndex.row_size_index,
b"\xF6\xF6\xF6\xF6": SASIndex.column_size_index,
b"\x00\x00\x00\x00\xF6\xF6\xF6\xF6": SASIndex.column_size_index,
b"\xF6\xF6\xF6\xF6\x00\x00\x00\x00": SASIndex.column_size_index,
b"\xF6\xF6\xF6\xF6\xFF\xFF\xFB\xFE": SASIndex.column_size_index,
b"\x00\xFC\xFF\xFF": SASIndex.subheader_counts_index,
b"\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index,
b"\x00\xFC\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.subheader_counts_index,
b"\xFF\xFF\xFF\xFF\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index,
b"\xFD\xFF\xFF\xFF": SASIndex.column_text_index,
b"\xFF\xFF\xFF\xFD": SASIndex.column_text_index,
b"\xFD\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_text_index,
b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD": SASIndex.column_text_index,
b"\xFF\xFF\xFF\xFF": SASIndex.column_name_index,
b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_name_index,
b"\xFC\xFF\xFF\xFF": SASIndex.column_attributes_index,
b"\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index,
b"\xFC\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_attributes_index,
b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index,
b"\xFE\xFB\xFF\xFF": SASIndex.format_and_label_index,
b"\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index,
b"\xFE\xFB\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.format_and_label_index,
b"\xFF\xFF\xFF\xFF\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index,
b"\xFE\xFF\xFF\xFF": SASIndex.column_list_index,
b"\xFF\xFF\xFF\xFE": SASIndex.column_list_index,
b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_list_index,
b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE": SASIndex.column_list_index,
}
# List of frequently used SAS date and datetime formats
# http://support.sas.com/documentation/cdl/en/etsug/60372/HTML/default/viewer.htm#etsug_intervals_sect009.htm
# https://github.com/epam/parso/blob/master/src/main/java/com/epam/parso/impl/SasFileConstants.java
sas_date_formats: Final = (
"DATE",
"DAY",
"DDMMYY",
"DOWNAME",
"JULDAY",
"JULIAN",
"MMDDYY",
"MMYY",
"MMYYC",
"MMYYD",
"MMYYP",
"MMYYS",
"MMYYN",
"MONNAME",
"MONTH",
"MONYY",
"QTR",
"QTRR",
"NENGO",
"WEEKDATE",
"WEEKDATX",
"WEEKDAY",
"WEEKV",
"WORDDATE",
"WORDDATX",
"YEAR",
"YYMM",
"YYMMC",
"YYMMD",
"YYMMP",
"YYMMS",
"YYMMN",
"YYMON",
"YYMMDD",
"YYQ",
"YYQC",
"YYQD",
"YYQP",
"YYQS",
"YYQN",
"YYQR",
"YYQRC",
"YYQRD",
"YYQRP",
"YYQRS",
"YYQRN",
"YYMMDDP",
"YYMMDDC",
"E8601DA",
"YYMMDDN",
"MMDDYYC",
"MMDDYYS",
"MMDDYYD",
"YYMMDDS",
"B8601DA",
"DDMMYYN",
"YYMMDDD",
"DDMMYYB",
"DDMMYYP",
"MMDDYYP",
"YYMMDDB",
"MMDDYYN",
"DDMMYYC",
"DDMMYYD",
"DDMMYYS",
"MINGUO",
)
sas_datetime_formats: Final = (
"DATETIME",
"DTWKDATX",
"B8601DN",
"B8601DT",
"B8601DX",
"B8601DZ",
"B8601LX",
"E8601DN",
"E8601DT",
"E8601DX",
"E8601DZ",
"E8601LX",
"DATEAMPM",
"DTDATE",
"DTMONYY",
"DTMONYY",
"DTWKDATX",
"DTYEAR",
"TOD",
"MDYAMPM",
)
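# A hedged illustration (not part of the original constants): SAS stores dates
# as days and datetimes as seconds since the SAS epoch, 1960-01-01. A reader
# that has matched a column format against the tuples above could convert the
# raw numeric value like this.
def _example_sas_to_python(value: float, is_datetime: bool):
    import datetime
    epoch = datetime.datetime(1960, 1, 1)
    delta = (
        datetime.timedelta(seconds=value)
        if is_datetime
        else datetime.timedelta(days=value)
    )
    return epoch + delta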
|
382aabb090614be5f29b7c42e1330fe4f94844e6
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-securitycenter/google/cloud/securitycenter_v1p1beta1/types/finding.py
|
a5cf4156a79917d9604755218e5aa5e0055cdde5
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 7,735
|
py
|
finding.py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import MutableMapping, MutableSequence
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import proto # type: ignore
from google.cloud.securitycenter_v1p1beta1.types import (
security_marks as gcs_security_marks,
)
__protobuf__ = proto.module(
package="google.cloud.securitycenter.v1p1beta1",
manifest={
"Finding",
},
)
class Finding(proto.Message):
r"""Security Command Center finding.
A finding is a record of assessment data (security, risk, health
or privacy) ingested into Security Command Center for
presentation, notification, analysis, policy testing, and
enforcement. For example, an XSS vulnerability in an App Engine
application is a finding.
Attributes:
name (str):
The relative resource name of this finding. See:
https://cloud.google.com/apis/design/resource_names#relative_resource_name
Example:
"organizations/{organization_id}/sources/{source_id}/findings/{finding_id}".
parent (str):
The relative resource name of the source the finding belongs
to. See:
https://cloud.google.com/apis/design/resource_names#relative_resource_name
This field is immutable after creation time. For example:
"organizations/{organization_id}/sources/{source_id}".
resource_name (str):
For findings on Google Cloud resources, the full resource
name of the Google Cloud resource this finding is for. See:
https://cloud.google.com/apis/design/resource_names#full_resource_name
When the finding is for a non-Google Cloud resource, the
resourceName can be a customer or partner defined string.
This field is immutable after creation time.
state (google.cloud.securitycenter_v1p1beta1.types.Finding.State):
The state of the finding.
category (str):
The additional taxonomy group within findings from a given
source. This field is immutable after creation time.
Example: "XSS_FLASH_INJECTION".
external_uri (str):
The URI that, if available, points to a web
page outside of Security Command Center where
additional information about the finding can be
found. This field is guaranteed to be either
empty or a well formed URL.
source_properties (MutableMapping[str, google.protobuf.struct_pb2.Value]):
Source specific properties. These properties are managed by
the source that writes the finding. The key names in the
source_properties map must be between 1 and 255 characters,
and must start with a letter and contain alphanumeric
characters or underscores only.
security_marks (google.cloud.securitycenter_v1p1beta1.types.SecurityMarks):
Output only. User specified security marks.
These marks are entirely managed by the user and
come from the SecurityMarks resource that
belongs to the finding.
event_time (google.protobuf.timestamp_pb2.Timestamp):
The time at which the event took place, or
when an update to the finding occurred. For
example, if the finding represents an open
firewall it would capture the time the detector
believes the firewall became open. The accuracy
is determined by the detector. If the finding
were to be resolved afterward, this time would
reflect when the finding was resolved. Must not
be set to a value greater than the current
timestamp.
create_time (google.protobuf.timestamp_pb2.Timestamp):
The time at which the finding was created in
Security Command Center.
severity (google.cloud.securitycenter_v1p1beta1.types.Finding.Severity):
The severity of the finding. This field is
managed by the source that writes the finding.
canonical_name (str):
The canonical name of the finding. It's either
"organizations/{organization_id}/sources/{source_id}/findings/{finding_id}",
"folders/{folder_id}/sources/{source_id}/findings/{finding_id}"
or
"projects/{project_number}/sources/{source_id}/findings/{finding_id}",
depending on the closest CRM ancestor of the resource
associated with the finding.
"""
class State(proto.Enum):
r"""The state of the finding.
Values:
STATE_UNSPECIFIED (0):
Unspecified state.
ACTIVE (1):
The finding requires attention and has not
been addressed yet.
INACTIVE (2):
The finding has been fixed, triaged as a
non-issue or otherwise addressed and is no
longer active.
"""
STATE_UNSPECIFIED = 0
ACTIVE = 1
INACTIVE = 2
class Severity(proto.Enum):
r"""The severity of the finding. This field is managed by the
source that writes the finding.
Values:
SEVERITY_UNSPECIFIED (0):
No severity specified. The default value.
CRITICAL (1):
Critical severity.
HIGH (2):
High severity.
MEDIUM (3):
Medium severity.
LOW (4):
Low severity.
"""
SEVERITY_UNSPECIFIED = 0
CRITICAL = 1
HIGH = 2
MEDIUM = 3
LOW = 4
name: str = proto.Field(
proto.STRING,
number=1,
)
parent: str = proto.Field(
proto.STRING,
number=2,
)
resource_name: str = proto.Field(
proto.STRING,
number=3,
)
state: State = proto.Field(
proto.ENUM,
number=4,
enum=State,
)
category: str = proto.Field(
proto.STRING,
number=5,
)
external_uri: str = proto.Field(
proto.STRING,
number=6,
)
source_properties: MutableMapping[str, struct_pb2.Value] = proto.MapField(
proto.STRING,
proto.MESSAGE,
number=7,
message=struct_pb2.Value,
)
security_marks: gcs_security_marks.SecurityMarks = proto.Field(
proto.MESSAGE,
number=8,
message=gcs_security_marks.SecurityMarks,
)
event_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=9,
message=timestamp_pb2.Timestamp,
)
create_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=10,
message=timestamp_pb2.Timestamp,
)
severity: Severity = proto.Field(
proto.ENUM,
number=13,
enum=Severity,
)
canonical_name: str = proto.Field(
proto.STRING,
number=14,
)
__all__ = tuple(sorted(__protobuf__.manifest))
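# A brief usage sketch (an assumption, not part of the generated module):
# proto-plus messages accept field values as keyword arguments, so a Finding
# can be built and serialized to wire-format bytes like any other message.
#
# finding = Finding(
#     name="organizations/123/sources/456/findings/789",
#     state=Finding.State.ACTIVE,
#     category="XSS_FLASH_INJECTION",
# )
# payload = Finding.serialize(finding)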
|
9881e198376e0d2d2ecf00ad89d7f94d6ce022b5
|
fce81b804cae23f525a5ad4370b684bf0dc531a5
|
/numpy/f2py/tests/test_module_doc.py
|
28822d405cc02ac2ce5cc214c27271a199612349
|
[
"Zlib",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
numpy/numpy
|
ba2abcc1d2d46affbb6aabe5aed6407b4b57507e
|
dc2ff125493777a1084044e6cd6857a42ee323d4
|
refs/heads/main
| 2023-09-05T10:10:52.767363
| 2023-09-04T18:03:29
| 2023-09-04T18:03:29
| 908,607
| 25,725
| 11,968
|
BSD-3-Clause
| 2023-09-14T21:26:09
| 2010-09-13T23:02:39
|
Python
|
UTF-8
|
Python
| false
| false
| 863
|
py
|
test_module_doc.py
|
import os
import sys
import pytest
import textwrap
from . import util
from numpy.testing import IS_PYPY
class TestModuleDocString(util.F2PyTest):
sources = [
util.getpath("tests", "src", "module_data",
"module_data_docstring.f90")
]
@pytest.mark.skipif(sys.platform == "win32",
reason="Fails with MinGW64 Gfortran (Issue #9673)")
@pytest.mark.xfail(IS_PYPY,
reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_module_docstring(self):
assert self.module.mod.__doc__ == textwrap.dedent("""\
i : 'i'-scalar
x : 'i'-array(4)
a : 'f'-array(2,3)
b : 'f'-array(-1,-1), not allocated\x00
foo()\n
Wrapper for ``foo``.\n\n""")
|
3b92234e97f19f42db9f1484bd423c90a55289d4
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayFundScenepayAuthorizeQueryModel.py
|
006a8c878eeb92662ed8a59aa2dd127a23830426
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,370
|
py
|
AlipayFundScenepayAuthorizeQueryModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ScenePayParticipantInfoDTO import ScenePayParticipantInfoDTO
class AlipayFundScenepayAuthorizeQueryModel(object):
def __init__(self):
self._authorization_type = None
self._biz_scene = None
self._business_principal_info = None
self._principal_info = None
self._product_code = None
self._sub_biz_scene = None
@property
def authorization_type(self):
return self._authorization_type
@authorization_type.setter
def authorization_type(self, value):
self._authorization_type = value
@property
def biz_scene(self):
return self._biz_scene
@biz_scene.setter
def biz_scene(self, value):
self._biz_scene = value
@property
def business_principal_info(self):
return self._business_principal_info
@business_principal_info.setter
def business_principal_info(self, value):
if isinstance(value, ScenePayParticipantInfoDTO):
self._business_principal_info = value
else:
self._business_principal_info = ScenePayParticipantInfoDTO.from_alipay_dict(value)
@property
def principal_info(self):
return self._principal_info
@principal_info.setter
def principal_info(self, value):
if isinstance(value, ScenePayParticipantInfoDTO):
self._principal_info = value
else:
self._principal_info = ScenePayParticipantInfoDTO.from_alipay_dict(value)
@property
def product_code(self):
return self._product_code
@product_code.setter
def product_code(self, value):
self._product_code = value
@property
def sub_biz_scene(self):
return self._sub_biz_scene
@sub_biz_scene.setter
def sub_biz_scene(self, value):
self._sub_biz_scene = value
def to_alipay_dict(self):
params = dict()
if self.authorization_type:
if hasattr(self.authorization_type, 'to_alipay_dict'):
params['authorization_type'] = self.authorization_type.to_alipay_dict()
else:
params['authorization_type'] = self.authorization_type
if self.biz_scene:
if hasattr(self.biz_scene, 'to_alipay_dict'):
params['biz_scene'] = self.biz_scene.to_alipay_dict()
else:
params['biz_scene'] = self.biz_scene
if self.business_principal_info:
if hasattr(self.business_principal_info, 'to_alipay_dict'):
params['business_principal_info'] = self.business_principal_info.to_alipay_dict()
else:
params['business_principal_info'] = self.business_principal_info
if self.principal_info:
if hasattr(self.principal_info, 'to_alipay_dict'):
params['principal_info'] = self.principal_info.to_alipay_dict()
else:
params['principal_info'] = self.principal_info
if self.product_code:
if hasattr(self.product_code, 'to_alipay_dict'):
params['product_code'] = self.product_code.to_alipay_dict()
else:
params['product_code'] = self.product_code
if self.sub_biz_scene:
if hasattr(self.sub_biz_scene, 'to_alipay_dict'):
params['sub_biz_scene'] = self.sub_biz_scene.to_alipay_dict()
else:
params['sub_biz_scene'] = self.sub_biz_scene
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayFundScenepayAuthorizeQueryModel()
if 'authorization_type' in d:
o.authorization_type = d['authorization_type']
if 'biz_scene' in d:
o.biz_scene = d['biz_scene']
if 'business_principal_info' in d:
o.business_principal_info = d['business_principal_info']
if 'principal_info' in d:
o.principal_info = d['principal_info']
if 'product_code' in d:
o.product_code = d['product_code']
if 'sub_biz_scene' in d:
o.sub_biz_scene = d['sub_biz_scene']
return o
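# A short usage sketch (illustrative values, not part of the SDK file): the
# model round-trips through plain dicts via to_alipay_dict / from_alipay_dict.
#
# model = AlipayFundScenepayAuthorizeQueryModel()
# model.biz_scene = "DEFAULT"        # assumed value
# model.product_code = "SCENE_PAY"   # assumed value
# restored = AlipayFundScenepayAuthorizeQueryModel.from_alipay_dict(
#     model.to_alipay_dict())
# assert restored.product_code == "SCENE_PAY"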
|
e77be61109449bb7af85aaf74d29bb1dc2df8461
|
f7dc806f341ef5dbb0e11252a4693003a66853d5
|
/platform/web/serve.py
|
6a3efcc4637f772457ec0bb89fdedd2e40f0cc6d
|
[
"CC-BY-3.0",
"LicenseRef-scancode-free-unknown",
"MIT",
"CC-BY-4.0",
"OFL-1.1",
"Bison-exception-2.2",
"CC0-1.0",
"LicenseRef-scancode-nvidia-2002",
"LicenseRef-scancode-other-permissive",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSL-1.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unicode",
"BSD-2-Clause",
"FTL",
"GPL-3.0-or-later",
"Bitstream-Vera",
"Zlib",
"MPL-2.0",
"MIT-Modern-Variant"
] |
permissive
|
godotengine/godot
|
8a2419750f4851d1426a8f3bcb52cac5c86f23c2
|
970be7afdc111ccc7459d7ef3560de70e6d08c80
|
refs/heads/master
| 2023-08-21T14:37:00.262883
| 2023-08-21T06:26:15
| 2023-08-21T06:26:15
| 15,634,981
| 68,852
| 18,388
|
MIT
| 2023-09-14T21:42:16
| 2014-01-04T16:05:36
|
C++
|
UTF-8
|
Python
| false
| false
| 1,862
|
py
|
serve.py
|
#!/usr/bin/env python3
from http.server import HTTPServer, SimpleHTTPRequestHandler, test # type: ignore
from pathlib import Path
import os
import sys
import argparse
import subprocess
class CORSRequestHandler(SimpleHTTPRequestHandler):
def end_headers(self):
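        # COOP + COEP make the served page cross-origin isolated, which
        # browsers require before exposing SharedArrayBuffer (used by
        # threaded web exports); the wildcard CORS header keeps locally
        # served assets loadable.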
self.send_header("Cross-Origin-Opener-Policy", "same-origin")
self.send_header("Cross-Origin-Embedder-Policy", "require-corp")
self.send_header("Access-Control-Allow-Origin", "*")
super().end_headers()
def shell_open(url):
if sys.platform == "win32":
os.startfile(url)
else:
opener = "open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, url])
def serve(root, port, run_browser):
os.chdir(root)
if run_browser:
# Open the served page in the user's default browser.
print("Opening the served URL in the default browser (use `--no-browser` or `-n` to disable this).")
shell_open(f"http://127.0.0.1:{port}")
test(CORSRequestHandler, HTTPServer, port=port)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--port", help="port to listen on", default=8060, type=int)
parser.add_argument(
"-r", "--root", help="path to serve as root (relative to `platform/web/`)", default="../../bin", type=Path
)
browser_parser = parser.add_mutually_exclusive_group(required=False)
browser_parser.add_argument(
"-n", "--no-browser", help="don't open default web browser automatically", dest="browser", action="store_false"
)
parser.set_defaults(browser=True)
args = parser.parse_args()
# Change to the directory where the script is located,
# so that the script can be run from any location.
os.chdir(Path(__file__).resolve().parent)
serve(args.root, args.port, args.browser)
|
3e8d274c4e11df1dc6493921c7340c5a60ed0174
|
e441a2f416c83f04889ecd43d6b6bdcf5172b287
|
/web3/middleware/__init__.py
|
76fc62ed45f1ac3b75bd356455163f49da23dc7c
|
[
"MIT"
] |
permissive
|
ethereum/web3.py
|
f8d66eefaa84d30fa51a0978d1d1c44c6807b355
|
76da2146267fa03760f35c33ca8b9a96d9e24835
|
refs/heads/main
| 2023-08-31T18:34:30.144026
| 2023-08-29T15:43:25
| 2023-08-29T15:43:25
| 56,251,096
| 4,403
| 1,680
|
MIT
| 2023-09-14T20:46:08
| 2016-04-14T15:59:35
|
Python
|
UTF-8
|
Python
| false
| false
| 3,848
|
py
|
__init__.py
|
import functools
from typing import (
Coroutine,
TYPE_CHECKING,
Any,
Callable,
Sequence,
)
from web3.types import (
AsyncMiddleware,
Middleware,
RPCEndpoint,
RPCResponse,
)
from .abi import ( # noqa: F401
abi_middleware,
)
from .async_cache import ( # noqa: F401
_async_simple_cache_middleware as async_simple_cache_middleware,
async_construct_simple_cache_middleware,
)
from .attrdict import ( # noqa: F401
async_attrdict_middleware,
attrdict_middleware,
)
from .buffered_gas_estimate import ( # noqa: F401
async_buffered_gas_estimate_middleware,
buffered_gas_estimate_middleware,
)
from .cache import ( # noqa: F401
_latest_block_based_cache_middleware as latest_block_based_cache_middleware,
_simple_cache_middleware as simple_cache_middleware,
_time_based_cache_middleware as time_based_cache_middleware,
construct_latest_block_based_cache_middleware,
construct_simple_cache_middleware,
construct_time_based_cache_middleware,
)
from .exception_handling import ( # noqa: F401
construct_exception_handler_middleware,
)
from .exception_retry_request import ( # noqa: F401
async_http_retry_request_middleware,
http_retry_request_middleware,
)
from .filter import ( # noqa: F401
async_local_filter_middleware,
local_filter_middleware,
)
from .fixture import ( # noqa: F401
async_construct_error_generator_middleware,
async_construct_result_generator_middleware,
construct_error_generator_middleware,
construct_fixture_middleware,
construct_result_generator_middleware,
)
from .formatting import ( # noqa: F401
construct_formatting_middleware,
)
from .gas_price_strategy import ( # noqa: F401
async_gas_price_strategy_middleware,
gas_price_strategy_middleware,
)
from .geth_poa import ( # noqa: F401
async_geth_poa_middleware,
geth_poa_middleware,
)
from .names import ( # noqa: F401
async_name_to_address_middleware,
name_to_address_middleware,
)
from .normalize_request_parameters import ( # noqa: F401
request_parameter_normalizer,
)
from .pythonic import ( # noqa: F401
pythonic_middleware,
)
from .signing import ( # noqa: F401
construct_sign_and_send_raw_middleware,
)
from .stalecheck import ( # noqa: F401
async_make_stalecheck_middleware,
make_stalecheck_middleware,
)
from .validation import ( # noqa: F401
async_validation_middleware,
validation_middleware,
)
if TYPE_CHECKING:
from web3 import AsyncWeb3, Web3 # noqa: F401
def combine_middlewares(
middlewares: Sequence[Middleware],
w3: "Web3",
provider_request_fn: Callable[[RPCEndpoint, Any], Any],
) -> Callable[..., RPCResponse]:
"""
Returns a callable function which will call the provider.provider_request
function wrapped with all of the middlewares.
"""
return functools.reduce(
lambda request_fn, middleware: middleware(request_fn, w3),
reversed(middlewares),
provider_request_fn,
)
async def async_combine_middlewares(
middlewares: Sequence[AsyncMiddleware],
async_w3: "AsyncWeb3",
provider_request_fn: Callable[[RPCEndpoint, Any], Any],
) -> Callable[..., Coroutine[Any, Any, RPCResponse]]:
"""
Returns a callable function which will call the provider.provider_request
function wrapped with all of the middlewares.
"""
accumulator_fn = provider_request_fn
for middleware in reversed(middlewares):
accumulator_fn = await construct_middleware(
middleware, accumulator_fn, async_w3
)
return accumulator_fn
async def construct_middleware(
async_middleware: AsyncMiddleware,
fn: Callable[..., RPCResponse],
async_w3: "AsyncWeb3",
) -> Callable[[RPCEndpoint, Any], RPCResponse]:
return await async_middleware(fn, async_w3)
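# A minimal sketch (an assumption, not part of this module) of the middleware
# shape these combinators expect: a middleware takes the next request function
# plus the Web3 instance and returns a wrapped request function.
def construct_noop_middleware() -> Middleware:
    def middleware(
        make_request: Callable[[RPCEndpoint, Any], Any], _w3: "Web3"
    ) -> Callable[..., RPCResponse]:
        def middleware_fn(method: RPCEndpoint, params: Any) -> RPCResponse:
            # A real middleware would inspect or rewrite method/params here.
            return make_request(method, params)
        return middleware_fn
    return middleware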
|
e67bbde4ad66835729f32d66502723ee7ed42d6f
|
c675ff5fcd3b13fa39352bb8cac11d75262659a8
|
/tests/test_scheduler/test_timeoutscheduler.py
|
2a9e119acc94dacb9392677cf16aa47418390414
|
[
"MIT"
] |
permissive
|
ReactiveX/RxPY
|
469eb714996c205989e99899a6f1ab1ae2f42dd0
|
af1663d35810fdcd4c25a3ed2e8f0d71b55c341d
|
refs/heads/master
| 2023-08-14T19:27:40.086304
| 2023-01-08T10:02:08
| 2023-03-04T15:33:19
| 8,946,089
| 4,764
| 467
|
MIT
| 2023-09-05T02:53:16
| 2013-03-22T06:16:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,853
|
py
|
test_timeoutscheduler.py
|
import os
import threading
import unittest
from datetime import timedelta
from time import sleep
import pytest
from reactivex.internal.basic import default_now
from reactivex.scheduler import TimeoutScheduler
CI = os.getenv("CI") is not None
class TestTimeoutScheduler(unittest.TestCase):
def test_timeout_singleton(self):
scheduler = [TimeoutScheduler(), TimeoutScheduler.singleton()]
assert scheduler[0] is scheduler[1]
gate = [threading.Semaphore(0), threading.Semaphore(0)]
scheduler = [None, None]
def run(idx):
scheduler[idx] = TimeoutScheduler()
gate[idx].release()
for idx in (0, 1):
threading.Thread(target=run, args=(idx,)).start()
gate[idx].acquire()
assert scheduler[0] is not None
assert scheduler[1] is not None
assert scheduler[0] is scheduler[1]
def test_timeout_extend(self):
class MyScheduler(TimeoutScheduler):
pass
scheduler = [
MyScheduler(),
MyScheduler.singleton(),
TimeoutScheduler.singleton(),
]
assert scheduler[0] is scheduler[1]
assert scheduler[0] is not scheduler[2]
@pytest.mark.skipif(CI, reason="Flaky test in GitHub Actions")
def test_timeout_now(self):
scheduler = TimeoutScheduler()
diff = scheduler.now - default_now()
assert abs(diff) < timedelta(milliseconds=1)
@pytest.mark.skipif(CI, reason="Flaky test in GitHub Actions")
def test_timeout_now_units(self):
scheduler = TimeoutScheduler()
        start = scheduler.now
        sleep(1.1)
        diff = scheduler.now - start
assert timedelta(milliseconds=1000) < diff < timedelta(milliseconds=1300)
def test_timeout_schedule_action(self):
scheduler = TimeoutScheduler()
ran = False
def action(scheduler, state):
nonlocal ran
ran = True
scheduler.schedule(action)
sleep(0.1)
assert ran is True
def test_timeout_schedule_action_due(self):
scheduler = TimeoutScheduler()
starttime = default_now()
endtime = None
def action(scheduler, state):
nonlocal endtime
endtime = default_now()
scheduler.schedule_relative(timedelta(milliseconds=200), action)
sleep(0.4)
assert endtime is not None
diff = endtime - starttime
assert diff > timedelta(milliseconds=180)
def test_timeout_schedule_action_cancel(self):
ran = False
scheduler = TimeoutScheduler()
def action(scheduler, state):
nonlocal ran
ran = True
d = scheduler.schedule_relative(timedelta(milliseconds=300), action)
d.dispose()
sleep(0.1)
assert ran is False
|
34c5c7457869c3db3c50f87efb1a86efcdd55eee
|
5290e0fa51f5bc0f5244610a2251b58aadfab938
|
/vision/utils/model_book.py
|
b1e9d17e9c3467e3261b5f4e962887f714b5bff0
|
[
"MIT"
] |
permissive
|
qfgaohao/pytorch-ssd
|
ab9aa3c3d9220aa34b67e3130aa059756001d4cb
|
7a839cbc8c3fb39679856b4dc42a1ab19ec07581
|
refs/heads/master
| 2023-07-30T02:44:24.162185
| 2023-03-11T09:50:27
| 2023-03-11T09:50:27
| 133,904,817
| 1,481
| 541
|
MIT
| 2023-02-21T00:56:51
| 2018-05-18T04:56:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,390
|
py
|
model_book.py
|
from collections import OrderedDict
import torch.nn as nn
class ModelBook:
"""Maintain the mapping between modules and their paths.
Example:
book = ModelBook(model_ft)
for p, m in book.conv2d_modules():
print('path:', p, 'num of filters:', m.out_channels)
assert m is book.get_module(p)
"""
def __init__(self, model):
self._model = model
self._modules = OrderedDict()
self._paths = OrderedDict()
path = []
self._construct(self._model, path)
def _construct(self, module, path):
if not module._modules:
return
for name, m in module._modules.items():
cur_path = tuple(path + [name])
self._paths[m] = cur_path
self._modules[cur_path] = m
self._construct(m, path + [name])
def conv2d_modules(self):
return self.modules(nn.Conv2d)
def linear_modules(self):
return self.modules(nn.Linear)
def modules(self, module_type=None):
for p, m in self._modules.items():
if not module_type or isinstance(m, module_type):
yield p, m
def num_of_conv2d_modules(self):
return self.num_of_modules(nn.Conv2d)
def num_of_conv2d_filters(self):
"""Return the sum of out_channels of all conv2d layers.
        Here we treat each sub-weight of size [in_channels, h, w] as a single filter.
"""
num_filters = 0
for _, m in self.conv2d_modules():
num_filters += m.out_channels
return num_filters
def num_of_linear_modules(self):
return self.num_of_modules(nn.Linear)
def num_of_linear_filters(self):
num_filters = 0
for _, m in self.linear_modules():
num_filters += m.out_features
return num_filters
def num_of_modules(self, module_type=None):
num = 0
for p, m in self._modules.items():
if not module_type or isinstance(m, module_type):
num += 1
return num
def get_module(self, path):
return self._modules.get(path)
def get_path(self, module):
return self._paths.get(module)
def update(self, path, module):
old_module = self._modules[path]
del self._paths[old_module]
self._paths[module] = path
self._modules[path] = module
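# A small usage sketch (illustrative, not part of the original module): keep
# the book consistent after swapping a module, e.g. when pruning a conv layer
# down to fewer filters.
if __name__ == "__main__":
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 4, 3))
    book = ModelBook(model)
    path = book.get_path(model[2])  # ('2',) -- the last conv layer
    slim_conv = nn.Conv2d(8, 2, 3)  # same inputs, fewer filters
    model[2] = slim_conv            # swap in the model itself...
    book.update(path, slim_conv)    # ...and mirror the swap in the book
    assert book.get_module(path) is slim_conv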
|
f7feb81333dce676a6f9eefef54e3bd9baefc016
|
e3bb1df7fa4c51900dec7e9ddf5295e1a80938bd
|
/hummingbot/strategy/spot_perpetual_arbitrage/spot_perpetual_arbitrage_config_map.py
|
2c992d64819b9c031c056fdf7f80c442c1d00166
|
[
"Apache-2.0"
] |
permissive
|
CoinAlpha/hummingbot
|
0d1e2bd94de1280748647108c7d7800a09546eb8
|
c3f101759ab7e7a2165cd23a3a3e94c90c642a9b
|
refs/heads/development
| 2023-09-01T11:24:43.322137
| 2023-08-31T03:08:06
| 2023-08-31T03:08:06
| 439,330,952
| 135
| 98
|
Apache-2.0
| 2023-08-30T13:55:08
| 2021-12-17T12:50:42
|
Python
|
UTF-8
|
Python
| false
| false
| 5,762
|
py
|
spot_perpetual_arbitrage_config_map.py
|
from decimal import Decimal
from hummingbot.client.config.config_validators import (
validate_connector,
validate_decimal,
validate_derivative,
validate_int,
validate_market_trading_pair,
)
from hummingbot.client.config.config_var import ConfigVar
from hummingbot.client.settings import AllConnectorSettings, required_exchanges, requried_connector_trading_pairs
def exchange_on_validated(value: str) -> None:
required_exchanges.add(value)
def spot_market_validator(value: str):
exchange = spot_perpetual_arbitrage_config_map["spot_connector"].value
return validate_market_trading_pair(exchange, value)
def spot_market_on_validated(value: str) -> None:
requried_connector_trading_pairs[spot_perpetual_arbitrage_config_map["spot_connector"].value] = [value]
def perpetual_market_validator(value: str):
exchange = spot_perpetual_arbitrage_config_map["perpetual_connector"].value
return validate_market_trading_pair(exchange, value)
def perpetual_market_on_validated(value: str) -> None:
requried_connector_trading_pairs[spot_perpetual_arbitrage_config_map["perpetual_connector"].value] = [value]
def spot_market_prompt() -> str:
connector = spot_perpetual_arbitrage_config_map.get("spot_connector").value
example = AllConnectorSettings.get_example_pairs().get(connector)
return "Enter the token trading pair you would like to trade on %s%s >>> " \
% (connector, f" (e.g. {example})" if example else "")
def perpetual_market_prompt() -> str:
connector = spot_perpetual_arbitrage_config_map.get("perpetual_connector").value
example = AllConnectorSettings.get_example_pairs().get(connector)
return "Enter the token trading pair you would like to trade on %s%s >>> " \
% (connector, f" (e.g. {example})" if example else "")
def order_amount_prompt() -> str:
trading_pair = spot_perpetual_arbitrage_config_map["spot_market"].value
base_asset, quote_asset = trading_pair.split("-")
return f"What is the amount of {base_asset} per order? >>> "
spot_perpetual_arbitrage_config_map = {
"strategy": ConfigVar(
key="strategy",
prompt="",
default="spot_perpetual_arbitrage"),
"spot_connector": ConfigVar(
key="spot_connector",
prompt="Enter a spot connector (Exchange/AMM/CLOB) >>> ",
prompt_on_new=True,
validator=validate_connector,
on_validated=exchange_on_validated),
"spot_market": ConfigVar(
key="spot_market",
prompt=spot_market_prompt,
prompt_on_new=True,
validator=spot_market_validator,
on_validated=spot_market_on_validated),
"perpetual_connector": ConfigVar(
key="perpetual_connector",
prompt="Enter a derivative connector >>> ",
prompt_on_new=True,
validator=validate_derivative,
on_validated=exchange_on_validated),
"perpetual_market": ConfigVar(
key="perpetual_market",
prompt=perpetual_market_prompt,
prompt_on_new=True,
validator=perpetual_market_validator,
on_validated=perpetual_market_on_validated),
"order_amount": ConfigVar(
key="order_amount",
prompt=order_amount_prompt,
type_str="decimal",
prompt_on_new=True),
"perpetual_leverage": ConfigVar(
key="perpetual_leverage",
prompt="How much leverage would you like to use on the perpetual exchange? (Enter 1 to indicate 1X) >>> ",
type_str="int",
default=1,
        validator=lambda v: validate_int(v),
prompt_on_new=True),
"min_opening_arbitrage_pct": ConfigVar(
key="min_opening_arbitrage_pct",
prompt="What is the minimum arbitrage percentage between the spot and perpetual market price before opening "
"an arbitrage position? (Enter 1 to indicate 1%) >>> ",
prompt_on_new=True,
default=Decimal("1"),
validator=lambda v: validate_decimal(v, Decimal(-100), 100, inclusive=False),
type_str="decimal"),
"min_closing_arbitrage_pct": ConfigVar(
key="min_closing_arbitrage_pct",
prompt="What is the minimum arbitrage percentage between the spot and perpetual market price before closing "
"an existing arbitrage position? (Enter 1 to indicate 1%) (This can be negative value to close out the "
"position with lesser profit at higher chance of closing) >>> ",
prompt_on_new=True,
default=Decimal("-0.1"),
validator=lambda v: validate_decimal(v, Decimal(-100), 100, inclusive=False),
type_str="decimal"),
"spot_market_slippage_buffer": ConfigVar(
key="spot_market_slippage_buffer",
prompt="How much buffer do you want to add to the price to account for slippage for orders on the spot market "
"(Enter 1 for 1%)? >>> ",
prompt_on_new=True,
default=Decimal("0.05"),
validator=lambda v: validate_decimal(v),
type_str="decimal"),
"perpetual_market_slippage_buffer": ConfigVar(
key="perpetual_market_slippage_buffer",
prompt="How much buffer do you want to add to the price to account for slippage for orders on the perpetual "
"market (Enter 1 for 1%)? >>> ",
prompt_on_new=True,
default=Decimal("0.05"),
validator=lambda v: validate_decimal(v),
type_str="decimal"),
"next_arbitrage_opening_delay": ConfigVar(
key="next_arbitrage_opening_delay",
prompt="How long do you want the strategy to wait before opening the next arbitrage position (in seconds)?",
type_str="float",
validator=lambda v: validate_decimal(v, min_value=0, inclusive=False),
default=120),
}
|
29da26c110eb7de0f24f8f8bb531220817bac7bd
|
ffb0b623455f22af81a03eb52889bd1bfed50566
|
/src/bandersnatch/tests/test_configuration.py
|
92b8d384fba0efd6c62f8810a60f9994910cd460
|
[
"AFL-3.0"
] |
permissive
|
pypa/bandersnatch
|
c5ba356caae55e4edb80005da625b04e7fb70500
|
bf19ea547086c1b9dd997d1dc00081109b5cd626
|
refs/heads/main
| 2023-09-03T03:27:19.538217
| 2023-08-28T23:55:04
| 2023-08-28T23:55:04
| 133,377,409
| 405
| 157
|
AFL-3.0
| 2023-09-13T10:46:33
| 2018-05-14T14:52:22
|
Python
|
UTF-8
|
Python
| false
| false
| 6,629
|
py
|
test_configuration.py
|
import configparser
import importlib.resources
import os
import unittest
from tempfile import TemporaryDirectory
from unittest import TestCase
from bandersnatch.configuration import (
BandersnatchConfig,
SetConfigValues,
Singleton,
validate_config_values,
)
from bandersnatch.simple import SimpleFormat
class TestBandersnatchConf(TestCase):
"""
Tests for the BandersnatchConf singleton class
"""
tempdir = None
cwd = None
def setUp(self) -> None:
self.cwd = os.getcwd()
self.tempdir = TemporaryDirectory()
os.chdir(self.tempdir.name)
# Hack to ensure each test gets fresh instance if needed
# We have a dedicated test to ensure we're creating a singleton
Singleton._instances = {}
def tearDown(self) -> None:
if self.tempdir:
assert self.cwd
os.chdir(self.cwd)
self.tempdir.cleanup()
self.tempdir = None
def test_is_singleton(self) -> None:
instance1 = BandersnatchConfig()
instance2 = BandersnatchConfig()
self.assertEqual(id(instance1), id(instance2))
def test_single_config__default__all_sections_present(self) -> None:
config_file = str(importlib.resources.files("bandersnatch") / "unittest.conf")
        instance = BandersnatchConfig(config_file)
        # All default values should at least be present and be of the right types
for section in ["mirror", "plugins", "blocklist"]:
self.assertIn(section, instance.config.sections())
def test_single_config__default__mirror__setting_attributes(self) -> None:
instance = BandersnatchConfig()
options = [option for option in instance.config["mirror"]]
options.sort()
self.assertListEqual(
options,
[
"cleanup",
"compare-method",
"directory",
"global-timeout",
"hash-index",
"json",
"master",
"release-files",
"simple-format",
"stop-on-error",
"storage-backend",
"timeout",
"verifiers",
"workers",
],
)
def test_single_config__default__mirror__setting__types(self) -> None:
"""
Make sure all default mirror settings will cast to the correct types
"""
instance = BandersnatchConfig()
for option, option_type in [
("directory", str),
("hash-index", bool),
("json", bool),
("master", str),
("stop-on-error", bool),
("storage-backend", str),
("timeout", int),
("global-timeout", int),
("workers", int),
("compare-method", str),
]:
self.assertIsInstance(
option_type(instance.config["mirror"].get(option)), option_type
)
def test_single_config_custom_setting_boolean(self) -> None:
with open("test.conf", "w") as testconfig_handle:
testconfig_handle.write("[mirror]\nhash-index=false\n")
instance = BandersnatchConfig()
instance.config_file = "test.conf"
instance.load_configuration()
self.assertFalse(instance.config["mirror"].getboolean("hash-index"))
def test_single_config_custom_setting_int(self) -> None:
with open("test.conf", "w") as testconfig_handle:
testconfig_handle.write("[mirror]\ntimeout=999\n")
instance = BandersnatchConfig()
instance.config_file = "test.conf"
instance.load_configuration()
self.assertEqual(int(instance.config["mirror"]["timeout"]), 999)
def test_single_config_custom_setting_str(self) -> None:
with open("test.conf", "w") as testconfig_handle:
testconfig_handle.write("[mirror]\nmaster=https://foo.bar.baz\n")
instance = BandersnatchConfig()
instance.config_file = "test.conf"
instance.load_configuration()
self.assertEqual(instance.config["mirror"]["master"], "https://foo.bar.baz")
def test_multiple_instances_custom_setting_str(self) -> None:
with open("test.conf", "w") as testconfig_handle:
testconfig_handle.write("[mirror]\nmaster=https://foo.bar.baz\n")
instance1 = BandersnatchConfig()
instance1.config_file = "test.conf"
instance1.load_configuration()
instance2 = BandersnatchConfig()
self.assertEqual(instance2.config["mirror"]["master"], "https://foo.bar.baz")
def test_validate_config_values(self) -> None:
default_values = SetConfigValues(
False,
"",
"",
False,
"sha256",
"filesystem",
False,
True,
"hash",
"",
False,
SimpleFormat.ALL,
)
no_options_configparser = configparser.ConfigParser()
no_options_configparser["mirror"] = {}
self.assertEqual(
default_values, validate_config_values(no_options_configparser)
)
def test_validate_config_values_release_files_false_sets_root_uri(self) -> None:
default_values = SetConfigValues(
False,
"https://files.pythonhosted.org",
"",
False,
"sha256",
"filesystem",
False,
False,
"hash",
"",
False,
SimpleFormat.ALL,
)
release_files_false_configparser = configparser.ConfigParser()
release_files_false_configparser["mirror"] = {"release-files": "false"}
self.assertEqual(
default_values, validate_config_values(release_files_false_configparser)
)
def test_validate_config_values_download_mirror_false_sets_no_fallback(
self,
) -> None:
default_values = SetConfigValues(
False,
"",
"",
False,
"sha256",
"filesystem",
False,
True,
"hash",
"",
False,
SimpleFormat.ALL,
)
release_files_false_configparser = configparser.ConfigParser()
release_files_false_configparser["mirror"] = {
"download-mirror-no-fallback": "true",
}
self.assertEqual(
default_values, validate_config_values(release_files_false_configparser)
)
if __name__ == "__main__":
unittest.main()
|
9470d3c7fa856094752b5c35f40bc2beb8bdbc65
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/pytests/integration/ssh/test_raw.py
|
27fa31d4f1d26c986aded32b1bb10a93bd1a830d
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 505
|
py
|
test_raw.py
|
import pytest
pytestmark = [
pytest.mark.slow_test,
pytest.mark.skip_on_windows(reason="salt-ssh not available on Windows"),
]
def test_ssh_raw(salt_ssh_cli):
"""
test salt-ssh with -r argument
"""
msg = "password: foo"
ret = salt_ssh_cli.run("--raw", "echo", msg, _timeout=60)
assert ret.returncode == 0
assert ret.data
assert "retcode" in ret.data
assert ret.data["retcode"] == 0
assert "stdout" in ret.data
assert ret.data["stdout"] == msg + "\n"
|
ab0b98a5410b575d1c4e7f91a8707632b56dcf65
|
1742b6719b988e5519373002305e31d28b8bd691
|
/sdk/python/pulumi_aws/cloudwatch/get_log_groups.py
|
517eb8a606c6ce4a65ae9da61dbeb2fa1c3a6549
|
[
"BSD-3-Clause",
"Apache-2.0",
"MPL-2.0"
] |
permissive
|
pulumi/pulumi-aws
|
4f7fdb4a816c5ea357cff2c2e3b613c006e49f1a
|
42b0a0abdf6c14da248da22f8c4530af06e67b98
|
refs/heads/master
| 2023-08-03T23:08:34.520280
| 2023-08-01T18:09:58
| 2023-08-01T18:09:58
| 97,484,940
| 384
| 171
|
Apache-2.0
| 2023-09-14T14:48:40
| 2017-07-17T14:20:33
|
Java
|
UTF-8
|
Python
| false
| false
| 4,271
|
py
|
get_log_groups.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetLogGroupsResult',
'AwaitableGetLogGroupsResult',
'get_log_groups',
'get_log_groups_output',
]
@pulumi.output_type
class GetLogGroupsResult:
"""
A collection of values returned by getLogGroups.
"""
def __init__(__self__, arns=None, id=None, log_group_name_prefix=None, log_group_names=None):
if arns and not isinstance(arns, list):
raise TypeError("Expected argument 'arns' to be a list")
pulumi.set(__self__, "arns", arns)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if log_group_name_prefix and not isinstance(log_group_name_prefix, str):
raise TypeError("Expected argument 'log_group_name_prefix' to be a str")
pulumi.set(__self__, "log_group_name_prefix", log_group_name_prefix)
if log_group_names and not isinstance(log_group_names, list):
raise TypeError("Expected argument 'log_group_names' to be a list")
pulumi.set(__self__, "log_group_names", log_group_names)
@property
@pulumi.getter
def arns(self) -> Sequence[str]:
"""
Set of ARNs of the Cloudwatch log groups
"""
return pulumi.get(self, "arns")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="logGroupNamePrefix")
def log_group_name_prefix(self) -> Optional[str]:
return pulumi.get(self, "log_group_name_prefix")
@property
@pulumi.getter(name="logGroupNames")
def log_group_names(self) -> Sequence[str]:
"""
Set of names of the Cloudwatch log groups
"""
return pulumi.get(self, "log_group_names")
class AwaitableGetLogGroupsResult(GetLogGroupsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetLogGroupsResult(
arns=self.arns,
id=self.id,
log_group_name_prefix=self.log_group_name_prefix,
log_group_names=self.log_group_names)
def get_log_groups(log_group_name_prefix: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLogGroupsResult:
"""
Use this data source to get a list of AWS Cloudwatch Log Groups
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.cloudwatch.get_log_groups(log_group_name_prefix="/MyImportantLogs")
```
:param str log_group_name_prefix: Group prefix of the Cloudwatch log groups to list
"""
__args__ = dict()
__args__['logGroupNamePrefix'] = log_group_name_prefix
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:cloudwatch/getLogGroups:getLogGroups', __args__, opts=opts, typ=GetLogGroupsResult).value
return AwaitableGetLogGroupsResult(
arns=pulumi.get(__ret__, 'arns'),
id=pulumi.get(__ret__, 'id'),
log_group_name_prefix=pulumi.get(__ret__, 'log_group_name_prefix'),
log_group_names=pulumi.get(__ret__, 'log_group_names'))
@_utilities.lift_output_func(get_log_groups)
def get_log_groups_output(log_group_name_prefix: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetLogGroupsResult]:
"""
Use this data source to get a list of AWS Cloudwatch Log Groups
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.cloudwatch.get_log_groups(log_group_name_prefix="/MyImportantLogs")
```
:param str log_group_name_prefix: Group prefix of the Cloudwatch log groups to list
"""
...
|
7827fc966461437192eba9e0a48eb08f3a034ea8
|
3f55217e912141e04815bc8bcb6fbd5638d0896e
|
/build_utils/find_libs.py
|
15762bab89b0d9797db30731ea52077d7fdc487b
|
[
"BSD-2-Clause"
] |
permissive
|
NREL/Radiance
|
bfbb93c99d86368ad0f27052a2a5504aeced47f8
|
2fcca99ace2f2435f32a09525ad31f2b3be3c1bc
|
refs/heads/master
| 2021-12-26T12:42:04.586614
| 2021-12-18T00:43:56
| 2021-12-18T00:43:56
| 8,210,805
| 164
| 68
|
NOASSERTION
| 2019-06-06T19:57:11
| 2013-02-15T00:47:56
|
C
|
UTF-8
|
Python
| false
| false
| 3,418
|
py
|
find_libs.py
|
from __future__ import division, print_function, unicode_literals
import os
from SCons.SConf import SConf # aka Configure
def find_radlib(env):
v = env.FindFile('helvet.fnt', './lib')
if not v:
print('''
Radiance auxiliary support files not found.
-> Download from radiance-online.org and extract.
''')
env.Exit()
def find_pyinstaller(env):
if os.name != 'nt':
return
conf = SConf(env)
oldpath = (env['ENV'].get('PATH'))
try:
env['ENV']['PATH'] = os.environ['PATH']
pyinst = conf.CheckProg('pyinstaller.exe')
if pyinst:
env['PYINSTALLER'] = pyinst
env['PYSCRIPTS'] = []
env = conf.Finish()
finally:
env['ENV']['PATH'] = oldpath
def find_x11(env):
# Search for libX11, remember the X11 library and include dirs
for d in ('/usr/X11R6', '/usr/X11', '/usr/openwin'):
if os.path.isdir (d):
incdir = os.path.join(d, 'include')
libdir = os.path.join(d, 'lib')
env.Prepend(CPPPATH=[incdir]) # add temporarily
env.Prepend(LIBPATH=[libdir])
conf = SConf(env)
if conf.CheckLibWithHeader('X11', 'X11/X.h', 'C', autoadd=0):
env.Replace(X11INCLUDE=incdir)
env.Replace(X11LIB=libdir)
env['CPPPATH'].remove(incdir) # not needed for now
env['LIBPATH'].remove(libdir)
if env['X11INCLUDE']:
# Check for SGI stereo extension
if conf.CheckCHeader('X11/extensions/SGIStereo.h'):
env['RAD_STEREO'] = '-DSTEREO'
else: env['RAD_STEREO'] = '-DNOSTEREO'
env = conf.Finish ()
break
env = conf.Finish ()
def find_gl(env):
# Check for libGL, set flag
dl = [(None,None)] # standard search path
if env.has_key('X11INCLUDE'): # sometimes found there (Darwin)
dl.append((env['X11INCLUDE'], env['X11LIB']))
for incdir, libdir in dl:
if incdir: env.Prepend(CPPPATH=[incdir]) # add temporarily
if libdir: env.Prepend(LIBPATH=[libdir])
conf = SConf(env)
if (conf.CheckLib('GL')
or conf.CheckLib('opengl32')
or conf.CheckCHeader('OpenGL/gl.h')
or conf.CheckCHeader('GL/gl.h')):
env['OGL'] = 1
if os.name == 'nt':
if (conf.CheckLib('GLU') # for winrview
or conf.CheckLib('glu32')
or conf.CheckCHeader('OpenGL/glu.h')):
env['GLU'] = 1
if incdir: env['CPPPATH'].remove(incdir) # not needed for now
if libdir: env['LIBPATH'].remove(libdir)
if env.has_key('OGL'):
if incdir: env.Replace(OGLINCLUDE=[incdir])
if env.has_key('GLU'):
if incdir: env.Replace(GLUINCLUDE=[incdir])
#if libdir: env.Replace(OGLLIB=[libdir])
conf.Finish()
break
conf.Finish()
def find_libtiff(env):
# Check for libtiff, set flag and include/lib directories
dl = [ (None,None), ] # standard search path
cfgi = env.get('TIFFINCLUDE')
cfgl = env.get('TIFFLIB')
if cfgi or cfgl:
dl.insert(0,(cfgi, cfgl))
for incdir, libdir in dl:
xenv = env.Clone()
if incdir: xenv.Prepend(CPPPATH=[incdir]) # add temporarily
if libdir:
xenv.Prepend(LIBPATH=[libdir])
xenv.Prepend(PATH=[libdir])
conf = SConf(xenv)
libname = 'tiff'
if os.name == 'nt':
xenv['INCPREFIX'] = '/I ' # Bug in SCons (uses '/I')
libname = 'libtiff'
if conf.CheckLib(libname, 'TIFFInitSGILog',
header='''#include "tiff.h"''', autoadd=0):
env['TIFFLIB_INSTALLED'] = 1
if env.has_key('TIFFLIB_INSTALLED'):
env.Replace(RAD_LIBTIFF=libname)
if incdir: env.Replace(RAD_TIFFINCLUDE=[incdir])
if libdir: env.Replace(RAD_TIFFLIB=[libdir])
conf.Finish()
break
conf.Finish()
# vi: set ts=4 sw=4 :
|
bf163b14d319e5936fc5443f8897502dcf5fa641
|
e4b940707c32882d248fda5cccdf513af8f52188
|
/docs/screenshots/administrator/scene_basic.py
|
c6f33ddcf20a43730c7c84ea574602cadcfa1e6d
|
[
"Apache-2.0"
] |
permissive
|
byro/byro
|
5ca0d5cd6470ee18400432e85420d5594f54d927
|
3b757a4fd567a352f34c9e6638408bc98d144d03
|
refs/heads/main
| 2023-09-02T04:16:12.817733
| 2023-08-01T23:45:44
| 2023-08-02T11:44:34
| 100,076,395
| 144
| 56
|
Apache-2.0
| 2023-09-12T23:39:11
| 2017-08-11T23:21:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
scene_basic.py
|
import pytest
from utils import screenshot
@pytest.mark.django_db
def shot_login(client):
client.go_to("common:login")
screenshot(client, "common_login.png")
@pytest.mark.django_db
def shot_dashboard(logged_in_client, full_testdata):
logged_in_client.go_to("office:dashboard")
screenshot(logged_in_client, "office_dashboard.png")
@pytest.mark.django_db
def shot_member_list(logged_in_client, full_testdata):
logged_in_client.go_to("office:members.list")
screenshot(logged_in_client, "office_members_list.png")
@pytest.mark.django_db
def shot_settings_base(logged_in_client, full_testdata):
logged_in_client.go_to("office:settings.base")
screenshot(logged_in_client, "office_settings_base.png")
@pytest.mark.django_db
def shot_settings_registration(logged_in_client, full_testdata):
logged_in_client.go_to("office:settings.registration")
screenshot(logged_in_client, "office_settings_registration.png")
@pytest.mark.django_db
def shot_settings_about(logged_in_client, full_testdata):
logged_in_client.go_to("office:settings.about")
screenshot(logged_in_client, "office_settings_about.png")
|
c0b818887c1aa5587dbfe88531d0f435199a2a68
|
3395a234e7c80d011607e79c49cd48bf516f256b
|
/dependencies/jedi/third_party/typeshed/third_party/2and3/werkzeug/script.pyi
|
b9db97a0051a21c594e7f74179eea4ae82ce2479
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
srusskih/SublimeJEDI
|
67329b72e184bc9584843968dcc534a002c797a1
|
95c185d778425c04536d53517b0e3fe6dedf8e59
|
refs/heads/master
| 2023-08-24T11:30:37.801834
| 2022-08-30T09:04:17
| 2022-08-30T09:04:17
| 6,241,108
| 669
| 125
|
MIT
| 2022-08-30T09:04:18
| 2012-10-16T08:23:57
|
Python
|
UTF-8
|
Python
| false
| false
| 759
|
pyi
|
script.pyi
|
from typing import Any, Optional
argument_types: Any
converters: Any
def run(namespace: Optional[Any] = ..., action_prefix: str = ..., args: Optional[Any] = ...): ...
def fail(message, code: int = ...): ...
def find_actions(namespace, action_prefix): ...
def print_usage(actions): ...
def analyse_action(func): ...
def make_shell(init_func: Optional[Any] = ..., banner: Optional[Any] = ..., use_ipython: bool = ...): ...
def make_runserver(app_factory, hostname: str = ..., port: int = ..., use_reloader: bool = ..., use_debugger: bool = ...,
use_evalex: bool = ..., threaded: bool = ..., processes: int = ..., static_files: Optional[Any] = ...,
extra_files: Optional[Any] = ..., ssl_context: Optional[Any] = ...): ...
|
69d3e4e8ad27a582d8b610081651c82184119c95
|
686177a1034cee76ad040c62e3d3e49d44a9ee7a
|
/src/collectors/http/http.py
|
1f5c73e4a13b71e7b21a4a743abf99dd3810a812
|
[
"MIT"
] |
permissive
|
python-diamond/Diamond
|
8fce83a3d883dfa05b664fdd5cb6064f34c4ac41
|
461caf29e84db8cbf46f9fc4c895f56353e10c61
|
refs/heads/master
| 2023-07-20T01:44:18.504408
| 2023-01-29T19:46:36
| 2023-01-29T19:46:36
| 13,512,018
| 1,874
| 824
|
MIT
| 2023-07-10T11:06:38
| 2013-10-11T23:32:13
|
Python
|
UTF-8
|
Python
| false
| false
| 3,124
|
py
|
http.py
|
# coding=utf-8
"""
Collect statistics from an HTTP or HTTPS connection
#### Dependencies
* urllib2
#### Usage
Add the collector config as:
enabled = True
ttl_multiplier = 2
path_suffix = ""
measure_collector_time = False
byte_unit = byte,
req_vhost = www.my_server.com
req_url = https://www.my_server.com/, https://www.my_server.com/assets/jquery.js
Metrics are collected as:
- servers.<hostname>.http.<url>.size (size of the page received in bytes)
- servers.<hostname>.http.<url>.time (time to download the page in microsec)
'.' and '/' chars are replaced by __, url looking like
http://www.site.com/admin/page.html are replaced by
http:__www_site_com_admin_page_html
"""
import urllib2
import diamond.collector
import datetime
class HttpCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(HttpCollector, self).get_default_config_help()
config_help.update({
'req_port': 'Port',
'req_url':
'array of full URL to get (ex : https://www.ici.net/mypage.html)',
'req_vhost':
'Host header variable if needed. Will be added to every request',
})
return config_help
def get_default_config(self):
default_config = super(HttpCollector, self).get_default_config()
default_config['path'] = 'http'
default_config['req_vhost'] = ''
default_config['req_url'] = ['http://localhost/']
default_config['headers'] = {'User-Agent': 'Diamond HTTP collector', }
return default_config
def collect(self):
# create urllib2 vars
if self.config['req_vhost'] != "":
self.config['headers']['Host'] = self.config['req_vhost']
# time the request
for url in self.config['req_url']:
self.log.debug("collecting %s", str(url))
req_start = datetime.datetime.now()
req = urllib2.Request(url, headers=self.config['headers'])
try:
handle = urllib2.urlopen(req)
the_page = handle.read()
req_end = datetime.datetime.now()
req_time = req_end - req_start
# build a compatible name : no '.' and no'/' in the name
metric_name = url.replace(
'/', '_').replace(
'.', '_').replace(
'\\', '').replace(
':', '')
# metric_name = url.split("/")[-1].replace(".", "_")
if metric_name == '':
metric_name = "root"
self.publish_gauge(
metric_name + '.time',
req_time.seconds * 1000000 + req_time.microseconds)
self.publish_gauge(
metric_name + '.size',
len(the_page))
            except IOError as e:
                self.log.error("Unable to open %s: %s", url, e)
except Exception as e:
self.log.error("Unknown error opening url: %s", e)
|
b02b3f9175530465cabb2c9e48d19df4343fbaf8
|
8a85eb9b50864626cd2674f15b07df3d5dbe0b73
|
/neo/rawio/neuroscoperawio.py
|
80f2ec22b439e903651c16bdc174d0890141e99b
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
NeuralEnsemble/python-neo
|
287d3457a44c45f4dcbee0e9f9a2a5d83142de69
|
354c8d9d5fbc4daad3547773d2f281f8c163d208
|
refs/heads/master
| 2023-09-06T03:29:34.835053
| 2023-09-01T09:17:14
| 2023-09-01T09:17:14
| 3,949,530
| 265
| 213
|
BSD-3-Clause
| 2023-09-14T19:09:24
| 2012-04-06T12:48:48
|
Python
|
UTF-8
|
Python
| false
| false
| 7,669
|
py
|
neuroscoperawio.py
|
"""
Reading from neuroscope format files.
Ref: http://neuroscope.sourceforge.net/
It is an old format from Buzsaki's lab
Some old open datasets from spike sorting
are still using this format.
This covers only the signals.
Still to be done (but maybe never will be):
* SpikeTrain file '.clu' '.res'
* Event '.ext.evt' or '.evt.ext'
Author: Samuel Garcia
"""
from pathlib import Path
from .baserawio import (BaseRawIO, _signal_channel_dtype, _signal_stream_dtype,
_spike_channel_dtype, _event_channel_dtype)
import numpy as np
from xml.etree import ElementTree
class NeuroScopeRawIO(BaseRawIO):
extensions = ['xml', 'dat', 'lfp', 'eeg']
rawmode = 'one-file'
def __init__(self, filename, binary_file=None):
"""raw reader for Neuroscope
Parameters
----------
filename : str, Path
Usually the path of an xml file
        binary_file : str or Path, optional
The binary data file
Supported formats: ['.dat', '.lfp', '.eeg']
        The Neuroscope format is composed of two files: an xml file with metadata and a binary file
in either .dat, .lfp or .eeg format.
For backwards compatibility, we offer three ways of initializing the reader.
Cases:
filename provided with .xml extension:
- If binary_file is provided, it is used as the data file.
- If binary_file is not provided, it tries to find a binary file with the same name and the
supported extensions (.dat, .lfp, .eeg) in that order.
filename provided with empty extension:
- If binary_file is provided, it is used as the data file.
- If binary_file is not provided, it tries to find a binary file with the same name and the
supported extensions (.dat, .lfp, .eeg) in that order.
filename provided with a supported data extension (.dat, .lfp, .eeg):
- It assumes that the XML file has the same name and a .xml extension.
"""
BaseRawIO.__init__(self)
self.filename = filename
self.binary_file = binary_file
def _source_name(self):
return Path(self.filename).stem
def _parse_header(self):
# Load the right paths to xml and data
self._resolve_xml_and_data_paths()
# Parse XML-file
tree = ElementTree.parse(self.xml_file_path)
root = tree.getroot()
acq = root.find('acquisitionSystem')
nbits = int(acq.find('nBits').text)
nb_channel = int(acq.find('nChannels').text)
self._sampling_rate = float(acq.find('samplingRate').text)
voltage_range = float(acq.find('voltageRange').text)
# offset = int(acq.find('offset').text)
amplification = float(acq.find('amplification').text)
# find groups for channels
channel_group = {}
for grp_index, xml_chx in enumerate(
root.find('anatomicalDescription').find('channelGroups').findall('group')):
for xml_rc in xml_chx:
channel_group[int(xml_rc.text)] = grp_index
if nbits == 16:
sig_dtype = 'int16'
gain = voltage_range / (2 ** 16) / amplification / 1000.
# ~ elif nbits==32:
# Not sure if it is int or float
# ~ dt = 'int32'
# ~ gain = voltage_range/2**32/amplification
else:
            raise NotImplementedError("Only 16-bit data is currently supported")
# Extract signal from the data file
self._raw_signals = np.memmap(self.data_file_path, dtype=sig_dtype,
mode='r', offset=0).reshape(-1, nb_channel)
# one unique stream
signal_streams = np.array([('Signals', '0')], dtype=_signal_stream_dtype)
# signals
sig_channels = []
for c in range(nb_channel):
name = 'ch{}grp{}'.format(c, channel_group.get(c, 'none'))
chan_id = str(c)
units = 'mV'
offset = 0.
stream_id = '0'
sig_channels.append((name, chan_id, self._sampling_rate,
sig_dtype, units, gain, offset, stream_id))
sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
# No events
event_channels = []
event_channels = np.array(event_channels, dtype=_event_channel_dtype)
# No spikes
spike_channels = []
spike_channels = np.array(spike_channels, dtype=_spike_channel_dtype)
        # fill everything into the header dict
self.header = {}
self.header['nb_block'] = 1
self.header['nb_segment'] = [1]
self.header['signal_streams'] = signal_streams
self.header['signal_channels'] = sig_channels
self.header['spike_channels'] = spike_channels
self.header['event_channels'] = event_channels
self._generate_minimal_annotations()
def _segment_t_start(self, block_index, seg_index):
return 0.
def _segment_t_stop(self, block_index, seg_index):
t_stop = self._raw_signals.shape[0] / self._sampling_rate
return t_stop
def _get_signal_size(self, block_index, seg_index, stream_index):
assert stream_index == 0
return self._raw_signals.shape[0]
def _get_signal_t_start(self, block_index, seg_index, stream_index):
assert stream_index == 0
return 0.
def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop,
stream_index, channel_indexes):
if channel_indexes is None:
channel_indexes = slice(None)
raw_signals = self._raw_signals[slice(i_start, i_stop), channel_indexes]
return raw_signals
def _resolve_xml_and_data_paths(self):
"""
Resolves XML and data paths from the provided filename and binary_file attributes.
        See the class __init__ for a description of the conditions.
        Based on these conditions, this function updates the self.xml_file_path and self.data_file_path attributes.
"""
supported_extensions = ['.dat', '.lfp', '.eeg']
self.filename = Path(self.filename)
self.binary_file = Path(self.binary_file) if self.binary_file is not None else None
if self.filename.suffix == '.xml':
xml_file_path = self.filename
data_file_path = self.binary_file
elif self.filename.suffix == '':
xml_file_path = self.filename.with_suffix(".xml")
data_file_path = self.binary_file
elif self.filename.suffix in supported_extensions:
xml_file_path = self.filename.with_suffix(".xml")
data_file_path = self.filename
else:
raise KeyError(f"Format {self.filename.suffix} not supported, filename format should be {supported_extensions} or .xml")
if data_file_path is None:
possible_file_paths = (xml_file_path.with_suffix(extension) for extension in supported_extensions)
data_file_path = next((path for path in possible_file_paths if path.is_file()), None)
if data_file_path is None:
raise FileNotFoundError(f"data binary not found for file {xml_file_path.stem} with supported extensions: {supported_extensions}")
assert xml_file_path.is_file(), f"xml file not found at the expected location {xml_file_path}"
assert data_file_path.is_file(), f"binary file not found at the expected location {data_file_path}"
self.xml_file_path = xml_file_path
self.data_file_path = data_file_path
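# A minimal usage sketch (added for illustration; 'session.xml' is a
# hypothetical file name, and the calls assume neo's public BaseRawIO API):
#
#     reader = NeuroScopeRawIO(filename='session.xml')
#     reader.parse_header()
#     chunk = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
#                                           i_start=0, i_stop=1024,
#                                           stream_index=0,
#                                           channel_indexes=None)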
|
7ae9ec30ea3c9bcd92e4338b81b27296e822078d
|
1742b6719b988e5519373002305e31d28b8bd691
|
/sdk/python/pulumi_aws/lex/get_bot.py
|
0931ec2da951118aa586a38d54f4a882b46885d9
|
[
"MPL-2.0",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-aws
|
4f7fdb4a816c5ea357cff2c2e3b613c006e49f1a
|
42b0a0abdf6c14da248da22f8c4530af06e67b98
|
refs/heads/master
| 2023-08-03T23:08:34.520280
| 2023-08-01T18:09:58
| 2023-08-01T18:09:58
| 97,484,940
| 384
| 171
|
Apache-2.0
| 2023-09-14T14:48:40
| 2017-07-17T14:20:33
|
Java
|
UTF-8
|
Python
| false
| false
| 11,890
|
py
|
get_bot.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetBotResult',
'AwaitableGetBotResult',
'get_bot',
'get_bot_output',
]
@pulumi.output_type
class GetBotResult:
"""
A collection of values returned by getBot.
"""
def __init__(__self__, arn=None, checksum=None, child_directed=None, created_date=None, description=None, detect_sentiment=None, enable_model_improvements=None, failure_reason=None, id=None, idle_session_ttl_in_seconds=None, last_updated_date=None, locale=None, name=None, nlu_intent_confidence_threshold=None, status=None, version=None, voice_id=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if checksum and not isinstance(checksum, str):
raise TypeError("Expected argument 'checksum' to be a str")
pulumi.set(__self__, "checksum", checksum)
if child_directed and not isinstance(child_directed, bool):
raise TypeError("Expected argument 'child_directed' to be a bool")
pulumi.set(__self__, "child_directed", child_directed)
if created_date and not isinstance(created_date, str):
raise TypeError("Expected argument 'created_date' to be a str")
pulumi.set(__self__, "created_date", created_date)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if detect_sentiment and not isinstance(detect_sentiment, bool):
raise TypeError("Expected argument 'detect_sentiment' to be a bool")
pulumi.set(__self__, "detect_sentiment", detect_sentiment)
if enable_model_improvements and not isinstance(enable_model_improvements, bool):
raise TypeError("Expected argument 'enable_model_improvements' to be a bool")
pulumi.set(__self__, "enable_model_improvements", enable_model_improvements)
if failure_reason and not isinstance(failure_reason, str):
raise TypeError("Expected argument 'failure_reason' to be a str")
pulumi.set(__self__, "failure_reason", failure_reason)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if idle_session_ttl_in_seconds and not isinstance(idle_session_ttl_in_seconds, int):
raise TypeError("Expected argument 'idle_session_ttl_in_seconds' to be a int")
pulumi.set(__self__, "idle_session_ttl_in_seconds", idle_session_ttl_in_seconds)
if last_updated_date and not isinstance(last_updated_date, str):
raise TypeError("Expected argument 'last_updated_date' to be a str")
pulumi.set(__self__, "last_updated_date", last_updated_date)
if locale and not isinstance(locale, str):
raise TypeError("Expected argument 'locale' to be a str")
pulumi.set(__self__, "locale", locale)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if nlu_intent_confidence_threshold and not isinstance(nlu_intent_confidence_threshold, float):
raise TypeError("Expected argument 'nlu_intent_confidence_threshold' to be a float")
pulumi.set(__self__, "nlu_intent_confidence_threshold", nlu_intent_confidence_threshold)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if version and not isinstance(version, str):
raise TypeError("Expected argument 'version' to be a str")
pulumi.set(__self__, "version", version)
if voice_id and not isinstance(voice_id, str):
raise TypeError("Expected argument 'voice_id' to be a str")
pulumi.set(__self__, "voice_id", voice_id)
@property
@pulumi.getter
def arn(self) -> str:
"""
ARN of the bot.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def checksum(self) -> str:
"""
Checksum of the bot used to identify a specific revision of the bot's `$LATEST` version.
"""
return pulumi.get(self, "checksum")
@property
@pulumi.getter(name="childDirected")
def child_directed(self) -> bool:
"""
If this Amazon Lex Bot is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA.
"""
return pulumi.get(self, "child_directed")
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> str:
"""
Date that the bot was created.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter
def description(self) -> str:
"""
Description of the bot.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="detectSentiment")
def detect_sentiment(self) -> bool:
"""
When set to true user utterances are sent to Amazon Comprehend for sentiment analysis.
"""
return pulumi.get(self, "detect_sentiment")
@property
@pulumi.getter(name="enableModelImprovements")
def enable_model_improvements(self) -> bool:
"""
Set to true if natural language understanding improvements are enabled.
"""
return pulumi.get(self, "enable_model_improvements")
@property
@pulumi.getter(name="failureReason")
def failure_reason(self) -> str:
"""
If the `status` is `FAILED`, the reason why the bot failed to build.
"""
return pulumi.get(self, "failure_reason")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="idleSessionTtlInSeconds")
def idle_session_ttl_in_seconds(self) -> int:
"""
The maximum time in seconds that Amazon Lex retains the data gathered in a conversation.
"""
return pulumi.get(self, "idle_session_ttl_in_seconds")
@property
@pulumi.getter(name="lastUpdatedDate")
def last_updated_date(self) -> str:
"""
Date that the bot was updated.
"""
return pulumi.get(self, "last_updated_date")
@property
@pulumi.getter
def locale(self) -> str:
"""
Target locale for the bot. Any intent used in the bot must be compatible with the locale of the bot.
"""
return pulumi.get(self, "locale")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the bot, case sensitive.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nluIntentConfidenceThreshold")
def nlu_intent_confidence_threshold(self) -> float:
"""
The threshold where Amazon Lex will insert the AMAZON.FallbackIntent, AMAZON.KendraSearchIntent, or both when returning alternative intents in a PostContent or PostText response. AMAZON.FallbackIntent and AMAZON.KendraSearchIntent are only inserted if they are configured for the bot.
"""
return pulumi.get(self, "nlu_intent_confidence_threshold")
@property
@pulumi.getter
def status(self) -> str:
"""
Status of the bot.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def version(self) -> Optional[str]:
"""
Version of the bot. For a new bot, the version is always `$LATEST`.
"""
return pulumi.get(self, "version")
@property
@pulumi.getter(name="voiceId")
def voice_id(self) -> str:
"""
Amazon Polly voice ID that the Amazon Lex Bot uses for voice interactions with the user.
"""
return pulumi.get(self, "voice_id")
class AwaitableGetBotResult(GetBotResult):
# pylint: disable=using-constant-test
def __await__(self):
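        # The unreachable `yield` below makes __await__ a generator function,
        # so calling it returns a generator (a valid awaitable iterator); the
        # `return` then delivers the result via StopIteration.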
if False:
yield self
return GetBotResult(
arn=self.arn,
checksum=self.checksum,
child_directed=self.child_directed,
created_date=self.created_date,
description=self.description,
detect_sentiment=self.detect_sentiment,
enable_model_improvements=self.enable_model_improvements,
failure_reason=self.failure_reason,
id=self.id,
idle_session_ttl_in_seconds=self.idle_session_ttl_in_seconds,
last_updated_date=self.last_updated_date,
locale=self.locale,
name=self.name,
nlu_intent_confidence_threshold=self.nlu_intent_confidence_threshold,
status=self.status,
version=self.version,
voice_id=self.voice_id)
def get_bot(name: Optional[str] = None,
version: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBotResult:
"""
Provides details about a specific Amazon Lex Bot.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
order_flowers_bot = aws.lex.get_bot(name="OrderFlowers",
version="$LATEST")
```
:param str name: Name of the bot. The name is case sensitive.
:param str version: Version or alias of the bot.
"""
__args__ = dict()
__args__['name'] = name
__args__['version'] = version
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:lex/getBot:getBot', __args__, opts=opts, typ=GetBotResult).value
return AwaitableGetBotResult(
arn=pulumi.get(__ret__, 'arn'),
checksum=pulumi.get(__ret__, 'checksum'),
child_directed=pulumi.get(__ret__, 'child_directed'),
created_date=pulumi.get(__ret__, 'created_date'),
description=pulumi.get(__ret__, 'description'),
detect_sentiment=pulumi.get(__ret__, 'detect_sentiment'),
enable_model_improvements=pulumi.get(__ret__, 'enable_model_improvements'),
failure_reason=pulumi.get(__ret__, 'failure_reason'),
id=pulumi.get(__ret__, 'id'),
idle_session_ttl_in_seconds=pulumi.get(__ret__, 'idle_session_ttl_in_seconds'),
last_updated_date=pulumi.get(__ret__, 'last_updated_date'),
locale=pulumi.get(__ret__, 'locale'),
name=pulumi.get(__ret__, 'name'),
nlu_intent_confidence_threshold=pulumi.get(__ret__, 'nlu_intent_confidence_threshold'),
status=pulumi.get(__ret__, 'status'),
version=pulumi.get(__ret__, 'version'),
voice_id=pulumi.get(__ret__, 'voice_id'))
@_utilities.lift_output_func(get_bot)
def get_bot_output(name: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBotResult]:
"""
Provides details about a specific Amazon Lex Bot.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
order_flowers_bot = aws.lex.get_bot(name="OrderFlowers",
version="$LATEST")
```
:param str name: Name of the bot. The name is case sensitive.
:param str version: Version or alias of the bot.
"""
...
|
1982d18d2655c6d9240c14fa3db6843c26726bc5
|
5396a46275e52bfc972f05097e925742d5bbf2d1
|
/_2017/eoc/chapter1.py
|
0f3d18e6a17e6ad0b137819d06f285b96dc4641d
|
[
"MIT"
] |
permissive
|
3b1b/videos
|
6ab0e4fe0fb07d15b5455f8726131a880437c42c
|
e841b1410fdda2d3bddb7cfa12ce070a3b66a026
|
refs/heads/master
| 2023-08-29T01:37:23.424512
| 2023-08-16T03:35:03
| 2023-08-16T03:35:03
| 325,873,493
| 4,601
| 1,868
| null | 2023-03-30T08:15:37
| 2020-12-31T21:07:33
|
Python
|
UTF-8
|
Python
| false
| false
| 87,436
|
py
|
chapter1.py
|
from manim_imports_ext import *
from _2017.eoc.chapter2 import Car, MoveCar
class Eoc1Thumbnail(GraphScene):
CONFIG = {
}
def construct(self):
title = OldTexText(
"The Essence of\\\\Calculus",
tex_to_color_map={
"\\emph{you}": YELLOW,
},
)
subtitle = OldTexText("Chapter 1")
subtitle.match_width(title)
subtitle.scale(0.75)
subtitle.next_to(title, DOWN)
# title.add(subtitle)
title.set_width(FRAME_WIDTH - 2)
title.to_edge(UP)
title.set_stroke(BLACK, 8, background=True)
# answer = OldTexText("...yes")
# answer.to_edge(DOWN)
axes = Axes(
x_range=(-1, 5),
y_range=(-1, 5),
y_axis_config={
"include_tip": False,
},
x_axis_config={
"unit_size": 2,
},
)
axes.set_width(FRAME_WIDTH - 1)
axes.center().to_edge(DOWN)
axes.shift(DOWN)
self.x_axis = axes.x_axis
self.y_axis = axes.y_axis
self.axes = axes
graph = self.get_graph(self.func)
rects = self.get_riemann_rectangles(
graph,
x_min=0, x_max=4,
dx=0.2,
)
rects.set_submobject_colors_by_gradient(BLUE, GREEN)
rects.set_opacity(1)
rects.set_stroke(BLACK, 1)
self.add(axes)
self.add(graph)
self.add(rects)
# self.add(title)
# self.add(answer)
    def func(self, x):
return 0.35 * ((x - 2)**3 - 2 * (x - 2) + 6)
class CircleScene(PiCreatureScene):
CONFIG = {
"radius" : 1.5,
"stroke_color" : WHITE,
"fill_color" : BLUE_E,
"fill_opacity" : 0.75,
"radial_line_color" : MAROON_B,
"outer_ring_color" : GREEN_E,
"ring_colors" : [BLUE, GREEN],
"dR" : 0.1,
"dR_color" : YELLOW,
"unwrapped_tip" : ORIGIN,
"include_pi_creature" : False,
"circle_corner" : UP+LEFT,
}
def setup(self):
PiCreatureScene.setup(self)
self.circle = Circle(
radius = self.radius,
stroke_color = self.stroke_color,
fill_color = self.fill_color,
fill_opacity = self.fill_opacity,
)
self.circle.to_corner(self.circle_corner, buff = MED_LARGE_BUFF)
self.radius_line = Line(
self.circle.get_center(),
self.circle.get_right(),
color = self.radial_line_color
)
self.radius_brace = Brace(self.radius_line, buff = SMALL_BUFF)
self.radius_label = self.radius_brace.get_text("$R$", buff = SMALL_BUFF)
self.radius_group = VGroup(
self.radius_line, self.radius_brace, self.radius_label
)
self.add(self.circle, *self.radius_group)
if not self.include_pi_creature:
self.remove(self.get_primary_pi_creature())
def introduce_circle(self, added_anims = []):
self.remove(self.circle)
self.play(
ShowCreation(self.radius_line),
GrowFromCenter(self.radius_brace),
Write(self.radius_label),
)
self.circle.set_fill(opacity = 0)
self.play(
Rotate(
self.radius_line, 2*np.pi-0.001,
about_point = self.circle.get_center(),
),
ShowCreation(self.circle),
*added_anims,
run_time = 2
)
self.play(
self.circle.set_fill, self.fill_color, self.fill_opacity,
Animation(self.radius_line),
Animation(self.radius_brace),
Animation(self.radius_label),
)
def increase_radius(self, numerical_dr = True, run_time = 2):
radius_mobs = VGroup(
self.radius_line, self.radius_brace, self.radius_label
)
nudge_line = Line(
self.radius_line.get_right(),
self.radius_line.get_right() + self.dR*RIGHT,
color = self.dR_color
)
nudge_arrow = Arrow(
nudge_line.get_center() + 0.5*RIGHT+DOWN,
nudge_line.get_center(),
color = YELLOW,
buff = SMALL_BUFF,
tip_length = 0.2,
)
if numerical_dr:
nudge_label = OldTex("%.01f"%self.dR)
else:
nudge_label = OldTex("dr")
nudge_label.set_color(self.dR_color)
nudge_label.scale(0.75)
nudge_label.next_to(nudge_arrow.get_start(), DOWN)
radius_mobs.add(nudge_line, nudge_arrow, nudge_label)
outer_ring = self.get_outer_ring()
self.play(
FadeIn(outer_ring),
ShowCreation(nudge_line),
ShowCreation(nudge_arrow),
Write(nudge_label),
run_time = run_time/2.
)
self.wait(run_time/2.)
self.nudge_line = nudge_line
self.nudge_arrow = nudge_arrow
self.nudge_label = nudge_label
self.outer_ring = outer_ring
return outer_ring
def get_ring(self, radius, dR, color = GREEN):
ring = Circle(radius = radius + dR).center()
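        # Trace the outer circle, then append the inner circle with reversed
        # orientation so the fill leaves an annular (ring-shaped) region.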
inner_ring = Circle(radius = radius)
inner_ring.rotate(np.pi, RIGHT)
ring.append_vectorized_mobject(inner_ring)
ring.set_stroke(width = 0)
ring.set_fill(color)
ring.move_to(self.circle)
ring.R = radius
ring.dR = dR
return ring
def get_rings(self, **kwargs):
dR = kwargs.get("dR", self.dR)
colors = kwargs.get("colors", self.ring_colors)
radii = np.arange(0, self.radius, dR)
colors = color_gradient(colors, len(radii))
rings = VGroup(*[
self.get_ring(radius, dR = dR, color = color)
for radius, color in zip(radii, colors)
])
return rings
def get_outer_ring(self):
return self.get_ring(
radius = self.radius, dR = self.dR,
color = self.outer_ring_color
)
def unwrap_ring(self, ring, **kwargs):
self.unwrap_rings(ring, **kwargs)
def unwrap_rings(self, *rings, **kwargs):
added_anims = kwargs.get("added_anims", [])
rings = VGroup(*rings)
unwrapped = VGroup(*[
self.get_unwrapped(ring, **kwargs)
for ring in rings
])
self.play(
rings.rotate, np.pi/2,
rings.next_to, unwrapped.get_bottom(), UP,
run_time = 2,
path_arc = np.pi/2,
)
self.play(
Transform(rings, unwrapped, run_time = 3),
*added_anims
)
def get_unwrapped(self, ring, to_edge = LEFT, **kwargs):
R = ring.R
R_plus_dr = ring.R + ring.dR
n_anchors = ring.get_num_curves()
result = VMobject()
result.set_points_as_corners([
interpolate(np.pi*R_plus_dr*LEFT, np.pi*R_plus_dr*RIGHT, a)
            for a in np.linspace(0, 1, n_anchors // 2)
]+[
interpolate(np.pi*R*RIGHT+ring.dR*UP, np.pi*R*LEFT+ring.dR*UP, a)
            for a in np.linspace(0, 1, n_anchors // 2)
])
result.set_style_data(
stroke_color = ring.get_stroke_color(),
stroke_width = ring.get_stroke_width(),
fill_color = ring.get_fill_color(),
fill_opacity = ring.get_fill_opacity(),
)
result.move_to(self.unwrapped_tip, aligned_edge = DOWN)
result.shift(R_plus_dr*DOWN)
if to_edge is not None:
result.to_edge(to_edge)
return result
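    # Note on the approximation this unwrapping illustrates: an annulus of
    # inner radius R and thickness dR has exact area
    # pi*(R + dR)**2 - pi*R**2 = 2*pi*R*dR + pi*dR**2, so for small dR it is
    # well approximated by the nearly rectangular 2*pi*R-by-dR strip built above.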
def create_pi_creature(self):
self.pi_creature = Randolph(color = BLUE_C)
self.pi_creature.to_corner(DOWN+LEFT)
return self.pi_creature
#############
class Chapter1OpeningQuote(OpeningQuote):
CONFIG = {
"quote" : [
"""The art of doing mathematics is finding
that """, "special case",
"""that contains all the
germs of generality."""
],
"quote_arg_separator" : " ",
"highlighted_quote_terms" : {
"special case" : BLUE
},
"author" : "David Hilbert",
}
class Introduction(TeacherStudentsScene):
def construct(self):
self.show_series()
self.show_many_facts()
self.invent_calculus()
def show_series(self):
series = VideoSeries()
series.to_edge(UP)
this_video = series[0]
this_video.set_color(YELLOW)
this_video.save_state()
this_video.set_fill(opacity = 0)
this_video.center()
this_video.set_height(FRAME_HEIGHT)
self.this_video = this_video
words = OldTexText(
"Welcome to \\\\",
"Essence of calculus"
)
words.set_color_by_tex("Essence of calculus", YELLOW)
self.teacher.change_mode("happy")
self.play(
FadeIn(
series,
lag_ratio = 0.5,
run_time = 2
),
Blink(self.get_teacher())
)
self.teacher_says(words, target_mode = "hooray")
self.play_student_changes(
*["hooray"]*3,
look_at = series[1].get_left(),
added_anims = [
ApplyMethod(this_video.restore, run_time = 3),
]
)
self.play(*[
ApplyMethod(
video.shift, 0.5*video.get_height()*DOWN,
run_time = 3,
rate_func = squish_rate_func(
there_and_back, alpha, alpha+0.3
)
)
for video, alpha in zip(series, np.linspace(0, 0.7, len(series)))
]+[
Animation(self.teacher.bubble),
Animation(self.teacher.bubble.content),
])
essence_words = words.get_part_by_tex("Essence").copy()
self.play(
FadeOut(self.teacher.bubble),
FadeOut(self.teacher.bubble.content),
essence_words.next_to, series, DOWN,
*[
ApplyMethod(pi.change_mode, "pondering")
for pi in self.get_pi_creatures()
]
)
self.wait(3)
self.series = series
self.essence_words = essence_words
def show_many_facts(self):
rules = list(it.starmap(Tex, [
("{d(", "x", "^2)", "\\over \\,", "dx}", "=", "2", "x"),
(
"d(", "f", "g", ")", "=",
"f", "dg", "+", "g", "df",
),
(
"F(x)", "=", "\\int_0^x",
"\\frac{dF}{dg}(t)\\,", "dt"
),
(
"f(x)", "=", "\\sum_{n = 0}^\\infty",
"f^{(n)}(a)", "\\frac{(x-a)^n}{n!}"
),
]))
video_indices = [2, 3, 7, 10]
tex_to_color = [
("x", BLUE),
("f", BLUE),
("df", BLUE),
("g", YELLOW),
("dg", YELLOW),
("f(x)", BLUE),
( "f^{(n)}(a)", BLUE),
]
for rule in rules:
for tex, color in tex_to_color:
rule.set_color_by_tex(tex, color, substring = False)
rule.next_to(self.teacher.get_corner(UP+LEFT), UP)
rule.shift_onto_screen()
index = 1
student = self.get_students()[index]
self.play_student_changes(
"pondering", "sassy", "pondering",
look_at = self.teacher.eyes,
added_anims = [
self.teacher.change_mode, "plain"
]
)
self.wait(2)
self.play(
Write(rules[0]),
self.teacher.change_mode, "raise_right_hand",
)
self.wait()
alt_rules_list = list(rules[1:]) + [VectorizedPoint(self.teacher.eyes.get_top())]
for last_rule, rule, video_index in zip(rules, alt_rules_list, video_indices):
video = self.series[video_index]
self.play(
last_rule.replace, video,
FadeIn(rule),
)
self.play(Animation(rule))
self.wait()
self.play(
self.teacher.change_mode, "happy",
self.teacher.look_at, student.eyes
)
def invent_calculus(self):
student = self.get_students()[1]
creatures = self.get_pi_creatures()
creatures.remove(student)
creature_copies = creatures.copy()
self.remove(creatures)
self.add(creature_copies)
calculus = VGroup(*self.essence_words[-len("calculus"):])
calculus.generate_target()
invent = OldTexText("Invent")
invent_calculus = VGroup(invent, calculus.target)
invent_calculus.arrange(RIGHT, buff = MED_SMALL_BUFF)
invent_calculus.next_to(student, UP, 1.5*LARGE_BUFF)
invent_calculus.shift(RIGHT)
arrow = Arrow(invent_calculus, student)
fader = Rectangle(
width = FRAME_WIDTH,
height = FRAME_HEIGHT,
stroke_width = 0,
fill_color = BLACK,
fill_opacity = 0.5,
)
self.play(
FadeIn(fader),
Animation(student),
Animation(calculus)
)
self.play(
Write(invent),
MoveToTarget(calculus),
student.change_mode, "erm",
student.look_at, calculus
)
self.play(ShowCreation(arrow))
self.wait(2)
class PreviewFrame(Scene):
def construct(self):
frame = Rectangle(height = 9, width = 16, color = WHITE)
frame.set_height(1.5*FRAME_Y_RADIUS)
colors = iter(color_gradient([BLUE, YELLOW], 3))
titles = [
OldTexText("Chapter %d:"%d, s).to_edge(UP).set_color(next(colors))
for d, s in [
(3, "Derivative formulas through geometry"),
(4, "Chain rule, product rule, etc."),
(7, "Limits"),
]
]
title = titles[0]
frame.next_to(title, DOWN)
self.add(frame, title)
self.wait(3)
for next_title in titles[1:]:
self.play(Transform(title, next_title))
self.wait(3)
class ProductRuleDiagram(Scene):
def construct(self):
df = 0.4
dg = 0.2
rect_kwargs = {
"stroke_width" : 0,
"fill_color" : BLUE,
"fill_opacity" : 0.6,
}
rect = Rectangle(width = 4, height = 3, **rect_kwargs)
rect.shift(DOWN)
df_rect = Rectangle(
height = rect.get_height(),
width = df,
**rect_kwargs
)
dg_rect = Rectangle(
height = dg,
width = rect.get_width(),
**rect_kwargs
)
corner_rect = Rectangle(
height = dg,
width = df,
**rect_kwargs
)
d_rects = VGroup(df_rect, dg_rect, corner_rect)
for d_rect, direction in zip(d_rects, [RIGHT, DOWN, RIGHT+DOWN]):
d_rect.next_to(rect, direction, buff = 0)
d_rect.set_fill(YELLOW, 0.75)
corner_pairs = [
(DOWN+RIGHT, UP+RIGHT),
(DOWN+RIGHT, DOWN+LEFT),
(DOWN+RIGHT, DOWN+RIGHT),
]
for d_rect, corner_pair in zip(d_rects, corner_pairs):
line = Line(*[
rect.get_corner(corner)
for corner in corner_pair
])
d_rect.line = d_rect.copy().replace(line, stretch = True)
d_rect.line.set_color(d_rect.get_color())
f_brace = Brace(rect, UP)
g_brace = Brace(rect, LEFT)
df_brace = Brace(df_rect, UP)
dg_brace = Brace(dg_rect, LEFT)
f_label = f_brace.get_text("$f$")
g_label = g_brace.get_text("$g$")
df_label = df_brace.get_text("$df$")
dg_label = dg_brace.get_text("$dg$")
VGroup(f_label, df_label).set_color(GREEN)
VGroup(g_label, dg_label).set_color(RED)
f_label.generate_target()
g_label.generate_target()
fg_group = VGroup(f_label.target, g_label.target)
fg_group.generate_target()
fg_group.target.arrange(RIGHT, buff = SMALL_BUFF)
fg_group.target.move_to(rect.get_center())
for mob in df_brace, df_label, dg_brace, dg_label:
mob.save_state()
mob.scale(0.01, about_point = rect.get_corner(
mob.get_center() - rect.get_center()
))
self.add(rect)
self.play(
GrowFromCenter(f_brace),
GrowFromCenter(g_brace),
Write(f_label),
Write(g_label),
)
self.play(MoveToTarget(fg_group))
self.play(*[
mob.restore
for mob in (df_brace, df_label, dg_brace, dg_label)
] + [
ReplacementTransform(d_rect.line, d_rect)
for d_rect in d_rects
])
self.wait()
self.play(
d_rects.space_out_submobjects, 1.2,
MaintainPositionRelativeTo(
VGroup(df_brace, df_label),
df_rect
),
MaintainPositionRelativeTo(
VGroup(dg_brace, dg_label),
dg_rect
),
)
self.wait()
deriv = OldTex(
"d(", "fg", ")", "=",
"f", "\\cdot", "dg", "+", "g", "\\cdot", "df"
)
deriv.to_edge(UP)
alpha_iter = iter(np.linspace(0, 0.5, 5))
self.play(*[
ApplyMethod(
mob.copy().move_to,
deriv.get_part_by_tex(tex, substring = False),
rate_func = squish_rate_func(smooth, alpha, alpha+0.5)
)
for mob, tex in [
(fg_group, "fg"),
(f_label, "f"),
(dg_label, "dg"),
(g_label, "g"),
(df_label, "df"),
]
for alpha in [next(alpha_iter)]
]+[
Write(VGroup(*it.chain(*[
deriv.get_parts_by_tex(tex, substring = False)
for tex in ("d(", ")", "=", "\\cdot", "+")
])))
], run_time = 3)
self.wait()
class IntroduceCircle(CircleScene):
CONFIG = {
"include_pi_creature" : True,
"unwrapped_tip" : 2*RIGHT
}
def construct(self):
self.force_skipping()
self.introduce_area()
self.question_area()
self.show_calculus_symbols()
def introduce_area(self):
area = OldTex("\\text{Area}", "=", "\\pi", "R", "^2")
area.next_to(self.pi_creature.get_corner(UP+RIGHT), UP+RIGHT)
self.remove(self.circle, self.radius_group)
self.play(
self.pi_creature.change_mode, "pondering",
self.pi_creature.look_at, self.circle
)
self.introduce_circle()
self.wait()
R_copy = self.radius_label.copy()
self.play(
self.pi_creature.change_mode, "raise_right_hand",
self.pi_creature.look_at, area,
Transform(R_copy, area.get_part_by_tex("R"))
)
self.play(Write(area))
self.remove(R_copy)
self.wait()
self.area = area
def question_area(self):
q_marks = OldTex("???")
q_marks.next_to(self.pi_creature, UP)
rings = VGroup(*reversed(self.get_rings()))
unwrapped_rings = VGroup(*[
self.get_unwrapped(ring, to_edge = None)
for ring in rings
])
unwrapped_rings.arrange(UP, buff = SMALL_BUFF)
unwrapped_rings.move_to(self.unwrapped_tip, UP)
ring_anim_kwargs = {
"run_time" : 3,
"lag_ratio" : 0.5
}
self.play(
Animation(self.area),
Write(q_marks),
self.pi_creature.change_mode, "confused",
self.pi_creature.look_at, self.area,
)
self.wait()
self.play(
FadeIn(rings, **ring_anim_kwargs),
Animation(self.radius_group),
FadeOut(q_marks),
self.pi_creature.change_mode, "thinking"
)
self.wait()
self.play(
rings.rotate, np.pi/2,
rings.move_to, unwrapped_rings.get_top(),
Animation(self.radius_group),
path_arc = np.pi/2,
**ring_anim_kwargs
)
self.play(
Transform(rings, unwrapped_rings, **ring_anim_kwargs),
)
self.wait()
def show_calculus_symbols(self):
ftc = OldTex(
"\\int_0^R", "\\frac{dA}{dr}", "\\,dr",
"=", "A(R)"
)
ftc.shift(2*UP)
self.play(
ReplacementTransform(
self.area.get_part_by_tex("R").copy(),
ftc.get_part_by_tex("int")
),
self.pi_creature.change_mode, "plain"
)
self.wait()
self.play(
ReplacementTransform(
self.area.get_part_by_tex("Area").copy(),
ftc.get_part_by_tex("frac")
),
ReplacementTransform(
self.area.get_part_by_tex("R").copy(),
ftc.get_part_by_tex("\\,dr")
)
)
self.wait()
self.play(Write(VGroup(*ftc[-2:])))
self.wait(2)
class ApproximateOneRing(CircleScene, ReconfigurableScene):
CONFIG = {
"num_lines" : 24,
"ring_index_proportion" : 0.6,
"ring_shift_val" : 6*RIGHT,
"ring_colors" : [BLUE, GREEN_E],
"unwrapped_tip" : 2*RIGHT+0.5*UP,
}
def setup(self):
CircleScene.setup(self)
ReconfigurableScene.setup(self)
def construct(self):
self.force_skipping()
self.write_radius_three()
self.try_to_understand_area()
self.slice_into_rings()
self.isolate_one_ring()
self.revert_to_original_skipping_status()
self.straighten_ring_out()
self.force_skipping()
self.approximate_as_rectangle()
def write_radius_three(self):
three = OldTex("3")
three.move_to(self.radius_label)
self.look_at(self.circle)
self.play(Transform(
self.radius_label, three,
path_arc = np.pi
))
self.wait()
def try_to_understand_area(self):
line_sets = [
VGroup(*[
Line(
self.circle.point_from_proportion(alpha),
self.circle.point_from_proportion(func(alpha)),
)
for alpha in np.linspace(0, 1, self.num_lines)
])
for func in [
lambda alpha : 1-alpha,
lambda alpha : (0.5-alpha)%1,
lambda alpha : (alpha + 0.4)%1,
lambda alpha : (alpha + 0.5)%1,
]
]
for lines in line_sets:
lines.set_stroke(BLACK, 2)
lines = line_sets[0]
self.play(
ShowCreation(
lines,
run_time = 2,
lag_ratio = 0.5
),
Animation(self.radius_group),
self.pi_creature.change_mode, "maybe"
)
self.wait(2)
for new_lines in line_sets[1:]:
self.play(
Transform(lines, new_lines),
Animation(self.radius_group)
)
self.wait()
self.wait()
self.play(FadeOut(lines), Animation(self.radius_group))
def slice_into_rings(self):
rings = self.get_rings()
rings.set_stroke(BLACK, 1)
self.play(
FadeIn(
rings,
lag_ratio = 0.5,
run_time = 3
),
Animation(self.radius_group),
self.pi_creature.change_mode, "pondering",
self.pi_creature.look_at, self.circle
)
self.wait(2)
for x in range(2):
self.play(
Rotate(rings, np.pi, in_place = True, run_time = 2),
Animation(self.radius_group),
self.pi_creature.change_mode, "happy"
)
self.wait(2)
self.rings = rings
def isolate_one_ring(self):
rings = self.rings
index = int(self.ring_index_proportion*len(rings))
original_ring = rings[index]
ring = original_ring.copy()
radius = Line(ORIGIN, ring.R*RIGHT, color = WHITE)
radius.rotate(np.pi/4)
r_label = OldTex("r")
r_label.next_to(radius.get_center(), UP+LEFT, SMALL_BUFF)
area_q = OldTexText("Area", "?", arg_separator = "")
area_q.set_color(YELLOW)
self.play(
ring.shift, self.ring_shift_val,
original_ring.set_fill, None, 0.25,
Animation(self.radius_group),
)
VGroup(radius, r_label).shift(ring.get_center())
area_q.next_to(ring, RIGHT)
self.play(ShowCreation(radius))
self.play(Write(r_label))
self.wait()
self.play(Write(area_q))
self.wait()
self.play(*[
ApplyMethod(
r.set_fill, YELLOW,
rate_func = squish_rate_func(there_and_back, alpha, alpha+0.15),
run_time = 3
)
for r, alpha in zip(rings, np.linspace(0, 0.85, len(rings)))
]+[
Animation(self.radius_group)
])
self.wait()
self.change_mode("thinking")
self.wait()
self.original_ring = original_ring
self.ring = ring
self.ring_radius_group = VGroup(radius, r_label)
self.area_q = area_q
def straighten_ring_out(self):
ring = self.ring.copy()
trapezoid = OldTexText("Trapezoid?")
rectangle_ish = OldTexText("Rectangle-ish")
for text in trapezoid, rectangle_ish:
text.next_to(
self.pi_creature.get_corner(UP+RIGHT),
DOWN+RIGHT, buff = MED_LARGE_BUFF
)
self.unwrap_ring(ring, to_edge = RIGHT)
self.change_mode("pondering")
self.wait()
self.play(Write(trapezoid))
self.wait()
self.play(trapezoid.shift, DOWN)
strike = Line(
trapezoid.get_left(), trapezoid.get_right(),
stroke_color = RED,
stroke_width = 8
)
self.play(
Write(rectangle_ish),
ShowCreation(strike),
self.pi_creature.change_mode, "happy"
)
self.wait()
self.play(*list(map(FadeOut, [trapezoid, strike])))
self.unwrapped_ring = ring
def approximate_as_rectangle(self):
top_brace, side_brace = [
Brace(
self.unwrapped_ring, vect, buff = SMALL_BUFF,
min_num_quads = 2,
)
for vect in (UP, LEFT)
]
top_brace.scale(self.ring.R/(self.ring.R+self.dR))
side_brace.set_stroke(WHITE, 0.5)
width_label = OldTex("2\\pi", "r")
width_label.next_to(top_brace, UP, SMALL_BUFF)
dr_label = OldTex("dr")
q_marks = OldTex("???")
concrete_dr = OldTex("=0.1")
concrete_dr.submobjects.reverse()
for mob in dr_label, q_marks, concrete_dr:
mob.next_to(side_brace, LEFT, SMALL_BUFF)
dr_label.save_state()
alt_side_brace = side_brace.copy()
alt_side_brace.move_to(ORIGIN, UP+RIGHT)
alt_side_brace.rotate(-np.pi/2)
alt_side_brace.shift(
self.original_ring.get_boundary_point(RIGHT)
)
alt_dr_label = dr_label.copy()
alt_dr_label.next_to(alt_side_brace, UP, SMALL_BUFF)
approx = OldTex("\\approx")
approx.next_to(
self.area_q.get_part_by_tex("Area"),
RIGHT,
align_using_submobjects = True,
)
two_pi_r_dr = VGroup(width_label, dr_label).copy()
two_pi_r_dr.generate_target()
two_pi_r_dr.target.arrange(
RIGHT, buff = SMALL_BUFF, aligned_edge = DOWN
)
two_pi_r_dr.target.next_to(approx, RIGHT, aligned_edge = DOWN)
self.play(GrowFromCenter(top_brace))
self.play(
Write(width_label.get_part_by_tex("pi")),
ReplacementTransform(
self.ring_radius_group[1].copy(),
width_label.get_part_by_tex("r")
)
)
self.wait()
self.play(
GrowFromCenter(side_brace),
Write(q_marks)
)
self.change_mode("confused")
self.wait()
for num_rings in 20, 7:
self.show_alternate_width(num_rings)
self.play(ReplacementTransform(q_marks, dr_label))
self.play(
ReplacementTransform(side_brace.copy(), alt_side_brace),
ReplacementTransform(dr_label.copy(), alt_dr_label),
run_time = 2
)
self.wait()
self.play(
dr_label.next_to, concrete_dr.copy(), LEFT, SMALL_BUFF, DOWN,
Write(concrete_dr, run_time = 2),
self.pi_creature.change_mode, "pondering"
)
self.wait(2)
self.play(
MoveToTarget(two_pi_r_dr),
FadeIn(approx),
self.area_q.get_part_by_tex("?").fade, 1,
)
self.wait()
self.play(
FadeOut(concrete_dr),
dr_label.restore
)
self.show_alternate_width(
40,
transformation_kwargs = {"run_time" : 4},
return_to_original_configuration = False,
)
self.wait(2)
self.look_at(self.circle)
self.play(
ApplyWave(self.rings, amplitude = 0.1),
Animation(self.radius_group),
Animation(alt_side_brace),
Animation(alt_dr_label),
run_time = 3,
lag_ratio = 0.5
)
self.wait(2)
def show_alternate_width(self, num_rings, **kwargs):
self.transition_to_alt_config(
dR = self.radius/num_rings, **kwargs
)
class MoveForwardWithApproximation(TeacherStudentsScene):
def construct(self):
self.teacher_says(
"Move forward with \\\\",
"the", "approximation"
)
self.play_student_changes("hesitant", "erm", "sassy")
self.wait()
words = OldTexText(
"It gets better",
"\\\\ for smaller ",
"$dr$"
)
words.set_color_by_tex("dr", BLUE)
self.teacher_says(words, target_mode = "shruggie")
self.wait(3)
class GraphRectangles(CircleScene, GraphScene):
CONFIG = {
"graph_origin" : 3.25*LEFT+2.5*DOWN,
"x_min" : 0,
"x_max" : 4,
"x_axis_width" : 7,
"x_labeled_nums" : list(range(5)),
"x_axis_label" : "$r$",
"y_min" : 0,
"y_max" : 20,
"y_tick_frequency" : 2.5,
"y_labeled_nums" : list(range(5, 25, 5)),
"y_axis_label" : "",
"exclude_zero_label" : False,
"num_rings_in_ring_sum_start" : 3,
"tick_height" : 0.2,
}
def setup(self):
CircleScene.setup(self)
GraphScene.setup(self)
self.setup_axes()
self.remove(self.axes)
# self.pi_creature.change_mode("pondering")
# self.pi_creature.look_at(self.circle)
# self.add(self.pi_creature)
three = OldTex("3")
three.move_to(self.radius_label)
self.radius_label.save_state()
Transform(self.radius_label, three).update(1)
def construct(self):
self.draw_ring_sum()
self.draw_r_values()
self.unwrap_rings_onto_graph()
self.draw_graph()
self.point_out_approximation()
        self.let_dr_approach_zero()
self.compute_area_under_graph()
self.show_circle_unwrapping()
def draw_ring_sum(self):
rings = self.get_rings()
rings.set_stroke(BLACK, 1)
ring_sum, draw_ring_sum_anims = self.get_ring_sum(rings)
area_label = OldTex(
"\\text{Area}", "\\approx",
"2\\pi", "r", "\\,dr"
)
area_label.set_color_by_tex("r", YELLOW, substring = False)
area_label.next_to(ring_sum, RIGHT, aligned_edge = UP)
area = area_label.get_part_by_tex("Area")
arrow_start = area.get_corner(DOWN+LEFT)
arrows = VGroup(*[
Arrow(
arrow_start,
ring.target.get_boundary_point(
arrow_start - ring.target.get_center()
),
color = ring.get_color()
)
for ring in rings
if ring.target.get_fill_opacity() > 0
])
self.add(rings, self.radius_group)
self.remove(self.circle)
self.wait()
self.play(*draw_ring_sum_anims)
self.play(Write(area_label, run_time = 2))
self.play(ShowCreation(arrows))
self.wait()
self.ring_sum = ring_sum
area_label.add(arrows)
self.area_label = area_label
self.rings = rings
def draw_r_values(self):
values_of_r = OldTexText("Values of ", "$r$")
values_of_r.set_color_by_tex("r", YELLOW)
values_of_r.next_to(
self.x_axis, UP,
buff = 2*LARGE_BUFF,
aligned_edge = LEFT
)
r_ticks = VGroup(*[
Line(
self.coords_to_point(r, -self.tick_height),
self.coords_to_point(r, self.tick_height),
color = YELLOW
)
for r in np.arange(0, 3, 0.1)
])
arrows = VGroup(*[
Arrow(
values_of_r.get_part_by_tex("r").get_bottom(),
tick.get_top(),
buff = SMALL_BUFF,
color = YELLOW,
tip_length = 0.15
)
for tick in (r_ticks[0], r_ticks[-1])
])
first_tick = r_ticks[0].copy()
moving_arrow = arrows[0].copy()
index = 2
dr_brace = Brace(
VGroup(*r_ticks[index:index+2]),
DOWN, buff = SMALL_BUFF
)
dr_label = OldTex("dr")
dr_label.next_to(
dr_brace, DOWN,
buff = SMALL_BUFF,
aligned_edge = LEFT
)
dr_group = VGroup(dr_brace, dr_label)
self.play(
FadeIn(values_of_r),
FadeIn(self.x_axis),
)
self.play(
ShowCreation(moving_arrow),
ShowCreation(first_tick),
)
self.play(Indicate(self.rings[0]))
self.wait()
self.play(
Transform(moving_arrow, arrows[-1]),
ShowCreation(r_ticks, lag_ratio = 0.5),
run_time = 2
)
self.play(Indicate(self.rings[-1]))
self.wait()
self.play(FadeIn(dr_group))
self.wait()
self.play(*list(map(FadeOut, [moving_arrow, values_of_r])))
self.x_axis.add(r_ticks)
self.r_ticks = r_ticks
self.dr_group = dr_group
def unwrap_rings_onto_graph(self):
rings = self.rings
graph = self.get_graph(lambda r : 2*np.pi*r)
flat_graph = self.get_graph(lambda r : 0)
rects, flat_rects = [
self.get_riemann_rectangles(
g, x_min = 0, x_max = 3, dx = self.dR,
start_color = self.rings[0].get_fill_color(),
end_color = self.rings[-1].get_fill_color(),
)
for g in (graph, flat_graph)
]
self.graph, self.flat_rects = graph, flat_rects
transformed_rings = VGroup()
self.ghost_rings = VGroup()
for index, rect, r in zip(it.count(), rects, np.arange(0, 3, 0.1)):
proportion = float(index)/len(rects)
ring_index = int(len(rings)*proportion**0.6)
ring = rings[ring_index]
if ring in transformed_rings:
ring = ring.copy()
transformed_rings.add(ring)
if ring.get_fill_opacity() > 0:
ghost_ring = ring.copy()
ghost_ring.set_fill(opacity = 0.25)
self.add(ghost_ring, ring)
self.ghost_rings.add(ghost_ring)
ring.rect = rect
n_anchors = ring.get_num_curves()
target = VMobject()
target.set_points_as_corners([
interpolate(ORIGIN, DOWN, a)
                for a in np.linspace(0, 1, n_anchors // 2)
]+[
interpolate(DOWN+RIGHT, RIGHT, a)
                for a in np.linspace(0, 1, n_anchors // 2)
])
target.replace(rect, stretch = True)
target.stretch_to_fit_height(2*np.pi*r)
target.move_to(rect, DOWN)
target.set_stroke(BLACK, 1)
target.set_fill(ring.get_fill_color(), 1)
ring.target = target
ring.original_ring = ring.copy()
foreground_animations = list(map(Animation, [self.x_axis, self.area_label]))
example_ring = transformed_rings[2]
self.play(
MoveToTarget(
example_ring,
path_arc = -np.pi/2,
run_time = 2
),
Animation(self.x_axis),
)
self.wait(2)
self.play(*[
MoveToTarget(
ring,
path_arc = -np.pi/2,
run_time = 4,
rate_func = squish_rate_func(smooth, alpha, alpha+0.25)
)
for ring, alpha in zip(
transformed_rings,
np.linspace(0, 0.75, len(transformed_rings))
)
] + foreground_animations)
self.wait()
##Demonstrate height of one rect
highlighted_ring = transformed_rings[6].copy()
original_ring = transformed_rings[6].original_ring
original_ring.move_to(highlighted_ring, RIGHT)
original_ring.set_fill(opacity = 1)
highlighted_ring.save_state()
side_brace = Brace(highlighted_ring, RIGHT)
height_label = side_brace.get_text("2\\pi", "r")
height_label.set_color_by_tex("r", YELLOW)
self.play(
transformed_rings.set_fill, None, 0.2,
Animation(highlighted_ring),
*foreground_animations
)
self.play(
self.dr_group.arrange, DOWN,
self.dr_group.next_to, highlighted_ring,
DOWN, SMALL_BUFF
)
self.wait()
self.play(
GrowFromCenter(side_brace),
Write(height_label)
)
self.wait()
self.play(Transform(highlighted_ring, original_ring))
self.wait()
self.play(highlighted_ring.restore)
self.wait()
self.play(
transformed_rings.set_fill, None, 1,
FadeOut(side_brace),
FadeOut(height_label),
*foreground_animations
)
self.remove(highlighted_ring)
self.wait()
##Rescale
self.play(*[
ApplyMethod(
ring.replace, ring.rect,
method_kwargs = {"stretch" : True}
)
for ring in transformed_rings
] + [
Write(self.y_axis),
FadeOut(self.area_label),
] + foreground_animations)
self.remove(transformed_rings)
self.add(rects)
self.wait()
self.rects = rects
def draw_graph(self):
graph_label = self.get_graph_label(
self.graph, "2\\pi r",
direction = UP+LEFT,
x_val = 2.5,
buff = SMALL_BUFF
)
self.play(ShowCreation(self.graph))
self.play(Write(graph_label))
self.wait()
self.play(*[
Transform(
rect, flat_rect,
run_time = 2,
rate_func = squish_rate_func(
lambda t : 0.1*there_and_back(t),
alpha, alpha+0.5
),
lag_ratio = 0.5
)
for rect, flat_rect, alpha in zip(
self.rects, self.flat_rects,
np.linspace(0, 0.5, len(self.rects))
)
] + list(map(Animation, [self.x_axis, self.graph]))
)
self.wait(2)
def point_out_approximation(self):
rect = self.rects[10]
rect.generate_target()
rect.save_state()
approximation = OldTexText("= Approximation")
approximation.scale(0.8)
group = VGroup(rect.target, approximation)
group.arrange(RIGHT)
group.to_edge(RIGHT)
self.play(
MoveToTarget(rect),
Write(approximation),
)
self.wait(2)
self.play(
rect.restore,
FadeOut(approximation)
)
self.wait()
    def let_dr_approach_zero(self):
thinner_rects_list = [
self.get_riemann_rectangles(
self.graph,
x_min = 0,
x_max = 3,
dx = 1./(10*2**n),
stroke_width = 1./(2**n),
start_color = self.rects[0].get_fill_color(),
end_color = self.rects[-1].get_fill_color(),
)
for n in range(1, 5)
]
self.play(*list(map(FadeOut, [self.r_ticks, self.dr_group])))
self.x_axis.remove(self.r_ticks, *self.r_ticks)
for new_rects in thinner_rects_list:
self.play(
Transform(
self.rects, new_rects,
lag_ratio = 0.5,
run_time = 2
),
Animation(self.axes),
Animation(self.graph),
)
self.wait()
self.play(ApplyWave(
self.rects,
direction = RIGHT,
run_time = 2,
lag_ratio = 0.5,
))
self.wait()
def compute_area_under_graph(self):
formula, formula_with_R = formulas = [
self.get_area_formula(R)
for R in ("3", "R")
]
for mob in formulas:
mob.to_corner(UP+RIGHT, buff = MED_SMALL_BUFF)
brace = Brace(self.rects, RIGHT)
height_label = brace.get_text("$2\\pi \\cdot 3$")
height_label_with_R = brace.get_text("$2\\pi \\cdot R$")
base_line = Line(
self.coords_to_point(0, 0),
self.coords_to_point(3, 0),
color = YELLOW
)
fresh_rings = self.get_rings(dR = 0.025)
fresh_rings.set_stroke(width = 0)
self.radius_label.restore()
VGroup(
fresh_rings, self.radius_group
).to_corner(UP+LEFT, buff = SMALL_BUFF)
self.play(Write(formula.top_line, run_time = 2))
self.play(FocusOn(base_line))
self.play(ShowCreation(base_line))
self.wait()
self.play(
GrowFromCenter(brace),
Write(height_label)
)
self.wait()
self.play(FocusOn(formula))
self.play(Write(formula.mid_line))
self.wait()
self.play(Write(formula.bottom_line))
self.wait(2)
self.play(*list(map(FadeOut, [
self.ghost_rings,
self.ring_sum.tex_mobs
])))
self.play(*list(map(FadeIn, [fresh_rings, self.radius_group])))
self.wait()
self.play(
Transform(formula, formula_with_R),
Transform(height_label, height_label_with_R),
)
self.wait(2)
self.fresh_rings = fresh_rings
def show_circle_unwrapping(self):
rings = self.fresh_rings
rings.rotate(np.pi)
rings.submobjects.reverse()
ghost_rings = rings.copy()
ghost_rings.set_fill(opacity = 0.25)
self.add(ghost_rings, rings, self.radius_group)
unwrapped = VGroup(*[
self.get_unwrapped(ring, to_edge = None)
for ring in rings
])
unwrapped.stretch_to_fit_height(1)
unwrapped.stretch_to_fit_width(2)
unwrapped.move_to(ORIGIN, DOWN)
unwrapped.apply_function(
lambda p : np.dot(p,
np.array([[1, 0, 0], [-1, 1, 0], [0, 0, 1]])
),
maintain_smoothness = False
)
unwrapped.rotate(np.pi/2)
unwrapped.replace(self.rects, stretch = True)
self.play(self.rects.fade, 0.8)
self.play(
Transform(
rings, unwrapped,
run_time = 5,
lag_ratio = 0.5,
),
Animation(self.radius_group)
)
self.wait()
#####
def get_ring_sum(self, rings):
arranged_group = VGroup()
tex_mobs = VGroup()
for ring in rings:
ring.generate_target()
ring.target.set_stroke(width = 0)
for ring in rings[:self.num_rings_in_ring_sum_start]:
plus = OldTex("+")
arranged_group.add(ring.target)
arranged_group.add(plus)
tex_mobs.add(plus)
dots = OldTex("\\vdots")
plus = OldTex("+")
arranged_group.add(dots, plus)
tex_mobs.add(dots, plus)
last_ring = rings[-1]
arranged_group.add(last_ring.target)
arranged_group.arrange(DOWN, buff = SMALL_BUFF)
arranged_group.set_height(FRAME_HEIGHT-1)
arranged_group.to_corner(DOWN+LEFT, buff = MED_SMALL_BUFF)
for mob in tex_mobs:
mob.scale(0.7)
middle_rings = rings[self.num_rings_in_ring_sum_start:-1]
alphas = np.linspace(0, 1, len(middle_rings))
for ring, alpha in zip(middle_rings, alphas):
ring.target.set_fill(opacity = 0)
ring.target.move_to(interpolate(
dots.get_left(), last_ring.target.get_center(), alpha
))
draw_ring_sum_anims = [Write(tex_mobs)]
draw_ring_sum_anims += [
MoveToTarget(
ring,
run_time = 3,
path_arc = -np.pi/3,
rate_func = squish_rate_func(smooth, alpha, alpha+0.8)
)
for ring, alpha in zip(rings, np.linspace(0, 0.2, len(rings)))
]
draw_ring_sum_anims.append(FadeOut(self.radius_group))
ring_sum = VGroup(rings, tex_mobs)
ring_sum.rings = VGroup(*[r.target for r in rings])
ring_sum.tex_mobs = tex_mobs
return ring_sum, draw_ring_sum_anims
def get_area_formula(self, R):
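        # The region under the graph of 2*pi*r from r = 0 to r = R is a
        # triangle with base R and height 2*pi*R, hence
        # Area = (1/2) * R * (2*pi*R) = pi * R**2.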
formula = OldTex(
"\\text{Area}", "&= \\frac{1}{2}", "b", "h",
"\\\\ &=", "\\frac{1}{2}", "(%s)"%R, "(2\\pi \\cdot %s)"%R,
"\\\\ &=", "\\pi ", "%s"%R, "^2"
)
formula.set_color_by_tex("b", GREEN, substring = False)
formula.set_color_by_tex("h", RED, substring = False)
formula.set_color_by_tex("%s"%R, GREEN)
formula.set_color_by_tex("(2\\pi ", RED)
formula.set_color_by_tex("(2\\pi ", RED)
formula.scale(0.8)
formula.top_line = VGroup(*formula[:4])
formula.mid_line = VGroup(*formula[4:8])
formula.bottom_line = VGroup(*formula[8:])
return formula
class ThinkLikeAMathematician(TeacherStudentsScene):
def construct(self):
        pi_R_squared = OldTex("\\pi", "R", "^2")
        pi_R_squared.set_color_by_tex("R", YELLOW)
        pi_R_squared.move_to(self.get_students(), UP)
        pi_R_squared.set_fill(opacity = 0)
        self.play(
            pi_R_squared.shift, 2*UP,
            pi_R_squared.set_fill, None, 1
)
self.play_student_changes(*["hooray"]*3)
self.wait(2)
self.play_student_changes(
*["pondering"]*3,
look_at = self.teacher.eyes,
added_anims = [PiCreatureSays(
self.teacher, "But why did \\\\ that work?"
)]
)
        self.play(FadeOut(pi_R_squared))
self.look_at(2*UP+4*LEFT)
self.wait(5)
class TwoThingsToNotice(TeacherStudentsScene):
def construct(self):
words = OldTexText(
"Two things to \\\\ note about",
"$dr$",
)
words.set_color_by_tex("dr", GREEN)
self.teacher_says(words, run_time = 1)
self.wait(3)
class RecapCircleSolution(GraphRectangles, ReconfigurableScene):
def setup(self):
GraphRectangles.setup(self)
ReconfigurableScene.setup(self)
def construct(self):
self.break_up_circle()
self.show_sum()
self.dr_indicates_spacing()
self.smaller_dr()
self.show_riemann_sum()
self.limiting_riemann_sum()
self.full_precision()
def break_up_circle(self):
self.remove(self.circle)
rings = self.get_rings()
rings.set_stroke(BLACK, 1)
ring_sum, draw_ring_sum_anims = self.get_ring_sum(rings)
hard_problem = OldTexText("Hard problem")
down_arrow = OldTex("\\Downarrow")
sum_words = OldTexText("Sum of many \\\\ small values")
integral_condition = VGroup(hard_problem, down_arrow, sum_words)
integral_condition.arrange(DOWN)
integral_condition.scale(0.8)
integral_condition.to_corner(UP+RIGHT)
self.add(rings, self.radius_group)
self.play(FadeIn(
integral_condition,
lag_ratio = 0.5
))
self.wait()
self.play(*draw_ring_sum_anims)
self.rings = rings
self.integral_condition = integral_condition
def show_sum(self):
visible_rings = [ring for ring in self.rings if ring.get_fill_opacity() > 0]
radii = self.dR*np.arange(len(visible_rings))
radii[-1] = 3-self.dR
radial_lines = VGroup()
for ring in visible_rings:
radius_line = Line(ORIGIN, ring.R*RIGHT, color = YELLOW)
radius_line.rotate(np.pi/4)
radius_line.shift(ring.get_center())
radial_lines.add(radius_line)
approximations = VGroup()
for ring, radius in zip(visible_rings, radii):
label = OldTex(
"\\approx", "2\\pi",
"(%s)"%str(radius), "(%s)"%str(self.dR)
)
label[2].set_color(YELLOW)
label[3].set_color(GREEN)
label.scale(0.75)
label.next_to(ring, RIGHT)
approximations.add(label)
approximations[-1].shift(UP+0.5*LEFT)
area_label = OldTex("2\\pi", "r", "\\, dr")
area_label.set_color_by_tex("r", YELLOW)
area_label.set_color_by_tex("dr", GREEN)
area_label.next_to(approximations, RIGHT, buff = 2*LARGE_BUFF)
arrows = VGroup(*[
Arrow(
area_label.get_left(),
approximation.get_right(),
color = WHITE
)
for approximation in approximations
])
self.play(Write(area_label))
self.play(
ShowCreation(arrows, lag_ratio = 0),
FadeIn(radial_lines),
*[
ReplacementTransform(
area_label.copy(),
VGroup(*approximation[1:])
)
for approximation in approximations
]
)
self.wait()
self.play(Write(VGroup(*[
approximation[0]
for approximation in approximations
])))
self.wait()
self.area_label = area_label
self.area_arrows = arrows
self.approximations = approximations
def dr_indicates_spacing(self):
r_ticks = VGroup(*[
Line(
self.coords_to_point(r, -self.tick_height),
self.coords_to_point(r, self.tick_height),
color = YELLOW
)
for r in np.arange(0, 3, self.dR)
])
index = int(0.75*len(r_ticks))
brace_ticks = VGroup(*r_ticks[index:index+2])
dr_brace = Brace(brace_ticks, UP, buff = SMALL_BUFF)
dr = self.area_label.get_part_by_tex("dr")
dr_copy = dr.copy()
circle = Circle().replace(dr)
circle.scale(1.3)
dr_num = self.approximations[0][-1]
self.play(ShowCreation(circle))
self.play(FadeOut(circle))
self.play(ReplacementTransform(
dr.copy(), dr_num,
run_time = 2,
path_arc = np.pi/2,
))
self.wait()
self.play(FadeIn(self.x_axis))
self.play(Write(r_ticks, run_time = 1))
self.wait()
self.play(
GrowFromCenter(dr_brace),
dr_copy.next_to, dr_brace.copy(), UP
)
self.wait()
self.r_ticks = r_ticks
self.dr_brace_group = VGroup(dr_brace, dr_copy)
def smaller_dr(self):
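        # ReconfigurableScene rebuilds the scene under the alternate CONFIG
        # (a finer ring thickness here) and morphs the mobjects across.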
self.transition_to_alt_config(dR = 0.05)
def show_riemann_sum(self):
graph = self.get_graph(lambda r : 2*np.pi*r)
graph_label = self.get_graph_label(
graph, "2\\pi r",
x_val = 2.5,
direction = UP+LEFT
)
rects = self.get_riemann_rectangles(
graph,
x_min = 0,
x_max = 3,
dx = self.dR
)
self.play(
Write(self.y_axis, run_time = 2),
*list(map(FadeOut, [
self.approximations,
self.area_label,
self.area_arrows,
self.dr_brace_group,
self.r_ticks,
]))
)
self.play(
ReplacementTransform(
self.rings.copy(), rects,
run_time = 2,
lag_ratio = 0.5
),
Animation(self.x_axis),
)
self.play(ShowCreation(graph))
self.play(Write(graph_label))
self.wait()
self.graph = graph
self.graph_label = graph_label
self.rects = rects
def limiting_riemann_sum(self):
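        # Successively halve dx so the rectangles converge on the exact
        # area under 2*pi*r, i.e. the limit of the Riemann sum.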
thinner_rects_list = [
self.get_riemann_rectangles(
self.graph,
x_min = 0,
x_max = 3,
dx = 1./(10*2**n),
stroke_width = 1./(2**n),
start_color = self.rects[0].get_fill_color(),
end_color = self.rects[-1].get_fill_color(),
)
for n in range(1, 4)
]
for new_rects in thinner_rects_list:
self.play(
Transform(
self.rects, new_rects,
lag_ratio = 0.5,
run_time = 2
),
Animation(self.axes),
Animation(self.graph),
)
self.wait()
def full_precision(self):
words = OldTexText("Area under \\\\ a graph")
group = VGroup(OldTex("\\Downarrow"), words)
group.arrange(DOWN)
group.set_color(YELLOW)
group.scale(0.8)
group.next_to(self.integral_condition, DOWN)
arc = Arc(start_angle = 2*np.pi/3, angle = 2*np.pi/3)
arc.scale(2)
arc.add_tip()
arc.add(arc[1].copy().rotate(np.pi, RIGHT))
arc_next_to_group = VGroup(
self.integral_condition[0][0],
words[0]
)
arc.set_height(
arc_next_to_group.get_height()-MED_LARGE_BUFF
)
arc.next_to(arc_next_to_group, LEFT, SMALL_BUFF)
self.play(Write(group))
self.wait()
self.play(ShowCreation(arc))
self.wait()
class ExampleIntegralProblems(PiCreatureScene, GraphScene):
CONFIG = {
"dt" : 0.2,
"t_max" : 7,
"x_max" : 8,
"y_axis_height" : 5.5,
"x_axis_label" : "$t$",
"y_axis_label" : "",
"graph_origin" : 3*DOWN + 4.5*LEFT
}
def construct(self):
self.write_integral_condition()
self.show_car()
self.show_graph()
self.let_dt_approach_zero()
self.show_confusion()
def write_integral_condition(self):
words = OldTexText(
"Hard problem $\\Rightarrow$ Sum of many small values"
)
words.to_edge(UP)
self.play(
Write(words),
self.pi_creature.change_mode, "raise_right_hand"
)
self.wait()
self.words = words
def show_car(self):
car = Car()
start, end = 3*LEFT+UP, 5*RIGHT+UP
car.move_to(start)
line = Line(start, end)
tick_height = MED_SMALL_BUFF
ticks = VGroup(*[
Line(
p+tick_height*UP/2,
p+tick_height*DOWN/2,
color = YELLOW,
stroke_width = 2
)
for t in np.arange(0, self.t_max, self.dt)
for p in [
line.point_from_proportion(smooth(t/self.t_max))
]
])
index = int(len(ticks)/2)
brace_ticks = VGroup(*ticks[index:index+2])
brace = Brace(brace_ticks, UP)
v_dt = OldTex("v(t)", "dt")
v_dt.next_to(brace, UP, SMALL_BUFF)
v_dt.set_color(YELLOW)
v_dt_brace_group = VGroup(brace, v_dt)
self.play(
FadeIn(car),
self.pi_creature.change_mode, "plain"
)
self.play(
MoveCar(car, end),
FadeIn(
ticks,
lag_ratio=1,
rate_func=linear,
),
ShowCreation(line),
FadeIn(
v_dt_brace_group,
rate_func = squish_rate_func(smooth, 0.6, 0.8)
),
run_time = self.t_max
)
self.wait()
for mob in v_dt:
self.play(Indicate(mob))
self.wait(2)
self.v_dt_brace_group = v_dt_brace_group
self.line = line
self.ticks = ticks
self.car = car
def show_graph(self):
self.setup_axes()
self.remove(self.axes)
s_graph = self.get_graph(
lambda t : 1.8*self.y_max*smooth(t/self.t_max)
)
v_graph = self.get_derivative_graph(s_graph)
rects = self.get_riemann_rectangles(
v_graph,
x_min = 0,
x_max = self.t_max,
dx = self.dt
)
rects.set_fill(opacity = 0.5)
pre_rects = rects.copy()
pre_rects.rotate(-np.pi/2)
for index, pre_rect in enumerate(pre_rects):
            ti1 = len(self.ticks)*index//len(pre_rects)
ti2 = min(ti1+1, len(self.ticks)-1)
tick_pair = VGroup(self.ticks[ti1], self.ticks[ti2])
pre_rect.stretch_to_fit_width(tick_pair.get_width())
pre_rect.move_to(tick_pair)
special_rect = rects[int(0.6*len(rects))]
brace = Brace(special_rect, LEFT, buff = 0)
v_dt_brace_group_copy = self.v_dt_brace_group.copy()
start_brace, (v_t, dt) = v_dt_brace_group_copy
self.play(
FadeIn(
pre_rects,
run_time = 2,
lag_ratio = 0.5
),
Animation(self.ticks)
)
self.play(
ReplacementTransform(
pre_rects, rects,
run_time = 3,
lag_ratio = 0.5
),
Animation(self.ticks),
Write(self.axes, run_time = 1)
)
self.play(ShowCreation(v_graph))
self.change_mode("pondering")
self.wait()
self.play(
v_t.next_to, brace, LEFT, SMALL_BUFF,
dt.next_to, special_rect, DOWN,
special_rect.set_fill, None, 1,
ReplacementTransform(start_brace, brace),
)
self.wait(3)
self.v_graph = v_graph
self.rects = rects
self.v_dt_brace_group_copy = v_dt_brace_group_copy
def let_dt_approach_zero(self):
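        # Shrinking dt makes the sum of v(t)*dt bars approach the area
        # under the velocity graph, i.e. the total distance traveled.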
thinner_rects_list = [
self.get_riemann_rectangles(
self.v_graph,
x_min = 0,
x_max = self.t_max,
dx = self.dt/(2**n),
stroke_width = 1./(2**n)
)
for n in range(1, 4)
]
self.play(
self.rects.set_fill, None, 1,
Animation(self.x_axis),
FadeOut(self.v_dt_brace_group_copy),
)
self.change_mode("thinking")
self.wait()
for thinner_rects in thinner_rects_list:
self.play(
Transform(
self.rects, thinner_rects,
run_time = 2,
lag_ratio = 0.5
)
)
self.wait()
def show_confusion(self):
randy = Randolph(color = BLUE_C)
randy.to_corner(DOWN+LEFT)
randy.to_edge(LEFT, buff = MED_SMALL_BUFF)
self.play(FadeIn(randy))
self.play(
randy.change_mode, "confused",
randy.look_at, self.rects
)
self.play(
self.pi_creature.change_mode, "confused",
self.pi_creature.look_at, randy.eyes
)
self.play(Blink(randy))
self.wait()
class MathematicianPonderingAreaUnderDifferentCurves(PiCreatureScene):
def construct(self):
self.play(
self.pi_creature.change_mode, "raise_left_hand",
self.pi_creature.look, UP+LEFT
)
self.wait(4)
self.play(
self.pi_creature.change_mode, "raise_right_hand",
self.pi_creature.look, UP+RIGHT
)
self.wait(4)
self.play(
self.pi_creature.change_mode, "pondering",
self.pi_creature.look, UP+LEFT
)
self.wait(2)
def create_pi_creature(self):
self.pi_creature = Randolph(color = BLUE_C)
self.pi_creature.to_edge(DOWN)
return self.pi_creature
class AreaUnderParabola(GraphScene):
CONFIG = {
"x_max" : 4,
"x_labeled_nums" : list(range(-1, 5)),
"y_min" : 0,
"y_max" : 15,
"y_tick_frequency" : 2.5,
"y_labeled_nums" : list(range(5, 20, 5)),
"n_rect_iterations" : 6,
"default_right_x" : 3,
"func" : lambda x : x**2,
"graph_label_tex" : "x^2",
"graph_label_x_val" : 3.8,
}
def construct(self):
self.setup_axes()
self.show_graph()
self.show_area()
self.ask_about_area()
self.show_confusion()
self.show_variable_endpoint()
self.name_integral()
def show_graph(self):
graph = self.get_graph(self.func)
graph_label = self.get_graph_label(
graph, self.graph_label_tex,
direction = LEFT,
x_val = self.graph_label_x_val,
)
self.play(ShowCreation(graph))
self.play(Write(graph_label))
self.wait()
self.graph = graph
self.graph_label = graph_label
def show_area(self):
dx_list = [0.25/(2**n) for n in range(self.n_rect_iterations)]
rect_lists = [
self.get_riemann_rectangles(
self.graph,
x_min = 0,
x_max = self.default_right_x,
dx = dx,
stroke_width = 4*dx,
)
for dx in dx_list
]
rects = rect_lists[0]
foreground_mobjects = [self.axes, self.graph]
self.play(
DrawBorderThenFill(
rects,
run_time = 2,
rate_func = smooth,
lag_ratio = 0.5,
),
*list(map(Animation, foreground_mobjects))
)
self.wait()
for new_rects in rect_lists[1:]:
self.play(
Transform(
rects, new_rects,
lag_ratio = 0.5,
),
*list(map(Animation, foreground_mobjects))
)
self.wait()
self.rects = rects
self.dx = dx_list[-1]
self.foreground_mobjects = foreground_mobjects
def ask_about_area(self):
rects = self.rects
question = OldTexText("Area?")
question.move_to(rects.get_top(), DOWN)
        mid_rect = rects[2*len(rects)//3]
arrow = Arrow(question.get_bottom(), mid_rect.get_center())
v_lines = VGroup(*[
DashedLine(
FRAME_HEIGHT*UP, ORIGIN,
color = RED
).move_to(self.coords_to_point(x, 0), DOWN)
for x in (0, self.default_right_x)
])
self.play(
Write(question),
ShowCreation(arrow)
)
self.wait()
self.play(ShowCreation(v_lines, run_time = 2))
self.wait()
self.foreground_mobjects += [question, arrow]
self.question = question
self.question_arrow = arrow
self.v_lines = v_lines
def show_confusion(self):
morty = Mortimer()
morty.to_corner(DOWN+RIGHT)
self.play(FadeIn(morty))
self.play(
morty.change_mode, "confused",
morty.look_at, self.question,
)
self.play(morty.look_at, self.rects.get_bottom())
self.play(Blink(morty))
self.play(morty.look_at, self.question)
self.wait()
self.play(Blink(morty))
self.play(FadeOut(morty))
def show_variable_endpoint(self):
triangle = RegularPolygon(
n = 3,
start_angle = np.pi/2,
stroke_width = 0,
fill_color = WHITE,
fill_opacity = 1,
)
triangle.set_height(0.25)
triangle.move_to(self.v_lines[1].get_bottom(), UP)
x_label = OldTex("x")
x_label.next_to(triangle, DOWN)
self.right_point_slider = VGroup(triangle, x_label)
A_func = OldTex("A(x)")
A_func.move_to(self.question, DOWN)
self.play(FadeOut(self.x_axis.numbers))
self.x_axis.remove(*self.x_axis.numbers)
self.foreground_mobjects.remove(self.axes)
self.play(DrawBorderThenFill(self.right_point_slider))
self.move_right_point_to(2)
self.wait()
self.move_right_point_to(self.default_right_x)
self.wait()
self.play(ReplacementTransform(self.question, A_func))
self.wait()
self.A_func = A_func
def name_integral(self):
f_tex = "$%s$"%self.graph_label_tex
words = OldTexText("``Integral'' of ", f_tex)
words.set_color_by_tex(f_tex, self.graph_label.get_color())
brace = Brace(self.A_func, UP)
words.next_to(brace, UP)
self.play(
Write(words),
GrowFromCenter(brace)
)
self.wait()
for x in 4, 2, self.default_right_x:
self.move_right_point_to(x, run_time = 2)
self.integral_words_group = VGroup(brace, words)
####
def move_right_point_to(self, target_x, **kwargs):
v_line = self.v_lines[1]
slider = self.right_point_slider
rects = self.rects
curr_x = self.x_axis.point_to_number(v_line.get_bottom())
group = VGroup(rects, v_line, slider)
def update_group(group, alpha):
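            # Rebuild the rectangles for the interpolated right endpoint on
            # every frame, scaling dx with new_x so the rectangle count stays
            # roughly constant while the region stretches.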
rects, v_line, slider = group
new_x = interpolate(curr_x, target_x, alpha)
new_rects = self.get_riemann_rectangles(
self.graph,
x_min = 0,
x_max = new_x,
dx = self.dx*new_x/3.0,
stroke_width = rects[0].get_stroke_width(),
)
point = self.coords_to_point(new_x, 0)
v_line.move_to(point, DOWN)
slider.move_to(point, UP)
Transform(rects, new_rects).update(1)
return VGroup(rects, v_line, slider)
self.play(
UpdateFromAlphaFunc(
group, update_group,
**kwargs
),
*list(map(Animation, self.foreground_mobjects))
)
class WhoCaresAboutArea(TeacherStudentsScene):
def construct(self):
point = 2*RIGHT+3*UP
self.student_says(
"Who cares!?!", target_mode = "angry",
)
self.play(self.teacher.change_mode, "guilty")
self.wait()
self.play(
RemovePiCreatureBubble(self.students[1]),
self.teacher.change_mode, "raise_right_hand",
self.teacher.look_at, point
)
self.play_student_changes(
*["pondering"]*3,
look_at = point,
added_anims = [self.teacher.look_at, point]
)
self.wait(3)
class PlayWithThisIdea(TeacherStudentsScene):
def construct(self):
self.teacher_says(
"Play with", "the", "thought!",
target_mode = "hooray"
)
self.play_student_changes(*["happy"]*3)
self.wait()
equation = OldTex("A(x)", "\\leftrightarrow", "x^2")
equation.set_color_by_tex("x^2", BLUE)
self.teacher_says(equation, target_mode = "sassy")
self.play_student_changes(*["thinking"]*3)
self.wait(2)
class PlayingTowardsDADX(AreaUnderParabola, ReconfigurableScene):
CONFIG = {
"n_rect_iterations" : 6,
"deriv_dx" : 0.2,
"graph_origin" : 2.5*DOWN + 6*LEFT,
}
def setup(self):
AreaUnderParabola.setup(self)
ReconfigurableScene.setup(self)
def construct(self):
self.fast_forward_to_end_of_previous_scene()
self.nudge_x()
self.describe_sliver()
self.shrink_dx()
self.write_dA_dx()
self.dA_remains_a_mystery()
self.write_example_inputs()
self.show_dA_dx_in_detail()
self.show_smaller_x()
def fast_forward_to_end_of_previous_scene(self):
self.force_skipping()
AreaUnderParabola.construct(self)
self.revert_to_original_skipping_status()
def nudge_x(self):
shadow_rects = self.rects.copy()
shadow_rects.set_fill(BLACK, opacity = 0.5)
original_v_line = self.v_lines[1].copy()
right_v_lines = VGroup(original_v_line, self.v_lines[1])
curr_x = self.x_axis.point_to_number(original_v_line.get_bottom())
self.add(original_v_line)
self.foreground_mobjects.append(original_v_line)
self.move_right_point_to(curr_x + self.deriv_dx)
self.play(
FadeIn(shadow_rects),
*list(map(Animation, self.foreground_mobjects))
)
self.shadow_rects = shadow_rects
self.right_v_lines = right_v_lines
def describe_sliver(self):
dx_brace = Brace(self.right_v_lines, DOWN, buff = 0)
dx_label = dx_brace.get_text("$dx$")
dx_group = VGroup(dx_brace, dx_label)
dA_rect = Rectangle(
width = self.right_v_lines.get_width(),
height = self.shadow_rects[-1].get_height(),
stroke_width = 0,
fill_color = YELLOW,
fill_opacity = 0.5,
).move_to(self.right_v_lines, DOWN)
dA_label = OldTex("d", "A")
dA_label.next_to(dA_rect, RIGHT, MED_LARGE_BUFF, UP)
dA_label.set_color(GREEN)
dA_arrow = Arrow(
dA_label.get_bottom()+MED_SMALL_BUFF*DOWN,
dA_rect.get_center(),
buff = 0,
color = WHITE
)
difference_in_area = OldTexText(
"d", "ifference in ", "A", "rea",
arg_separator = ""
)
difference_in_area.set_color_by_tex("d", GREEN)
difference_in_area.set_color_by_tex("A", GREEN)
difference_in_area.scale(0.7)
difference_in_area.next_to(dA_label, UP, MED_SMALL_BUFF, LEFT)
side_brace = Brace(dA_rect, LEFT, buff = 0)
graph_label_copy = self.graph_label.copy()
self.play(
FadeOut(self.right_point_slider),
FadeIn(dx_group)
)
self.play(Indicate(dx_label))
self.wait()
self.play(ShowCreation(dA_arrow))
self.wait()
self.play(Write(dA_label, run_time = 2))
self.wait()
self.play(
ReplacementTransform(dA_label[0].copy(), difference_in_area[0]),
ReplacementTransform(dA_label[1].copy(), difference_in_area[2]),
*list(map(FadeIn, [difference_in_area[1], difference_in_area[3]]))
)
self.wait(2)
self.play(FadeIn(dA_rect), Animation(dA_arrow))
self.play(GrowFromCenter(side_brace))
self.play(
graph_label_copy.set_color, WHITE,
graph_label_copy.next_to, side_brace, LEFT, SMALL_BUFF
)
self.wait()
self.play(Indicate(dx_group))
self.wait()
self.play(FadeOut(difference_in_area))
self.dx_group = dx_group
self.dA_rect = dA_rect
self.dA_label = dA_label
self.graph_label_copy = graph_label_copy
def shrink_dx(self, **kwargs):
self.transition_to_alt_config(
deriv_dx = 0.05,
transformation_kwargs = {"run_time" : 2},
**kwargs
)
def write_dA_dx(self):
f_tex = self.graph_label_tex
equation = OldTex("dA", "\\approx", f_tex, "dx")
equation.to_edge(RIGHT).shift(3*UP)
deriv_equation = OldTex(
"{dA", "\\over \\,", "dx}", "\\approx", f_tex
)
deriv_equation.move_to(equation, UP+LEFT)
for tex_mob in equation, deriv_equation:
tex_mob.set_color_by_tex(
"dA", self.dA_label.get_color()
)
dA = VGroup(self.dA_label[0][0], self.dA_label[1][0])
x_squared = self.graph_label_copy
dx = self.dx_group[1]
self.play(*[
ReplacementTransform(
mob.copy(),
equation.get_part_by_tex(tex),
run_time = 2
)
for mob, tex in [(x_squared, f_tex), (dx, "dx"), (dA, "dA")]
])
self.play(Write(equation.get_part_by_tex("approx")))
self.wait()
for tex, mob in (f_tex, x_squared), ("dx", dx):
self.play(*list(map(Indicate, [
equation.get_part_by_tex(tex),
mob
])))
self.wait(2)
self.play(*[
ReplacementTransform(
equation.get_part_by_tex(tex),
deriv_equation.get_part_by_tex(tex),
run_time = 2,
)
for tex in ("dA", "approx", f_tex, "dx")
] + [
Write(deriv_equation.get_part_by_tex("over"))
])
self.wait(2)
self.shrink_dx(return_to_original_configuration = False)
self.wait()
self.deriv_equation = deriv_equation
def dA_remains_a_mystery(self):
randy = Randolph(color = BLUE_C)
randy.to_corner(DOWN+LEFT)
randy.look_at(self.A_func)
A_circle, dA_circle = [
Circle(color = color).replace(
mob, stretch = True
).scale(1.5)
for mob, color in [(self.A_func, RED), (self.deriv_equation, GREEN)]
]
q_marks = OldTex("???")
q_marks.next_to(A_circle, UP)
self.play(
FadeOut(self.integral_words_group),
FadeIn(randy)
)
self.play(
ShowCreation(A_circle),
randy.change_mode, "confused"
)
self.play(Write(q_marks, run_time = 2))
self.play(Blink(randy))
self.wait()
self.play(
randy.change_mode, "surprised",
randy.look_at, dA_circle,
ReplacementTransform(A_circle, dA_circle)
)
self.play(Blink(randy))
self.wait()
self.play(*list(map(FadeOut, [randy, q_marks, dA_circle])))
def write_example_inputs(self):
d = self.default_right_x
three = OldTex("x =", "%d"%d)
three_plus_dx = OldTex("x = ", "%d.001"%d)
labels_lines_vects = list(zip(
[three, three_plus_dx],
self.right_v_lines,
[LEFT, RIGHT]
))
for label, line, vect in labels_lines_vects:
point = line.get_bottom()
label.next_to(point, DOWN+vect, MED_SMALL_BUFF)
label.shift(LARGE_BUFF*vect)
label.arrow = Arrow(
label, point,
buff = SMALL_BUFF,
color = WHITE,
tip_length = 0.15
)
line_copy = line.copy()
line_copy.set_color(YELLOW)
self.play(
FadeIn(label),
FadeIn(label.arrow),
ShowCreation(line_copy)
)
self.play(FadeOut(line_copy))
self.wait()
self.three = three
self.three_plus_dx = three_plus_dx
def show_dA_dx_in_detail(self):
d = self.default_right_x
expression = OldTex(
"{A(", "%d.001"%d, ") ", "-A(", "%d"%d, ")",
"\\over \\,", "0.001}",
"\\approx", "%d"%d, "^2"
)
expression.scale(0.9)
expression.next_to(
self.deriv_equation, DOWN, MED_LARGE_BUFF
)
expression.to_edge(RIGHT)
self.play(
ReplacementTransform(
self.three_plus_dx.get_part_by_tex("%d.001"%d).copy(),
expression.get_part_by_tex("%d.001"%d)
),
Write(VGroup(
expression.get_part_by_tex("A("),
expression.get_part_by_tex(")"),
)),
)
self.wait()
self.play(
ReplacementTransform(
self.three.get_part_by_tex("%d"%d).copy(),
expression.get_part_by_tex("%d"%d, substring = False)
),
Write(VGroup(
expression.get_part_by_tex("-A("),
expression.get_parts_by_tex(")")[1],
)),
)
self.wait(2)
self.play(
Write(expression.get_part_by_tex("over")),
ReplacementTransform(
expression.get_part_by_tex("%d.001"%d).copy(),
expression.get_part_by_tex("0.001"),
)
)
self.wait()
self.play(
Write(expression.get_part_by_tex("approx")),
ReplacementTransform(
self.graph_label_copy.copy(),
VGroup(*expression[-2:]),
run_time = 2
)
)
self.wait()
def show_smaller_x(self):
self.transition_to_alt_config(
default_right_x = 2,
deriv_dx = 0.04,
transformation_kwargs = {"run_time" : 2}
)
class AlternateAreaUnderCurve(PlayingTowardsDADX):
CONFIG = {
"func" : lambda x : (x-2)**3 - 3*(x-2) + 6,
"graph_label_tex" : "f(x)",
"deriv_dx" : 0.1,
"x_max" : 5,
"x_axis_width" : 11,
"graph_label_x_val" : 4.5,
}
def construct(self):
#Superclass parts to skip
self.force_skipping()
self.setup_axes()
self.show_graph()
self.show_area()
self.ask_about_area()
self.show_confusion()
#Superclass parts to show
self.revert_to_original_skipping_status()
self.show_variable_endpoint()
self.name_integral()
self.nudge_x()
self.describe_sliver()
self.write_dA_dx()
#New animations
self.approximation_improves_for_smaller_dx()
self.name_derivative()
def approximation_improves_for_smaller_dx(self):
color = YELLOW
approx = self.deriv_equation.get_part_by_tex("approx")
dx_to_zero_words = OldTexText(
"Gets better \\\\ as",
"$dx \\to 0$"
)
dx_to_zero_words.set_color_by_tex("dx", color)
dx_to_zero_words.next_to(approx, DOWN, 1.5*LARGE_BUFF)
arrow = Arrow(dx_to_zero_words, approx, color = color)
self.play(
approx.set_color, color,
ShowCreation(arrow),
FadeIn(dx_to_zero_words),
)
self.wait()
self.transition_to_alt_config(
deriv_dx = self.deriv_dx/4.0,
transformation_kwargs = {"run_time" : 2}
)
self.dx_to_zero_words = dx_to_zero_words
self.dx_to_zero_words_arrow = arrow
def name_derivative(self):
deriv_words = OldTexText("``Derivative'' of $A$")
deriv_words.scale(0.9)
deriv_words.to_edge(UP+RIGHT)
moving_group = VGroup(
self.deriv_equation,
self.dx_to_zero_words,
self.dx_to_zero_words_arrow,
)
moving_group.generate_target()
moving_group.target.next_to(deriv_words, DOWN, LARGE_BUFF)
moving_group.target.to_edge(RIGHT)
self.play(
FadeIn(deriv_words),
MoveToTarget(moving_group)
)
dA_dx = VGroup(*self.deriv_equation[:3])
box = Rectangle(color = GREEN)
box.replace(dA_dx, stretch = True)
box.scale(1.3)
brace = Brace(box, UP)
faders = VGroup(
self.dx_to_zero_words[0],
self.dx_to_zero_words_arrow
)
dx_to_zero = self.dx_to_zero_words[1]
self.play(*list(map(FadeIn, [box, brace])))
self.wait()
self.play(
FadeOut(faders),
dx_to_zero.next_to, box, DOWN
)
self.wait()
########
def show_smaller_x(self):
return
def shrink_dx(self, **kwargs):
return
class NextVideoWrapper(Scene):
def construct(self):
rect = Rectangle(height = 9, width = 16)
rect.set_height(1.5*FRAME_Y_RADIUS)
titles = [
OldTexText("Chapter %d:"%d, s)
for d, s in [
(2, "The paradox of the derivative"),
(3, "Derivative formulas through geometry"),
]
]
for title in titles:
title.to_edge(UP)
rect.next_to(VGroup(*titles), DOWN)
self.add(titles[0])
self.play(ShowCreation(rect))
self.wait(3)
self.play(Transform(*titles))
self.wait(3)
class ProblemSolvingTool(TeacherStudentsScene):
def construct(self):
self.teacher_says("""
The derivative is a
problem-solving tool
""")
self.wait(3)
class FundamentalTheorem(Scene):
def construct(self):
words = OldTexText("""
Fundamental theorem of calculus
""")
words.to_edge(UP)
arrow = DoubleArrow(LEFT, RIGHT).shift(2*RIGHT)
deriv = OldTex(
"{dA", "\\over \\,", "dx}", "=", "x^2"
)
deriv.set_color_by_tex("dA", GREEN)
deriv.next_to(arrow, RIGHT)
self.play(ShowCreation(arrow))
self.wait()
self.play(Write(deriv))
self.wait()
self.play(Write(words))
self.wait()
class NextVideos(TeacherStudentsScene):
def construct(self):
series = VideoSeries()
series.to_edge(UP)
this_video = series[0]
this_video.set_color(YELLOW)
self.add(series)
self.teacher_says(
"That's a high-level view"
)
self.wait()
self.play(
RemovePiCreatureBubble(
self.teacher,
target_mode = "raise_right_hand",
look_at = this_video,
),
*it.chain(*[
[pi.change_mode, "pondering", pi.look_at, this_video]
for pi in self.get_students()
])
)
self.play(*[
ApplyMethod(pi.look_at, series)
for pi in self.get_pi_creatures()
])
self.play(*[
ApplyMethod(
video.shift, 0.5*video.get_height()*DOWN,
run_time = 3,
rate_func = squish_rate_func(
there_and_back, alpha, alpha+0.3
)
)
for video, alpha in zip(series, np.linspace(0, 0.7, len(series)))
])
self.wait()
student = self.get_students()[1]
self.remove(student)
everything = VGroup(*self.get_top_level_mobjects())
self.add(student)
words = OldTexText("""
You could have
invented this.
""")
words.next_to(student, UP, LARGE_BUFF)
self.play(self.teacher.change_mode, "plain")
self.play(
everything.fade, 0.75,
student.change_mode, "plain"
)
self.play(
Write(words),
student.look_at, words,
)
self.play(
student.change_mode, "confused",
student.look_at, words
)
self.wait(3)
self.play(student.change_mode, "thinking")
self.wait(4)
class Chapter1PatreonThanks(PatreonThanks):
CONFIG = {
"specific_patrons" : [
"Ali Yahya",
"CrypticSwarm",
"Juan Benet",
"Yu Jun",
"Othman Alikhan",
"Markus Persson",
"Joseph John Cox",
"Luc Ritchie",
"Einar Johansen",
"Rish Kundalia",
"Achille Brighton",
"Kirk Werklund",
"Ripta Pasay",
"Felipe Diniz",
],
"patron_scale_val" : 0.9
}
class EndScreen(PiCreatureScene):
CONFIG = {
"seconds_to_blink" : 3,
}
def construct(self):
words = OldTexText("Clicky stuffs")
words.scale(1.5)
words.next_to(self.pi_creature, UP)
words.to_edge(UP)
self.play(
FadeIn(
words,
run_time = 2,
lag_ratio = 0.5
),
self.pi_creature.change_mode, "hooray"
)
self.wait()
mode_point_pairs = [
("raise_left_hand", 5*LEFT+3*UP),
("raise_right_hand", 5*RIGHT+3*UP),
("thinking", 5*LEFT+2*DOWN),
("thinking", 5*RIGHT+2*DOWN),
("thinking", 5*RIGHT+2*DOWN),
("happy", 5*LEFT+3*UP),
("raise_right_hand", 5*RIGHT+3*UP),
]
for mode, point in mode_point_pairs:
self.play(self.pi_creature.change, mode, point)
self.wait()
self.wait(3)
def create_pi_creature(self):
self.pi_creature = Randolph()
self.pi_creature.shift(2*DOWN + 1.5*LEFT)
return self.pi_creature
class Thumbnail(AlternateAreaUnderCurve):
CONFIG = {
"x_axis_label" : "",
"y_axis_label" : "",
"graph_origin" : 2.4 * DOWN + 3 * LEFT,
}
def construct(self):
self.setup_axes()
self.remove(*self.x_axis.numbers)
self.remove(*self.y_axis.numbers)
graph = self.get_graph(self.func)
rects = self.get_riemann_rectangles(
graph,
x_min = 0,
x_max = 4,
dx = 0.25,
start_color = BLUE_E,
)
words = OldTexText("""
Could \\emph{you} invent
calculus?
""")
words.set_width(9)
words.to_edge(UP)
self.add(graph, rects, words)

# ===== keiffster/program-y :: /test/programytest/parser/template/node_tests/test_conditiontype2.py (MIT) =====

from unittest.mock import patch
import xml.etree.ElementTree as ET
from programy.dialog.question import Question
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.condition import TemplateConditionListItemNode
from programy.parser.template.nodes.condition import TemplateConditionNode
from programy.parser.template.nodes.word import TemplateWordNode
from programytest.parser.base import ParserTestsBaseClass
class TemplateConditionType2NodeTests(ParserTestsBaseClass):
def test_type2_node_global(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateConditionNode("cond1", condition_type=2)
self.assertIsNotNone(node)
cond1 = TemplateConditionListItemNode(value=TemplateWordNode("value1"))
cond1.append(TemplateWordNode("Word1"))
node.append(cond1)
cond2 = TemplateConditionListItemNode(value=TemplateWordNode("value2"))
cond2.append(TemplateWordNode("Word2"))
node.append(cond2)
cond3 = TemplateConditionListItemNode()
cond3.append(TemplateWordNode("Word3"))
node.append(cond3)
root.append(node)
self.assertEqual(len(root.children), 1)
self._client_context.bot.get_conversation(self._client_context).set_property('cond1', "value2")
question = Question.create_from_text(self._client_context, "Hello")
self._client_context.bot.get_conversation(self._client_context).record_dialog(question)
self._client_context.bot.get_conversation(self._client_context).current_question().set_property("cond1", "value2")
result = root.resolve(self._client_context)
self.assertIsNotNone(result)
self.assertEqual("Word2", result)
def patch_resolve_type2_to_string(self, client_context):
        raise Exception("Mock Exception")
@patch("programy.parser.template.nodes.condition.TemplateConditionNode._resolve_type2_to_string", patch_resolve_type2_to_string)
def test_type2_exception(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateConditionNode("cond1", condition_type=2)
self.assertIsNotNone(node)
cond1 = TemplateConditionListItemNode(value=TemplateWordNode("value1"))
cond1.append(TemplateWordNode("Word1"))
node.append(cond1)
cond2 = TemplateConditionListItemNode(value=TemplateWordNode("value2"))
cond2.append(TemplateWordNode("Word2"))
node.append(cond2)
cond3 = TemplateConditionListItemNode()
cond3.append(TemplateWordNode("Word3"))
node.append(cond3)
root.append(node)
self.assertEqual(len(root.children), 1)
self._client_context.bot.get_conversation(self._client_context).set_property('cond1', "value2")
question = Question.create_from_text(self._client_context, "Hello")
self._client_context.bot.get_conversation(self._client_context).record_dialog(question)
self._client_context.bot.get_conversation(self._client_context).current_question().set_property("cond1", "value2")
result = root.resolve(self._client_context)
self.assertIsNotNone(result)
self.assertEqual("", result)
def test_type2_to_xml_global(self):
root = TemplateNode()
node = TemplateConditionNode("cond1", condition_type=2)
self.assertIsNotNone(node)
cond1 = TemplateConditionListItemNode(value=TemplateWordNode("value1"))
cond1.append(TemplateWordNode("Word1"))
node.append(cond1)
cond2 = TemplateConditionListItemNode(value=TemplateWordNode("value2"))
cond2.append(TemplateWordNode("Word2"))
node.append(cond2)
cond3 = TemplateConditionListItemNode()
cond3.append(TemplateWordNode("Word3"))
node.append(cond3)
root.append(node)
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual('<template><condition name="cond1"><li><value>value1</value>Word1</li> <li><value>value2</value>Word2</li> <li>Word3</li></condition></template>', xml_str)
###################################################################################################################
# Type 2 Local
#
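    # In a type-2 condition the variable is named once on the <condition>
    # tag and each <li> supplies a candidate value, with a value-less <li>
    # as the default, e.g. (mirroring the expected XML asserted below):
    #   <condition var="var1"><li><value>value1</value>Word1</li><li>Word3</li></condition>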
def test_type2_node_local(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateConditionNode("var1", var_type=TemplateConditionNode.LOCAL, condition_type=2)
self.assertIsNotNone(node)
cond1 = TemplateConditionListItemNode(value=TemplateWordNode("value1"), var_type=TemplateConditionNode.LOCAL)
cond1.append(TemplateWordNode("Word1"))
node.append(cond1)
cond2 = TemplateConditionListItemNode(value=TemplateWordNode("value2"), var_type=TemplateConditionNode.LOCAL)
cond2.append(TemplateWordNode("Word2"))
node.append(cond2)
cond3 = TemplateConditionListItemNode()
cond3.append(TemplateWordNode("Word3"))
node.append(cond3)
root.append(node)
self.assertEqual(len(root.children), 1)
question = Question.create_from_text(self._client_context, "Hello")
self._client_context.bot.get_conversation(self._client_context).record_dialog(question)
self._client_context.bot.get_conversation(self._client_context).current_question().set_property("var1", "value2")
result = root.resolve(self._client_context)
self.assertIsNotNone(result)
self.assertEqual("Word2", result)
def test_type2_to_xml_local(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateConditionNode("var1", var_type=TemplateConditionNode.LOCAL, condition_type=2)
self.assertIsNotNone(node)
cond1 = TemplateConditionListItemNode(value=TemplateWordNode("value1"), var_type=TemplateConditionNode.LOCAL)
cond1.append(TemplateWordNode("Word1"))
node.append(cond1)
cond2 = TemplateConditionListItemNode(value=TemplateWordNode("value2"), var_type=TemplateConditionNode.LOCAL)
cond2.append(TemplateWordNode("Word2"))
node.append(cond2)
cond3 = TemplateConditionListItemNode()
cond3.append(TemplateWordNode("Word3"))
node.append(cond3)
root.append(node)
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual('<template><condition var="var1"><li><value>value1</value>Word1</li> <li><value>value2</value>Word2</li> <li>Word3</li></condition></template>', xml_str)
###################################################################################################################
# Type 2 Bot
#
def test_type2_node_bot(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateConditionNode("cond1", var_type=TemplateConditionNode.BOT, condition_type=2)
self.assertIsNotNone(node)
cond1 = TemplateConditionListItemNode(value=TemplateWordNode("value1"), var_type=TemplateConditionNode.BOT)
cond1.append(TemplateWordNode("Word1"))
node.append(cond1)
cond2 = TemplateConditionListItemNode(value=TemplateWordNode("value2"), var_type=TemplateConditionNode.BOT)
cond2.append(TemplateWordNode("Word2"))
node.append(cond2)
cond3 = TemplateConditionListItemNode()
cond3.append(TemplateWordNode("Word3"))
node.append(cond3)
root.append(node)
self.assertEqual(len(root.children), 1)
self._client_context.brain.properties.add_property('cond1', "value2")
result = root.resolve(self._client_context)
self.assertIsNotNone(result)
self.assertEqual("Word2", result)
def test_type2_to_xml_bot(self):
root = TemplateNode()
node = TemplateConditionNode("cond1", var_type=TemplateConditionNode.BOT, condition_type=2)
self.assertIsNotNone(node)
cond1 = TemplateConditionListItemNode(value=TemplateWordNode("value1"))
cond1.append(TemplateWordNode("Word1"))
node.append(cond1)
cond2 = TemplateConditionListItemNode(value=TemplateWordNode("value2"))
cond2.append(TemplateWordNode("Word2"))
node.append(cond2)
cond3 = TemplateConditionListItemNode()
cond3.append(TemplateWordNode("Word3"))
node.append(cond3)
root.append(node)
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual('<template><condition bot="cond1"><li><value>value1</value>Word1</li> <li><value>value2</value>Word2</li> <li>Word3</li></condition></template>', xml_str)
def test_resolve_type2_condition(self):
template = ET.fromstring("""
<template>
<condition>
<li name='name1' value="a">Val1</li>
<li>Val5</li>
</condition>
</template>
""")
graph = self._client_context.bot.brain.aiml_parser.template_parser
ast = graph.parse_template_expression(template)
template_node = ast.children[0]
self.assertEquals("Val5", template_node.resolve_type2_condition(self._client_context))
def test_resolve_type2_condition_no_default(self):
template = ET.fromstring("""
<template>
<condition>
<li name='name1' value="a">Val1</li>
</condition>
</template>
""")
graph = self._client_context.bot.brain.aiml_parser.template_parser
ast = graph.parse_template_expression(template)
template_node = ast.children[0]
self.assertEquals("", template_node.resolve_type2_condition(self._client_context))

# ===== Azure/azure-functions-python-worker :: /tests/endtoend/eventgrid_functions/eventgrid_output_binding/__init__.py (MIT) =====

# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from datetime import datetime
import azure.functions as func
def main(req: func.HttpRequest,
outputEvent: func.Out[func.EventGridOutputEvent]) -> func.HttpResponse:
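    # Build an EventGridOutputEvent carrying the request's test UUID, hand it
    # to the output binding, and report what was sent in the HTTP response.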
test_uuid = req.params.get('test_uuid')
data_to_event_grid = func.EventGridOutputEvent(id="test-id",
data={
"test_uuid": test_uuid
},
subject="test-subject",
event_type="test-event-1",
event_time=datetime.utcnow(),
data_version="1.0")
outputEvent.set(data_to_event_grid)
r_value = "Sent event with subject: {}, id: {}, data: {}, event_type: {} " \
"to EventGrid!".format(data_to_event_grid.subject,
data_to_event_grid.id,
data_to_event_grid.get_json(),
data_to_event_grid.event_type)
return func.HttpResponse(r_value)

# ===== matrix-org/synapse :: /synapse/module_api/callbacks/__init__.py (Apache-2.0) =====

# Copyright 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from synapse.server import HomeServer
from synapse.module_api.callbacks.account_validity_callbacks import (
AccountValidityModuleApiCallbacks,
)
from synapse.module_api.callbacks.spamchecker_callbacks import (
SpamCheckerModuleApiCallbacks,
)
from synapse.module_api.callbacks.third_party_event_rules_callbacks import (
ThirdPartyEventRulesModuleApiCallbacks,
)
class ModuleApiCallbacks:
def __init__(self, hs: "HomeServer") -> None:
self.account_validity = AccountValidityModuleApiCallbacks()
self.spam_checker = SpamCheckerModuleApiCallbacks(hs)
self.third_party_event_rules = ThirdPartyEventRulesModuleApiCallbacks(hs)

# ===== Hrabal/TemPy :: /tests/test_rendering.py (Apache-2.0) =====

# -*- coding: utf-8 -*-
"""
@author: Federico Cerchiari <federicocerchiari@gmail.com>
"""
import os
import unittest
from collections import Counter
from tempy.tags import Html, Head, Body, Link, Div, A, P, Meta, Title
from tempy import render_template
class TestRender(unittest.TestCase):
def test_render_template(self):
self.assertEqual(render_template('test', start_directory=os.path.dirname(os.path.realpath(__file__))),
'<div></div>')
def test_page(self):
self.maxDiff = None
expected = '<!DOCTYPE HTML><html><head><meta charset="utf-8"/><link href="my.css" type="text/css" ' \
'rel="stylesheet"/><title>test_title</title></head><body><div class="linkBox"><a href="www.' \
'foo.com">www.foo.com</a></div><p>This is foo</p><p>This is Bar</p><p>Have you met my friend Baz?' \
'</p>Lorem ipsum dolor sit amet, consectetur adipiscing elit</body></html>'
my_text_list = ['This is foo', 'This is Bar', 'Have you met my friend Baz?']
another_list = ['Lorem ipsum ', 'dolor sit amet, ', 'consectetur adipiscing elit']
page = Html()( # add tags inside the one you created calling the parent
Head()( # add multiple tags in one call
Meta(charset='utf-8'), # add tag attributes using kwargs in tag initialization
Link(href="my.css", typ="text/css", rel="stylesheet"),
Title('test_title')
),
body=Body()( # give them a name so you can navigate the DOM with those names
Div(klass='linkBox')(
A(href='www.foo.com')
),
(P()(text) for text in my_text_list), # tag insertion accepts generators
another_list # add text from a list, str.join is used in rendering
)
)
self.assertEqual(Counter(page.render()), Counter(expected))

# ===== fetchai/agents-aea :: /tests/test_aea/test_cli/test_generate_key.py (Apache-2.0) =====

# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2023 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the tests for the `aea generate-key` sub-command."""
import json
import os
import shutil
import tempfile
from pathlib import Path
from aea_ledger_ethereum import EthereumCrypto
from aea_ledger_fetchai import FetchAICrypto
from aea.cli import cli
from aea.crypto.registries import make_crypto
from aea.helpers.sym_link import cd
from aea.test_tools.test_cases import AEATestCaseEmpty
from tests.conftest import (
CLI_LOG_OPTION,
CliRunner,
ETHEREUM_PRIVATE_KEY_FILE,
FETCHAI_PRIVATE_KEY_FILE,
)
class TestGenerateKey:
"""Test that the command 'aea generate-key' works as expected."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.runner = CliRunner()
cls.agent_name = "myagent"
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
os.chdir(cls.t)
def test_fetchai(self, password_or_none):
"""Test that the fetch private key is created correctly."""
args = [*CLI_LOG_OPTION, "generate-key", FetchAICrypto.identifier] + (
["--password", password_or_none] if password_or_none is not None else []
)
result = self.runner.invoke(cli, args)
assert result.exit_code == 0
assert Path(FETCHAI_PRIVATE_KEY_FILE).exists()
make_crypto(
FetchAICrypto.identifier,
private_key_path=FETCHAI_PRIVATE_KEY_FILE,
password=password_or_none,
)
Path(FETCHAI_PRIVATE_KEY_FILE).unlink()
def test_ethereum(self, password_or_none):
"""Test that the fetch private key is created correctly."""
args = [*CLI_LOG_OPTION, "generate-key", EthereumCrypto.identifier] + (
["--password", password_or_none] if password_or_none is not None else []
)
result = self.runner.invoke(cli, args)
assert result.exit_code == 0
assert Path(ETHEREUM_PRIVATE_KEY_FILE).exists()
make_crypto(
EthereumCrypto.identifier,
private_key_path=ETHEREUM_PRIVATE_KEY_FILE,
password=password_or_none,
)
Path(ETHEREUM_PRIVATE_KEY_FILE).unlink()
def test_all(self):
"""Test that all the private keys are created correctly when running 'aea generate-key all'."""
result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "generate-key", "all"])
assert result.exit_code == 0
assert Path(FETCHAI_PRIVATE_KEY_FILE).exists()
assert Path(ETHEREUM_PRIVATE_KEY_FILE).exists()
make_crypto(FetchAICrypto.identifier, private_key_path=FETCHAI_PRIVATE_KEY_FILE)
make_crypto(
EthereumCrypto.identifier, private_key_path=ETHEREUM_PRIVATE_KEY_FILE
)
Path(FETCHAI_PRIVATE_KEY_FILE).unlink()
Path(ETHEREUM_PRIVATE_KEY_FILE).unlink()
@classmethod
def teardown_class(cls):
"""Tear the test down."""
os.chdir(cls.cwd)
shutil.rmtree(cls.t)
class TestGenerateKeyWhenAlreadyExists:
"""Test that the command 'aea generate-key' asks for confirmation when a key already exists."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.runner = CliRunner()
cls.agent_name = "myagent"
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
os.chdir(cls.t)
def test_fetchai(self):
"""Test that the fetchai private key is overwritten or not dependending on the user input."""
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "generate-key", FetchAICrypto.identifier]
)
assert result.exit_code == 0
assert Path(FETCHAI_PRIVATE_KEY_FILE).exists()
# This tests if the file has been created and its content is correct.
make_crypto(FetchAICrypto.identifier, private_key_path=FETCHAI_PRIVATE_KEY_FILE)
content = Path(FETCHAI_PRIVATE_KEY_FILE).read_bytes()
        # Saying 'no' leaves the file as it is.
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "generate-key", FetchAICrypto.identifier], input="n"
)
assert result.exit_code == 0
assert Path(FETCHAI_PRIVATE_KEY_FILE).read_bytes() == content
# Saying 'yes' overwrites the file.
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "generate-key", FetchAICrypto.identifier], input="y"
)
assert result.exit_code == 0
assert Path(FETCHAI_PRIVATE_KEY_FILE).read_bytes() != content
make_crypto(FetchAICrypto.identifier, private_key_path=FETCHAI_PRIVATE_KEY_FILE)
@classmethod
def teardown_class(cls):
"""Tear the test down."""
os.chdir(cls.cwd)
shutil.rmtree(cls.t)
class TestGenerateKeyWithFile:
"""Test that the command 'aea generate-key' can accept a file path."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.runner = CliRunner()
cls.agent_name = "myagent"
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
os.chdir(cls.t)
def test_fetchai(self):
"""Test that the fetchai private key can be deposited in a custom file."""
test_file = "test.txt"
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "generate-key", FetchAICrypto.identifier, test_file]
)
assert result.exit_code == 0
assert Path(test_file).exists()
# This tests if the file has been created and its content is correct.
crypto = make_crypto(FetchAICrypto.identifier, private_key_path=test_file)
content = Path(test_file).read_bytes()
assert content.decode("utf-8") == crypto.private_key
def test_all(self):
"""Test that the all command does not allow a file to be provided."""
test_file = "test.txt"
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "generate-key", "all", test_file]
)
assert result.exit_code == 1
@classmethod
def teardown_class(cls):
"""Tear the test down."""
os.chdir(cls.cwd)
shutil.rmtree(cls.t)
class TestGenerateKeyWithAddKeyWithoutConnection(AEATestCaseEmpty):
"""Test that the command 'aea generate-key --add-key' works as expected."""
keys_config_path = "agent.private_key_paths"
args = [] # type: ignore
def test_fetchai(self):
"""Test that the fetch private key is created correctly."""
with cd(self._get_cwd()):
result = self.run_cli_command(
"config", "get", self.keys_config_path, cwd=self._get_cwd()
)
assert result.exit_code == 0
assert json.loads(result.stdout_bytes) == {}
args = [*CLI_LOG_OPTION, "generate-key", FetchAICrypto.identifier]
result = self.run_cli_command(
*args, "--add-key", *self.args, cwd=self._get_cwd()
)
assert result.exit_code == 0
assert Path(FETCHAI_PRIVATE_KEY_FILE).exists()
make_crypto(
FetchAICrypto.identifier,
private_key_path=FETCHAI_PRIVATE_KEY_FILE,
password=None,
)
Path(FETCHAI_PRIVATE_KEY_FILE).unlink()
result = self.run_cli_command(
"config", "get", self.keys_config_path, cwd=self._get_cwd()
)
assert result.exit_code == 0
agent_keys = json.loads(result.stdout_bytes)
assert agent_keys.get(FetchAICrypto.identifier) == FETCHAI_PRIVATE_KEY_FILE
class TestGenerateKeyWithAddKeyWithConnection(
TestGenerateKeyWithAddKeyWithoutConnection
):
"""Test that the command 'aea generate-key --add-key' works as expected."""
keys_config_path = "agent.connection_private_key_paths"
args = ["--connection"] # type: ignore

# ===== olofk/edalize :: /tests/test_vcs.py (BSD-2-Clause) =====

from .edalize_common import make_edalize_test
def run_vcs_test(tf):
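    # Drive the full configure -> build -> run flow, comparing each generated
    # artifact (Makefile, .scr script, vcs.cmd, run.cmd) against references.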
tf.backend.configure()
tf.compare_files(["Makefile", tf.test_name + ".scr"])
tf.backend.build()
tf.compare_files(["vcs.cmd"])
tf.backend.run()
tf.compare_files(["run.cmd"])
def test_vcs_tool_options(make_edalize_test):
tool_options = {
"vcs_options": ["-debug_access+pp", "-debug_access+all"],
"run_options": ["-licqueue"],
}
tf = make_edalize_test(
"vcs",
test_name="test_vcs_tool_options_0",
ref_dir="tool_options",
tool_options=tool_options,
)
run_vcs_test(tf)
def test_vcs_no_tool_options(make_edalize_test):
tf = make_edalize_test("vcs", ref_dir="no_tool_options")
run_vcs_test(tf)
def test_vcs_minimal(tmpdir):
import os
from edalize.edatool import get_edatool
from .edalize_common import compare_files, tests_dir
os.environ["PATH"] = (
os.path.join(tests_dir, "mock_commands") + ":" + os.environ["PATH"]
)
tool = "vcs"
name = "test_" + tool + "_minimal_0"
work_root = str(tmpdir)
edam = {"name": name, "toplevel": "top"}
backend = get_edatool(tool)(edam=edam, work_root=work_root)
backend.configure()
ref_dir = os.path.join(tests_dir, "test_" + tool, "minimal")
compare_files(ref_dir, work_root, ["Makefile", name + ".scr"])
backend.build()
compare_files(ref_dir, work_root, ["vcs.cmd"])
backend.run()
compare_files(ref_dir, work_root, ["run.cmd"])

# ===== foxBMS/foxbms-2 :: /src/app/application/algorithm/wscript (BSD-3-Clause, CC-BY-4.0) =====

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 - 2023, Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# We kindly request you to use one or more of the following phrases to refer to
# foxBMS in your hardware, software, documentation or advertising materials:
#
# - "This product uses parts of foxBMS®"
# - "This product includes parts of foxBMS®"
# - "This product is derived from foxBMS®"
"""TODO"""
import os
def build(bld):
"""builds the algorithm library"""
soc = bld.env.state_estimator_soc
soe = bld.env.state_estimator_soe
sof = bld.env.state_estimator_sof
soh = bld.env.state_estimator_soh
source = [
os.path.join("algorithm.c"),
os.path.join("config", "algorithm_cfg.c"),
os.path.join("moving_average", "moving_average.c"),
os.path.join("state_estimation", "soc", soc, f"soc_{soc}_cfg.c"),
os.path.join("state_estimation", "soc", soc, f"soc_{soc}.c"),
os.path.join("state_estimation", "soe", soe, f"soe_{soe}_cfg.c"),
os.path.join("state_estimation", "soe", soe, f"soe_{soe}.c"),
os.path.join("state_estimation", "sof", sof, f"sof_{sof}.c"),
os.path.join("state_estimation", "sof", sof, f"sof_{sof}_cfg.c"),
os.path.join("state_estimation", "soh", soh, f"soh_{soh}.c"),
os.path.join("state_estimation", "soh", soh, f"soh_{soh}_cfg.c"),
os.path.join(".", "state_estimation", "state_estimation.c"),
]
includes = [
".",
"config",
"moving_average",
"state_estimation",
os.path.join("state_estimation", "soc", soc),
os.path.join("state_estimation", "soe", soe),
os.path.join("state_estimation", "sof", sof),
os.path.join("state_estimation", "soh", soh),
os.path.join("..", "config"),
os.path.join("..", "..", "application", "bms"),
os.path.join("..", "..", "driver", "config"),
os.path.join("..", "..", "driver", "contactor"),
os.path.join("..", "..", "driver", "foxmath"),
os.path.join("..", "..", "driver", "fram"),
os.path.join("..", "..", "driver", "sps"),
os.path.join("..", "..", "driver", "mcu"),
os.path.join("..", "..", "engine", "config"),
os.path.join("..", "..", "engine", "database"),
os.path.join("..", "..", "engine", "diag"),
os.path.join("..", "..", "main", "include"),
os.path.join("..", "..", "task", "config"),
os.path.join("..", "..", "task", "os"),
]
includes.extend(bld.env.INCLUDES_RTOS)
cflags = bld.env.CFLAGS_FOXBMS
target = f"{bld.env.APPNAME.lower()}-algorithms"
bld.stlib(
source=source,
includes=includes,
cflags=cflags,
target=target,
)

# ===== pulumi/pulumi-azure-native :: /sdk/python/pulumi_azure_native/containerregistry/v20230701/get_webhook_callback_config.py (Apache-2.0, BSD-3-Clause) =====

# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetWebhookCallbackConfigResult',
'AwaitableGetWebhookCallbackConfigResult',
'get_webhook_callback_config',
'get_webhook_callback_config_output',
]
@pulumi.output_type
class GetWebhookCallbackConfigResult:
"""
The configuration of service URI and custom headers for the webhook.
"""
def __init__(__self__, custom_headers=None, service_uri=None):
if custom_headers and not isinstance(custom_headers, dict):
raise TypeError("Expected argument 'custom_headers' to be a dict")
pulumi.set(__self__, "custom_headers", custom_headers)
if service_uri and not isinstance(service_uri, str):
raise TypeError("Expected argument 'service_uri' to be a str")
pulumi.set(__self__, "service_uri", service_uri)
@property
@pulumi.getter(name="customHeaders")
def custom_headers(self) -> Optional[Mapping[str, str]]:
"""
Custom headers that will be added to the webhook notifications.
"""
return pulumi.get(self, "custom_headers")
@property
@pulumi.getter(name="serviceUri")
def service_uri(self) -> str:
"""
The service URI for the webhook to post notifications.
"""
return pulumi.get(self, "service_uri")
class AwaitableGetWebhookCallbackConfigResult(GetWebhookCallbackConfigResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebhookCallbackConfigResult(
custom_headers=self.custom_headers,
service_uri=self.service_uri)
def get_webhook_callback_config(registry_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
webhook_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebhookCallbackConfigResult:
"""
Gets the configuration of service URI and custom headers for the webhook.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str webhook_name: The name of the webhook.
"""
__args__ = dict()
__args__['registryName'] = registry_name
__args__['resourceGroupName'] = resource_group_name
__args__['webhookName'] = webhook_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:containerregistry/v20230701:getWebhookCallbackConfig', __args__, opts=opts, typ=GetWebhookCallbackConfigResult).value
return AwaitableGetWebhookCallbackConfigResult(
custom_headers=pulumi.get(__ret__, 'custom_headers'),
service_uri=pulumi.get(__ret__, 'service_uri'))
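# Illustrative usage sketch (resource names below are hypothetical):
#   cfg = get_webhook_callback_config(registry_name="myregistry",
#                                     resource_group_name="my-rg",
#                                     webhook_name="mywebhook")
#   cfg.service_uri     # e.g. "https://example.com/hook"
#   cfg.custom_headers  # e.g. {"Authorization": "..."}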
@_utilities.lift_output_func(get_webhook_callback_config)
def get_webhook_callback_config_output(registry_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
webhook_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWebhookCallbackConfigResult]:
"""
Gets the configuration of service URI and custom headers for the webhook.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str webhook_name: The name of the webhook.
"""
...
|
b6e83297e29cd45403836e7b01495895e1ee01a7
|
9fcb5164ff77e8cf48d860485fed262d36ed63e7
|
/pkgpanda/__init__.py
|
48e6913e81788c3f1e16f839114b34b30b3f1366
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-oracle-bcl-javase-javafx-2012",
"ErlPL-1.1",
"MPL-2.0",
"ISC",
"BSL-1.0",
"Python-2.0",
"BSD-2-Clause"
] |
permissive
|
dcos/dcos
|
2415d298979c6d6e3183aeb64c906a0959863576
|
79b9a39b4e639dc2c9435a869918399b50bfaf24
|
refs/heads/master
| 2023-08-09T04:16:19.696621
| 2021-07-05T06:42:39
| 2021-07-05T06:42:39
| 56,184,050
| 2,613
| 641
|
Apache-2.0
| 2023-07-27T04:13:50
| 2016-04-13T20:31:58
|
Python
|
UTF-8
|
Python
| false
| false
| 38,981
|
py
|
__init__.py
|
"""
See `docs/package_concepts.md` for the package layout.
Packages have ids. Ids are composed of a name + blob. The blob is never
introspected by the packaging stuff.
Each package contains a pkginfo.json. That contains a list of requires as well as
environment variables from the package.
"""
import json
import logging
import os
import os.path
import re
import shutil
import tempfile
from collections.abc import Iterable  # "Iterable" was removed from "collections" in Python 3.10
from itertools import chain
from typing import Union
from pkgpanda.constants import (DCOS_SERVICE_CONFIGURATION_FILE,
RESERVED_UNIT_NAMES,
STATE_DIR_ROOT)
from pkgpanda.exceptions import (InstallError, PackageError, PackageNotFound,
ValidationError)
from pkgpanda.subprocess import CalledProcessError, check_call, check_output
from pkgpanda.util import (download, extract_tarball, if_exists, is_windows,
load_json, make_directory, remove_directory, write_json, write_string)
if not is_windows:
import grp
import pwd
log = logging.getLogger(__name__)
# TODO(cmaloney): Can we switch to something like a PKGBUILD from ArchLinux and
# then just do the multi-version stuff ourselves and save a lot of re-implementation?
reserved_env_vars = ["LD_LIBRARY_PATH", "PATH"]
env_header = """# Pkgpanda provided environment variables
LD_LIBRARY_PATH={0}/lib
PATH={0}/bin:/usr/bin:/bin:/sbin\n\n"""
env_export_header = """# Pkgpanda provided environment variables
export LD_LIBRARY_PATH={0}/lib
export PATH="{0}/bin:$PATH"\n\n"""
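# Rendered example of env_header with root "/opt/mesosphere" (illustrative):
#   # Pkgpanda provided environment variables
#   LD_LIBRARY_PATH=/opt/mesosphere/lib
#   PATH=/opt/mesosphere/bin:/usr/bin:/bin:/sbin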
name_regex = r"^[a-zA-Z0-9@_+][a-zA-Z0-9@._+\-]*$"
version_regex = r"^[a-zA-Z0-9@_+:.]+$"
username_regex = r"^dcos_[a-z0-9_]+$"
linux_group_regex = r"^[a-z_][a-z0-9_-]*$" # https://github.com/shadow-maint/shadow/blob/master/libmisc/chkname.c#L52
class Systemd:
"""Manages systemd units and unit files during installation.
This class uses a unit.wants directory as the list of all units it needs to manage. Unit files are copied to the
base systemd dir to make sure they're available on the root volume, and thus readable when systemd starts. Symlinks
in the unit.wants directory are rewritten to point to the copied unit files.
"""
# Use "unit.new" to prevent removing unrelated ".new" directories if the install root is being used as the systemd
# base dir.
new_unit_suffix = ".unit.new"
def __init__(self, unit_directory, active, block):
self.__unit_directory = unit_directory
self.__active = active
self.__block = block
self.__base_systemd = os.path.normpath(os.path.join(self.__unit_directory, ".."))
def stop_all(self):
if not self.__active:
log.warning("Do not stop services")
return
if not os.path.exists(self.__unit_directory):
log.warning("Do not stop services. %s does not exist", self.__unit_directory)
return
names = list(filter(
lambda n: os.path.isfile(os.path.join(self.__unit_directory, n)),
os.listdir(self.__unit_directory)))
try:
cmd = ["systemctl", "stop"] + names
if not self.__block:
cmd.append("--no-block")
check_call(cmd)
except CalledProcessError as ex:
# If the service doesn't exist, don't error. This happens when a
# bootstrap tarball has just been extracted but nothing started
# yet during first activation.
log.warning(ex)
if ex.returncode != 5:
raise
def remove_staged_unit_files(self):
"""Remove staged unit files created by Systemd.stage_new_units()."""
for filename in os.listdir(self.__base_systemd):
if filename.endswith(self.new_unit_suffix):
os.remove(os.path.join(self.__base_systemd, filename))
def stage_new_units(self, new_wants_dir):
"""Prepare new systemd units for activation.
Unit files targeted by the symlinks in new_wants_dir are copied to a temporary location in the base systemd
directory, and the symlinks are rewritten to target the intended final destination of the copied unit files.
"""
for unit_name in self.unit_names(new_wants_dir):
wants_symlink_path = os.path.join(new_wants_dir, unit_name)
package_file_path = os.path.realpath(wants_symlink_path)
systemd_file_path = os.path.join(self.__base_systemd, unit_name)
tmp_systemd_file_path = systemd_file_path + self.new_unit_suffix
# Copy the unit file to the systemd directory with a suffix added to the filename.
# This file will be moved to systemd_file_path when the new package set is swapped in.
shutil.copyfile(package_file_path, tmp_systemd_file_path)
shutil.copymode(package_file_path, tmp_systemd_file_path)
# Rewrite the symlink to point to the copied unit file's destination.
# This symlink won't point to the correct file until the copied unit file is moved to its target location
# during activate_new_unit_files().
os.remove(wants_symlink_path)
os.symlink(systemd_file_path, wants_symlink_path)
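        # Illustrative effect per unit (unit name hypothetical), for "dcos-foo.service":
        #   copy   <pkg>/dcos-foo.service -> <base_systemd>/dcos-foo.service.unit.new
        #   relink wants/dcos-foo.service -> <base_systemd>/dcos-foo.service
        # The ".unit.new" copy is renamed into place by activate_new_unit_files().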
def remove_unit_files(self):
if not os.path.exists(self.__unit_directory):
log.warning("Do not remove files. %s does not exist", self.__unit_directory)
return
for unit_name in self.unit_names(self.__unit_directory):
try:
path = os.path.join(self.__base_systemd, unit_name)
log.debug("Remove %s", path)
os.remove(path)
except FileNotFoundError:
log.debug(unit_name + " not found")
def activate_new_unit_files(self):
log.info("Move new unit files to their final locations.")
if not os.path.exists(self.__unit_directory):
log.warning("Do not move new unit files. %s does not exist", self.__unit_directory)
return
self.remove_unit_files()
for unit_name in self.unit_names(self.__unit_directory):
systemd_file_path = os.path.join(self.__base_systemd, unit_name)
shutil.move(systemd_file_path + self.new_unit_suffix, systemd_file_path)
@staticmethod
def unit_names(unit_dir):
units = os.listdir(unit_dir)
for unit_name in units:
if unit_name in RESERVED_UNIT_NAMES:
raise Exception("Reserved name encountered - {}.".format(unit_name))
return units
@property
def unit_directory(self):
return self.__unit_directory
class PackageId:
@staticmethod
def parse(id: str):
parts = id.split('--')
if len(parts) != 2:
raise ValidationError(
"Invalid package id {0}. Package ids may only ".format(id) +
"contain one '--' which seperates the name and version")
PackageId.validate_name(parts[0])
PackageId.validate_version(parts[1])
return parts[0], parts[1]
@staticmethod
def from_parts(name, version):
# TODO(cmaloney): This format, then parse is less than ideal.
return PackageId("{0}--{1}".format(name, version))
@staticmethod
def validate_name(name):
# [a-zA-Z0-9@._+-]
# May not start with '.' or '-'.
if not re.match(name_regex, name):
raise ValidationError("Invalid package name {0}. Must match the regex {1}".format(name, name_regex))
@staticmethod
def is_id(package_str):
return package_str.count('--') == 1
@staticmethod
def validate_version(version):
# [a-zA-Z0-9@._+:]
# May not contain a '-'.
if not re.match(version_regex, version):
raise ValidationError(
"Invalid package version {0}. Must match the regex {1}".format(version, version_regex))
def __init__(self, id):
self.name, self.version = PackageId.parse(id)
def __repr__(self):
return '{0}--{1}'.format(self.name, self.version)
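# A minimal usage sketch for PackageId (the ids below are hypothetical):
#   pid = PackageId("mesos--0.22.0")
#   pid.name     -> "mesos"
#   pid.version  -> "0.22.0"
#   PackageId("mesos-0.22.0")  -> raises ValidationError (no '--' separator)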
class Package:
def __init__(self, path, id: Union[PackageId, str], pkginfo):
if isinstance(id, str):
id = PackageId(id)
self.__id = id
self.__path = path
self.__pkginfo = pkginfo
@property
def environment(self):
return self.__pkginfo.get('environment', dict())
@property
def sysctl(self):
return self.__pkginfo.get('sysctl', dict())
@property
def check_dir(self):
return self.__path + '/check'
@property
def id(self):
return self.__id
@property
def name(self):
return self.__id.name
@property
def path(self):
return self.__path
@property
def variant(self):
return self.__pkginfo.get('variant', None)
@property
def requires(self):
return list(self.__pkginfo.get('requires', list()))
@property
def version(self):
return self.__id.version
@property
def state_directory(self):
return self.__pkginfo.get('state_directory', False)
@property
def username(self):
return self.__pkginfo.get('username', None)
@property
def group(self):
return self.__pkginfo.get('group', None)
def __repr__(self):
return str(self.__id)
def expand_require(require: Union[str, dict]):
name = None
variant = None
if isinstance(require, str):
name = require
else:
assert isinstance(require, dict)
if 'name' not in require or 'variant' not in require:
raise ValidationError(
"When specifying a dependency in requires by dictionary to " +
"depend on a variant both the name of the package and the " +
"variant name must always be specified")
name = require['name']
variant = require['variant']
if PackageId.is_id(name):
raise ValidationError(
"ERROR: Specifying a dependency on '" + name + "', an exact" +
"package id isn't allowed. Dependencies may be specified by" +
"package name alone or package name + variant (to change the" +
"package variant).")
return (name, variant)
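# expand_require accepts either form (names hypothetical):
#   expand_require("mesos")                                 -> ("mesos", None)
#   expand_require({"name": "mesos", "variant": "default"}) -> ("mesos", "default")
#   expand_require("mesos--0.22.0")                         -> raises ValidationError (exact ids not allowed)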
# Check that a set of packages is reasonable.
def validate_compatible(packages, roles):
# Every package name appears only once.
names = set()
ids = set()
tuples = set()
for package in packages:
if package.name in names:
raise ValidationError(
"Repeated name {0} in set of packages {1}".format(
package.name, ' '.join(map(lambda x: str(x.id), packages))))
if package.username is None and package.group is not None:
raise ValidationError("`group` cannot be used without `username`")
names.add(package.name)
ids.add(str(package.id))
tuples.add((package.name, package.variant))
# All requires are met.
# NOTE: Requires are given just to make it harder to accidentally
# break a cluster.
# Environment variables in packages, mapping from variable to package.
environment = dict()
sysctl_map = dict()
for package in packages:
# Check that all requirements of the package are met.
# Requirements can be specified on a package name or full version string.
for requirement in package.requires:
name, variant = expand_require(requirement)
if name not in names:
raise ValidationError(
("Package {} variant {} requires {} variant {} but that " +
"is not in the set of packages {}").format(
package.id,
package.variant,
name,
variant,
', '.join(str(x.id) for x in packages)))
# No repeated/conflicting environment variables with other packages as
# well as magic system environment variables.
for k, v in package.environment.items():
if k in reserved_env_vars:
raise ValidationError(
"{0} are reserved environment vars and cannot be specified in packages. "
"Present in package {1}".format(
", ".join(reserved_env_vars), package))
if k in environment:
raise ValidationError(
"Repeated environment variable {0}. In both packages {1} and {2}.".format(
k, v, package))
environment[k] = package
# No conflicting sysctl values.
for service_name, sysctl_settings in package.sysctl.items():
for sysctl_var, sysctl_value in sysctl_settings.items():
if sysctl_var in sysctl_map and sysctl_map[sysctl_var] != sysctl_value:
raise ValueError(
"Conflicting sysctl setting {sysctl_var}={sysctl_value}"
" present in the service {service}".format(
sysctl_var=sysctl_var,
sysctl_value=sysctl_value,
service=service_name))
sysctl_map[sysctl_var] = sysctl_value
# TODO(cmaloney): More complete validation
# - There are no repeated file/folder in the well_known_dirs
# - Including the roles subfolders.
# - There is a base set of required package names (pkgpanda, mesos, config)
# - The config is for this specific type of host (master, slave)?
# TODO(cmaloney): Add a github fetcher, useful for grabbing config tarballs.
def requests_fetcher(base_url, id_str, target, work_dir):
assert base_url
    assert isinstance(id_str, str)
id = PackageId(id_str)
# TODO(cmaloney): That file:// urls are allowed in base_url is likely a security hole.
# TODO(cmaloney): Switch to mesos-fetcher or aci or something so
# all the logic can go away, we gain integrity checking, etc.
base_url = base_url.rstrip('/')
url = base_url + "/packages/{0}/{1}.tar.xz".format(id.name, id_str)
# TODO(cmaloney): Use a private tmp directory so there is no chance of a user
# intercepting the tarball + other validation data locally.
with tempfile.NamedTemporaryFile(suffix=".tar.xz") as file:
download(file.name, url, work_dir, rm_on_error=False)
extract_tarball(file.name, target)
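# URL construction sketch: for base_url "https://example.com/repo" (hypothetical)
# and id "mesos--0.22.0", the tarball is fetched from
#   https://example.com/repo/packages/mesos/mesos--0.22.0.tar.xz
# and extracted into `target`.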
class Repository:
def __init__(self, path):
self.__path = os.path.abspath(path)
self.__packages = None
@property
def path(self):
return self.__path
def package_path(self, id):
return os.path.join(self.__path, id)
def get_ids(self, name):
# TODO(cmaloney): There is a lot of excess re-parsing here...
return list(pkg_id for pkg_id in self.list() if PackageId(pkg_id).name == name)
def has_package(self, id):
return id in self.list()
def list(self):
"""List the available packages in the repository.
A package is a folder which contains a pkginfo.json"""
if self.__packages is not None:
return self.__packages
packages = set()
if not os.path.exists(self.__path):
return packages
for id in os.listdir(self.__path):
if PackageId.is_id(id):
packages.add(id)
self.__packages = packages
return self.__packages
# Load the given package
def load(self, id: str):
# Validate the package id.
PackageId(id)
path = self.package_path(id)
if not os.path.exists(path):
raise PackageNotFound(id)
filename = os.path.join(path, "pkginfo.json")
try:
pkginfo = load_json(filename)
except OSError as ex:
raise PackageError("No / unreadable pkginfo.json in {0}: {1}".format(id, ex.strerror)) from ex
if not isinstance(pkginfo, dict):
raise PackageError("Usage should be a dictionary, not a {0}".format(type(pkginfo).__name__))
return Package(path, id, pkginfo)
def load_packages(self, ids: Iterable):
packages = set()
for id in ids:
packages.add(self.load(id))
return packages
def integrity_check(self):
# Check that all packages in the local repository have valid
# signatures, are up to date, all packages valid contents, etc.
raise NotImplementedError()
    # Add the given package to the repository.
    # If the package is already in the repository, this is a no-op that returns False.
    # Returns True otherwise.
def add(self, fetcher, id, warn_added=True):
# Validate the package id.
PackageId(id)
# If the package already exists, return true
package_path = self.package_path(id)
if os.path.exists(package_path):
if warn_added:
print("Package already added.")
return False
# TODO(cmaloney): Supply a temporary directory to extract to
# Then swap that into place, preventing partially-extracted things from
# becoming an issue.
pkg_path = self.package_path(id)
        # Appending _tmp so there is very little chance of colliding with the
        # rm of another package: our PackageId version strings are SHA-1 hashes,
        # so they never end with `_tmp`. `{sha}_tmp` is still a valid version
        # string, however, so other code doing directory scans copes fine with
        # the temp folders.
tmp_path = pkg_path + '_tmp'
# Cleanup artifacts (if any) laying around from previous partial
# package extractions.
remove_directory(tmp_path)
fetcher(id, tmp_path)
shutil.move(tmp_path, pkg_path)
return True
def remove(self, id):
path = self.package_path(id)
if not os.path.exists(path):
raise PackageNotFound(id)
remove_directory(path)
class ConflictingFile(ValidationError):
def __init__(self, src, dest, ex):
super().__init__(ex)
self.src = src
self.dest = dest
self.ex = ex
# Create folders and symlink files inside the folders. Allows multiple
# packages to have the same folder and provide it publicly.
def symlink_tree(src, dest):
for name in os.listdir(src):
src_path = os.path.join(src, name)
dest_path = os.path.join(dest, name)
# Symlink files and symlinks directly. For directories make a
# real directory and symlink everything inside.
# NOTE: We could relax this and follow symlinks, but then we
# need to be careful about recursive filesystem layouts.
if os.path.isdir(src_path) and not os.path.islink(src_path):
if os.path.exists(dest_path):
# We can only merge a directory into a directory.
# We won't merge into a symlink directory because that could
# result in a package editing inside another package.
if not os.path.isdir(dest_path) and not os.path.islink(dest_path):
raise ValidationError(
"Can't merge a file `{0}` and directory (or symlink) `{1}` with the same name.".format(
src_path, dest_path))
else:
os.makedirs(dest_path)
            # Recurse into the directory, symlinking everything, so long as the directory isn't itself a symlink.
symlink_tree(src_path, dest_path)
else:
try:
os.symlink(src_path, dest_path)
except FileNotFoundError as ex:
raise ConflictingFile(src_path, dest_path, ex) from ex
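# Illustrative effect of symlink_tree (paths hypothetical): given
#   src trees:  pkgA/bin/foo  and  pkgB/bin/bar
# merging both packages' bin/ into dest/ yields
#   dest/bin/              (real directory)
#   dest/bin/foo -> pkgA/bin/foo   (symlink)
#   dest/bin/bar -> pkgB/bin/bar   (symlink)
# so multiple packages can publish into the same public folder.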
# Manages a systemd-sysusers user set; users can be added to it before they are
# created on the system.
class UserManagement:
"""Manages a systemd-sysusers configuration file / user set
add_user() can be called until `ensure_users_exist` is called.
get_uid() can only be called once `ensure_users_exist` is called.
This helps enforce the code pattern which is needed to build one big sysusers configuration file
and then create all the users / validate they all exist once. After that the users can be
referenced / used.
"""
def __init__(self, manage_users: bool, add_users: bool):
self._manage_users = manage_users
self._add_users = add_users
self._users = set()
@staticmethod
def validate_username(username):
if not re.match(username_regex, username):
raise ValidationError("Username must begin with `dcos_` and only have a-z and underscore after that")
@staticmethod
def validate_group(group):
# Empty group is allowed.
if not group:
return
UserManagement.validate_group_name(group)
try:
grp.getgrnam(group)
except KeyError:
raise ValidationError("Group {} does not exist on the system".format(group))
@staticmethod
def validate_group_name(group_name):
if not group_name:
return
if not re.match(linux_group_regex, group_name):
raise ValidationError("Group {} has invalid name, must match the following regex: {}".format(
group_name, linux_group_regex))
@staticmethod
def validate_user_group(username, group_name):
user = pwd.getpwnam(username)
if not group_name:
return
group = grp.getgrnam(group_name)
if user.pw_gid != group.gr_gid:
# check if the user is the right group, but the group is not primary.
if username in group.gr_mem:
return
        raise ValidationError(
            "User {} exists with primary GID {}, but it should be assigned to group {} with GID {}; please "
            "check `buildinfo.json`".format(username, user.pw_gid, group_name, group.gr_gid))
def add_user(self, username, groupname):
UserManagement.validate_username(username)
if not self._manage_users:
return
# Check if the user already exists and exit.
try:
if not is_windows:
UserManagement.validate_user_group(username, groupname)
self._users.add(username)
return
        except KeyError:
            # User (or its required group membership) doesn't exist yet; fall through and create it.
            log.warning("User [%s:%s] does not exist yet", username, groupname)
# If we're not allowed to manage users, error
if not self._add_users:
raise ValidationError("User {} doesn't exist but is required by a DC/OS Component, and "
"automatic user addition is disabled".format(username))
log.info("Add the user")
add_user_cmd = [
'useradd',
'--system',
'--home-dir', '/opt/mesosphere',
'--shell', '/sbin/nologin',
'-c', 'DCOS System User',
]
# A group matching the username will be created by the adduser command.
# Any other group that the user is added to needs to exist prior to executing the
# adduser command.
if groupname is not None and groupname != username:
UserManagement.validate_group(groupname)
add_user_cmd += [
'-g', groupname
]
else:
add_user_cmd += [
'--user-group'
]
add_user_cmd += [username]
try:
log.debug(" ".join(add_user_cmd))
check_output(add_user_cmd)
self._users.add(username)
except CalledProcessError as ex:
raise ValidationError("User {} doesn't exist and couldn't be created because of: {}"
.format(username, ex.output))
def get_uid(self, username):
# Code should have already asserted all users exist, and be passing us
# a user we know about. This method only works for package users.
assert username in self._users
return pwd.getpwnam(username).pw_uid
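# A minimal call-pattern sketch (username hypothetical):
#   sysusers = UserManagement(manage_users=True, add_users=True)
#   sysusers.add_user("dcos_example", None)   # may run `useradd` on Linux
#   uid = sysusers.get_uid("dcos_example")    # only valid after add_user succeeded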
# A rooted install tree.
# Inside the install tree there will be all the well known folders and files as
# described in `docs/package_concepts.md`
class Install:
# TODO(cmaloney) This is way too many options for these call points. Most
# of these should be made so they can be removed (most are just for testing)
def __init__(
self,
root,
config_dir,
rooted_systemd,
manage_systemd,
block_systemd,
fake_path=False,
skip_systemd_dirs=False,
manage_users=False,
add_users=False,
manage_state_dir=False,
state_dir_root=STATE_DIR_ROOT
):
        assert isinstance(rooted_systemd, bool)
        assert isinstance(fake_path, bool)
self.__root = os.path.abspath(root)
self.__config_dir = os.path.abspath(config_dir) if config_dir else None
if rooted_systemd:
self.__systemd_dir = "{}/dcos.target.wants".format(root)
else:
self.__systemd_dir = "/etc/systemd/system/dcos.target.wants"
self.__manage_systemd = manage_systemd
self.__block_systemd = block_systemd
# Look up the machine roles
self.__roles = []
if self.__config_dir:
self.__roles = if_exists(os.listdir, os.path.join(self.__config_dir, "roles"))
if self.__roles is None:
self.__roles = []
self.__well_known_dirs = ["bin", "etc", "include", "lib"]
if not skip_systemd_dirs:
self.__well_known_dirs.append(self.__systemd_dir)
self.__fake_path = fake_path
self.__skip_systemd_dirs = skip_systemd_dirs
self.__manage_users = manage_users
self.__add_users = add_users
self.__manage_state_dir = manage_state_dir
assert not state_dir_root.endswith('/')
self.__state_dir_root = state_dir_root
self.systemd = Systemd(self._make_abs(self.__systemd_dir), self.__manage_systemd, self.__block_systemd)
def _get_dcos_configuration_template(self):
return {"sysctl": {}}
def get_active_dir(self):
return os.path.join(self.__root, "active")
def get_active(self):
"""the active folder has symlinks to all the active packages.
Return the full package ids (The targets of the symlinks)."""
active_dir = self.get_active_dir()
if not os.path.exists(active_dir):
if os.path.exists(active_dir + ".old") or os.path.exists(active_dir + ".new"):
raise InstallError(
("Broken past deploy. See {0}.new for what the (potentially incomplete) new state should be " +
"and optionally {0}.old if it exists for the complete previous state.").format(active_dir))
else:
raise InstallError(
"Install directory {0} has no active folder. Has it been bootstrapped?".format(self.__root))
ids = set()
for name in os.listdir(active_dir):
package_path = os.path.realpath(os.path.join(active_dir, name))
# NOTE: We don't validate the id here because we want to be able to
# cope if there is something invalid in the current active dir.
ids.add(os.path.basename(package_path))
return ids
def has_flag(self, name):
return os.path.exists(self.get_config_filename(name))
def get_config_filename(self, name):
return os.path.join(self.__config_dir, name)
def _make_abs(self, name):
return os.path.abspath(os.path.join(self.__root, name))
def get_active_names(self):
return list(map(
self._make_abs,
self.__well_known_dirs + [
"environment",
"environment.export",
"active",
"active.buildinfo.full.json"
]))
# Builds new working directories for the new active set, then swaps it into place as atomically as possible.
def activate(self, packages):
# Ensure the new set is reasonable.
validate_compatible(packages, self.__roles)
# Build the absolute paths for the running config, new config location,
# and where to archive the config.
active_names = self.get_active_names()
active_dirs = list(map(self._make_abs, self.__well_known_dirs + ["active"]))
new_names = [name + ".new" for name in active_names]
new_dirs = [name + ".new" for name in active_dirs]
old_names = [name + ".old" for name in active_names]
log.info("Remove all pre-existing new and old directories")
for name in chain(new_names, old_names):
if os.path.exists(name):
if os.path.isdir(name):
remove_directory(name)
else:
os.remove(name)
log.info("Remove unit files staged for an activation that didn't occur.")
if not self.__skip_systemd_dirs:
self.systemd.remove_staged_unit_files()
log.debug("Make the directories for the new config: " + ", ".join(new_dirs))
for name in new_dirs:
os.makedirs(name)
def symlink_all(src, dest):
if not os.path.isdir(src):
return
symlink_tree(src, dest)
log.info("Set the new LD_LIBRARY_PATH, PATH.")
env_contents = env_header.format("/opt/mesosphere" if self.__fake_path else self.__root)
env_export_contents = env_export_header.format("/opt/mesosphere" if self.__fake_path else self.__root)
active_buildinfo_full = {}
dcos_service_configuration = self._get_dcos_configuration_template()
log.info("Building up the set of users.")
sysusers = UserManagement(self.__manage_users, self.__add_users)
def _get_service_files(_dir):
service_files = []
for root, directories, filenames in os.walk(_dir):
for filename in filter(lambda name: name.endswith(".service"), filenames):
service_files.append(os.path.join(root, filename))
return service_files
def _get_service_names(_dir):
service_files = list(map(os.path.basename, _get_service_files(_dir)))
if not service_files:
return []
return list(map(lambda name: os.path.splitext(name)[0], service_files))
# Add the folders, config in each package.
for package in packages:
# Package folders
            # NOTE: Since "active" is at the end of the folder list, it is
            # dropped by the zip(). This is the desired behavior, since it is
            # populated later.
# Do the basename since some well known dirs are full paths (dcos.target.wants)
# while inside the packages they are always top level directories.
for new, dir_name in zip(new_dirs, self.__well_known_dirs):
dir_name = os.path.basename(dir_name)
pkg_dir = os.path.join(package.path, dir_name)
assert os.path.isabs(new)
assert os.path.isabs(pkg_dir)
try:
symlink_all(pkg_dir, new)
# Symlink all applicable role-based config
for role in self.__roles:
role_dir = os.path.join(package.path, "{0}_{1}".format(dir_name, role))
symlink_all(role_dir, new)
except ConflictingFile as ex:
raise ValidationError("Two packages are trying to install the same file {0} or "
"two roles in the set of roles {1} are causing a package "
"to try activating multiple versions of the same file. "
"One of the package files is {2}.".format(ex.dest,
self.__roles,
ex.src))
log.info("Add %s to the active folder", package.name)
os.symlink(package.path, os.path.join(self._make_abs("active.new"), package.name))
# Add to the environment and environment.export contents
env_contents += "# package: {0}\n".format(package.id)
env_export_contents += "# package: {0}\n".format(package.id)
for k, v in package.environment.items():
env_contents += "{0}={1}\n".format(k, v)
env_export_contents += "export {0}={1}\n".format(k, v)
env_contents += "\n"
env_export_contents += "\n"
# Add to the buildinfo
try:
active_buildinfo_full[package.name] = load_json(os.path.join(package.path, "buildinfo.full.json"))
except FileNotFoundError:
# TODO(cmaloney): These only come from setup-packages. Should update
# setup-packages to add a buildinfo.full for those packages
active_buildinfo_full[package.name] = None
# NOTE: It is critical the state dir, the package name and the user name are all the
            # same. Otherwise on upgrades we might remove access to files by changing their ownership
# to something incompatible. We survive the first upgrade because everything goes from
# root to specific users, and root can access all user files.
if package.username is not None:
sysusers.add_user(package.username, package.group)
# Ensure the state directory exists
# TODO(cmaloney): On upgrade take a snapshot?
if self.__manage_state_dir:
state_dir_path = self.__state_dir_root + '/' + package.name
if package.state_directory:
make_directory(state_dir_path)
if package.username and not is_windows:
uid = sysusers.get_uid(package.username)
check_call(['chown', '-R', str(uid), state_dir_path])
if package.sysctl:
service_names = _get_service_names(package.path)
if not service_names:
raise ValueError("service name required for sysctl could not be determined for {package}".format(
package=package.id))
for service in service_names:
if service in package.sysctl:
dcos_service_configuration["sysctl"][service] = package.sysctl[service]
log.info("Prepare new systemd units for activation.")
if not self.__skip_systemd_dirs:
new_wants_dir = self._make_abs(self.__systemd_dir + ".new")
if os.path.exists(new_wants_dir):
self.systemd.stage_new_units(new_wants_dir)
dcos_service_configuration_file = os.path.join(self._make_abs("etc.new"), DCOS_SERVICE_CONFIGURATION_FILE)
write_json(dcos_service_configuration_file, dcos_service_configuration)
log.info("Write out the new environment file.")
new_env = self._make_abs("environment.new")
write_string(new_env, env_contents)
log.info("Write out the new environment.export file")
new_env_export = self._make_abs("environment.export.new")
write_string(new_env_export, env_export_contents)
log.info("Write out the buildinfo of every active package")
new_buildinfo_meta = self._make_abs("active.buildinfo.full.json.new")
write_json(new_buildinfo_meta, active_buildinfo_full)
self.swap_active(".new")
def recover_swap_active(self):
state_filename = self._make_abs("install_progress")
if not os.path.exists(state_filename):
return False, "Path does not exist: {}".format(state_filename)
state = load_json(state_filename)
extension = state['extension']
stage = state['stage']
if stage == 'archive':
self.swap_active(extension, True)
elif stage == 'move_new':
self.swap_active(extension, False)
else:
raise ValueError("Unexpected state to recover from {}".format(state))
return True, ""
# Does an atomic(ish) upgrade swap with support for recovering if
# only part of the swap happens before a reboot.
# TODO(cmaloney): Implement recovery properly.
def swap_active(self, extension, archive=True):
active_names = self.get_active_names()
state_filename = self._make_abs("install_progress")
# Ensure all the new active files exist
for active in active_names:
if not os.path.exists(active + extension):
raise ValueError(
"Unable to swap active packages. Needed file {} doesn't exist.".format(active + extension))
# Record the state (atomically) on the filesystem so that if there is a
# hard/fast fail at any point the activate swap can continue.
def record_state(state):
log.info("Atomically write all the state to disk, swap into place.")
with open(state_filename + ".new", "w+") as f:
state['extension'] = extension
json.dump(state, f)
f.flush()
os.fsync(f.fileno())
os.replace(state_filename + ".new", state_filename)
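        # The recorded state file contains e.g. (illustrative):
        #   {"stage": "archive", "extension": ".new"}
        # and later {"stage": "move_new", "extension": ".new"}.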
if archive:
# TODO(cmaloney): stop all systemd services in dcos.target.wants
record_state({"stage": "archive"})
log.info("Stop all systemd services and clean up existing unit files.")
if not self.__skip_systemd_dirs:
self.systemd.stop_all()
self.systemd.remove_unit_files()
log.info("Archive the current config.")
for active in active_names:
old_path = active + ".old"
if os.path.exists(active):
shutil.move(active, old_path)
record_state({"stage": "move_new"})
# Move new / with extension into active.
# TODO(cmaloney): Capture any failures here and roll-back if possible.
# TODO(cmaloney): Alert for any failures here.
for active in active_names:
new_path = active + extension
shutil.move(new_path, active)
if not self.__skip_systemd_dirs:
self.systemd.activate_new_unit_files()
# All done with what we need to redo if host restarts.
os.remove(state_filename)
@property
def manage_systemd(self):
return self.__manage_systemd
@property
def systemd_dir(self):
return self.__systemd_dir
@property
def root(self):
return self.__root
|
ff9585c77eb17433605f6a6ab1575ac1f04c7618
|
d17a8870ff8ac77b82d0d37e20c85b23aa29ca74
|
/lite/tests/unittest_py/op/common/test_uniform_random_op_base.py
|
7a6981c22c2ec137052f82ba99df6301f64255fe
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle-Lite
|
4ab49144073451d38da6f085a8c56822caecd5b2
|
e241420f813bd91f5164f0d9ee0bc44166c0a172
|
refs/heads/develop
| 2023-09-02T05:28:14.017104
| 2023-09-01T10:32:39
| 2023-09-01T10:32:39
| 104,208,128
| 2,545
| 1,041
|
Apache-2.0
| 2023-09-12T06:46:10
| 2017-09-20T11:41:42
|
C++
|
UTF-8
|
Python
| false
| false
| 2,430
|
py
|
test_uniform_random_op_base.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
import hypothesis.strategies as st
def sample_program_configs(draw):
def generate_ShapeTensor():
return np.random.randint(1, 5, size=[4]).astype(np.int64)
shape_data = draw(
st.lists(
st.integers(
min_value=1, max_value=5), min_size=4, max_size=4))
min_data = draw(st.floats(min_value=-1, max_value=-1))
max_data = draw(st.floats(min_value=1, max_value=1))
seed_data = draw(st.integers(min_value=0, max_value=0))
dtype_data = draw(st.integers(min_value=5, max_value=5)) # out is float
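    # Note: dtype 5 corresponds to float32 in Paddle's VarType proto enum
    # (FP32 == 5), matching the "out is float" comment above.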
uniform_random_op = OpConfig(
type="uniform_random",
inputs={
"ShapeTensor": ["ShapeTensor_data"],
"ShapeTensorList": ["ShapeTensorList_data"]
},
outputs={"Out": ["output_data"]},
attrs={
"shape": shape_data,
"min": min_data,
"max": max_data,
"seed": seed_data,
"dtype": dtype_data,
            # Lite does not use these 3 attrs,
            # so they are left at their defaults.
"diag_num": 0,
"diag_step": 0,
"diag_val": 1.0,
})
program_config = ProgramConfig(
ops=[uniform_random_op],
weights={},
inputs={
"ShapeTensor_data":
TensorConfig(data_gen=partial(generate_ShapeTensor)),
"ShapeTensorList_data":
TensorConfig(data_gen=partial(generate_ShapeTensor))
},
outputs=["output_data"])
return program_config
|
c8d67f152deeb0f5d0b7e2313ae36285d7ffa4c6
|
557a5e8ac000718959281d1d31da8e1e4947a155
|
/tests/node_eval.py
|
48775d3a46b7e4f4500a4fc28e06cf3ba483328b
|
[
"MIT"
] |
permissive
|
PiotrDabkowski/Js2Py
|
66f20a58912d2df719ce5952d7fe046512717d4d
|
2e017b86e2f18a6c8a842293b1687f2ce7baa12e
|
refs/heads/master
| 2023-08-17T08:47:00.625508
| 2022-11-06T09:56:37
| 2022-11-06T10:12:00
| 24,736,750
| 2,419
| 318
|
MIT
| 2023-08-03T18:06:40
| 2014-10-02T21:08:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
node_eval.py
|
import subprocess
from tempfile import NamedTemporaryFile
import six
import re
import os
class NodeJsError(Exception):
pass
def node_eval_js(code):
ERR_MARKER = "<<<ERROR_MARKER>>>"
ERR_REGEXP = ERR_MARKER + r'([\s\S]*?)' + ERR_MARKER
interceptor_code = """
try {
var res = eval(%s);
console.log(res);
} catch (e) {
throw new Error(getErrMarker() + e + getErrMarker())
}
function getErrMarker() {
return %s;
}
""" % (repr(code), repr(ERR_MARKER))
f = NamedTemporaryFile(delete=False, suffix='.js')
f.write(interceptor_code.encode('utf-8') if six.PY3 else interceptor_code)
f.close()
p = subprocess.Popen(['node', f.name], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = map(lambda x: x.decode('utf-8') if six.PY3 else x, p.communicate())
os.unlink(f.name)
if not p.returncode:
return out
    # Try to extract a clean error message from the marker-fenced stderr.
match = re.search(ERR_REGEXP, err)
if match and len(match.groups()) == 1:
raise NodeJsError(match.groups()[0])
else:
raise NodeJsError(err)
if __name__ == '__main__':
print(node_eval_js('x = 5;x'))
|
cb75477fdb33c572e0630a64f6a6cbce3374b7e7
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-core/PyObjCTest/test_testsupport.py
|
f39ce0a7402b09f435b3d7679bd7ddd38f78c342
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 93,554
|
py
|
test_testsupport.py
|
import sys
import unittest
try:
import ctypes
except ImportError:
ctypes = None
import pickle
import typing
import enum
import objc
from PyObjCTools import TestSupport
from PyObjCTools.TestSupport import (
no_autorelease_pool,
pyobjc_options,
expectedFailure,
expectedFailureIf,
TestCase,
sdkForPython,
os_release,
skipUnless,
os_level_between,
max_os_level,
min_os_level,
min_sdk_level,
max_sdk_level,
min_python_release,
fourcc,
arch_only,
)
from unittest import SkipTest, mock
class Method:
def __init__(self, argno, meta, selector=False):
self._selector = selector
if argno is None:
self._meta = {"retval": meta}
else:
self._meta = {"arguments": {argno: meta}}
@property
def __class__(self):
if self._selector:
return objc.selector
else:
return Method
def __metadata__(self):
return self._meta.copy()
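# Method fakes the __metadata__() protocol inspected by TestSupport assertions.
# Illustrative: Method(3, {"c_array_delimited_by_null": True}).__metadata__()
#   -> {"arguments": {3: {"c_array_delimited_by_null": True}}}
# while Method(None, meta) files `meta` under "retval" instead, and
# selector=True makes isinstance(m, objc.selector) checks pass via __class__.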
class TestTestSupport(TestCase):
def test_pyobjc_options(self):
class Options:
pass
orig_options = objc.options
try:
objc.options = Options()
objc.options.opt1 = True
objc.options.opt2 = 1
self.assertIs(objc.options.opt1, True)
self.assertEqual(objc.options.opt2, 1)
with pyobjc_options(opt1=False):
self.assertIs(objc.options.opt1, False)
self.assertEqual(objc.options.opt2, 1)
self.assertIs(objc.options.opt1, True)
self.assertEqual(objc.options.opt2, 1)
with pyobjc_options(opt1=False, opt2=42):
self.assertIs(objc.options.opt1, False)
self.assertEqual(objc.options.opt2, 42)
self.assertIs(objc.options.opt1, True)
self.assertEqual(objc.options.opt2, 1)
with self.assertRaisesRegex(
AttributeError, "'Options' object has no attribute 'opt3'"
):
with pyobjc_options(opt1=False, opt2=42, opt3="a"):
pass
self.assertIs(objc.options.opt1, True)
self.assertEqual(objc.options.opt2, 1)
finally:
objc.options = orig_options
def test_expectedFailureIf(self):
def func(self):
pass
o = expectedFailureIf(True)
self.assertIs(o, expectedFailure)
o = expectedFailureIf(False)
self.assertIsNot(o, expectedFailure)
self.assertIs(func, o(func))
def test_arch_only(self):
@arch_only("foo")
def wrapped_function(self):
raise RuntimeError("test me")
with self.assertRaisesRegex(unittest.SkipTest, "foo only"):
wrapped_function()
orig = objc.arch
try:
objc.arch = "foo"
with self.assertRaisesRegex(RuntimeError, "test me"):
wrapped_function()
finally:
objc.arch = orig
def test_sdkForPython(self):
orig_get_config_var = TestSupport._get_config_var
try:
config_result = ""
def get_config_var(value):
if value != "CFLAGS":
raise KeyError(value)
return config_result
TestSupport._get_config_var = get_config_var
cache = sdkForPython.__defaults__[0]
config_result = ""
self.assertEqual(sdkForPython(), None)
self.assertEqual(cache, [None])
self.assertEqual(sdkForPython(), None)
self.assertEqual(cache, [None])
cache[:] = []
config_result = "-isysroot /Developer/SDKs/MacOSX10.6.sdk"
self.assertEqual(sdkForPython(), (10, 6))
self.assertEqual(cache, [(10, 6)])
self.assertEqual(sdkForPython(), (10, 6))
self.assertEqual(cache, [(10, 6)])
cache[:] = []
config_result = "-isysroot /"
os_rel = tuple(map(int, os_release().split(".")))
self.assertEqual(sdkForPython(), os_rel)
self.assertEqual(cache, [os_rel])
self.assertEqual(sdkForPython(), os_rel)
self.assertEqual(cache, [os_rel])
cache[:] = []
config_result = "-dynamic -isysroot /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.4u.sdk -arch i386 -arch x86_64" # noqa: B950
self.assertEqual(sdkForPython(), (10, 4))
self.assertEqual(cache, [(10, 4)])
self.assertEqual(sdkForPython(), (10, 4))
self.assertEqual(cache, [(10, 4)])
cache[:] = []
config_result = "-dynamic -isysroot /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.10.sdk -arch i386 -arch x86_64" # noqa: B950
self.assertEqual(sdkForPython(), (10, 10))
self.assertEqual(cache, [(10, 10)])
self.assertEqual(sdkForPython(), (10, 10))
self.assertEqual(cache, [(10, 10)])
cache[:] = []
finally:
TestSupport._get_config_var = orig_get_config_var
def test_os_release(self):
import subprocess
TestSupport._os_release = "10.10"
self.assertEqual(os_release(), "10.10")
TestSupport._os_release = None
value = subprocess.check_output(["sw_vers", "-productVersion"]).strip().decode()
self.assertEqual(TestSupport.os_release(), value)
def test_fourcc(self):
import struct
self.assertEqual(fourcc(b"abcd"), struct.unpack(">i", b"abcd")[0])
@skipUnless(ctypes is not None, "test requires ctypes")
def test_cast(self):
c_int = ctypes.c_int()
c_uint = ctypes.c_uint()
for v in (0, 1, sys.maxsize, sys.maxsize + 2, 1 << 31, -1, -10):
c_int.value = v
c_uint.value = v
self.assertEqual(c_int.value, TestSupport.cast_int(v))
self.assertEqual(c_uint.value, TestSupport.cast_uint(v))
c_longlong = ctypes.c_longlong()
c_ulonglong = ctypes.c_ulonglong()
for v in (0, 1, sys.maxsize, sys.maxsize + 2, 1 << 63, -1, -10):
c_longlong.value = v
c_ulonglong.value = v
self.assertEqual(c_longlong.value, TestSupport.cast_longlong(v))
self.assertEqual(c_ulonglong.value, TestSupport.cast_ulonglong(v))
def test_os_level_between(self):
orig_os_release = TestSupport.os_release
try:
TestSupport.os_release = lambda: "10.5"
@os_level_between("10.3", "10.4")
def func_false_1():
pass
@os_level_between("10.3", "10.5")
def func_true_1():
pass
@os_level_between("10.3", "10.8")
def func_true_2():
pass
@os_level_between("10.5", "10.3")
def func_false_2():
pass
@os_level_between("10.5", "10.5")
def func_true_3():
pass
@os_level_between("10.5", "10.8")
def func_true_4():
pass
@os_level_between("10.8", "10.3")
def func_false_3():
pass
@os_level_between("10.8", "10.5")
def func_false_4():
pass
@os_level_between("10.8", "10.8")
def func_false_5():
pass
with self.assertRaisesRegex(ValueError, "Invalid version"):
@os_level_between("11", "12.9")
def func_invalid1():
pass
with self.assertRaisesRegex(ValueError, "Invalid version"):
@os_level_between("10.0", "12")
def func_invalid2():
pass
for func_true in (func_true_1, func_true_2, func_true_3, func_true_4):
with self.subTest(func_true):
try:
func_true()
except TestSupport._unittest.SkipTest:
self.fail("Unexpected skip")
for func_false in (
func_false_1,
func_false_2,
func_false_3,
func_false_4,
func_false_5,
):
with self.subTest(func_false):
try:
func_false()
except TestSupport._unittest.SkipTest:
pass
else:
self.fail("Unexpected non-skip")
finally:
TestSupport.os_release = orig_os_release
def test_mxx_os_level(self):
orig_os_release = TestSupport.os_release
try:
TestSupport.os_release = lambda: "10.5"
@min_os_level("10.4")
def func_true_1():
pass
@min_os_level("10.5")
def func_true_2():
pass
@min_os_level("10.6")
def func_false_1():
pass
@max_os_level("10.5")
def func_true_3():
pass
@max_os_level("10.6")
def func_true_4():
pass
@max_os_level("10.4")
def func_false_2():
pass
with self.assertRaisesRegex(ValueError, "Invalid version"):
@max_os_level("11")
def func_invalid():
pass
for func_true in (func_true_1, func_true_2, func_true_3, func_true_4):
try:
func_true()
except TestSupport._unittest.SkipTest:
self.fail("Unexpected skip for python 2")
for func_false in (func_false_1, func_false_2):
try:
func_false()
except TestSupport._unittest.SkipTest:
pass
else:
self.fail("Unexpected non-skip for python 2")
finally:
TestSupport.os_release = orig_os_release
def test_mxx_sdklevel(self):
orig_build_release = objc.PyObjC_BUILD_RELEASE
try:
objc.PyObjC_BUILD_RELEASE = 1005
@min_sdk_level("10.4")
def func_true_1():
pass
@min_sdk_level("10.5")
def func_true_2():
pass
@min_sdk_level("10.6")
def func_false_1():
pass
@max_sdk_level("10.5")
def func_true_3():
pass
@max_sdk_level("10.6")
def func_true_4():
pass
@max_sdk_level("10.4")
def func_false_2():
pass
with self.assertRaisesRegex(ValueError, "Invalid version"):
@max_os_level("11")
def func_invalid():
pass
for func_true in (func_true_1, func_true_2, func_true_3, func_true_4):
try:
func_true()
except TestSupport._unittest.SkipTest:
self.fail("Unexpected skip for python 2")
for func_false in (func_false_1, func_false_2):
try:
func_false()
except TestSupport._unittest.SkipTest:
pass
else:
self.fail("Unexpected non-skip for python 2")
finally:
objc.PyObjC_BUILD_RELEASE = orig_build_release
def test_min_python_release(self):
@min_python_release("99.5")
def func1():
pass
with self.assertRaisesRegex(SkipTest, "Requires Python 99.5 or later"):
func1()
@min_python_release("2")
def func1():
pass
try:
func1()
except SkipTest:
self.fail("Unexpected skip")
@min_python_release("3.0")
def func1():
pass
try:
func1()
except SkipTest:
self.fail("Unexpected skip")
def testAssertIsSubclass(self):
self.assertIsSubclass(int, object)
self.assertIsSubclass(str, object)
self.assertIsSubclass(objc.objc_class, type)
with self.assertRaisesRegex(
self.failureException,
"<class 'objc.objc_class'> is not a subclass of <class objc.objc_object",
):
self.assertIsSubclass(
objc.objc_class,
objc.objc_object,
)
def testAssertIsNotSubclass(self):
self.assertIsNotSubclass(object, int)
with self.assertRaisesRegex(
self.failureException,
"<class objc.objc_object is a subclass of <class 'object'>",
):
self.assertIsNotSubclass(objc.objc_object, object)
    def testAssertIsInstance(self):
self.assertIsInstance(object(), object)
self.assertIsInstance(42, object)
self.assertIsInstance(42, (int, str))
with self.assertRaisesRegex(
self.failureException, "42 is not an instance of <class 'str'>"
):
self.assertIsInstance(42, str)
def test_assertStartswith(self):
with self.assertRaisesRegex(
self.failureException, "'foo' does not start with 'bar'"
):
self.assertStartswith("foo", "bar")
try:
self.assertStartswith("foobar", "foo")
except self.failureException:
self.fail("Unexpected assertion failure")
def test_assertManualBinding(self):
with self.assertRaisesRegex(self.failureException, ".*has automatic bindings"):
self.assertManualBinding(objc.lookUpClass("NSObject").alloc)
try:
self.assertManualBinding(dir)
except self.failureException:
self.fail("Unexpected assertion failure")
def test_assert_cftype(self):
with self.assertRaisesRegex(
self.failureException, "<class 'int'> is not a CFTypeRef type"
):
self.assertIsCFType(int)
with self.assertRaisesRegex(
self.failureException,
"<core-foundation class NSCFType at 0x[0-9a-f]+> is not a unique CFTypeRef type",
):
self.assertIsCFType(objc.lookUpClass("NSCFType"))
# 'assertIsCFType' primarily tests that a type is either tollfree bridged, or
# has a distinct type that is different from the default NSCFType 'placeholder' type.
# self.assertIsCFType(objc.lookUpClass('NSObject'))
with self.assertRaisesRegex(
self.failureException,
"<objective-c class NSObject at 0x[0-9a-f]+> is not a CFTypeRef type",
):
self.assertIsCFType(objc.lookUpClass("NSObject"))
class OC_OPAQUE_TEST_1(objc.lookUpClass("NSCFType")):
pass
try:
self.assertIsCFType(OC_OPAQUE_TEST_1)
except self.failureException:
self.fail("CFType subclass not recognized as CFType")
def test_assert_enumtype(self):
with self.assertRaisesRegex(
self.failureException, "<class 'int'> is not a typing.NewType"
):
self.assertIsEnumType(int)
with self.assertRaisesRegex(
self.failureException, ".* is not a typing.NewType based on 'int'"
):
self.assertIsEnumType(typing.NewType("SomeType", str))
try:
self.assertIsEnumType(typing.NewType("SomeType", int))
except self.failureException:
self.fail("assertIsEnumType unexpectedly failed")
def test_assert_typed_enum(self):
with self.assertRaisesRegex(
self.failureException, "<class 'int'> is not a typing.NewType"
):
self.assertIsTypedEnum(int, int)
with self.assertRaisesRegex(
self.failureException, ".* is not a typing.NewType based on 'int'"
):
self.assertIsTypedEnum(typing.NewType("SomeType", str), int)
with self.assertRaisesRegex(
self.failureException, ".* is not a typing.NewType based on 'str'"
):
self.assertIsTypedEnum(typing.NewType("SomeType", int), str)
try:
self.assertIsTypedEnum(typing.NewType("SomeType", str), str)
except self.failureException:
self.fail("assertIsEnumType unexpectedly failed")
try:
self.assertIsTypedEnum(typing.NewType("SomeType", float), float)
except self.failureException:
self.fail("assertIsEnumType unexpectedly failed")
def test_assert_opaque(self):
with self.assertRaisesRegex(
self.failureException, "<class 'int'> is not an opaque-pointer"
):
self.assertIsOpaquePointer(int)
class N:
@property
def __pointer__(self):
pass
with self.assertRaisesRegex(
self.failureException,
"<class 'PyObjCTest.test_testsupport.TestTestSupport.test_assert_opaque.<locals>.N'> is not an opaque-pointer",
):
self.assertIsOpaquePointer(N)
class N:
__typestr__ = b"^q"
with self.assertRaisesRegex(
self.failureException,
"<class 'PyObjCTest.test_testsupport.TestTestSupport.test_assert_opaque.<locals>.N'> is not an opaque-pointer",
):
self.assertIsOpaquePointer(N)
class N:
__typestr__ = b"^q"
@property
def __pointer__(self):
pass
try:
self.assertIsOpaquePointer(N)
except self.failureException:
self.fail("assertIsOpaque fails on opaque pointer type")
def test_assert_result_nullterminated(self):
m = Method(None, {"c_array_delimited_by_null": True})
self.assertResultIsNullTerminated(m)
m = Method(None, {"c_array_delimited_by_null": False})
with self.assertRaisesRegex(
self.failureException,
"result of <.*> is not a null-terminated array",
):
self.assertResultIsNullTerminated(m)
m = Method(None, {})
with self.assertRaisesRegex(
self.failureException,
"result of <.*> is not a null-terminated array",
):
self.assertResultIsNullTerminated(m)
def test_assert_arg_nullterminated(self):
m = Method(3, {"c_array_delimited_by_null": True}, selector=True)
self.assertArgIsNullTerminated(m, 1)
with self.assertRaisesRegex(
self.failureException,
"argument 0 of <.*> is not a null-terminated array",
):
self.assertArgIsNullTerminated(m, 0)
m = Method(3, {"c_array_delimited_by_null": False}, selector=True)
with self.assertRaisesRegex(
self.failureException,
"argument 1 of <.*> is not a null-terminated array",
):
self.assertArgIsNullTerminated(m, 1)
m = Method(3, {}, selector=True)
with self.assertRaisesRegex(
self.failureException,
"argument 1 of <.*> is not a null-terminated array",
):
self.assertArgIsNullTerminated(m, 1)
m = Method(3, {"c_array_delimited_by_null": True}, selector=False)
self.assertArgIsNullTerminated(m, 3)
with self.assertRaisesRegex(
self.failureException,
"argument 2 of <.*> is not a null-terminated array",
):
self.assertArgIsNullTerminated(m, 2)
m = Method(3, {"c_array_delimited_by_null": False}, selector=False)
with self.assertRaisesRegex(
self.failureException,
"argument 3 of <.*> is not a null-terminated array",
):
self.assertArgIsNullTerminated(m, 3)
m = Method(3, {}, selector=False)
with self.assertRaisesRegex(
self.failureException,
"argument 3 of <.*> is not a null-terminated array",
):
self.assertArgIsNullTerminated(m, 3)
def test_function_nullterminated(self):
m = Method(None, {}, selector=False)
m._meta.update({"variadic": True, "c_array_delimited_by_null": True})
self.assertIsNullTerminated(m)
m._meta["variadic"] = False
with self.assertRaisesRegex(
self.failureException,
"<.*> is not a variadic function with a null-terminated list of arguments",
):
self.assertIsNullTerminated(m)
m._meta["variadic"] = True
m._meta["c_array_delimited_by_null"] = False
with self.assertRaisesRegex(
self.failureException,
"<.*> is not a variadic function with a null-terminated list of arguments",
):
self.assertIsNullTerminated(m)
del m._meta["variadic"]
m._meta["c_array_delimited_by_null"] = True
with self.assertRaisesRegex(
self.failureException,
"<.*> is not a variadic function with a null-terminated list of arguments",
):
self.assertIsNullTerminated(m)
m = Method(None, {}, selector=True)
m._meta.update({"variadic": True, "c_array_delimited_by_null": True})
self.assertIsNullTerminated(m)
m._meta["variadic"] = False
with self.assertRaisesRegex(
self.failureException,
"<.*> is not a variadic function with a null-terminated list of arguments",
):
self.assertIsNullTerminated(m)
m._meta["variadic"] = True
m._meta["c_array_delimited_by_null"] = False
with self.assertRaisesRegex(
self.failureException,
"<.*> is not a variadic function with a null-terminated list of arguments",
):
self.assertIsNullTerminated(m)
del m._meta["variadic"]
m._meta["c_array_delimited_by_null"] = True
with self.assertRaisesRegex(
self.failureException,
"<.*> is not a variadic function with a null-terminated list of arguments",
):
self.assertIsNullTerminated(m)
def test_arg_variable_size(self):
m = Method(3, {"c_array_of_variable_length": True}, selector=True)
self.assertArgIsVariableSize(m, 1)
with self.assertRaisesRegex(
self.failureException,
"argument 0 of <.*> is not a variable sized array",
):
self.assertArgIsVariableSize(m, 0)
m = Method(3, {"c_array_of_variable_length": False}, selector=True)
with self.assertRaisesRegex(
self.failureException,
"argument 1 of <.*> is not a variable sized array",
):
self.assertArgIsVariableSize(m, 1)
m = Method(3, {"c_array_of_variable_length": True}, selector=False)
self.assertArgIsVariableSize(m, 3)
with self.assertRaisesRegex(
self.failureException,
"argument 1 of <.*> is not a variable sized array",
):
self.assertArgIsVariableSize(m, 1)
m = Method(3, {"c_array_of_variable_length": False}, selector=False)
with self.assertRaisesRegex(
self.failureException,
"argument 3 of <.*> is not a variable sized array",
):
self.assertArgIsVariableSize(m, 3)
    def test_result_variable_size(self):
m = Method(None, {"c_array_of_variable_length": True}, selector=True)
        self.assertResultIsVariableSize(m)
m = Method(None, {"c_array_of_variable_length": False}, selector=True)
with self.assertRaisesRegex(
self.failureException,
"result of <.*> is not a variable sized array",
):
self.assertResultIsVariableSize(m)
m = Method(None, {}, selector=True)
with self.assertRaisesRegex(
self.failureException,
"result of <.*> is not a variable sized array",
):
self.assertResultIsVariableSize(m)
def test_argsize_in_result(self):
m = Method(3, {"c_array_length_in_result": True}, selector=True)
self.assertArgSizeInResult(m, 1)
with self.assertRaisesRegex(
self.failureException, "argument 0 of .* does not have size in result"
):
self.assertArgSizeInResult(m, 0)
m = Method(3, {"c_array_length_in_result": False}, selector=True)
with self.assertRaisesRegex(
self.failureException, "argument 1 of .* does not have size in result"
):
self.assertArgSizeInResult(m, 1)
m = Method(3, {}, selector=True)
with self.assertRaisesRegex(
self.failureException, "argument 1 of .* does not have size in result"
):
self.assertArgSizeInResult(m, 1)
m = Method(3, {"c_array_length_in_result": True}, selector=False)
self.assertArgSizeInResult(m, 3)
with self.assertRaisesRegex(
self.failureException, "argument 2 of .* does not have size in result"
):
self.assertArgSizeInResult(m, 2)
m = Method(3, {"c_array_length_in_result": False}, selector=True)
with self.assertRaisesRegex(
self.failureException, "argument 3 of .* does not have size in result"
):
self.assertArgSizeInResult(m, 3)
m = Method(3, {}, selector=True)
with self.assertRaisesRegex(
self.failureException, "argument 3 of .* does not have size in result"
):
self.assertArgSizeInResult(m, 3)
def test_arg_printf(self):
m = Method(3, {"printf_format": True}, selector=True)
m._meta["variadic"] = True
self.assertArgIsPrintf(m, 1)
with self.assertRaisesRegex(
self.failureException, "<.*> argument 0 is not a printf format string"
):
self.assertArgIsPrintf(m, 0)
m._meta["variadic"] = False
with self.assertRaisesRegex(
self.failureException, "<.*> is not a variadic function"
):
self.assertArgIsPrintf(m, 1)
m._meta["variadic"] = True
m._meta["arguments"][3]["printf_format"] = False
with self.assertRaisesRegex(
self.failureException, "<.*> argument 1 is not a printf format string"
):
self.assertArgIsPrintf(m, 1)
m._meta["variadic"] = True
del m._meta["arguments"][3]["printf_format"]
with self.assertRaisesRegex(
self.failureException, "<.*> argument 1 is not a printf format string"
):
self.assertArgIsPrintf(m, 1)
m = Method(3, {"printf_format": True}, selector=False)
m._meta["variadic"] = True
self.assertArgIsPrintf(m, 3)
with self.assertRaisesRegex(
self.failureException, "<.*> argument 2 is not a printf format string"
):
self.assertArgIsPrintf(m, 2)
m._meta["variadic"] = False
with self.assertRaisesRegex(
self.failureException, "<.*> is not a variadic function"
):
self.assertArgIsPrintf(m, 3)
m._meta["variadic"] = True
m._meta["arguments"][3]["printf_format"] = False
with self.assertRaisesRegex(
self.failureException, "<.*> argument 3 is not a printf format string"
):
self.assertArgIsPrintf(m, 3)
m._meta["variadic"] = True
del m._meta["arguments"][3]["printf_format"]
with self.assertRaisesRegex(
self.failureException, "<.*> argument 3 is not a printf format string"
):
self.assertArgIsPrintf(m, 3)
def test_arg_cfretained(self):
m = Method(3, {"already_cfretained": True}, selector=True)
self.assertArgIsCFRetained(m, 1)
with self.assertRaisesRegex(
self.failureException, "Argument 0 of <.*> is not cfretained"
):
self.assertArgIsCFRetained(m, 0)
m = Method(3, {"already_cfretained": False}, selector=True)
with self.assertRaisesRegex(
self.failureException, "Argument 1 of <.*> is not cfretained"
):
self.assertArgIsCFRetained(m, 1)
m = Method(3, {}, selector=True)
with self.assertRaisesRegex(
self.failureException, "Argument 1 of <.*> is not cfretained"
):
self.assertArgIsCFRetained(m, 1)
m = Method(3, {"already_cfretained": True}, selector=False)
self.assertArgIsCFRetained(m, 3)
with self.assertRaisesRegex(
self.failureException, "Argument 2 of <.*> is not cfretained"
):
self.assertArgIsCFRetained(m, 2)
m = Method(3, {"already_cfretained": False}, selector=False)
with self.assertRaisesRegex(
self.failureException, "Argument 3 of <.*> is not cfretained"
):
self.assertArgIsCFRetained(m, 3)
m = Method(3, {}, selector=False)
with self.assertRaisesRegex(
self.failureException, "Argument 3 of <.*> is not cfretained"
):
self.assertArgIsCFRetained(m, 3)
def test_arg_not_cfretained(self):
m = Method(3, {"already_cfretained": True}, selector=True)
self.assertArgIsNotCFRetained(m, 0)
with self.assertRaisesRegex(
self.failureException, "Argument 1 of <.*> is cfretained"
):
self.assertArgIsNotCFRetained(m, 1)
m = Method(3, {"already_cfretained": False}, selector=True)
self.assertArgIsNotCFRetained(m, 1)
m = Method(3, {}, selector=True)
self.assertArgIsNotCFRetained(m, 1)
m = Method(3, {"already_cfretained": True}, selector=False)
self.assertArgIsCFRetained(m, 3)
self.assertArgIsNotCFRetained(m, 1)
m = Method(3, {"already_cfretained": False}, selector=False)
self.assertArgIsNotCFRetained(m, 1)
m = Method(3, {}, selector=False)
self.assertArgIsNotCFRetained(m, 1)
def test_result_cfretained(self):
m = Method(None, {"already_cfretained": True})
self.assertResultIsCFRetained(m)
m = Method(None, {"already_cfretained": False})
with self.assertRaisesRegex(self.failureException, "<.*> is not cfretained"):
self.assertResultIsCFRetained(m)
m = Method(None, {})
with self.assertRaisesRegex(self.failureException, "<.*> is not cfretained"):
self.assertResultIsCFRetained(m)
def test_result_not_cfretained(self):
m = Method(None, {"already_cfretained": True})
with self.assertRaisesRegex(self.failureException, "<.*> is cfretained"):
self.assertResultIsNotCFRetained(m)
m = Method(None, {"already_cfretained": False})
self.assertResultIsNotCFRetained(m)
m = Method(None, {})
self.assertResultIsNotCFRetained(m)
def test_arg_type(self):
m = Method(3, {"type": objc._C_DBL}, selector=True)
self.assertArgHasType(m, 1, objc._C_DBL)
with self.assertRaisesRegex(
self.failureException, r"arg 2 of <.*> has no metadata \(or doesn't exist\)"
):
self.assertArgHasType(m, 2, objc._C_ID)
with self.assertRaisesRegex(
self.failureException,
f"arg 1 of <.*> is not of type {objc._C_ID}, but {objc._C_DBL}",
):
self.assertArgHasType(m, 1, objc._C_ID)
m = Method(3, {}, selector=True)
self.assertArgHasType(m, 1, objc._C_ID)
m = Method(3, {"type": objc._C_LNG}, selector=True)
self.assertArgHasType(m, 1, objc._C_LNG)
self.assertArgHasType(m, 1, objc._C_LNG_LNG)
with self.assertRaisesRegex(
self.failureException,
f"arg 1 of <.*> is not of type {objc._C_ID}, but {objc._C_LNG}",
):
self.assertArgHasType(m, 1, objc._C_ID)
m = Method(3, {"type": objc._C_ULNG}, selector=True)
self.assertArgHasType(m, 1, objc._C_ULNG)
self.assertArgHasType(m, 1, objc._C_ULNG_LNG)
with self.assertRaisesRegex(
self.failureException,
f"arg 1 of <.*> is not of type {objc._C_ID}, but {objc._C_ULNG}",
):
self.assertArgHasType(m, 1, objc._C_ID)
m = Method(3, {"type": objc._C_LNG_LNG}, selector=True)
self.assertArgHasType(m, 1, objc._C_LNG)
self.assertArgHasType(m, 1, objc._C_LNG_LNG)
with self.assertRaisesRegex(
self.failureException,
f"arg 1 of <.*> is not of type {objc._C_ID}, but {objc._C_LNG_LNG}",
):
self.assertArgHasType(m, 1, objc._C_ID)
m = Method(3, {"type": objc._C_ULNG_LNG}, selector=True)
self.assertArgHasType(m, 1, objc._C_ULNG)
self.assertArgHasType(m, 1, objc._C_ULNG_LNG)
with self.assertRaisesRegex(
self.failureException,
f"arg 1 of <.*> is not of type {objc._C_ID}, but {objc._C_ULNG_LNG}",
):
self.assertArgHasType(m, 1, objc._C_ID)
m = Method(3, {"type": objc._C_DBL}, selector=False)
self.assertArgHasType(m, 3, objc._C_DBL)
with self.assertRaisesRegex(
self.failureException,
f"arg 3 of <.*> is not of type {objc._C_ID}, but {objc._C_DBL}",
):
self.assertArgHasType(m, 3, objc._C_ID)
with self.assertRaisesRegex(
self.failureException, r"arg 2 of <.*> has no metadata \(or doesn't exist\)"
):
self.assertArgHasType(m, 2, objc._C_ID)
m = Method(3, {}, selector=False)
self.assertArgHasType(m, 3, objc._C_ID)
m = Method(3, {"type": objc._C_LNG}, selector=False)
self.assertArgHasType(m, 3, objc._C_LNG)
self.assertArgHasType(m, 3, objc._C_LNG_LNG)
with self.assertRaisesRegex(
self.failureException,
f"arg 3 of <.*> is not of type {objc._C_ID}, but {objc._C_LNG}",
):
self.assertArgHasType(m, 3, objc._C_ID)
m = Method(3, {"type": objc._C_ULNG}, selector=False)
self.assertArgHasType(m, 3, objc._C_ULNG)
self.assertArgHasType(m, 3, objc._C_ULNG_LNG)
with self.assertRaisesRegex(
self.failureException,
f"arg 3 of <.*> is not of type {objc._C_ID}, but {objc._C_ULNG}",
):
self.assertArgHasType(m, 3, objc._C_ID)
m = Method(3, {"type": objc._C_LNG_LNG}, selector=False)
self.assertArgHasType(m, 3, objc._C_LNG)
self.assertArgHasType(m, 3, objc._C_LNG_LNG)
with self.assertRaisesRegex(
self.failureException,
f"arg 3 of <.*> is not of type {objc._C_ID}, but {objc._C_LNG_LNG}",
):
self.assertArgHasType(m, 3, objc._C_ID)
m = Method(3, {"type": objc._C_ULNG_LNG}, selector=False)
self.assertArgHasType(m, 3, objc._C_ULNG)
self.assertArgHasType(m, 3, objc._C_ULNG_LNG)
with self.assertRaisesRegex(
self.failureException,
f"arg 3 of <.*> is not of type {objc._C_ID}, but {objc._C_ULNG_LNG}",
):
self.assertArgHasType(m, 3, objc._C_ID)
def test_result_type(self):
m = Method(None, {})
self.assertResultHasType(m, objc._C_VOID)
with self.assertRaisesRegex(
self.failureException,
f"result of <.*> is not of type {objc._C_ID}, but {objc._C_VOID}",
):
self.assertResultHasType(m, objc._C_ID)
m = Method(None, {"type": objc._C_DBL})
self.assertResultHasType(m, objc._C_DBL)
with self.assertRaisesRegex(
self.failureException,
f"result of <.*> is not of type {objc._C_ID}, but {objc._C_DBL}",
):
self.assertResultHasType(m, objc._C_ID)
m = Method(None, {"type": objc._C_LNG}, selector=False)
self.assertResultHasType(m, objc._C_LNG)
self.assertResultHasType(m, objc._C_LNG_LNG)
with self.assertRaisesRegex(
self.failureException,
f"result of <.*> is not of type {objc._C_ID}, but {objc._C_LNG}",
):
self.assertResultHasType(m, objc._C_ID)
m = Method(None, {"type": objc._C_ULNG}, selector=False)
self.assertResultHasType(m, objc._C_ULNG)
self.assertResultHasType(m, objc._C_ULNG_LNG)
with self.assertRaisesRegex(
self.failureException,
f"result of <.*> is not of type {objc._C_ID}, but {objc._C_ULNG}",
):
self.assertResultHasType(m, objc._C_ID)
m = Method(None, {"type": objc._C_LNG_LNG}, selector=False)
self.assertResultHasType(m, objc._C_LNG)
self.assertResultHasType(m, objc._C_LNG_LNG)
with self.assertRaisesRegex(
self.failureException,
f"result of <.*> is not of type {objc._C_ID}, but {objc._C_LNG_LNG}",
):
self.assertResultHasType(m, objc._C_ID)
m = Method(None, {"type": objc._C_ULNG_LNG}, selector=False)
self.assertResultHasType(m, objc._C_ULNG)
self.assertResultHasType(m, objc._C_ULNG_LNG)
with self.assertRaisesRegex(
self.failureException,
f"result of <.*> is not of type {objc._C_ID}, but {objc._C_ULNG_LNG}",
):
self.assertResultHasType(m, objc._C_ID)
def test_arg_fixed_size(self):
m = Method(3, {"c_array_of_fixed_length": 42}, selector=True)
self.assertArgIsFixedSize(m, 1, 42)
with self.assertRaisesRegex(
self.failureException, "arg 0 of <.*> is not a C-array of length 42"
):
self.assertArgIsFixedSize(m, 0, 42)
with self.assertRaisesRegex(
self.failureException, "arg 1 of <.*> is not a C-array of length 3"
):
self.assertArgIsFixedSize(m, 1, 3)
m = Method(3, {}, selector=True)
with self.assertRaisesRegex(
self.failureException, "arg 1 of <.*> is not a C-array of length 3"
):
self.assertArgIsFixedSize(m, 1, 3)
m = Method(3, {"c_array_of_fixed_length": 42}, selector=False)
self.assertArgIsFixedSize(m, 3, 42)
with self.assertRaisesRegex(
self.failureException, "arg 2 of <.*> is not a C-array of length 42"
):
self.assertArgIsFixedSize(m, 2, 42)
with self.assertRaisesRegex(
self.failureException, "arg 3 of <.*> is not a C-array of length 3"
):
self.assertArgIsFixedSize(m, 3, 3)
m = Method(3, {}, selector=False)
with self.assertRaisesRegex(
self.failureException, "arg 3 of <.*> is not a C-array of length 3"
):
self.assertArgIsFixedSize(m, 3, 3)
def test_result_fixed_size(self):
m = Method(None, {"c_array_of_fixed_length": 42})
self.assertResultIsFixedSize(m, 42)
with self.assertRaisesRegex(
self.failureException, "result of <.*> is not a C-array of length 3"
):
self.assertResultIsFixedSize(m, 3)
m = Method(None, {}, selector=True)
with self.assertRaisesRegex(
self.failureException, "result of <.*> is not a C-array of length 3"
):
self.assertResultIsFixedSize(m, 3)
def test_arg_size_in_arg(self):
m = Method(3, {"c_array_length_in_arg": 4}, selector=True)
self.assertArgSizeInArg(m, 1, 2)
with self.assertRaisesRegex(
self.failureException,
"arg 1 of <.*> is not a C-array of with length in arg 3",
):
self.assertArgSizeInArg(m, 1, 3)
with self.assertRaisesRegex(
self.failureException,
"arg 0 of <.*> is not a C-array of with length in arg 3",
):
self.assertArgSizeInArg(m, 0, 3)
m = Method(3, {"c_array_length_in_arg": (2, 4)}, selector=True)
self.assertArgSizeInArg(m, 1, (0, 2))
with self.assertRaisesRegex(
self.failureException,
r"arg 1 of <.*> is not a C-array of with length in arg \(0, 3\)",
):
self.assertArgSizeInArg(m, 1, (0, 3))
with self.assertRaisesRegex(
self.failureException,
r"arg 0 of <.*> is not a C-array of with length in arg \(1, 2\)",
):
self.assertArgSizeInArg(m, 0, (1, 2))
m = Method(3, {"c_array_length_in_arg": 4}, selector=False)
self.assertArgSizeInArg(m, 3, 4)
with self.assertRaisesRegex(
self.failureException,
"arg 1 of <.*> is not a C-array of with length in arg 3",
):
self.assertArgSizeInArg(m, 1, 3)
with self.assertRaisesRegex(
self.failureException,
"arg 0 of <.*> is not a C-array of with length in arg 3",
):
self.assertArgSizeInArg(m, 0, 3)
m = Method(3, {"c_array_length_in_arg": (2, 4)}, selector=False)
self.assertArgSizeInArg(m, 3, (2, 4))
with self.assertRaisesRegex(
self.failureException,
r"arg 1 of <.*> is not a C-array of with length in arg \(2, 3\)",
):
self.assertArgSizeInArg(m, 1, (2, 3))
with self.assertRaisesRegex(
self.failureException,
r"arg 0 of <.*> is not a C-array of with length in arg \(2, 3\)",
):
self.assertArgSizeInArg(m, 0, (2, 3))
def test_result_ssize_in_arg(self):
m = Method(None, {"c_array_length_in_arg": 4}, selector=True)
self.assertResultSizeInArg(m, 2)
with self.assertRaisesRegex(
self.failureException,
"result <.*> is not a C-array of with length in arg 3",
):
self.assertResultSizeInArg(m, 3)
m = Method(None, {"c_array_length_in_arg": 4}, selector=False)
self.assertResultSizeInArg(m, 4)
with self.assertRaisesRegex(
self.failureException,
"result <.*> is not a C-array of with length in arg 3",
):
self.assertResultSizeInArg(m, 3)
def test_arg_retained(self):
m = Method(3, {"already_retained": True}, selector=True)
self.assertArgIsRetained(m, 1)
with self.assertRaisesRegex(
self.failureException, "Argument 0 of <.*> is not retained"
):
self.assertArgIsRetained(m, 0)
m = Method(3, {"already_retained": False}, selector=True)
with self.assertRaisesRegex(
self.failureException, "Argument 1 of <.*> is not retained"
):
self.assertArgIsRetained(m, 1)
m = Method(3, {}, selector=True)
with self.assertRaisesRegex(
self.failureException, "Argument 1 of <.*> is not retained"
):
self.assertArgIsRetained(m, 1)
m = Method(3, {"already_retained": True}, selector=False)
self.assertArgIsRetained(m, 3)
with self.assertRaisesRegex(
self.failureException, "Argument 2 of <.*> is not retained"
):
self.assertArgIsRetained(m, 2)
m = Method(3, {"already_retained": False}, selector=False)
with self.assertRaisesRegex(
self.failureException, "Argument 3 of <.*> is not retained"
):
self.assertArgIsRetained(m, 3)
m = Method(3, {}, selector=False)
with self.assertRaisesRegex(
self.failureException, "Argument 3 of <.*> is not retained"
):
self.assertArgIsRetained(m, 3)
def test_arg_not_retained(self):
m = Method(3, {"already_retained": True}, selector=True)
self.assertArgIsNotRetained(m, 0)
with self.assertRaisesRegex(
self.failureException, "Argument 1 of <.*> is retained"
):
self.assertArgIsNotRetained(m, 1)
m = Method(3, {"already_retained": False}, selector=True)
self.assertArgIsNotRetained(m, 1)
m = Method(3, {}, selector=True)
self.assertArgIsNotRetained(m, 1)
m = Method(3, {"already_retained": True}, selector=False)
self.assertArgIsRetained(m, 3)
self.assertArgIsNotRetained(m, 1)
m = Method(3, {"already_retained": False}, selector=False)
self.assertArgIsNotRetained(m, 1)
m = Method(3, {}, selector=False)
self.assertArgIsNotRetained(m, 1)
def test_result_retained(self):
m = Method(None, {"already_retained": True})
self.assertResultIsRetained(m)
m = Method(None, {"already_retained": False})
with self.assertRaisesRegex(
self.failureException, "Result of <.*> is not retained"
):
self.assertResultIsRetained(m)
m = Method(None, {})
with self.assertRaisesRegex(
self.failureException, "Result of <.*> is not retained"
):
self.assertResultIsRetained(m)
def test_result_not_retained(self):
m = Method(None, {"already_retained": True})
with self.assertRaisesRegex(
self.failureException, "Result of <.*> is retained"
):
self.assertResultIsNotRetained(m)
m = Method(None, {"already_retained": False})
self.assertResultIsNotRetained(m)
m = Method(None, {})
self.assertResultIsNotRetained(m)
def test_assert_arg_IN(self):
m = Method(3, {"type": b"n^@"})
try:
self.assertArgIsIn(m, 3)
except self.failureException:
            self.fail("test failure for input argument")
m = Method(3, {"type": b"n^@"}, selector=True)
try:
self.assertArgIsIn(m, 1)
except self.failureException:
self.fail("test failure for input argument")
m = Method(3, {"type": b"^@"})
try:
self.assertArgIsIn(m, 3)
except self.failureException:
pass
else:
self.fail("test pass for not-input argument")
m = Method(3, {"type": b"^@"}, selector=True)
try:
self.assertArgIsIn(m, 1)
except self.failureException:
pass
else:
self.fail("test pass for not-input argument")
def test_assert_arg_OUT(self):
m = Method(3, {"type": b"o^@"})
try:
self.assertArgIsOut(m, 3)
except self.failureException:
            self.fail("test failure for input argument")
m = Method(3, {"type": b"o^@"}, selector=True)
try:
self.assertArgIsOut(m, 1)
except self.failureException:
self.fail("test failure for input argument")
m = Method(3, {"type": b"^@"})
try:
self.assertArgIsOut(m, 3)
except self.failureException:
pass
else:
self.fail("test pass for not-input argument")
m = Method(3, {"type": b"^@"}, selector=True)
try:
self.assertArgIsOut(m, 1)
except self.failureException:
pass
else:
self.fail("test pass for not-input argument")
def test_assert_arg_INOUT(self):
m = Method(3, {"type": b"N^@"})
try:
self.assertArgIsInOut(m, 3)
except self.failureException:
            self.fail("test failure for input argument")
m = Method(3, {"type": b"N^@"}, selector=True)
try:
self.assertArgIsInOut(m, 1)
except self.failureException:
self.fail("test failure for input argument")
m = Method(3, {"type": b"^@"})
try:
self.assertArgIsInOut(m, 3)
except self.failureException:
pass
else:
self.fail("test pass for not-input argument")
m = Method(3, {"type": b"^@"}, selector=True)
try:
self.assertArgIsInOut(m, 1)
except self.failureException:
pass
else:
self.fail("test pass for not-input argument")
def test_arg_bool(self):
for tp in (objc._C_NSBOOL, objc._C_BOOL):
with self.subTest(encoding=tp):
m = Method(3, {"type": tp})
try:
self.assertArgIsBOOL(m, 3)
except self.failureException:
                    self.fail("unexpected test failure")
m = Method(3, {"type": tp}, selector=True)
try:
self.assertArgIsBOOL(m, 1)
except self.failureException:
self.fail("unexpected test failure")
m = Method(3, {"type": b"@"})
try:
self.assertArgIsBOOL(m, 3)
except self.failureException:
pass
else:
self.fail("unexpected test pass")
m = Method(3, {"type": b"@"}, selector=True)
try:
self.assertArgIsBOOL(m, 1)
except self.failureException:
pass
else:
self.fail("unexpected test pass")
def test_assertHasAttr(self):
with self.assertRaisesRegex(self.failureException, "foo"):
self.assertHasAttr(object, "foo")
try:
self.assertHasAttr(self, "assertHasAttr")
        except self.failureException:
self.fail("Unexpected assertion failure")
def test_assertNotHasAttr(self):
with self.assertRaisesRegex(
self.failureException, "assertHasAttr is an attribute of <.*>"
):
self.assertNotHasAttr(self, "assertHasAttr")
try:
self.assertNotHasAttr(object, "foo")
        except self.failureException:
self.fail("Unexpected assertion failure")
def test_ClassIsFinal(self):
        class FinalTestClass(objc.lookUpClass("NSObject"), final=True):
__objc_final__ = True
try:
            self.assertClassIsFinal(FinalTestClass)
except self.failureException:
self.fail("Unexpected assertion failure")
with self.assertRaisesRegex(self.failureException, ".*is not a final class"):
self.assertClassIsFinal(objc.lookUpClass("NSObject"))
with self.assertRaisesRegex(
self.failureException, ".*is not an Objective-C class"
):
self.assertClassIsFinal(type(self))
    def test_assertProtocolExists(self):
objc.protocolNamed("NSObject")
try:
objc.protocolNamed("FooBar")
except objc.error:
pass
else:
self.fail("Have FooBar protocol")
with self.assertRaisesRegex(
self.failureException, "Protocol 'FooBar' does not exist"
):
self.assertProtocolExists("FooBar")
try:
self.assertProtocolExists("NSObject")
except self.failureException:
self.fail("Unexpected test failure")
orig = objc.protocolNamed
try:
objc.protocolNamed = lambda name: name
with self.assertRaisesRegex(
self.failureException, "Protocol 'FooBar' is not a protocol, but.*"
):
self.assertProtocolExists("FooBar")
finally:
objc.protocolNamed = orig
try:
objc.protocolNamed("FooBar")
except objc.error:
pass
else:
self.fail("Have FooBar protocol")
def test_assertPickleRoundTrips(self):
try:
self.assertPickleRoundTrips(42)
except self.failureException:
self.fail("Unexpected assertion error")
class NoPickle:
def __getstate__(self):
raise RuntimeError("go away")
with self.assertRaises((pickle.PickleError, RuntimeError)):
pickle.dumps(NoPickle())
with self.assertRaisesRegex(self.failureException, ".* cannot be pickled"):
self.assertPickleRoundTrips(NoPickle())
class UnpickledAsInt:
def __reduce__(self):
return (int, (42,))
o = UnpickledAsInt()
self.assertEqual(pickle.loads(pickle.dumps(o)), 42)
with self.assertRaisesRegex(
self.failureException, "42 != <PyObjCTest.test_testsupport.*>"
):
self.assertPickleRoundTrips(o)
class NotEqual:
def __eq__(self, other):
return False
o = NotEqual()
self.assertNotEqual(o, o)
with self.assertRaisesRegex(self.failureException, "<.*> cannot be pickled"):
self.assertPickleRoundTrips(o)
def test_result_is_sel(self):
for is_selector in (True, False):
m = Method(
None,
{"type": objc._C_SEL, "sel_of_type": b"v@:@"},
selector=is_selector,
)
self.assertResultIsSEL(m, b"v@:@")
with self.assertRaisesRegex(
self.failureException,
"result of <.*> doesn't have sel_type b'v@:d' but b'v@:@'",
):
self.assertResultIsSEL(m, b"v@:d")
m = Method(None, {"type": objc._C_INT}, selector=is_selector)
with self.assertRaisesRegex(
self.failureException, "result of <.*> is not of type SEL"
):
self.assertResultIsSEL(m, b"v@:@")
m = Method(None, {"type": objc._C_SEL}, selector=is_selector)
with self.assertRaisesRegex(
self.failureException,
"result of <.*> doesn't have sel_type b'v@:@' but None",
):
self.assertResultIsSEL(m, b"v@:@")
with self.assertRaisesRegex(
self.failureException,
"result of <.*> doesn't have sel_type b'v@:@' but None",
):
self.assertResultIsSEL(m, b"v@:@")
class M:
def __metadata__(self):
return {}
with self.assertRaisesRegex(
self.failureException, r"result.*has no metadata \(or doesn't exist\)"
):
self.assertResultIsSEL(M(), b"v@:@")
def test_arg_is_sel(self):
m = Method(3, {"type": objc._C_SEL, "sel_of_type": b"v@:@"}, selector=True)
self.assertArgIsSEL(m, 1, b"v@:@")
with self.assertRaisesRegex(
self.failureException, r"arg 2 of <.*> has no metadata \(or doesn't exist\)"
):
self.assertArgIsSEL(m, 2, b"v@:@")
with self.assertRaisesRegex(
self.failureException,
"arg 1 of <.*> doesn't have sel_type b'v@:' but b'v@:@'",
):
self.assertArgIsSEL(m, 1, b"v@:")
m = Method(3, {"type": objc._C_SEL}, selector=True)
with self.assertRaisesRegex(
self.failureException, "arg 1 of <.*> doesn't have sel_type b'v@:' but None"
):
self.assertArgIsSEL(m, 1, b"v@:")
m = Method(3, {"type": objc._C_ID, "sel_of_type": b"v@:@"}, selector=True)
with self.assertRaisesRegex(
self.failureException, "arg 1 of <.*> is not of type SEL"
):
self.assertArgIsSEL(m, 1, b"v@:@")
m = Method(3, {"type": objc._C_SEL, "sel_of_type": b"v@:@"}, selector=False)
self.assertArgIsSEL(m, 3, b"v@:@")
with self.assertRaisesRegex(
self.failureException, r"arg 2 of <.*> has no metadata \(or doesn't exist\)"
):
self.assertArgIsSEL(m, 2, b"v@:@")
with self.assertRaisesRegex(
self.failureException,
"arg 3 of <.*> doesn't have sel_type b'v@:' but b'v@:@'",
):
self.assertArgIsSEL(m, 3, b"v@:")
m = Method(3, {"type": objc._C_SEL}, selector=False)
with self.assertRaisesRegex(
self.failureException, "arg 3 of <.*> doesn't have sel_type b'v@:' but None"
):
self.assertArgIsSEL(m, 3, b"v@:")
m = Method(3, {"type": objc._C_ID, "sel_of_type": b"v@:@"}, selector=False)
with self.assertRaisesRegex(
self.failureException, "arg 3 of <.*> is not of type SEL"
):
self.assertArgIsSEL(m, 3, b"v@:@")
def test_arg_is_function(self):
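        # The "callable" metadata used below describes a function pointer:
        # a "retval" type plus an "arguments" list, which the assertion
        # compares against a flat signature string such as b"i@d"
        # (int return value, id and double arguments).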
m = Method(
3,
{
"type": b"^?",
"callable": {
"retval": {"type": objc._C_INT},
"arguments": [{"type": objc._C_ID}, {"type": objc._C_DBL}],
},
},
selector=True,
)
self.assertArgIsFunction(m, 1, b"i@d", False)
with self.assertRaisesRegex(
self.failureException, r"arg 0 of <.*> has no metadata \(or doesn't exist\)"
):
self.assertArgIsFunction(m, 0, "v", False)
with self.assertRaisesRegex(
self.failureException,
"arg 1 of <.*> is not a function_pointer with type 'i@b', but b'i@d'",
):
self.assertArgIsFunction(m, 1, "i@b", False)
with self.assertRaisesRegex(
self.failureException,
"arg 1 of <.*> is not a function_pointer with type 'i@d', but b'i@d'",
):
self.assertArgIsFunction(m, 1, "i@d", True)
m = Method(
3,
{
"type": b"^?",
"callable": {
"retval": {"type": objc._C_INT},
"arguments": [{"type": objc._C_ID}, {"type": objc._C_DBL}],
},
"callable_retained": True,
},
selector=True,
)
self.assertArgIsFunction(m, 1, b"i@d", True)
with self.assertRaisesRegex(
self.failureException, "arg 1 of <.*>; retained: True, expected: False"
):
self.assertArgIsFunction(m, 1, b"i@d", False)
m = Method(3, {"type": b"?", "callable": {}}, selector=True)
with self.assertRaisesRegex(
self.failureException, "arg 1 of <.*> is not of type function_pointer"
):
self.assertArgIsFunction(m, 1, "v", False)
m = Method(3, {"type": b"^?"}, selector=True)
with self.assertRaisesRegex(
self.failureException, "arg 1 of <.*> is not of type function_pointer"
):
self.assertArgIsFunction(m, 1, "v", False)
m = Method(3, {"type": b"^?", "callable": {}}, selector=True)
with self.assertRaisesRegex(
self.failureException,
"arg 1 of <.*> is a function pointer with incomplete type information",
):
self.assertArgIsFunction(m, 1, "v", False)
m = Method(
3,
{"type": b"^?", "callable": {"retval": {"type": objc._C_VOID}}},
selector=True,
)
with self.assertRaisesRegex(
self.failureException,
"arg 1 of <.*> is a function pointer with incomplete type information",
):
self.assertArgIsFunction(m, 1, "v", False)
m = Method(
3,
{
"type": b"^?",
"callable": {
"retval": {"type": objc._C_INT},
"arguments": [{"type": objc._C_ID}, {"type": objc._C_DBL}],
},
},
selector=False,
)
self.assertArgIsFunction(m, 3, b"i@d", False)
with self.assertRaisesRegex(
self.failureException, r"arg 2 of <.*> has no metadata \(or doesn't exist\)"
):
self.assertArgIsFunction(m, 2, "v", False)
with self.assertRaisesRegex(
self.failureException,
"arg 3 of <.*> is not a function_pointer with type b'i@b', but b'i@d'",
):
self.assertArgIsFunction(m, 3, b"i@b", False)
with self.assertRaisesRegex(
self.failureException,
"arg 3 of <.*> is not a function_pointer with type 'i@d', but b'i@d'",
):
self.assertArgIsFunction(m, 3, "i@d", True)
m = Method(
3,
{
"type": b"^?",
"callable": {
"retval": {"type": objc._C_INT},
"arguments": [{"type": objc._C_ID}, {"type": objc._C_DBL}],
},
"callable_retained": True,
},
selector=False,
)
self.assertArgIsFunction(m, 3, b"i@d", True)
m = Method(3, {"type": b"?", "callable": {}}, selector=False)
with self.assertRaisesRegex(
self.failureException, "arg 3 of <.*> is not of type function_pointer"
):
self.assertArgIsFunction(m, 3, "v", False)
m = Method(3, {"type": b"^?"}, selector=True)
with self.assertRaisesRegex(
self.failureException, r"arg 3 of <.*> has no metadata \(or doesn't exist\)"
):
self.assertArgIsFunction(m, 3, "v", False)
m = Method(3, {"type": b"^?", "callable": {}}, selector=False)
with self.assertRaisesRegex(
self.failureException,
"arg 3 of <.*> is a function pointer with incomplete type information",
):
self.assertArgIsFunction(m, 3, "v", False)
m = Method(
3,
{"type": b"^?", "callable": {"retval": {"type": objc._C_VOID}}},
selector=False,
)
with self.assertRaisesRegex(
self.failureException,
"arg 3 of <.*> is a function pointer with incomplete type information",
):
self.assertArgIsFunction(m, 3, "v", False)
def test_result_is_function(self):
m = Method(
None,
{
"type": b"^?",
"callable": {
"retval": {"type": objc._C_INT},
"arguments": [{"type": objc._C_ID}, {"type": objc._C_DBL}],
},
},
selector=True,
)
self.assertResultIsFunction(m, b"i@d")
with self.assertRaisesRegex(
self.failureException,
"result of <.*> is not a function_pointer with type 'i@b', but b'i@d'",
):
self.assertResultIsFunction(m, "i@b")
m = Method(1, {})
with self.assertRaisesRegex(
self.failureException,
r"result of <.*> has no metadata \(or doesn't exist\)",
):
self.assertResultIsFunction(m, "i@b")
m = Method(None, {"type": b"?", "callable": {}}, selector=True)
with self.assertRaisesRegex(
self.failureException, "result of <.*> is not of type function_pointer"
):
self.assertResultIsFunction(m, "v")
m = Method(None, {"type": b"^?"}, selector=True)
with self.assertRaisesRegex(
self.failureException, "result of <.*> is not of type function_pointer"
):
self.assertResultIsFunction(m, "v")
m = Method(None, {"type": b"^?", "callable": {}}, selector=True)
with self.assertRaisesRegex(
self.failureException,
"result of <.*> is a function pointer with incomplete type information",
):
self.assertResultIsFunction(m, "v")
m = Method(
None,
{"type": b"^?", "callable": {"retval": {"type": objc._C_VOID}}},
selector=True,
)
with self.assertRaisesRegex(
self.failureException,
r"result of <.*> is a function pointer with incomplete type information",
):
self.assertResultIsFunction(m, "v")
def test_arg_is_block(self):
m = Method(
3,
{
"type": b"@?",
"callable": {
"retval": {"type": objc._C_INT},
"arguments": [
{"type": b"^v"},
{"type": objc._C_ID},
{"type": objc._C_DBL},
],
},
},
selector=True,
)
self.assertArgIsBlock(m, 1, b"i@d")
with self.assertRaisesRegex(
self.failureException, "arg 0 of <.*> does not exist"
):
self.assertArgIsBlock(m, 0, "v")
with self.assertRaisesRegex(
self.failureException,
"arg 1 of <.*> is not a block with type 'i@b', but b'i@d'",
):
self.assertArgIsBlock(m, 1, "i@b")
m = Method(
3,
{
"type": b"@?",
"callable": {
"retval": {"type": objc._C_INT},
"arguments": [{"type": objc._C_ID}, {"type": objc._C_DBL}],
},
"callable_retained": True,
},
selector=True,
)
with self.assertRaisesRegex(
self.failureException,
"arg 1 of <.*> has an invalid block signature b'@' for argument 0",
):
self.assertArgIsBlock(m, 1, "v")
m = Method(3, {"type": b"?", "callable": {}}, selector=True)
with self.assertRaisesRegex(
self.failureException, "arg 1 of <.*> is not of type block: b'?'"
):
self.assertArgIsBlock(m, 1, "v")
m = Method(3, {"type": b"@?"}, selector=True)
with self.assertRaisesRegex(
self.failureException, "arg 1 of <.*> is not of type block: no callable"
):
self.assertArgIsBlock(m, 1, "v")
m = Method(3, {"type": b"@?", "callable": {}}, selector=True)
with self.assertRaisesRegex(
self.failureException,
"result of <.*> is a block pointer with incomplete type information",
):
self.assertArgIsBlock(m, 1, "v")
m = Method(
3,
{"type": b"@?", "callable": {"retval": {"type": objc._C_VOID}}},
selector=True,
)
with self.assertRaisesRegex(
self.failureException,
"result of <.*> is a block pointer with incomplete type information",
):
self.assertArgIsBlock(m, 1, "v")
m = Method(
3,
{
"type": b"@?",
"callable": {
"retval": {"type": objc._C_INT},
"arguments": [
{"type": b"^v"},
{"type": objc._C_ID},
{"type": objc._C_DBL},
],
},
},
selector=False,
)
self.assertArgIsBlock(m, 3, b"i@d")
with self.assertRaisesRegex(
self.failureException, "arg 2 of <.*> does not exist"
):
self.assertArgIsBlock(m, 2, "v")
with self.assertRaisesRegex(
self.failureException,
"arg 3 of <.*> is not a block with type b'i@b', but b'i@d'",
):
self.assertArgIsBlock(m, 3, b"i@b")
m = Method(
3,
{
"type": b"@?",
"callable": {
"retval": {"type": objc._C_INT},
"arguments": [{"type": objc._C_ID}, {"type": objc._C_DBL}],
},
"callable_retained": True,
},
selector=False,
)
with self.assertRaisesRegex(
self.failureException,
"arg 3 of <.*> has an invalid block signature b'@' for argument 0",
):
self.assertArgIsBlock(m, 3, "v")
m = Method(3, {"type": b"?", "callable": {}}, selector=False)
with self.assertRaisesRegex(
self.failureException, "arg 3 of <.*> is not of type block: b'?'"
):
self.assertArgIsBlock(m, 3, "v")
m = Method(3, {"type": b"@?"}, selector=True)
with self.assertRaisesRegex(
self.failureException, "arg 3 of <.*> does not exist"
):
self.assertArgIsBlock(m, 3, "v")
m = Method(3, {"type": b"@?", "callable": {}}, selector=False)
with self.assertRaisesRegex(
self.failureException,
"result of <.*> is a block pointer with incomplete type information",
):
self.assertArgIsBlock(m, 3, "v")
m = Method(
3,
{"type": b"@?", "callable": {"retval": {"type": objc._C_VOID}}},
selector=False,
)
with self.assertRaisesRegex(
self.failureException,
"result of <.*> is a block pointer with incomplete type information",
):
self.assertArgIsBlock(m, 3, "v")
def test_result_is_block(self):
m = Method(
None,
{
"type": b"@?",
"callable": {
"retval": {"type": objc._C_INT},
"arguments": [
{"type": b"^v"},
{"type": objc._C_ID},
{"type": objc._C_DBL},
],
},
},
selector=True,
)
self.assertResultIsBlock(m, b"i@d")
with self.assertRaisesRegex(
self.failureException,
"result of <.*> is not a block with type b'i@b', but b'i@d'",
):
self.assertResultIsBlock(m, b"i@b")
m = Method(
None,
{
"type": b"@?",
"callable": {
"retval": {"type": objc._C_INT},
"arguments": [{"type": objc._C_ID}, {"type": objc._C_DBL}],
},
"callable_retained": True,
},
selector=True,
)
with self.assertRaisesRegex(
self.failureException,
"result <.*> has an invalid block signature b'@' for argument 0",
):
self.assertResultIsBlock(m, "v")
m = Method(3, {})
with self.assertRaisesRegex(
self.failureException, "result of <.*> is not of type block: b'v'"
):
self.assertResultIsBlock(m, "v")
m = Method(None, {"type": b"?", "callable": {}}, selector=True)
with self.assertRaisesRegex(
self.failureException, "result of <.*> is not of type block: b'?'"
):
self.assertResultIsBlock(m, "v")
m = Method(None, {"type": b"@?"}, selector=True)
with self.assertRaisesRegex(
self.failureException,
"result of <.*> is not of type block: no callable specified",
):
self.assertResultIsBlock(m, "v")
m = Method(None, {"type": b"@?", "callable": {}}, selector=True)
with self.assertRaisesRegex(
self.failureException,
"result of <.*> is a block pointer with incomplete type information",
):
self.assertResultIsBlock(m, "v")
m = Method(
None,
{"type": b"@?", "callable": {"retval": {"type": objc._C_VOID}}},
selector=True,
)
with self.assertRaisesRegex(
self.failureException,
"result of <.*> is a block pointer with incomplete type information",
):
self.assertResultIsBlock(m, "v")
def test_result_bool(self):
for tp in (objc._C_NSBOOL, objc._C_BOOL):
with self.subTest(encoding=tp):
m = Method(None, {"type": tp})
self.assertResultIsBOOL(m)
m = Method(None, {"type": tp}, selector=True)
self.assertResultIsBOOL(m)
m = Method(None, {"type": b"@"})
with self.assertRaisesRegex(
self.failureException,
f"result of <.*> is not of type BOOL, but {objc._C_ID}",
):
self.assertResultIsBOOL(m)
m = Method(None, {"type": b"@"}, selector=True)
with self.assertRaisesRegex(
self.failureException,
f"result of <.*> is not of type BOOL, but {objc._C_ID}",
):
self.assertResultIsBOOL(m)
def test_arg_idlike(self):
for tp in (b"@", b"^@", b"n^@", b"o^@", b"N^@"):
with self.subTest(encoding=tp):
m = Method(3, {"type": tp})
try:
self.assertArgIsIDLike(m, 3)
except self.failureException:
self.fail("Unexpctedly tested as not id-like")
for tp in (b"@", b"^@", b"n^@", b"o^@", b"N^@"):
with self.subTest(encoding=tp):
m = Method(3, {"type": tp}, selector=True)
try:
self.assertArgIsIDLike(m, 1)
except self.failureException:
self.fail("Unexpctedly tested as not id-like")
for tp in (b"^{__CFPython=}",):
with self.subTest(encoding=tp, registered=False):
try:
m = Method(3, {"type": tp})
self.assertArgIsIDLike(m, 3)
except self.failureException:
pass
else:
self.fail("Unexpectedly tested as id-like")
try:
m = Method(3, {"type": b"^" + tp})
self.assertArgIsIDLike(m, 3)
except self.failureException:
pass
else:
self.fail("Unexpectedly tested as id-like")
for pfx in (b"", b"^", b"n^", b"N^", b"o^"):
with self.subTest(encoding=tp, msg="mocked _idSignatures", pfx=pfx):
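                    # Patch objc._idSignatures so this encoding is reported
                    # as id-like, then restore it and clear TestSupport's
                    # cache so later tests see the real behaviour again.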
orig = objc._idSignatures
objc._idSignatures = lambda tp=tp: [tp]
try:
try:
m = Method(3, {"type": pfx + tp})
self.assertArgIsIDLike(m, 3)
except self.failureException:
self.fail("Unexpectedly tested as not id-like")
try:
m = Method(3, {"type": pfx + tp})
self.assertArgIsIDLike(m, 3)
except self.failureException:
self.fail("Unexpectedly tested as not id-like")
finally:
objc._idSignatures = orig
if tp in TestSupport._idlike_cache:
TestSupport._idlike_cache.remove(tp)
try:
m = Method(3, {"type": b"d"})
self.assertArgIsIDLike(m, 3)
except self.failureException:
pass
else:
self.fail("Unexpectedly tested as id-like")
def test_result_idlike(self):
for tp in (b"@", b"^@", b"n^@", b"o^@", b"N^@"):
with self.subTest(encoding=tp):
m = Method(None, {"type": tp})
try:
self.assertResultIsIDLike(m)
except self.failureException:
self.fail("Unexpctedly tested as not id-like")
for tp in (b"^{__CFPython=}",):
with self.subTest(encoding=tp, registered=False):
try:
m = Method(None, {"type": tp})
self.assertResultIsIDLike(m)
except self.failureException:
pass
else:
self.fail("Unexpectedly tested as id-like")
try:
m = Method(None, {"type": b"^" + tp})
self.assertResultIsIDLike(m)
except self.failureException:
pass
else:
self.fail("Unexpectedly tested as id-like")
for pfx in (b"", b"^", b"n^", b"N^", b"o^"):
with self.subTest(encoding=tp, msg="mocked _idSignatures", pfx=pfx):
orig = objc._idSignatures
objc._idSignatures = lambda tp=tp: [tp]
try:
try:
m = Method(None, {"type": pfx + tp})
self.assertResultIsIDLike(m)
except self.failureException:
self.fail("Unexpectedly tested as not id-like")
try:
m = Method(None, {"type": pfx + tp})
self.assertResultIsIDLike(m)
except self.failureException:
self.fail("Unexpectedly tested as not id-like")
finally:
objc._idSignatures = orig
if tp in TestSupport._idlike_cache:
TestSupport._idlike_cache.remove(tp)
try:
m = Method(None, {"type": b"d"})
self.assertResultIsIDLike(m)
except self.failureException:
pass
else:
self.fail("Unexpectedly tested as id-like")
def test_validate_callable_metadata(self):
class Function:
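            # Minimal stand-in exposing __metadata__ so the metadata
            # validator can run without a real Objective-C callable; the
            # metadata dict overrides either the retval (argno is None)
            # or the selected argument.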
def __init__(self, argno, metadata):
self._meta = {
"retval": {"type": b"@"},
"arguments": [
{"type": b"@"},
{"type": b"@"},
],
}
if metadata is not None:
if argno is None:
self._meta["retval"] = metadata
else:
self._meta["arguments"][argno] = metadata
def __metadata__(self):
return self._meta
for idx in (None, 1):
with self.subTest(f"{idx}: nothing special"):
try:
func = Function(
idx,
None,
)
self._validateCallableMetadata(func)
except self.failureException:
self.fail("Unexpected test failure")
with self.subTest(f"{idx}: _C_CHARPTR"):
with self.assertRaisesRegex(
self.failureException,
r"'char\*'",
):
func = Function(
idx,
{
"type": objc._C_CHARPTR,
},
)
self._validateCallableMetadata(func)
with self.subTest(f"{idx}: _C_CHARPTR (skip check)"):
try:
func = Function(
idx,
{
"type": objc._C_CHARPTR,
},
)
self._validateCallableMetadata(func, skip_simple_charptr_check=True)
except self.failureException:
self.fail("Unexpected test failure")
with self.subTest(f"{idx}: _C_PTR + _C_CHR"):
with self.assertRaisesRegex(
self.failureException,
r"'char\*'",
):
func = Function(
idx,
{"type": objc._C_PTR + objc._C_CHR},
)
self._validateCallableMetadata(func)
with self.subTest(f"{idx}: _C_PTR + _C_CHR (skip check)"):
try:
func = Function(
idx,
{"type": objc._C_PTR + objc._C_CHR},
)
self._validateCallableMetadata(func, skip_simple_charptr_check=True)
except self.failureException as exc:
if "pointer argument, but no by-ref annotation" not in str(exc):
self.fail("Unexpected test failure")
with self.subTest(f"{idx}: null-delimited _C_CHARPTR"):
with self.assertRaisesRegex(
self.failureException,
r"null-delimited 'char\*', use _C_CHAR_AS_TEXT instead",
):
func = Function(
idx,
{"type": objc._C_CHARPTR, "c_array_delimited_by_null": True},
)
self._validateCallableMetadata(func)
with self.subTest(f"{idx}: null-delimited _C_PTR + _C_CHR"):
with self.assertRaisesRegex(
self.failureException,
r"null-delimited 'char\*', use _C_CHAR_AS_TEXT instead",
):
func = Function(
idx,
{
"type": objc._C_PTR + objc._C_CHR,
"c_array_delimited_by_null": True,
},
)
self._validateCallableMetadata(func)
with self.subTest(f"{idx}: null-delimited _C_IN + _C_PTR + _C_CHR"):
with self.assertRaisesRegex(
self.failureException,
r"null-delimited 'char\*', use _C_CHAR_AS_TEXT instead",
):
func = Function(
idx,
{
"type": objc._C_IN + objc._C_PTR + objc._C_CHR,
"c_array_delimited_by_null": True,
},
)
self._validateCallableMetadata(func)
with self.subTest(f"{idx}: size arg ok (int)"):
try:
func = Function(
idx,
{
"type": objc._C_IN + objc._C_PTR + objc._C_INT,
"c_array_size_in_arg": 0,
},
)
self._validateCallableMetadata(func)
except self.failureException:
self.fail("Unexpected failure")
with self.subTest(f"{idx}: size arg out of range (int)"):
with self.assertRaisesRegex(
self.failureException, r"c_array_size_in_arg out of range 10 "
):
func = Function(
idx,
{"type": objc._C_PTR + objc._C_INT, "c_array_size_in_arg": 10},
)
self._validateCallableMetadata(func)
with self.subTest(f"{idx}: size arg tuple ok"):
try:
func = Function(
idx,
{
"type": objc._C_IN + objc._C_PTR + objc._C_INT,
"c_array_size_in_arg": (0, 1),
},
)
self._validateCallableMetadata(func)
except self.failureException:
self.fail("Unexpected failure")
with self.subTest(f"{idx}: size arg out of range (tuple[0])"):
with self.assertRaisesRegex(
self.failureException, r"c_array_size_in_arg out of range 10 "
):
func = Function(
idx,
{
"type": objc._C_PTR + objc._C_INT,
"c_array_size_in_arg": (10, 1),
},
)
self._validateCallableMetadata(func)
with self.subTest(f"{idx}: size arg out of range (tuple[1])"):
with self.assertRaisesRegex(
self.failureException, r"c_array_size_in_arg out of range 10 "
):
func = Function(
idx,
{
"type": objc._C_PTR + objc._C_INT,
"c_array_size_in_arg": (1, 10),
},
)
self._validateCallableMetadata(func)
for pfx in (objc._C_IN, objc._C_OUT, objc._C_INOUT):
with self.subTest(f"{idx}: by-ref specifier {pfx} on int"):
with self.assertRaisesRegex(
self.failureException, r"byref specifier on non-pointer"
):
func = Function(idx, {"type": pfx + objc._C_INT})
self._validateCallableMetadata(func)
with self.subTest(f"{idx}: by-ref specifier {pfx} on empty struct"):
with self.assertRaisesRegex(
self.failureException, r"byref to empty struct"
):
func = Function(idx, {"type": pfx + b"^{_CFSomeThing=}"})
self._validateCallableMetadata(func)
with self.subTest(f"{idx}: by-ref for int pointer"):
try:
func = Function(idx, {"type": pfx + objc._C_PTR + objc._C_INT})
self._validateCallableMetadata(func)
except self.failureException:
self.fail("Unexpected failure")
with self.subTest(f"{idx}: by-ref for struct pointer"):
try:
func = Function(idx, {"type": pfx + objc._C_PTR + b"{size=dd}"})
self._validateCallableMetadata(func)
except self.failureException:
self.fail("Unexpected failure")
def test_assert_callable_metadata(self):
class Mod:
pass
try:
m = Mod()
m.Object = objc.lookUpClass("Object")
m.EnumType = enum.Enum
except objc.error:
            # "Object" root class is no longer present
            self.fail("Missing object class")
else:
with self.subTest("Object is igored"):
try:
self.assertCallableMetadataIsSane(m, exclude_cocoa=False)
except self.failureException:
self.fail("Unexpected failure")
self.assertEqual(m.Object.__name__, "Object")
self.assertIsInstance(m.Object, objc.objc_class)
with self.subTest("validate is called for class and instance methods"):
NSObject = objc.lookUpClass("NSObject")
with mock.patch(
"PyObjCTools.TestSupport.TestCase._validateCallableMetadata"
) as fn:
m = Mod()
m.Constant = 42
m.NSObject = NSObject
try:
self.assertCallableMetadataIsSane(m, exclude_cocoa=False)
except self.failureException:
self.fail("Unexpected failure")
if 0:
fn.assert_any_call(
NSObject.pyobjc_instanceMethods.description,
"NSObject",
skip_simple_charptr_check=True,
)
fn.assert_any_call(
NSObject.pyobjc_classMethods.description,
"NSObject",
skip_simple_charptr_check=True,
)
with self.subTest("validate instance variables are not checked"):
NSObject = objc.lookUpClass("NSObject")
class MyClassForValidating(NSObject):
someVar = objc.ivar()
def mymethod(self):
pass
self.assertIsInstance(
MyClassForValidating.pyobjc_instanceMethods.mymethod, objc.selector
)
self.assertIn("mymethod", dir(MyClassForValidating.pyobjc_instanceMethods))
# Also mock the Cocoa package to avoid classes ending up there as well
# XXX: Need to check if the actual usage of the API is safe in this respect as well!
Cocoa = Mod()
Cocoa.NSObject = objc.lookUpClass("NSObject")
Cocoa.NSArray = objc.lookUpClass("NSArray")
if "Cocoa" in sys.modules:
orig_Cocoa = sys.modules["Cocoa"]
else:
orig_Cocoa = None
sys.modules["Cocoa"] = Cocoa
try:
with mock.patch(
"PyObjCTools.TestSupport.TestCase._validateCallableMetadata"
) as fn:
m = Mod()
m.Constant = 42
m.MyClassForValidating = MyClassForValidating
m.NSArray = objc.lookUpClass("NSArray")
try:
self.assertCallableMetadataIsSane(m, exclude_cocoa=True)
except self.failureException:
self.fail("Unexpected failure")
finally:
if orig_Cocoa is None:
del sys.modules["Cocoa"]
else:
sys.modules["Cocoa"] = orig_Cocoa
fn.assert_any_call(
MyClassForValidating.mymethod,
"MyClassForValidating",
skip_simple_charptr_check=False,
)
for entry in fn.call_args_list:
self.assertNotIsInstance(entry.args[0], objc.ivar)
self.assertNotEqual(
entry.args[0],
objc.lookUpClass("NSArray").pyobjc_instanceMethods.initWithArray_,
)
with self.subTest("function"):
class Function:
pass
aFunction = Function()
orig_function = objc.function
objc.function = Function
try:
with mock.patch(
"PyObjCTools.TestSupport.TestCase._validateCallableMetadata"
) as fn:
m = Mod()
m.function = aFunction
try:
self.assertCallableMetadataIsSane(m, exclude_cocoa=True)
except self.failureException:
self.fail("Unexpected failure")
finally:
objc.function = orig_function
fn.assert_any_call(aFunction)
with self.subTest("ignored entries"):
class Function:
pass
aFunction = Function()
orig_function = objc.function
objc.function = Function
try:
with mock.patch(
"PyObjCTools.TestSupport.TestCase._validateCallableMetadata"
) as fn:
m = Mod()
m.function = aFunction
m.NSObject = NSObject
try:
self.assertCallableMetadataIsSane(
m,
exclude_cocoa=False,
exclude_attrs=[
"function",
("NSObject", "description"),
],
)
except self.failureException:
self.fail("Unexpected failure")
finally:
objc.function = orig_function
fn.assert_any_call(
NSObject.pyobjc_classMethods.new,
"NSObject",
skip_simple_charptr_check=True,
)
for entry in fn.call_args_list:
self.assertIsNot(entry.args[0], aFunction)
self.assertIsNot(
entry.args[0], NSObject.pyobjc_instanceMethods.description
)
self.assertIsNot(
entry.args[0], NSObject.pyobjc_classMethods.description
)
@no_autorelease_pool
def test_without_pool(self):
self.assertIs(self._skip_usepool, True)
def test_with_pool(self):
self.assertIs(self._skip_usepool, False)
def test_running(self):
orig_use = TestSupport._usepool
orig_class = TestSupport._poolclass
orig_run = TestSupport._unittest.TestCase.run
allocs = [0]
NSObject = objc.lookUpClass("NSObject")
self.assertIsNot(NSObject, None)
class PoolClass:
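            # Stand-in pool class that counts alloc/init calls, letting the
            # test observe whether TestCase.run created an autorelease pool.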
def init(self):
allocs[0] += 1
@classmethod
def alloc(cls):
return cls()
TestSupport._unittest.TestCase.run = lambda self: None
try:
TestSupport._poolclass = PoolClass
TestSupport._usepool = True
self.assertEqual(allocs, [0])
TestCase.run(self)
self.assertEqual(allocs, [1])
TestSupport._usepool = False
self.assertEqual(allocs, [1])
TestCase.run(self)
self.assertEqual(allocs, [1])
finally:
TestSupport._usepool = orig_use
TestSupport._poolclass = orig_class
TestSupport._unittest.TestCase.run = orig_run
def run(self, *args, **kwds):
unittest.TestCase.run(self, *args, **kwds)
# --- home-assistant/core (Apache-2.0): /homeassistant/components/sky_hub/__init__.py ---
"""The sky_hub component."""
# --- geerk/django_linter (MIT): /tests/input/func_transformers_factories.py ---
"""
Check transforms for factories
"""
import unittest
import factory
from django.db import models
from .models import Category
class Product(models.Model):
name = models.CharField(max_length=33)
def __unicode__(self):
return self.name
class ProductFactory(factory.DjangoModelFactory):
class Meta:
model = Product
class ProductFactoryTestCase(unittest.TestCase):
def test_product_factory(self):
product = ProductFactory()
self.assertEqual(product.id, 1)
self.assertEqual(product.name, 'test_name')
self.assertEqual(product.foobar, 'foobar')
class CategoryFactory(factory.DjangoModelFactory):
class Meta:
model = Category
class CategoryFactoryTestCase(unittest.TestCase):
def test_category_factory(self):
category = CategoryFactory()
self.assertEqual(category.id, 1)
self.assertEqual(category.foobar, 'foobar')
# --- JetBrains/intellij-community (Apache-2.0): /plugins/hg4idea/testData/bin/mercurial/bundlecaches.py ---
# bundlecaches.py - utility to deal with pre-computed bundle for servers
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from .i18n import _
from .thirdparty import attr
from . import (
error,
requirements as requirementsmod,
sslutil,
util,
)
from .utils import stringutil
urlreq = util.urlreq
CB_MANIFEST_FILE = b'clonebundles.manifest'
@attr.s
class bundlespec(object):
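    # Parsed form of a bundle specification string: ``compression`` and
    # ``version`` are the human-readable names, ``wirecompression`` and
    # ``wireversion`` the corresponding on-the-wire identifiers.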
compression = attr.ib()
wirecompression = attr.ib()
version = attr.ib()
wireversion = attr.ib()
params = attr.ib()
contentopts = attr.ib()
# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {
b'v1': b'01',
b'v2': b'02',
b'packed1': b's1',
b'bundle2': b'02', # legacy
}
# Maps bundle version with content opts to choose which part to bundle
_bundlespeccontentopts = {
b'v1': {
b'changegroup': True,
b'cg.version': b'01',
b'obsolescence': False,
b'phases': False,
b'tagsfnodescache': False,
b'revbranchcache': False,
},
b'v2': {
b'changegroup': True,
b'cg.version': b'02',
b'obsolescence': False,
b'phases': False,
b'tagsfnodescache': True,
b'revbranchcache': True,
},
b'packed1': {b'cg.version': b's1'},
}
_bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
_bundlespecvariants = {
b"streamv2": {
b"changegroup": False,
b"streamv2": True,
b"tagsfnodescache": False,
b"revbranchcache": False,
}
}
# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
def parsebundlespec(repo, spec, strict=True):
"""Parse a bundle string specification into parts.
Bundle specifications denote a well-defined bundle/exchange format.
The content of a given specification should not change over time in
order to ensure that bundles produced by a newer version of Mercurial are
readable from an older version.
The string currently has the form:
<compression>-<type>[;<parameter0>[;<parameter1>]]
Where <compression> is one of the supported compression formats
and <type> is (currently) a version string. A ";" can follow the type and
all text afterwards is interpreted as URI encoded, ";" delimited key=value
pairs.
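    For example, ``none-packed1;requirements%3Drevlogv1`` (an illustrative
    spec) names an uncompressed packed1 stream whose single parameter
    URI-decodes to ``requirements=revlogv1``.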
If ``strict`` is True (the default) <compression> is required. Otherwise,
it is optional.
Returns a bundlespec object of (compression, version, parameters).
Compression will be ``None`` if not in strict mode and a compression isn't
defined.
An ``InvalidBundleSpecification`` is raised when the specification is
not syntactically well formed.
An ``UnsupportedBundleSpecification`` is raised when the compression or
bundle type/version is not recognized.
Note: this function will likely eventually return a more complex data
structure, including bundle2 part information.
"""
def parseparams(s):
if b';' not in s:
return s, {}
params = {}
version, paramstr = s.split(b';', 1)
for p in paramstr.split(b';'):
if b'=' not in p:
raise error.InvalidBundleSpecification(
_(
b'invalid bundle specification: '
b'missing "=" in parameter: %s'
)
% p
)
key, value = p.split(b'=', 1)
key = urlreq.unquote(key)
value = urlreq.unquote(value)
params[key] = value
return version, params
if strict and b'-' not in spec:
raise error.InvalidBundleSpecification(
_(
b'invalid bundle specification; '
b'must be prefixed with compression: %s'
)
% spec
)
if b'-' in spec:
compression, version = spec.split(b'-', 1)
if compression not in util.compengines.supportedbundlenames:
raise error.UnsupportedBundleSpecification(
_(b'%s compression is not supported') % compression
)
version, params = parseparams(version)
if version not in _bundlespeccgversions:
raise error.UnsupportedBundleSpecification(
_(b'%s is not a recognized bundle version') % version
)
else:
# Value could be just the compression or just the version, in which
# case some defaults are assumed (but only when not in strict mode).
assert not strict
spec, params = parseparams(spec)
if spec in util.compengines.supportedbundlenames:
compression = spec
version = b'v1'
# Generaldelta repos require v2.
if requirementsmod.GENERALDELTA_REQUIREMENT in repo.requirements:
version = b'v2'
elif requirementsmod.REVLOGV2_REQUIREMENT in repo.requirements:
version = b'v2'
# Modern compression engines require v2.
if compression not in _bundlespecv1compengines:
version = b'v2'
elif spec in _bundlespeccgversions:
if spec == b'packed1':
compression = b'none'
else:
compression = b'bzip2'
version = spec
else:
raise error.UnsupportedBundleSpecification(
_(b'%s is not a recognized bundle specification') % spec
)
# Bundle version 1 only supports a known set of compression engines.
if version == b'v1' and compression not in _bundlespecv1compengines:
raise error.UnsupportedBundleSpecification(
_(b'compression engine %s is not supported on v1 bundles')
% compression
)
# The specification for packed1 can optionally declare the data formats
# required to apply it. If we see this metadata, compare against what the
# repo supports and error if the bundle isn't compatible.
if version == b'packed1' and b'requirements' in params:
requirements = set(params[b'requirements'].split(b','))
missingreqs = requirements - repo.supportedformats
if missingreqs:
raise error.UnsupportedBundleSpecification(
_(b'missing support for repository features: %s')
% b', '.join(sorted(missingreqs))
)
# Compute contentopts based on the version
contentopts = _bundlespeccontentopts.get(version, {}).copy()
# Process the variants
if b"stream" in params and params[b"stream"] == b"v2":
variant = _bundlespecvariants[b"streamv2"]
contentopts.update(variant)
engine = util.compengines.forbundlename(compression)
compression, wirecompression = engine.bundletype()
wireversion = _bundlespeccgversions[version]
return bundlespec(
compression, wirecompression, version, wireversion, params, contentopts
)
def parseclonebundlesmanifest(repo, s):
"""Parses the raw text of a clone bundles manifest.
Returns a list of dicts. The dicts have a ``URL`` key corresponding
to the URL and other keys are the attributes for the entry.
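    Each manifest line has the form ``URL key=value ...``; an illustrative
    entry (hypothetical URL and values, not taken from this source) would be
    ``https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true``.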
"""
m = []
for line in s.splitlines():
fields = line.split()
if not fields:
continue
attrs = {b'URL': fields[0]}
for rawattr in fields[1:]:
key, value = rawattr.split(b'=', 1)
key = util.urlreq.unquote(key)
value = util.urlreq.unquote(value)
attrs[key] = value
# Parse BUNDLESPEC into components. This makes client-side
# preferences easier to specify since you can prefer a single
# component of the BUNDLESPEC.
if key == b'BUNDLESPEC':
try:
bundlespec = parsebundlespec(repo, value)
attrs[b'COMPRESSION'] = bundlespec.compression
attrs[b'VERSION'] = bundlespec.version
except error.InvalidBundleSpecification:
pass
except error.UnsupportedBundleSpecification:
pass
m.append(attrs)
return m
def isstreamclonespec(bundlespec):
# Stream clone v1
if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
return True
# Stream clone v2
if (
bundlespec.wirecompression == b'UN'
and bundlespec.wireversion == b'02'
and bundlespec.contentopts.get(b'streamv2')
):
return True
return False
def filterclonebundleentries(repo, entries, streamclonerequested=False):
"""Remove incompatible clone bundle manifest entries.
Accepts a list of entries parsed with ``parseclonebundlesmanifest``
and returns a new list consisting of only the entries that this client
should be able to apply.
There is no guarantee we'll be able to apply all returned entries because
the metadata we use to filter on may be missing or wrong.
"""
newentries = []
for entry in entries:
spec = entry.get(b'BUNDLESPEC')
if spec:
try:
bundlespec = parsebundlespec(repo, spec, strict=True)
# If a stream clone was requested, filter out non-streamclone
# entries.
if streamclonerequested and not isstreamclonespec(bundlespec):
repo.ui.debug(
b'filtering %s because not a stream clone\n'
% entry[b'URL']
)
continue
except error.InvalidBundleSpecification as e:
repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
continue
except error.UnsupportedBundleSpecification as e:
repo.ui.debug(
b'filtering %s because unsupported bundle '
b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
)
continue
# If we don't have a spec and requested a stream clone, we don't know
# what the entry is so don't attempt to apply it.
elif streamclonerequested:
repo.ui.debug(
b'filtering %s because cannot determine if a stream '
b'clone bundle\n' % entry[b'URL']
)
continue
if b'REQUIRESNI' in entry and not sslutil.hassni:
repo.ui.debug(
b'filtering %s because SNI not supported\n' % entry[b'URL']
)
continue
if b'REQUIREDRAM' in entry:
try:
requiredram = util.sizetoint(entry[b'REQUIREDRAM'])
except error.ParseError:
repo.ui.debug(
b'filtering %s due to a bad REQUIREDRAM attribute\n'
% entry[b'URL']
)
continue
actualram = repo.ui.estimatememory()
if actualram is not None and actualram * 0.66 < requiredram:
repo.ui.debug(
b'filtering %s as it needs more than 2/3 of system memory\n'
% entry[b'URL']
)
continue
newentries.append(entry)
return newentries
class clonebundleentry(object):
"""Represents an item in a clone bundles manifest.
This rich class is needed to support sorting since sorted() in Python 3
doesn't support ``cmp`` and our comparison is complex enough that ``key=``
won't work.
"""
def __init__(self, value, prefers):
self.value = value
self.prefers = prefers
def _cmp(self, other):
for prefkey, prefvalue in self.prefers:
avalue = self.value.get(prefkey)
bvalue = other.value.get(prefkey)
# Special case for b missing attribute and a matches exactly.
if avalue is not None and bvalue is None and avalue == prefvalue:
return -1
# Special case for a missing attribute and b matches exactly.
if bvalue is not None and avalue is None and bvalue == prefvalue:
return 1
# We can't compare unless attribute present on both.
if avalue is None or bvalue is None:
continue
# Same values should fall back to next attribute.
if avalue == bvalue:
continue
# Exact matches come first.
if avalue == prefvalue:
return -1
if bvalue == prefvalue:
return 1
# Fall back to next attribute.
continue
# If we got here we couldn't sort by attributes and prefers. Fall
# back to index order.
return 0
def __lt__(self, other):
return self._cmp(other) < 0
def __gt__(self, other):
return self._cmp(other) > 0
def __eq__(self, other):
return self._cmp(other) == 0
def __le__(self, other):
return self._cmp(other) <= 0
def __ge__(self, other):
return self._cmp(other) >= 0
def __ne__(self, other):
return self._cmp(other) != 0
def sortclonebundleentries(ui, entries):
prefers = ui.configlist(b'ui', b'clonebundleprefers')
if not prefers:
return list(entries)
def _split(p):
if b'=' not in p:
hint = _(b"each comma separated item should be key=value pairs")
raise error.Abort(
_(b"invalid ui.clonebundleprefers item: %s") % p, hint=hint
)
return p.split(b'=', 1)
prefers = [_split(p) for p in prefers]
items = sorted(clonebundleentry(v, prefers) for v in entries)
return [i.value for i in items]
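# A minimal standalone sketch (not part of Mercurial) showing the same
# preference ordering expressed with functools.cmp_to_key over plain dict
# entries; the helper name _demo_sortentries is hypothetical. It mirrors the
# comparison implemented by clonebundleentry._cmp above.
import functools
def _demo_sortentries(entries, prefers):
    def _cmp(a, b):
        for key, value in prefers:
            avalue, bvalue = a.get(key), b.get(key)
            # An exact match on one side with the attribute missing on the
            # other wins outright, as in clonebundleentry._cmp.
            if avalue is not None and bvalue is None and avalue == value:
                return -1
            if bvalue is not None and avalue is None and bvalue == value:
                return 1
            # Otherwise only compare when both sides carry the attribute and
            # they differ; equal values fall through to the next preference.
            if avalue is None or bvalue is None or avalue == bvalue:
                continue
            if avalue == value:
                return -1
            if bvalue == value:
                return 1
        return 0
    return sorted(entries, key=functools.cmp_to_key(_cmp))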
|
c54079035ea4a0e9063b6c73ad99a59fee0d5352
|
e2ed3d5cf080cd5b8b6f4dd05470b290aed165c3
|
/experimental_code/tileset_generation/main.py
|
e7ab41da72c4e692915ba28a173e759f00ab30f0
|
[] |
no_license
|
IJDykeman/wangTiles
|
641c128d5c494b475bc1ed1a0717db5d90810bb9
|
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
|
refs/heads/master
| 2022-01-20T07:27:26.895920
| 2022-01-10T16:37:02
| 2022-01-10T16:37:02
| 26,565,467
| 157
| 22
| null | 2022-01-10T16:37:03
| 2014-11-13T01:57:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,295
|
py
|
main.py
|
import numpy as np
import noise
# help(noise)
import minecraft
width = 250
solids = []
solidity = np.zeros([width] * 3).astype(np.int32)
for x in range(width):
for y in range(width):
for z in range(width):
scale = .05
            p = noise.snoise3(x * scale, y * scale, z * scale, octaves=1, persistence=.5, lacunarity=2.0)
if p > 0:
solids.append((x,y,z, 1))
solidity[x,y,z] = 1
all_patterns = set([])
for x in range(0, width - 2, 3):
for y in range(0, width - 2, 3):
for z in range(0, width - 2, 3):
all_patterns.add(tuple(solidity[x:x+3, y:y+3, z:z+3].flatten()))
# print len(all_patterns), "/", (width - 2) ** 3
# print 1.0 * len(all_patterns) / (width - 2) ** 3
# minecraft.main(solid = solids)
all_patterns = list(all_patterns)
text_rows = []
for i in range(len(all_patterns)):
# print "tile", i
tile = np.array(all_patterns[i]).reshape([3]*3)
    for layer in range(3):
        text_line = ""
        plane = tile[:, :, layer]  # avoid shadowing the loop index and the builtin 'slice'
        for line in range(3):
            text_line += ("".join([str(x) for x in list(plane[line])]) + " ")
text_rows.append(text_line)
text_rows.append(" ")
# text_rows.replace("0", ",")
# text_rows.replace("1", "#")
print "\n".join(text_rows)
|
bb39057cb802a5c3ada9bc6605008c645a7f4582
|
35aaddadff1fc6c639869bf7302d6927c0b1918a
|
/src/balance-a-binary-search-tree.py
|
f2e4bc64f56202cda25554f2906189a882ddc011
|
[] |
no_license
|
lydxlx1/LeetCode
|
daa3ceadf8c12d57a16ef2ce1a98216123d6b10a
|
516d5f08fc9b1b71b14d43687221a06d07dc51fc
|
refs/heads/master
| 2023-02-18T05:58:23.481713
| 2023-02-12T21:04:46
| 2023-02-12T21:04:46
| 46,915,247
| 110
| 48
| null | 2021-09-13T21:11:37
| 2015-11-26T08:58:18
|
Java
|
UTF-8
|
Python
| false
| false
| 904
|
py
|
balance-a-binary-search-tree.py
|
"""1382. Balance a Binary Search Tree
Do an in-order traversal to obtain all the elements in BST in sorted order.
Then build the balanced tree using Divide-and-Conquer.
"""
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def balanceBST(self, root: TreeNode) -> TreeNode:
sorted_tree = []
def in_order(root):
if not root:
return
in_order(root.left)
sorted_tree.append(root.val)
in_order(root.right)
in_order(root)
def build(l, r):
if l > r:
return None
mid = (l + r) // 2
root = TreeNode(sorted_tree[mid])
root.left = build(l, mid - 1)
root.right = build(mid + 1, r)
return root
return build(0, len(sorted_tree) - 1)
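# Usage sketch (illustrative, not part of the original solution): balance a
# degenerate right-skewed BST 1 -> 2 -> 3 -> 4.
if __name__ == "__main__":
    root = TreeNode(1)
    root.right = TreeNode(2)
    root.right.right = TreeNode(3)
    root.right.right.right = TreeNode(4)
    balanced = Solution().balanceBST(root)
    print(balanced.val)                           # 2, the middle of [1, 2, 3, 4]
    print(balanced.left.val, balanced.right.val)  # 1 3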
|
6ea7ec16280657dc6f02278d666189ba646165d6
|
1ba293e5a7e675c07ea2fb0ef6393eeb482c12fb
|
/tests/modules/components/test_crf.py
|
f76970ce4386834b6c4b2eb3758e5c53a64ab3f6
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
ku-nlp/kwja
|
b75ed5a19543dac0fed6db0de7efb0ee41f41edd
|
53594c7808de1845ebf8d5afe7b5c81bfffa7ba1
|
refs/heads/main
| 2023-09-02T22:30:46.843141
| 2023-08-28T07:57:23
| 2023-08-28T07:57:23
| 496,122,031
| 111
| 5
|
MIT
| 2023-09-05T01:25:47
| 2022-05-25T07:12:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,451
|
py
|
test_crf.py
|
from typing import Tuple
import pytest
import torch
from kwja.modules.components.crf import CRF
from kwja.utils.constants import MASKED, NE_TAGS
def test_init() -> None:
crf = CRF(NE_TAGS)
assert crf.num_tags == len(NE_TAGS)
assert crf.start_transitions.shape == (len(NE_TAGS),)
assert crf.transitions.shape == (len(NE_TAGS), len(NE_TAGS))
assert crf.end_transitions.shape == (len(NE_TAGS),)
for i, source in enumerate(NE_TAGS):
if source.startswith("I-"):
assert crf.start_transitions[i].item() == MASKED
for j, target in enumerate(NE_TAGS):
if (
(source.startswith("B-") or source.startswith("I-"))
and target.startswith("I-")
and source[2:] != target[2:]
):
assert crf.transitions[i, j].item() == MASKED
elif source == "O" and target.startswith("I-"):
assert crf.transitions[i, j].item() == MASKED
@pytest.mark.parametrize(
"batch_size, seq_length, reduction",
[
(2, 3, "token_mean"),
(2, 3, "mean"),
(2, 3, "sum"),
(2, 3, "none"),
],
)
def test_forward(batch_size: int, seq_length: int, reduction: str) -> None:
crf = CRF(NE_TAGS)
emissions = torch.zeros((batch_size, seq_length, crf.num_tags), dtype=torch.float)
tags = torch.zeros((batch_size, seq_length), dtype=torch.long)
llh = crf(emissions, tags, reduction=reduction)
if reduction == "token_mean":
assert llh.shape == ()
elif reduction == "mean":
assert llh.shape == ()
elif reduction == "sum":
assert llh.shape == ()
else: # none
assert llh.shape == (batch_size,)
@pytest.mark.parametrize(
"batch_size, seq_length, reduction",
[
(2, 3, "token_mean"),
(2, 3, "mean"),
(2, 3, "sum"),
(2, 3, "none"),
],
)
def test_loss_mask(batch_size: int, seq_length: int, reduction: str) -> None:
crf = CRF(NE_TAGS)
emissions = torch.randn((batch_size, seq_length, crf.num_tags), dtype=torch.float)
tags = torch.zeros((batch_size, seq_length), dtype=torch.long)
mask = torch.full((batch_size, seq_length), False, dtype=torch.bool)
llh = crf(emissions, tags, mask=mask, reduction=reduction)
assert llh.sum().item() == 0
@pytest.mark.parametrize(
"batch_size, seq_length, target_span",
[
(2, 3, (0, 3)),
(2, 3, (1, 2)),
],
)
def test_viterbi_decode(batch_size: int, seq_length: int, target_span: Tuple[int, int]) -> None:
crf = CRF(NE_TAGS)
crf.start_transitions.data = torch.zeros_like(crf.start_transitions)
crf.transitions.data = torch.zeros_like(crf.transitions)
crf.end_transitions.data = torch.zeros_like(crf.end_transitions)
ne_index = [i for i, tag in enumerate(NE_TAGS) if tag.startswith("B-")][0]
emissions = torch.zeros((batch_size, seq_length, crf.num_tags), dtype=torch.float)
emissions[:, :, ne_index] = 1.0
mask = torch.full((batch_size, seq_length), False, dtype=torch.bool)
mask[:, torch.arange(*target_span)] = True
decoded = crf.viterbi_decode(emissions, mask)
assert decoded.shape == (batch_size, seq_length)
assert (decoded[:, torch.arange(*target_span)] == ne_index).all()
assert (decoded[:, torch.arange(target_span[0])] == NE_TAGS.index("O")).all()
assert (decoded[:, torch.arange(target_span[1], seq_length)] == NE_TAGS.index("O")).all()
|
f1b6df839b80685e651b6d06e078a87ba1dc8d8d
|
12be962c0825d78eaf5a5a3aaf551bfdffbf09a3
|
/DomainNet/code_MSDA_digit/datasets/unaligned_data_loader.py
|
bab3bec0e82274cdced6aef94a1a2b5cd396d564
|
[] |
no_license
|
VisionLearningGroup/VisionLearningGroup.github.io
|
98032ac28b92a680db146bfad243e49f4438f79d
|
c8489562941c180e495b92a495adbc9e18d9c6cb
|
refs/heads/master
| 2023-06-22T20:11:51.821301
| 2023-06-15T18:37:48
| 2023-06-15T18:37:48
| 87,874,579
| 189
| 78
| null | 2023-02-19T13:25:04
| 2017-04-11T01:34:30
|
CSS
|
UTF-8
|
Python
| false
| false
| 5,836
|
py
|
unaligned_data_loader.py
|
import torch.utils.data
import torchnet as tnt
from builtins import object
import torchvision.transforms as transforms
from datasets_ import Dataset
class PairedData(object):
def __init__(self, data_loader_A, data_loader_B, data_loader_C, data_loader_D, data_loader_t, max_dataset_size):
self.data_loader_A = data_loader_A
self.data_loader_B = data_loader_B
self.data_loader_C = data_loader_C
self.data_loader_D = data_loader_D
self.data_loader_t = data_loader_t
self.stop_A = False
self.stop_B = False
self.stop_C = False
self.stop_D = False
self.stop_t = False
self.max_dataset_size = max_dataset_size
def __iter__(self):
self.stop_A = False
self.stop_B = False
self.stop_C = False
self.stop_D = False
self.stop_t = False
self.data_loader_A_iter = iter(self.data_loader_A)
self.data_loader_B_iter = iter(self.data_loader_B)
self.data_loader_C_iter = iter(self.data_loader_C)
self.data_loader_D_iter = iter(self.data_loader_D)
self.data_loader_t_iter = iter(self.data_loader_t)
self.iter = 0
return self
def __next__(self):
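        # Each loader below is drawn from independently: when one is
        # exhausted it is flagged as stopped and immediately restarted, so
        # StopIteration is raised only once every loader has wrapped around
        # (or max_dataset_size batches have been produced).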
A, A_paths = None, None
B, B_paths = None, None
C, C_paths = None, None
D, D_paths = None, None
t, t_paths = None, None
try:
A, A_paths = next(self.data_loader_A_iter)
except StopIteration:
if A is None or A_paths is None:
self.stop_A = True
self.data_loader_A_iter = iter(self.data_loader_A)
A, A_paths = next(self.data_loader_A_iter)
try:
B, B_paths = next(self.data_loader_B_iter)
except StopIteration:
if B is None or B_paths is None:
self.stop_B = True
self.data_loader_B_iter = iter(self.data_loader_B)
B, B_paths = next(self.data_loader_B_iter)
try:
C, C_paths = next(self.data_loader_C_iter)
except StopIteration:
if C is None or C_paths is None:
self.stop_C = True
self.data_loader_C_iter = iter(self.data_loader_C)
C, C_paths = next(self.data_loader_C_iter)
try:
D, D_paths = next(self.data_loader_D_iter)
except StopIteration:
if D is None or D_paths is None:
self.stop_D = True
self.data_loader_D_iter = iter(self.data_loader_D)
D, D_paths = next(self.data_loader_D_iter)
try:
t, t_paths = next(self.data_loader_t_iter)
except StopIteration:
if t is None or t_paths is None:
self.stop_t = True
self.data_loader_t_iter = iter(self.data_loader_t)
t, t_paths = next(self.data_loader_t_iter)
if (self.stop_A and self.stop_B and self.stop_C and self.stop_D and self.stop_t) or self.iter > self.max_dataset_size:
self.stop_A = False
self.stop_B = False
self.stop_C = False
self.stop_D = False
self.stop_t = False
raise StopIteration()
else:
self.iter += 1
return {'S1': A, 'S1_label': A_paths,
'S2': B, 'S2_label': B_paths,
'S3': C, 'S3_label': C_paths,
'S4': D, 'S4_label': D_paths,
'T': t, 'T_label': t_paths}
class UnalignedDataLoader():
def initialize(self, source, target, batch_size1, batch_size2, scale=32):
transform = transforms.Compose([
transforms.Scale(scale),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
#dataset_source1 = Dataset(source[1]['imgs'], source['labels'], transform=transform)
dataset_source1 = Dataset(source[0]['imgs'], source[0]['labels'], transform=transform)
data_loader_s1 = torch.utils.data.DataLoader(dataset_source1, batch_size=batch_size1, shuffle=True, num_workers=4)
self.dataset_s1 = dataset_source1
dataset_source2 = Dataset(source[1]['imgs'], source[1]['labels'], transform=transform)
data_loader_s2 = torch.utils.data.DataLoader(dataset_source2, batch_size=batch_size1, shuffle=True, num_workers=4)
self.dataset_s2 = dataset_source2
dataset_source3 = Dataset(source[2]['imgs'], source[2]['labels'], transform=transform)
data_loader_s3 = torch.utils.data.DataLoader(dataset_source3, batch_size=batch_size1, shuffle=True, num_workers=4)
self.dataset_s3 = dataset_source3
dataset_source4 = Dataset(source[3]['imgs'], source[3]['labels'], transform=transform)
data_loader_s4 = torch.utils.data.DataLoader(dataset_source4, batch_size=batch_size1, shuffle=True, num_workers=4)
self.dataset_s4 = dataset_source4
#for i in range(len(source)):
# dataset_source[i] = Dataset(source[i]['imgs'], source[i]['labels'], transform=transform)
dataset_target = Dataset(target['imgs'], target['labels'], transform=transform)
data_loader_t = torch.utils.data.DataLoader(dataset_target, batch_size=batch_size2, shuffle=True, num_workers=4)
self.dataset_t = dataset_target
self.paired_data = PairedData(data_loader_s1, data_loader_s2, data_loader_s3,data_loader_s4, data_loader_t,
float("inf"))
def name(self):
return 'UnalignedDataLoader'
def load_data(self):
return self.paired_data
def __len__(self):
return min(max(len(self.dataset_s1),len(self.dataset_s2),len(self.dataset_s3), len(self.dataset_s4),len(self.dataset_t)), float("inf"))
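# Usage sketch (illustrative; the variable names and data layout are assumed,
# not taken from the original repo): four source domains plus a target are
# iterated in lockstep, one batch from each per step.
#   loader = UnalignedDataLoader()
#   loader.initialize(sources, target, batch_size1=128, batch_size2=128)
#   for batch in loader.load_data():
#       s1, s1_labels = batch['S1'], batch['S1_label']
#       t, t_labels = batch['T'], batch['T_label']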
|
db51e62fe2c2cc51b563429f6a2a5d69e1da5868
|
362196f32e8248e025cb2f6cf0b88f812c9a059c
|
/juriscraper/lib/utils.py
|
0ead9a0e4653b3014dc4b246cbd74b4802a2b434
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
freelawproject/juriscraper
|
0fea8d4bb512808cb1e036aaaf819e9cc0847a6b
|
d2c6672696e13e33ec9981a1901b87047d8108c5
|
refs/heads/main
| 2023-08-09T13:27:21.357915
| 2023-07-06T22:33:01
| 2023-07-06T22:33:01
| 22,757,589
| 283
| 97
|
BSD-2-Clause
| 2023-09-08T22:59:36
| 2014-08-08T12:50:35
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,605
|
py
|
utils.py
|
import re
from itertools import chain, islice, tee
from .string_utils import force_unicode
def previous_and_next(some_iterable):
"""Provide previous and next values while iterating a list.
This is from: http://stackoverflow.com/a/1012089/64911
This will allow you to lazily iterate a list such that as you iterate, you
get a tuple containing the previous, current, and next value.
"""
prevs, items, nexts = tee(some_iterable, 3)
prevs = chain([None], prevs)
nexts = chain(islice(nexts, 1, None), [None])
return zip(prevs, items, nexts)
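# For example (illustrative):
#   list(previous_and_next([1, 2, 3]))
#   == [(None, 1, 2), (1, 2, 3), (2, 3, None)]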
def clean_court_object(obj):
"""Clean a list or dict that is part of a scraping response.
Court data is notoriously horrible, so this function attempts to clean up
common problems that it may have. You can pass in either a dict or a list,
and it will be cleaned recursively.
Supported cleanup includes:
1. Removing spaces before commas.
1. Stripping whitespace from the ends.
1. Normalizing white space.
1. Forcing unicode.
:param obj: A dict or list containing string objects.
:return: A dict or list with the string values cleaned.
"""
if isinstance(obj, list):
l = []
for i in obj:
l.append(clean_court_object(i))
return l
elif isinstance(obj, dict):
d = {}
for k, v in obj.items():
d[k] = clean_court_object(v)
return d
elif isinstance(obj, str):
s = " ".join(obj.strip().split())
s = force_unicode(s)
return re.sub(r"\s+,", ",", s)
else:
return obj
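# For example (illustrative): clean_court_object({"name": "  Smith ,  Jr.  "})
# returns {"name": "Smith, Jr."} -- whitespace is stripped and collapsed, and
# the space before the comma is removed.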
|
c64ce92e4ae23e1d281973a6a0b93aef2594501a
|
c1fe9e7e94df1816004659579f52dcae4744994d
|
/src/xdoctest/directive.pyi
|
de6bf73e2e7ccec0367f6333d16e2ba9dab620ce
|
[
"Apache-2.0"
] |
permissive
|
Erotemic/xdoctest
|
60ec105c7305b2f72c2ee183e62fbe3a35e010e2
|
20b998eb5e5d05795292972b267ea7407cea771c
|
refs/heads/main
| 2023-08-19T07:29:18.258737
| 2023-08-04T22:12:23
| 2023-08-04T22:44:45
| 104,611,526
| 196
| 9
|
Apache-2.0
| 2023-08-04T22:44:47
| 2017-09-24T01:59:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,918
|
pyi
|
directive.pyi
|
from typing import Dict
from collections import OrderedDict
from typing import Any
from typing import List
from _typeshed import Incomplete
from collections.abc import Generator
from typing import Any, NamedTuple
from xdoctest import utils
def named(key: str, pattern: str) -> str:
...
DEFAULT_RUNTIME_STATE: Incomplete
class Effect(NamedTuple):
action: Incomplete
key: Incomplete
value: Incomplete
class RuntimeState(utils.NiceRepr):
def __init__(self, default_state: None | dict = None) -> None:
...
def to_dict(self) -> OrderedDict:
...
def __nice__(self) -> str:
...
def __getitem__(self, key: str) -> Any:
...
def __setitem__(self, key: str, value: Any) -> None:
...
def set_report_style(self,
reportchoice: str,
state: None | Dict = None) -> None:
...
def update(self, directives: List[Directive]) -> None:
...
class Directive(utils.NiceRepr):
name: str
args: List[str]
inline: bool | None
positive: bool
def __init__(self,
name: str,
positive: bool = True,
args: List[str] = ...,
inline: bool | None = None) -> None:
...
@classmethod
def extract(cls, text: str) -> Generator[Directive, None, None]:
...
def __nice__(self) -> str:
...
def effect(self,
argv: Incomplete | None = ...,
environ: Incomplete | None = ...):
...
def effects(self,
argv: List[str] | None = None,
environ: Dict[str, str] | None = None) -> List[Effect]:
...
COMMANDS: Incomplete
DIRECTIVE_PATTERNS: Incomplete
DIRECTIVE_RE: Incomplete
def parse_directive_optstr(optpart: str,
inline: None | bool = None) -> Directive:
...
|
340b7d07a7d5e64c8277cd86c87564fa3a502831
|
45055a7b35701d46d9da7b2a80fdb276fe2c4598
|
/build.py
|
7a1509928db0ad7a2a214bcb32c1220897cd2ef8
|
[
"Apache-2.0"
] |
permissive
|
danieleteti/delphiredisclient
|
23a870f4d93c527b424919c7defc8c02e0ddae73
|
4f78d48dceb3f910ae95ae3ec8c189f93cd27ca8
|
refs/heads/master
| 2022-12-12T18:30:23.739807
| 2022-12-02T17:03:13
| 2022-12-02T17:03:13
| 21,604,525
| 281
| 129
|
Apache-2.0
| 2022-12-02T17:03:14
| 2014-07-08T08:14:48
|
Pascal
|
UTF-8
|
Python
| false
| false
| 2,595
|
py
|
build.py
|
# coding: latin-1
import subprocess
import os
import glob
from colorama import *
init()
#################################################################################
def buildProject(project):
print(Fore.YELLOW + "Building " + project)
p = project.replace('.dproj', '.cfg')
if os.path.isfile(p):
if os.path.isfile(p + '.unused'):
os.remove(p + '.unused')
os.rename(p, p + '.unused')
# print os.system("msbuild /t:Build /p:Config=Debug \"" + project + "\"")
return subprocess.call("rsvars.bat & msbuild /t:Build /p:Config=Debug /p:Platform=Win32 \"" + project + "\"", shell=True) == 0
def summaryTable(builds):
print(ansi.clear_screen())
copyright()
print(Fore.WHITE + "PROJECT NAME".ljust(80) + "STATUS".ljust(10))
print(Fore.YELLOW + "=" * 90)
good = bad = 0
for item in builds:
if item['status'] == 'ok':
#WConio.textcolor(WConio.LIGHTGREEN)
good += 1
else:
#WConio.textcolor(WConio.RED)
bad += 1
print(Fore.BLUE + item['project'].ljust(80) + (Fore.WHITE if item['status'] == 'ok' else Fore.RED) + item['status'].ljust(4))
#WConio.textcolor(WConio.WHITE)
print(Fore.YELLOW + "=" * 90)
#WConio.textcolor(WConio.GREEN)
print(Fore.WHITE + "GOOD :".rjust(80) + str(good).rjust(10, '.'))
#WConio.textcolor(WConio.RED)
print(Fore.RED + "BAD :".rjust(80) + str(bad).rjust(10, '.'))
#################################################################################
def main(projects):
copyright()
builds = []
    for project in projects:
        filename = '\\'.join(project.split('\\')[-3:])
        build = {'project': filename}  # don't shadow the builtin 'list'
        if buildProject(project):
            build["status"] = "ok"
        else:
            build["status"] = "ko"
        builds.append(build)
summaryTable(builds)
# Store current attribute settings
#old_setting = WConio.gettextinfo()[4] & 0x00FF
def copyright():
print(Style.BRIGHT + Fore.WHITE + "----------------------------------------------------------------------------------------")
print(Fore.RED + " ** Delphi Redis client Building System **")
print(Fore.WHITE + " Delphi Redis Client is Copyright (2014-2022) of Daniele Teti")
print(Fore.WHITE + " Commercial support provided by bit Time Professionals - www.bittimeprofessionals.it")
print(Fore.RESET + "----------------------------------------------------------------------------------------\n")
## MAIN ##
projects = glob.glob("*\**\*.dproj")
projects = projects + glob.glob("**\*.dproj")
main(projects)
print(Style.RESET_ALL)
|
0815c347620ee03934290720c224e97d2c8c548c
|
820b6af9fd43b270749224bb278e5f714f655ac9
|
/Utilities/Maintenance/AddOverrides.py
|
8596eb11f763e638ee44bd20c3d03aa5ff516efe
|
[
"BSD-3-Clause"
] |
permissive
|
Kitware/VTK
|
49dee7d4f83401efce8826f1759cd5d9caa281d1
|
dd4138e17f1ed5dfe6ef1eab0ff6643fdc07e271
|
refs/heads/master
| 2023-09-01T10:21:57.496189
| 2023-09-01T08:20:15
| 2023-09-01T08:21:05
| 631,615
| 2,253
| 1,243
|
NOASSERTION
| 2023-09-14T07:53:03
| 2010-04-27T15:12:58
|
C++
|
UTF-8
|
Python
| false
| false
| 2,932
|
py
|
AddOverrides.py
|
# This Python script can be used to add override statements where they are
# reported to be needed according to warnings produced by clang-700.1.76 on macOS
# 10.11.6 with the -Winconsistent-missing-override option enabled. To run the
# script, invoke
# > python AddOverrides.py <overrides.txt>
# Each line of the overrides.txt file has the form
# <source file>:<line number>:<position>: warning: 'RequestDataDescription' \
# overrides a member function but is not marked 'override' [-Winconsistent-missing-override]
# It can be generated by running
# > ninja clean
# > ninja &> output.txt
# > grep "overrides a member function but" output.txt | sort | uniq > overrides.txt
# The script should be idempotent, so it can be run a second time without adversely
# affecting files that have already been modified during a first run.
import re
import sys
import __future__
lines_map = {}
# Load override warning file. Store file name as key and list of lines to modify
# as values.
with open(sys.argv[1], 'r') as f:
for line in f:
components = line.split(':')
file_name = components[0]
line_number = int(components[1])
if file_name in lines_map:
lines_map[file_name].add(line_number)
else:
lines_map[file_name] = {line_number}
#break
# Sort the line numbers
for k, v in lines_map.items():
sorted_line_numbers = sorted(v)
lines_map[k] = sorted_line_numbers
# Now open each file in the dictionary, append override to the end of each
# line, and save out the modified file
for file_name, line_numbers in lines_map.items():
lines = []
with open(file_name, 'r') as f:
contents = f.read()
lines = contents.split('\n')
f.close()
out_file = open(file_name, 'w')
counter = 1
in_multi_line = False
for line in lines:
if line.find('override') >= 0:
in_multi_line = False
else:
if in_multi_line or (counter in line_numbers and re.match('^vtk.*Macro', line.lstrip()) is None):
if line.endswith(');'):
line = line[0:-1] + ' override;'
in_multi_line = False
#print(65, file_name, line, counter)
elif line.endswith('=0;'):
line = line[0:-3] + ' override = 0;'
in_multi_line = False
elif line.endswith(' = 0;'):
line = line[0:-5] + ' override = 0;'
in_multi_line = False
elif line.endswith(')'):
line = line + ' override'
in_multi_line = False
#print(75, file_name, line, counter)
elif line.find('{') >= 0:
idx = line.find('{')
line = line[:idx].rstrip() + ' override ' + line[idx:].lstrip()
in_multi_line = False
                #print(65, file_name, line, counter)
elif line.endswith(',') or line.endswith('('):
in_multi_line = True
counter = counter + 1
out_file.write('%s' % line)
if counter <= len(lines):
out_file.write('\n')
out_file.close()
|
6ee608c7924740ec575d6823e9c38ea0f8ecc811
|
e7efae2b83216d9621bd93390959d652de779c3d
|
/ibm_i/tests/test_e2e.py
|
272f21ad7fb0a6d3b8aadca1c36f8253859775c1
|
[
"BSD-3-Clause",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] |
permissive
|
DataDog/integrations-core
|
ee1886cc7655972b2791e6ab8a1c62ab35afdb47
|
406072e4294edff5b46b513f0cdf7c2c00fac9d2
|
refs/heads/master
| 2023-08-31T04:08:06.243593
| 2023-08-30T18:22:10
| 2023-08-30T18:22:10
| 47,203,045
| 852
| 1,548
|
BSD-3-Clause
| 2023-09-14T16:39:54
| 2015-12-01T16:41:45
|
Python
|
UTF-8
|
Python
| false
| false
| 382
|
py
|
test_e2e.py
|
# (C) Datadog, Inc. 2022-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.ibm_i import IbmICheck
@pytest.mark.e2e
def test_e2e(dd_agent_check, aggregator, instance):
aggregator = dd_agent_check(instance)
aggregator.assert_service_check("ibm_i.can_connect", IbmICheck.CRITICAL, tags=['foo:bar'])
|
031bb04513f1a624c087f5c5b02805b2fac4c072
|
b8bbdfc593b6d816e67a344f720f90ec05236778
|
/airflow/www/extensions/init_auth_manager.py
|
24ae020862dc932f8e517e94ec41ffa61a338e82
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
apache/airflow
|
ed78db0a8bab7e096990e143926e52f518e288ab
|
1b122c15030e99cef9d4ff26d3781a7a9d6949bc
|
refs/heads/main
| 2023-09-01T08:37:34.556097
| 2023-09-01T06:49:05
| 2023-09-01T06:49:05
| 33,884,891
| 22,756
| 11,558
|
Apache-2.0
| 2023-09-14T20:12:36
| 2015-04-13T18:04:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,837
|
py
|
init_auth_manager.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.compat.functools import cache
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException
if TYPE_CHECKING:
from airflow.auth.managers.base_auth_manager import BaseAuthManager
def get_auth_manager_cls() -> type[BaseAuthManager]:
"""Returns just the auth manager class without initializing it.
Useful to save execution time if only static methods need to be called.
"""
auth_manager_cls = conf.getimport(section="core", key="auth_manager")
if not auth_manager_cls:
raise AirflowConfigException(
"No auth manager defined in the config. "
"Please specify one using section/key [core/auth_manager]."
)
return auth_manager_cls
@cache
def get_auth_manager() -> BaseAuthManager:
"""
Initialize auth manager.
Import the user manager class, instantiate it and return it.
"""
auth_manager_cls = get_auth_manager_cls()
return auth_manager_cls()
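# Illustrative note (not part of Airflow): with a configuration such as
#
#   [core]
#   auth_manager = airflow.auth.managers.fab.fab_auth_manager.FabAuthManager
#
# conf.getimport resolves the dotted path to a BaseAuthManager subclass, and
# @cache makes get_auth_manager() instantiate it exactly once per process.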
|
e8958d56bfa72f435b66b3091a9ec03fced75dd3
|
3a99f98ee6a84c916496d389944ff46a416c6b24
|
/mobula/building/build_hash.py
|
d64a9d3b3893dfbb4548840de2f7fe4f9fa5ccf1
|
[
"MIT"
] |
permissive
|
wkcn/MobulaOP
|
7ab80e756466253718cd7e3eb15379ce93fa4e01
|
ae693a6a55824e9a0785f5b9befde6bd8ecccfd6
|
refs/heads/master
| 2021-06-02T19:21:26.774096
| 2020-09-27T11:37:20
| 2020-09-27T11:37:20
| 132,334,822
| 165
| 25
|
MIT
| 2020-09-27T11:37:21
| 2018-05-06T12:08:25
|
Python
|
UTF-8
|
Python
| false
| false
| 213
|
py
|
build_hash.py
|
import hashlib
import os
def path_hash(path):
md5 = hashlib.md5()
md5.update(path.encode('utf-8'))
return md5.hexdigest()[:8]
def get_file_hash(fname):
return str(int(os.path.getmtime(fname)))
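# Usage sketch (illustrative, not part of the original module):
if __name__ == '__main__':
    print(path_hash('/tmp/op'))     # first 8 hex chars of the MD5 of the path
    print(get_file_hash(__file__))  # the file's integer mtime, as a string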
|
c6882d3054945509a17c927f6e495e8bb3b101a6
|
f509ab9825c542e09b0c6591d86ef1f9feb540a6
|
/pkgs/sdk-pkg/src/genie/libs/sdk/triggers/modify/bgp/modify.py
|
bf97509c282302f8f3109b9309837876bf11a227
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genielibs
|
97f597117193aaa18028defeb69078ebb241173a
|
e42e51475cddcb10f5c7814d0fe892ac865742ba
|
refs/heads/master
| 2023-08-11T16:39:41.959947
| 2023-07-27T17:58:42
| 2023-07-27T17:58:42
| 130,717,047
| 109
| 60
|
Apache-2.0
| 2023-08-29T22:32:08
| 2018-04-23T15:21:56
|
Python
|
UTF-8
|
Python
| false
| false
| 22,396
|
py
|
modify.py
|
'''Implementation for bgp modify triggers'''
# import python
import time
# Genie Libs
from genie.libs.sdk.libs.utils.mapping import Mapping
from genie.libs.sdk.triggers.modify.modify import TriggerModify
# TODO: Better Mapping text to explain what does what
# Which key to exclude for BGP Ops comparison
bgp_exclude = ['maker', 'bgp_session_transport', 'route_refresh',
'bgp_negotiated_capabilities', 'notifications', 'last_reset',
'keepalives', 'total', 'total_bytes', 'up_time', 'totals',
'bgp_negotiated_keepalive_timers', 'updates', 'opens',
'bgp_table_version', 'holdtime', 'keepalive_interval',
'route_reflector_client', 'capability', 'send_community',
'distance_internal_as', 'distance_extern_as',
'bgp_neighbor_counters', 'reset_reason',
'holdtime', 'keepalive_interval', 'password_text']
class TriggerModifyBgpNeighborAsn(TriggerModify):
"""Modify and revert the remote-as number for
dynamically learned "established" BGP neighbor(s)."""
__description__ = """Modify and revert the remote-as number for
dynamically learned "established" BGP neighbor(s).
trigger_datafile:
Mandatory:
timeout:
max_time (`int`): Maximum wait time for the trigger,
in second. Default: 180
interval (`int`): Wait time between iteration when looping is needed,
in second. Default: 15
method (`str`): Method to recover the device configuration,
Support methods:
'checkpoint': Rollback the configuration by
checkpoint (nxos),
archive file (iosxe),
load the saved running-config file on disk (iosxr)
Optional:
tgn_timeout (`int`): Maximum wait time for all traffic threads to be
restored to the reference rate,
in second. Default: 60
tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
in second. Default: 10
timeout_recovery:
                        Buffer recovery timeout to make sure devices are recovered at the end
of the trigger execution. Used when previous timeouts have been exhausted.
max_time (`int`): Maximum wait time for the last step of the trigger,
in second. Default: 180
interval (`int`): Wait time between iteration when looping is needed,
in second. Default: 15
static:
The keys below are dynamically learnt by default.
However, they can also be set to a custom value when provided in the trigger datafile.
instance: `str`
vrf: `vrf`
neighbor: `str`
address_family: `str`
remote_as: `int`
bgp_id: `int`
(e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
OR
interface: 'Ethernet1/1/1' (Specific value)
steps:
1. Learn BGP Ops object and store the "established" BGP neighbor(s) which has
remote-as configured. SKIP the trigger if there is no BGP neighbor(s) found
2. Save the current device configurations through "method" which user uses
3. Modify the remote-as number of the learned BGP neighbor(s) from step 1
with BGP Conf object
4. Verify the remote-as number of learned BGP neighbor(s) from step 3
changes to the modified number in step 3
5. Recover the device configurations to the one in step 2
6. Learn BGP Ops again and verify it is the same as the Ops in step 1
"""
# Add more keys to be excluded for this trigger only
bgp_exclude = bgp_exclude + ['aggregate_address_as_set',
'aggregate_address_ipv4_address',
'aggregate_address_ipv4_mask',
'aggregate_address_summary_only',
'distance_local', 'disable_connected_check']
# Mapping of Information between Ops and Conf
    # Also dictates which keys to verify
mapping = Mapping(requirements={'ops.bgp.bgp.Bgp':{
'requirements':[['info', 'instance', '(?P<instance>.*)',
'vrf', '(?P<vrf>.*)', 'neighbor',
'(?P<neighbor>.*)', 'remote_as',
'(?P<remote_as>.*)'],
['info', 'instance', '(?P<instance>.*)', 'vrf',
'(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
'session_state', 'established'],
['info', 'instance', '(?P<instance>.*)', 'vrf',
'(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
'address_family', '(?P<address_family>.*)',
'session_state', 'established'],
['info', 'instance', '(?P<instance>.*)', 'bgp_id', '(?P<bgp_id>.*)']],
'all_keys':True,
'kwargs':{'attributes':['info']},
'exclude': bgp_exclude}},
config_info={'conf.bgp.Bgp':{
'requirements':[['device_attr', '{uut}', 'vrf_attr', '(?P<vrf>.*)',
'neighbor_attr','(?P<neighbor>.*)', 'nbr_remote_as',
88]],
'verify_conf':False,
'kwargs':{'mandatory':{'bgp_id': '(?P<bgp_id>.*)'}}}},
verify_ops={'ops.bgp.bgp.Bgp':{
'requirements':[['info', 'instance', '(?P<instance>.*)', 'vrf',
'(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
'remote_as', 88],
['info', 'instance', '(?P<instance>.*)', 'vrf',
'(?P<vrf>.*)', 'neighbor',
'(?P<neighbor>.*)', 'session_state', '(?P<session_state>(idle|active).*)'],
['info', 'instance', '(?P<instance>.*)', 'vrf',
'(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
'address_family', '(?P<address_family>.*)',
'session_state', '(?P<af_sstate>(idle|active).*)']],
'kwargs':{'attributes':['info']},
'exclude': bgp_exclude}},
num_values={'vrf':1, 'instance':1, 'neighbor':1})
class TriggerModifyBgpNeighborCluster(TriggerModify):
"""Modify and revert the cluster id for dynamically learned BGP instance(s)."""
__description__ = """Modify and revert the cluster id for dynamically learned BGP instance(s).
trigger_datafile:
Mandatory:
timeout:
max_time (`int`): Maximum wait time for the trigger,
in second. Default: 180
interval (`int`): Wait time between iteration when looping is needed,
in second. Default: 15
method (`str`): Method to recover the device configuration,
Support methods:
'checkpoint': Rollback the configuration by
checkpoint (nxos),
archive file (iosxe),
load the saved running-config file on disk (iosxr)
Optional:
tgn_timeout (`int`): Maximum wait time for all traffic threads to be
restored to the reference rate,
in second. Default: 60
tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
in second. Default: 10
timeout_recovery:
                        Buffer recovery timeout to make sure devices are recovered at the end
of the trigger execution. Used when previous timeouts have been exhausted.
max_time (`int`): Maximum wait time for the last step of the trigger,
in second. Default: 180
interval (`int`): Wait time between iteration when looping is needed,
in second. Default: 15
static:
The keys below are dynamically learnt by default.
However, they can also be set to a custom value when provided in the trigger datafile.
instance: `str`
vrf: `vrf`
cluster_id: `str`
bgp_id: `int`
(e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
OR
interface: 'Ethernet1/1/1' (Specific value)
steps:
1. Learn BGP Ops object and store the BGP instance(s) which has cluster id
configured. SKIP the trigger if there is no BGP instance(s) found
2. Save the current device configurations through "method" which user uses
3. Modify the cluster id of the learned BGP neighbor(s) from step 1
with BGP Conf object
4. Verify the cluster id of learned BGP neighbor(s) from step 3
changes to the modified id in step 3
5. Recover the device configurations to the one in step 2
6. Learn BGP Ops again and verify it is the same as the Ops in step 1
"""
# Mapping of Information between Ops and Conf
    # Also dictates which keys to verify
mapping = Mapping(requirements={'ops.bgp.bgp.Bgp':{
'requirements':[['info', 'instance', '(?P<instance>.*)',
'vrf', '(?P<vrf>.*)', 'cluster_id', '(?P<cluster_id>.*)'],
['info', 'instance', '(?P<instance>.*)', 'bgp_id', '(?P<bgp_id>.*)']],
'all_keys':True,
'kwargs':{'attributes':['info']},
'exclude': bgp_exclude}},
config_info={'conf.bgp.Bgp':{
'requirements':[['device_attr', '{uut}', 'vrf_attr', '(?P<vrf>.*)',
'cluster_id', '1.1.1.1']],
'verify_conf':False,
'kwargs':{'mandatory':{'bgp_id': '(?P<bgp_id>.*)'}}}},
verify_ops={'ops.bgp.bgp.Bgp':{
'requirements':[['info', 'instance', '(?P<instance>.*)', 'vrf',
'(?P<vrf>.*)', 'cluster_id', '1.1.1.1']],
'kwargs':{'attributes':['info']},
'exclude': bgp_exclude}},
num_values={'vrf':'all', 'instance':1, 'neighbor':1,
'address_family':1})
class TriggerModifyBgpNeighborRoutemapIn(TriggerModify):
"""Modify and revert the inbound route-map for dynamically learned BGP neighbors(s)."""
__description__ = """Modify and revert the inbound route-map for dynamically learned BGP neighbors(s).
trigger_datafile:
Mandatory:
timeout:
max_time (`int`): Maximum wait time for the trigger,
in second. Default: 180
interval (`int`): Wait time between iteration when looping is needed,
in second. Default: 15
method (`str`): Method to recover the device configuration,
Support methods:
'checkpoint': Rollback the configuration by
checkpoint (nxos),
archive file (iosxe),
load the saved running-config file on disk (iosxr)
Optional:
tgn_timeout (`int`): Maximum wait time for all traffic threads to be
restored to the reference rate,
in second. Default: 60
tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
in second. Default: 10
timeout_recovery:
                        Buffer recovery timeout to make sure devices are recovered at the end
of the trigger execution. Used when previous timeouts have been exhausted.
max_time (`int`): Maximum wait time for the last step of the trigger,
in second. Default: 180
interval (`int`): Wait time between iteration when looping is needed,
in second. Default: 15
static:
The keys below are dynamically learnt by default.
However, they can also be set to a custom value when provided in the trigger datafile.
instance: `str`
vrf: `vrf`
neighbor: `str`
route_map: `str`
bgp_id: `int`
(e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
OR
interface: 'Ethernet1/1/1' (Specific value)
steps:
1. Learn BGP Ops object and store the BGP neighbors(s) which has inbound route-map
configured. SKIP the trigger if there is no BGP neighbors(s) found
2. Save the current device configurations through "method" which user uses
3. Modify the inbound route-map of the learned BGP neighbor(s) from step 1
with BGP Conf object
4. Verify the inbound route-map of learned BGP neighbor(s) from step 3
changes to the modified name in step 3
5. Recover the device configurations to the one in step 2
6. Learn BGP Ops again and verify it is the same as the Ops in step 1
"""
    # Create a name for the inbound route-map
new_name = 'bgp_' + time.ctime().replace(' ', '_').replace(':', '_')
# Mapping of Information between Ops and Conf
    # Also dictates which keys to verify
mapping = Mapping(requirements={'ops.bgp.bgp.Bgp':{
'requirements':[['info', 'instance', '(?P<instance>.*)', 'vrf',
'(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
'address_family', '(?P<address_family>.*)',
'route_map_name_in', '(?P<route_map>.*)'],
['info', 'instance', '(?P<instance>.*)', 'bgp_id', '(?P<bgp_id>.*)']],
'all_keys':True,
'kwargs':{'attributes':['info']},
'exclude': bgp_exclude}},
config_info={'conf.bgp.Bgp':{
'requirements':[['device_attr', '{uut}', 'vrf_attr', '(?P<vrf>.*)',
'neighbor_attr','(?P<neighbor>.*)', 'address_family_attr',
'(?P<address_family>.*)', 'nbr_af_route_map_name_in',
new_name]],
'verify_conf':False,
'kwargs':{'mandatory':{'bgp_id': '(?P<bgp_id>.*)'}}}},
verify_ops={'ops.bgp.bgp.Bgp':{
'requirements':[['info', 'instance', '(?P<instance>.*)', 'vrf',
'(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
'address_family', '(?P<address_family>.*)',
'route_map_name_in', new_name]],
'kwargs':{'attributes':['info']},
'exclude': bgp_exclude}},
num_values={'vrf':1, 'instance':1, 'neighbor':1,
'address_family':1})
class TriggerModifyBgpNeighborRoutemapOut(TriggerModify):
"""Modify and revert the outbound route-map for dynamically learned BGP neighbors(s)."""
__description__ = """Modify and revert the outbound route-map for dynamically learned BGP neighbors(s).
trigger_datafile:
Mandatory:
timeout:
max_time (`int`): Maximum wait time for the trigger,
in second. Default: 180
interval (`int`): Wait time between iteration when looping is needed,
in second. Default: 15
method (`str`): Method to recover the device configuration,
Support methods:
'checkpoint': Rollback the configuration by
checkpoint (nxos),
archive file (iosxe),
load the saved running-config file on disk (iosxr)
Optional:
tgn_timeout (`int`): Maximum wait time for all traffic threads to be
restored to the reference rate,
in second. Default: 60
tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
in second. Default: 10
timeout_recovery:
                        Buffer recovery timeout to make sure devices are recovered at the end
of the trigger execution. Used when previous timeouts have been exhausted.
max_time (`int`): Maximum wait time for the last step of the trigger,
in second. Default: 180
interval (`int`): Wait time between iteration when looping is needed,
in second. Default: 15
steps:
1. Learn BGP Ops object and store the BGP neighbors(s) which has outbound route-map
configured. SKIP the trigger if there is no BGP neighbors(s) found
2. Save the current device configurations through "method" which user uses
3. Modify the outbound route-map of the learned BGP neighbor(s) from step 1
with BGP Conf object
4. Verify the outbound route-map of learned BGP neighbor(s) from step 3
changes to the modified name in step 3
5. Recover the device configurations to the one in step 2
6. Learn BGP Ops again and verify it is the same as the Ops in step 1
"""
    # Create a name for the outbound route-map
new_name = 'bgp_' + time.ctime().replace(' ', '_').replace(':', '_')
# Mapping of Information between Ops and Conf
    # Also dictates which keys to verify
mapping = Mapping(requirements={'ops.bgp.bgp.Bgp':{
'requirements':[['info', 'instance', '(?P<instance>.*)', 'vrf',
'(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
'address_family', '(?P<address_family>.*)',
'route_map_name_out', '(?P<route_map>.*)'],
['info', 'instance', '(?P<instance>.*)', 'bgp_id', '(?P<bgp_id>.*)']],
'all_keys':True,
'kwargs':{'attributes':['info']},
'exclude': bgp_exclude}},
config_info={'conf.bgp.Bgp':{
'requirements':[['device_attr', '{uut}', 'vrf_attr', '(?P<vrf>.*)',
'neighbor_attr','(?P<neighbor>.*)', 'address_family_attr',
'(?P<address_family>.*)', 'nbr_af_route_map_name_out',
new_name]],
'verify_conf':False,
'kwargs':{'mandatory':{'bgp_id': '(?P<bgp_id>.*)'}}}},
verify_ops={'ops.bgp.bgp.Bgp':{
'requirements':[['info', 'instance', '(?P<instance>.*)', 'vrf',
'(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
'address_family', '(?P<address_family>.*)',
'route_map_name_out', new_name]],
'kwargs':{'attributes':['info']},
'exclude': bgp_exclude}},
num_values={'vrf':1, 'instance':1, 'neighbor':1,
'address_family':1})
|
e4cc769c505b609392c2035c3125e2e59d3ad6d6
|
704976ea552111c6a5af9cd7cb62b9d9abaf3996
|
/rpython/jit/metainterp/test/test_threadlocal.py
|
322e41a40520a6c3671b2f6321862d3f3f57a379
|
[
"BSD-3-Clause"
] |
permissive
|
mesalock-linux/mesapy
|
4f02c5819ce7f2f6e249d34840f1aa097577645d
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
refs/heads/mesapy2.7
| 2023-08-16T21:33:02.239581
| 2019-08-13T10:29:43
| 2019-08-13T18:06:45
| 136,080,721
| 396
| 33
|
NOASSERTION
| 2020-04-01T03:05:18
| 2018-06-04T20:45:17
|
Python
|
UTF-8
|
Python
| false
| false
| 862
|
py
|
test_threadlocal.py
|
import py
from rpython.rlib import rthread
from rpython.jit.metainterp.test.support import LLJitMixin
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
class ThreadLocalTest(object):
def test_threadlocalref_get(self):
tlfield = rthread.ThreadLocalField(lltype.Signed, 'foobar_test_')
def f():
tlfield.setraw(0x544c)
return tlfield.getraw()
res = self.interp_operations(f, [])
assert res == 0x544c
def test_threadlocalref_get_char(self):
tlfield = rthread.ThreadLocalField(lltype.Char, 'foobar_test_char_')
def f():
tlfield.setraw('\x92')
return ord(tlfield.getraw())
res = self.interp_operations(f, [])
assert res == 0x92
class TestLLtype(ThreadLocalTest, LLJitMixin):
pass
|
0d212b669e4d04bdd6d28ded728201a0b530a30a
|
3b87eaa7f1b8290d1a74ac2bec9573f81aab831d
|
/_scripts/templates/Python3/st.Test.py
|
42b5585d954fe0fe9298eb71ea817ecf06ac426b
|
[] |
permissive
|
antlr/grammars-v4
|
1f6ba461f9fb2c8f04335ca495249ab6eab8e0ae
|
98c2bc3b68eff9ad4b809d21a6c9d858c5b9ddfa
|
refs/heads/master
| 2023-08-16T13:37:23.691676
| 2023-08-13T15:20:52
| 2023-08-13T15:20:52
| 5,958,314
| 9,255
| 4,577
|
MIT
| 2023-09-13T21:17:22
| 2012-09-25T23:45:11
|
ANTLR
|
UTF-8
|
Python
| false
| false
| 5,344
|
py
|
st.Test.py
|
# Generated from trgen <version>
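# NOTE: the <...> tokens below (<lexer_name>, <parser_name>, <start_symbol>,
# ...) are StringTemplate placeholders that trgen substitutes when generating
# this driver; "\<" escapes a literal "<" in template text.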
import sys
from antlr4 import *
from antlr4.error.ErrorListener import ErrorListener
from readchar import readchar
from <lexer_name> import <lexer_name>;
from <parser_name> import <parser_name>;
from datetime import datetime
def getChar():
xx = readchar()
    if (xx == '\x00'):  # readchar() returns a str, so compare against NUL, not the int 0
        return ''
return xx
class MyErrorListener(ErrorListener):
def __init__(self, q, t, o):
super().__init__()
self.had_error = False
self.quiet = q
self.tee = t;
self.output = o
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
self.had_error = True
if ( self.tee ):
self.output.write(f"line {line}:{column} {msg}\n");
if (not self.quiet):
print(f"line {line}:{column} {msg}", file=sys.stderr);
tee = False
show_tokens = False
show_tree = False
show_trace = False
encoding = "utf-8"
error_code = 0
string_instance = 0
prefix = ""
quiet = False
def main(argv):
global tee
global show_tokens
global show_tree
global show_trace
global encoding
global prefix
global quiet
global error_code
inputs = []
is_fns = []
prefix = ""
i = 1
while i \< len(argv):
arg = argv[i]
if arg in ("-tokens"):
show_tokens = True
elif arg in ("-tree"):
show_tree = True
elif arg in ("-prefix"):
i = i + 1
prefix = argv[i] + " "
elif arg in ("-input"):
i = i + 1
inputs.append(argv[i])
is_fns.append(False)
elif arg in ("-encoding"):
i = i + 1
encoding = argv[i]
elif arg in ("-tee"):
tee = True
elif arg in ("-x"):
while f := sys.stdin.readline():
f = f.strip()
inputs.append(f)
is_fns.append(True)
elif arg in ("-q"):
quiet = True
elif arg in ("-trace"):
show_trace = True
else:
inputs.append(argv[i])
is_fns.append(True)
i = i + 1
if len(inputs) == 0:
ParseStdin()
else:
start_time = datetime.now()
for f in range(0, len(inputs)):
if is_fns[f]:
ParseFilename(inputs[f], f)
else:
ParseString(inputs[f], f)
end_time = datetime.now()
diff = end_time - start_time
diff_time = diff.total_seconds()
if (not quiet):
print(f'Total Time: {diff_time}', file=sys.stderr);
sys.exit(error_code)
def ParseStdin():
sb = ""
ch = getChar()
while (ch != ''):
sb = sb + ch
ch = getChar()
input = sb
    stream = InputStream(input)
    DoParse(stream, 'stdin', 0)
def ParseString(input, row_number):
global string_instance
    stream = InputStream(input)  # don't shadow str(); it is needed on the next line
    DoParse(stream, 'string' + str(string_instance), row_number)
string_instance = string_instance + 1
def ParseFilename(input, row_number):
global encoding
    stream = FileStream(input, encoding)
    DoParse(stream, input, row_number)
def DoParse(stream, input_name, row_number):
global tee
global show_tokens
global show_tree
global show_trace
global encoding
global prefix
global quiet
global error_code
    lexer = <lexer_name>(stream)
lexer.removeErrorListeners()
if (tee):
output = open(input_name + ".errors", "w")
else:
output = sys.stderr
listener_lexer = MyErrorListener(quiet, tee, output)
lexer.addErrorListener(listener_lexer)
# lexer.strictMode = false
tokens = CommonTokenStream(lexer)
parser = <parser_name>(tokens)
parser.removeErrorListeners()
listener_parser = MyErrorListener(quiet, tee, output)
parser.addErrorListener(listener_parser)
if (show_tokens):
i = 0
while True:
ro_token = lexer.nextToken()
token = ro_token
# token.TokenIndex = i
i = i + 1
print(token, file=sys.stderr)
if (token.type == -1):
break
lexer.reset()
    if (show_trace):
        parser.setTrace(True)  # -trace asks for tracing, so enable it
ParserATNSimulator.trace_atn_sim = True
PredictionContext._trace_atn_sim = True
start_time = datetime.now()
tree = parser.<start_symbol>()
end_time = datetime.now()
diff = end_time - start_time
diff_time = diff.total_seconds()
result = ''
if listener_parser.had_error or listener_lexer.had_error:
result = 'fail'
error_code = 1
else:
result = 'success'
if (show_tree):
if (tee):
f = open(input_name + '.tree', 'w', encoding='utf-8')
f.write(tree.toStringTree(recog=parser))
f.close()
else:
print(tree.toStringTree(recog=parser), file=sys.stderr)
if (not quiet):
sys.stderr.write(prefix)
sys.stderr.write('Python3 ')
sys.stderr.write(f'{row_number}')
sys.stderr.write(' ')
sys.stderr.write(input_name)
sys.stderr.write(' ')
sys.stderr.write(result)
sys.stderr.write(' ')
sys.stderr.write(f'{diff_time}')
sys.stderr.write('\n')
if (tee):
output.close()
if __name__ == '__main__':
main(sys.argv)
|
d2dcfd8133216145916a062aed25f2715583fc7f
|
99199db3f78a344e72b281c71c690518ae07375a
|
/octavia/tests/unit/api/v2/types/test_l7policy.py
|
c0eddaf2495857dde24e0622a3e4e5424940d976
|
[
"Apache-2.0"
] |
permissive
|
openstack/octavia
|
3faf2afe2ade5bd3978bb3a0558d2eeefc648ba2
|
0426285a41464a5015494584f109eed35a0d44db
|
refs/heads/master
| 2023-09-01T20:12:48.272344
| 2023-08-31T17:24:04
| 2023-08-31T17:24:04
| 21,018,188
| 147
| 180
|
Apache-2.0
| 2021-03-30T12:34:30
| 2014-06-19T22:47:19
|
Python
|
UTF-8
|
Python
| false
| false
| 7,366
|
py
|
test_l7policy.py
|
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
from wsme import exc
from wsme.rest import json as wsme_json
from wsme import types as wsme_types
from octavia.api.v2.types import l7policy as l7policy_type
from octavia.common import constants
from octavia.tests.unit.api.common import base
class TestL7PolicyPOST(base.BaseTypesTest):
_type = l7policy_type.L7PolicyPOST
def setUp(self):
super().setUp()
self.listener_id = uuidutils.generate_uuid()
def test_l7policy(self):
body = {"listener_id": self.listener_id,
"action": constants.L7POLICY_ACTION_REJECT,
"tags": ['test_tag']}
l7policy = wsme_json.fromjson(self._type, body)
self.assertEqual(self.listener_id, l7policy.listener_id)
self.assertEqual(constants.MAX_POLICY_POSITION, l7policy.position)
self.assertEqual(wsme_types.Unset, l7policy.redirect_url)
self.assertEqual(wsme_types.Unset, l7policy.redirect_pool_id)
self.assertTrue(l7policy.admin_state_up)
def test_action_mandatory(self):
body = {"listener_id": self.listener_id}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_listener_id_mandatory(self):
body = {"action": constants.L7POLICY_ACTION_REJECT}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_invalid_action(self):
body = {"listener_id": self.listener_id,
"action": "test"}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_with_redirect_url(self):
url = "http://www.example.com/"
body = {"listener_id": self.listener_id,
"action": constants.L7POLICY_ACTION_REDIRECT_TO_URL,
"redirect_url": url}
l7policy = wsme_json.fromjson(self._type, body)
self.assertEqual(constants.MAX_POLICY_POSITION, l7policy.position)
self.assertEqual(url, l7policy.redirect_url)
self.assertEqual(wsme_types.Unset, l7policy.redirect_pool_id)
def test_invalid_position(self):
body = {"listener_id": self.listener_id,
"action": constants.L7POLICY_ACTION_REJECT,
"position": "notvalid"}
self.assertRaises(ValueError, wsme_json.fromjson, self._type,
body)
def test_invalid_tags(self):
body = {"listener_id": self.listener_id,
"action": constants.L7POLICY_ACTION_REJECT,
"tags": "invalid_tag"}
self.assertRaises(ValueError, wsme_json.fromjson, self._type,
body)
body = {"listener_id": self.listener_id,
"action": constants.L7POLICY_ACTION_REJECT,
"tags": [1, 2]}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_l7policy_min_position(self):
body = {"listener_id": self.listener_id,
"action": constants.L7POLICY_ACTION_REJECT,
"position": constants.MIN_POLICY_POSITION - 1}
self.assertRaises(
exc.InvalidInput, wsme_json.fromjson, self._type, body)
body = {"listener_id": self.listener_id,
"action": constants.L7POLICY_ACTION_REJECT,
"position": constants.MIN_POLICY_POSITION}
l7policy = wsme_json.fromjson(self._type, body)
self.assertEqual(constants.MIN_POLICY_POSITION, l7policy.position)
def test_l7policy_max_position(self):
body = {"listener_id": self.listener_id,
"action": constants.L7POLICY_ACTION_REJECT,
"position": constants.MAX_POLICY_POSITION + 1}
self.assertRaises(
exc.InvalidInput, wsme_json.fromjson, self._type, body)
body = {"listener_id": self.listener_id,
"action": constants.L7POLICY_ACTION_REJECT,
"position": constants.MAX_POLICY_POSITION}
l7policy = wsme_json.fromjson(self._type, body)
self.assertEqual(constants.MAX_POLICY_POSITION, l7policy.position)
def test_invalid_admin_state_up(self):
body = {"listener_id": self.listener_id,
"action": constants.L7POLICY_ACTION_REJECT,
"admin_state_up": "notvalid"}
self.assertRaises(ValueError, wsme_json.fromjson, self._type,
body)
def test_invalid_url(self):
body = {"listener_id": self.listener_id,
"action": constants.L7POLICY_ACTION_REDIRECT_TO_URL,
"redirect_url": "notvalid"}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
class TestL7PolicyPUT(base.BaseTypesTest):
_type = l7policy_type.L7PolicyPUT
def test_l7policy(self):
body = {"action": constants.L7POLICY_ACTION_REJECT,
"position": constants.MIN_POLICY_POSITION,
"tags": ['test_tag']}
l7policy = wsme_json.fromjson(self._type, body)
self.assertEqual(constants.MIN_POLICY_POSITION, l7policy.position)
self.assertEqual(wsme_types.Unset, l7policy.redirect_url)
self.assertEqual(wsme_types.Unset, l7policy.redirect_pool_id)
def test_l7policy_min_position(self):
body = {"position": constants.MIN_POLICY_POSITION - 1}
self.assertRaises(
exc.InvalidInput, wsme_json.fromjson, self._type, body)
body = {"position": constants.MIN_POLICY_POSITION}
l7policy = wsme_json.fromjson(self._type, body)
self.assertEqual(constants.MIN_POLICY_POSITION, l7policy.position)
def test_l7policy_max_position(self):
body = {"position": constants.MAX_POLICY_POSITION + 1}
self.assertRaises(
exc.InvalidInput, wsme_json.fromjson, self._type, body)
body = {"position": constants.MAX_POLICY_POSITION}
l7policy = wsme_json.fromjson(self._type, body)
self.assertEqual(constants.MAX_POLICY_POSITION, l7policy.position)
def test_invalid_position(self):
body = {"position": "test"}
self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)
def test_invalid_action(self):
body = {"action": "test"}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_invalid_tags(self):
body = {"tags": "invalid_tag"}
self.assertRaises(ValueError, wsme_json.fromjson, self._type,
body)
body = {"tags": [1, 2]}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
------------------------------------------------------------------------------
File: /snippets/esp32/validate_machine.py
Repo: Josverl/micropython-stubber | License: MIT (permissive) | Branch: refs/heads/main
Language: Python | Encoding: UTF-8 | Size: 5,677 bytes | Revision: 2023-05-31 | Stars: 135 | Forks: 8
------------------------------------------------------------------------------
# board : ESP32
# ref : https://docs.micropython.org/en/latest/esp32/quickref.html
import utime as time
import machine
machine.freq() # get the current frequency of the CPU
machine.freq(240000000) # set the CPU frequency to 240 MHz
# Pins and GPIO
# Use the machine.Pin class:
from machine import Pin
p0 = Pin(0, Pin.OUT) # create output pin on GPIO0
p0.on() # set pin to "on" (high) level
p0.off() # set pin to "off" (low) level
p0.value(1) # set pin to on/high
p2 = Pin(2, Pin.IN) # create input pin on GPIO2
print(p2.value()) # get value, 0 or 1
p4 = Pin(4, Pin.IN, Pin.PULL_UP) # enable internal pull-up resistor
p5 = Pin(5, Pin.OUT, value=1) # set pin high on creation
# UART (serial bus)
# See machine.UART.
from machine import UART
uart1 = UART(1, baudrate=9600, tx=33, rx=32)  # ESP32 has UART ids 0-2
uart1.write("hello") # write 5 bytes
uart1.read(5) # read up to 5 bytes
# PWM (pulse width modulation)
from machine import Pin, PWM
pwm0 = PWM(Pin(0)) # create PWM object from a pin
freq = pwm0.freq() # get current frequency (default 5kHz)
pwm0.freq(1000) # set PWM frequency from 1Hz to 40MHz
duty = pwm0.duty() # get current duty cycle, range 0-1023 (default 512, 50%)
pwm0.duty(256) # set duty cycle from 0 to 1023 as a ratio duty/1023, (now 25%)
duty_u16 = pwm0.duty_u16() # get current duty cycle, range 0-65535
pwm0.duty_u16(2**16 * 3 // 4) # set duty cycle from 0 to 65535 as a ratio duty_u16/65535, (now 75%)
duty_ns = pwm0.duty_ns() # get current pulse width in ns
pwm0.duty_ns(250_000) # set pulse width in nanoseconds from 0 to 1_000_000_000/freq, (now 25%)
pwm0.deinit() # turn off PWM on the pin
pwm2 = PWM(Pin(2), freq=20000, duty=512) # create and configure in one go
print(pwm2) # view PWM settings
# ADC (analog to digital conversion)
from machine import ADC
adc = ADC(Pin(32)) # create ADC object on ADC pin
adc.read() # read value, 0-4095 across voltage range 0.0v - 1.0v
adc.atten(ADC.ATTN_11DB) # set 11dB input attenuation (voltage range roughly 0.0v - 3.6v)
adc.width(ADC.WIDTH_9BIT) # set 9 bit return values (returned range 0-511)
adc.read() # read value using the newly configured attenuation and width
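# Rough raw-to-volts conversion for the configuration above (9-bit width,
# 11dB attenuation, roughly 3.6V full scale). The ESP32 ADC is nonlinear
# and uncalibrated, so treat this sketch as an approximation only:
volts = adc.read() / 511 * 3.6
print(volts)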
# Software SPI bus
from machine import Pin, SoftSPI
# construct a SoftSPI bus on the given pins
# polarity is the idle state of SCK
# phase=0 means sample on the first edge of SCK, phase=1 means the second
spi = SoftSPI(baudrate=100000, polarity=1, phase=0, sck=Pin(0), mosi=Pin(2), miso=Pin(4))
spi.init(baudrate=200000) # set the baudrate
spi.read(10) # read 10 bytes on MISO
spi.read(10, 0xFF) # read 10 bytes while outputting 0xff on MOSI
buf = bytearray(50) # create a buffer
spi.readinto(buf) # read into the given buffer (reads 50 bytes in this case)
spi.readinto(buf, 0xFF) # read into the given buffer and output 0xff on MOSI
spi.write(b"12345") # write 5 bytes on MOSI
buf = bytearray(4) # create a buffer
spi.write_readinto(b"1234", buf) # write to MOSI and read from MISO into the buffer
spi.write_readinto(buf, buf) # write buf to MOSI and read MISO back into buf
# Hardware SPI
from machine import Pin, SPI
hspi = SPI(1, 10000000)
hspi = SPI(1, 10000000, sck=Pin(14), mosi=Pin(13), miso=Pin(12))
vspi = SPI(2, baudrate=80000000, polarity=0, phase=0, bits=8, firstbit=0, sck=Pin(18), mosi=Pin(23), miso=Pin(19))
# Software I2C
from machine import Pin, SoftI2C
i2c = SoftI2C(scl=Pin(5), sda=Pin(4), freq=100000)
i2c.scan() # scan for devices
i2c.readfrom(0x3A, 4) # read 4 bytes from device with address 0x3a
i2c.writeto(0x3A, "12") # write '12' to device with address 0x3a
# Hardware I2C bus
from machine import Pin, I2C
i2c = I2C(0)
i2c = I2C(1, scl=Pin(5), sda=Pin(4), freq=400000)
# construct an I2C bus
i2c = I2C(scl=Pin(5), sda=Pin(4), freq=100000)
# I2S bus
from machine import I2S, Pin
i2s = I2S(0, sck=Pin(13), ws=Pin(14), sd=Pin(34), mode=I2S.TX, bits=16, format=I2S.STEREO, rate=44100, ibuf=40000) # create I2S object
i2s.write(buf) # write buffer of audio samples to I2S device
i2s = I2S(1, sck=Pin(33), ws=Pin(25), sd=Pin(32), mode=I2S.RX, bits=16, format=I2S.MONO, rate=22050, ibuf=40000) # create I2S object
i2s.readinto(buf) # fill buffer with audio samples from I2S device
# Real time clock (RTC)
from machine import RTC
rtc = RTC()
rtc.datetime((2017, 8, 23, 1, 12, 48, 0, 0)) # set a specific date and time
rtc.datetime() # get date and time
# WDT (Watchdog timer)
from machine import WDT
# enable the WDT with a timeout of 5s (1s is the minimum)
wdt = WDT(timeout=5000)
wdt.feed()
# Deep-sleep mode
import machine
# check if the device woke from a deep sleep
if machine.reset_cause() == machine.DEEPSLEEP_RESET:
print("woke from a deep sleep")
# put the device to sleep for 10 seconds
machine.deepsleep(10000)
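# A wake source is normally configured *before* machine.deepsleep() is
# called; a sketch using the esp32 module (the pin choice here is arbitrary,
# but it must be an RTC-capable GPIO):
import esp32
wake_pin = Pin(15, Pin.IN, Pin.PULL_UP)
esp32.wake_on_ext0(wake_pin, esp32.WAKEUP_ALL_LOW)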
# SD card
import machine, os
# Slot 2 uses pins sck=18, cs=5, miso=19, mosi=23
sd = machine.SDCard(slot=2)
os.mount(sd, "/sd") # mount
os.listdir("/sd") # list directory contents
os.umount("/sd") # eject
# OneWire driver
from machine import Pin
import onewire
ow = onewire.OneWire(Pin(12)) # create a OneWire bus on GPIO12
ow.scan() # return a list of devices on the bus
ow.reset() # reset the bus
ow.readbyte() # read a byte
ow.writebyte(0x12) # write a byte on the bus
ow.write(b"123") # write bytes on the bus
ow.select_rom(b"12345678") # select a specific device by its ROM code
# DS18S20 and DS18B20 devices:
import ds18x20
ds = ds18x20.DS18X20(ow)
roms = ds.scan()
ds.convert_temp()
time.sleep_ms(750)
for rom in roms:
print(ds.read_temp(rom))
# Capacitive touch
from machine import TouchPad, Pin
t = TouchPad(Pin(14))
t.read() # Returns a smaller number when touched
------------------------------------------------------------------------------
File: /一些爬虫相关的内容/ppbc植物网站所有图片地址pid下载.py
      (roughly: "crawler-related scripts / download the pid of every image on the ppbc plant site")
Repo: cilame/any-whim | License: none | Branch: refs/heads/master
Language: Python | Encoding: UTF-8 | Size: 22,589 bytes | Revision: 2023-08-13 | Stars: 125 | Forks: 64
------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request, Selector
from lxml import etree
# Patch below: guards against encoding problems that can appear when someone runs this standalone scrapy script with pythonw; when running it directly with python it makes no difference either way.
# import io, sys; sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')
# Patch below: works around the rare case where scrapy (twisted) cannot handle the non-conforming headers returned by a small number of sites.
def lineReceived(self, line):
if line[-1:] == b'\r': line = line[:-1]
if self.state == u'STATUS': self.statusReceived(line); self.state = u'HEADER'
elif self.state == u'HEADER':
if not line or line[0] not in b' \t':
if self._partialHeader is not None:
_temp = b''.join(self._partialHeader).split(b':', 1)
name, value = _temp if len(_temp) == 2 else (_temp[0], b'')
self.headerReceived(name, value.strip())
if not line: self.allHeadersReceived()
else: self._partialHeader = [line]
else: self._partialHeader.append(line)
import twisted.web._newclient
twisted.web._newclient.HTTPParser.lineReceived = lineReceived
# Patch below: the idna library is overly strict and rejects hostnames containing underscores; relax that check.
import idna.core
_check_label_bak = idna.core.check_label
def check_label(label):
try: return _check_label_bak(label)
except idna.core.InvalidCodepoint: pass
idna.core.check_label = check_label
import re
import json
from urllib.parse import unquote, quote
class VSpider(scrapy.Spider):
name = 'v'
custom_settings = {
'COOKIES_ENABLED': False, # Do not use automatic cookie caching(set 'dont_merge_cookies' as True in Request.meta is same)
}
proxy = None # 'http://127.0.0.1:8888'
# proxy = 'http://140.205.171.24:80'
debug_break = False
def start_requests(self):
def mk_url_headers(key):
def quote_val(url): return re.sub(r'([\?&][^=&]*=)([^&]*)', lambda i:i.group(1)+quote(unquote(i.group(2),encoding='utf-8'),encoding='utf-8'), url)
url = (
'http://ppbc.iplant.cn/ashx/getphotopage.ashx'
'?page=1'
'&n=2'
'&group=class'
'&p={}'
).format(key)
url = quote_val(url)
headers = {
# "trpr-client-name": "mit-spider",
"accept-encoding": "gzip, deflate", # auto delete br encoding. cos requests and scrapy can not decode it.
"accept-language": "zh-CN,zh;q=0.9",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36"
}
return url,headers
for key in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
url,headers = mk_url_headers(key)
meta = {}
meta['proxy'] = self.proxy
r = Request(
url,
headers = headers,
callback = self.parse,
meta = meta,
)
yield r
if self.debug_break: break
def parse(self, response):
for x in response.xpath('//body/div/div[2][@class="mp10"]'):
d = {}
d["href"] = x.xpath('./div/a[@href]/@href')[0].extract() # [cnt:40] [len:11] /fam/254069
d["zh_name"] = x.xpath('string(./div[1]/a[@href])')[0].extract()
d["en_name"] = x.xpath('string(./div[2]/a[@href])')[0].extract()
# print('------------------------------ split ------------------------------')
# import pprint
# pprint.pprint(d)
# yield d
# continue
if '/fam/' in d['href']:
def mk_url_headers(d):
url = (
'http://ppbc.iplant.cn/ashx/getphotopage.ashx'
'?t=0.25047715588424446'
'&n=1'
'&group=fam'
'&cid={}'
'&p='
'&m='
).format(re.findall(r'/fam/(\d+)', d['href'])[0])
headers = {
# "trpr-client-name": "mit-spider",
"accept-encoding": "gzip, deflate", # auto delete br encoding. cos requests and scrapy can not decode it.
"accept-language": "zh-CN,zh;q=0.9",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36"
}
return url,headers
url,headers = mk_url_headers(d)
meta = {}
meta['proxy'] = self.proxy
r = Request(
url,
headers = headers,
callback = self.parse_fam_page,
meta = meta,
)
yield r
if self.debug_break: break
if '/gen/' in d["href"]:
def mk_url_headers(d):
url = (
'http://ppbc.iplant.cn/ashx/getphotopage.ashx'
'?t=0.25047715588424446'
'&n=1'
'&group=gen'
'&cid={}'
'&p='
'&m='
).format(re.findall(r'/gen/(\d+)', d['href'])[0])
headers = {
# "trpr-client-name": "mit-spider",
"accept-encoding": "gzip, deflate", # auto delete br encoding. cos requests and scrapy can not decode it.
"accept-language": "zh-CN,zh;q=0.9",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36"
}
return url,headers
url,headers = mk_url_headers(d)
meta = {}
meta['proxy'] = self.proxy
r = Request(
url,
headers = headers,
callback = self.parse_gen_page,
meta = meta,
)
yield r
if self.debug_break: break
if '/sp/' in d["href"]:
def mk_url_headers(d):
url = (
'http://ppbc.iplant.cn/ashx/getphotopage.ashx'
'?t=0.25047715588424446'
'&n=1'
'&group=sp'
'&cid={}'
'&p='
'&m='
).format(re.findall(r'/sp/(\d+)', d['href'])[0])
headers = {
# "trpr-client-name": "mit-spider",
"accept-encoding": "gzip, deflate", # auto delete br encoding. cos requests and scrapy can not decode it.
"accept-language": "zh-CN,zh;q=0.9",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36"
}
return url,headers
url,headers = mk_url_headers(d)
meta = {}
meta['proxy'] = self.proxy
r = Request(
url,
headers = headers,
callback = self.parse_sp_page,
meta = meta,
)
yield r
if self.debug_break: break
def parse_fam_page(self, response):
jsondata = json.loads(response.body.decode('utf-8'))
count = jsondata["count"]
nextstr = re.findall(r"href='([^']+)'", jsondata["nextstr"])[0]
for page in range(1, int(count)+1):
def mk_url_headers(page, nextstr):
url = response.urljoin(nextstr)
url = re.sub(r'page=(\d+)', 'page={}'.format(page), url)
headers = {
# "trpr-client-name": "mit-spider",
"accept-encoding": "gzip, deflate", # auto delete br encoding. cos requests and scrapy can not decode it.
"accept-language": "zh-CN,zh;q=0.9",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36"
}
return url,headers
url,headers = mk_url_headers(page, nextstr)
meta = {}
meta['proxy'] = self.proxy
r = Request(
url,
headers = headers,
callback = self.parse_gen_or_sp,
meta = meta,
dont_filter = True,
)
yield r
if self.debug_break: break
def parse_gen_or_sp(self, response):
for x in response.xpath('//div[contains(@class, "item3")]'):
d = {}
d["href"] = x.xpath('./div[@class="mp10"]/div/a/@href')[0].extract() # [cnt:2] [len:11] /gen/258751
d["name"] = x.xpath('string(./div[@class="mp10"]/div/a)')[0].extract() # [cnt:2] [len:11] /gen/258751
# print('------------------------------ split ------------------------------')
# import pprint
# pprint.pprint(d)
# yield d
# continue
if '/gen/' in d["href"]:
def mk_url_headers(d):
url = (
'http://ppbc.iplant.cn/ashx/getphotopage.ashx'
'?t=0.25047715588424446'
'&n=1'
'&group=gen'
'&cid={}'
'&p='
'&m='
).format(re.findall(r'/gen/(\d+)', d['href'])[0])
headers = {
# "trpr-client-name": "mit-spider",
"accept-encoding": "gzip, deflate", # auto delete br encoding. cos requests and scrapy can not decode it.
"accept-language": "zh-CN,zh;q=0.9",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36"
}
return url,headers
url,headers = mk_url_headers(d)
meta = {}
meta['proxy'] = self.proxy
r = Request(
url,
headers = headers,
callback = self.parse_gen_page,
meta = meta,
)
yield r
if self.debug_break: break
if '/sp/' in d["href"]:
def mk_url_headers(d):
url = (
'http://ppbc.iplant.cn/ashx/getphotopage.ashx'
'?t=0.25047715588424446'
'&n=1'
'&group=sp'
'&cid={}'
'&p='
'&m='
).format(re.findall(r'/sp/(\d+)', d['href'])[0])
headers = {
# "trpr-client-name": "mit-spider",
"accept-encoding": "gzip, deflate", # auto delete br encoding. cos requests and scrapy can not decode it.
"accept-language": "zh-CN,zh;q=0.9",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36"
}
return url,headers
url,headers = mk_url_headers(d)
meta = {}
meta['proxy'] = self.proxy
r = Request(
url,
headers = headers,
callback = self.parse_sp_page,
meta = meta,
)
yield r
if self.debug_break: break
def parse_gen_page(self, response):
jsondata = json.loads(response.body.decode('utf-8'))
count = jsondata["count"]
nextstr = re.findall(r"href='([^']+)'", jsondata["nextstr"])[0]
for page in range(1, int(count)+1):
def mk_url_headers(page, nextstr):
url = response.urljoin(nextstr)
url = re.sub(r'page=(\d+)', 'page={}'.format(page), url)
headers = {
# "trpr-client-name": "mit-spider",
"accept-encoding": "gzip, deflate", # auto delete br encoding. cos requests and scrapy can not decode it.
"accept-language": "zh-CN,zh;q=0.9",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36"
}
return url,headers
url,headers = mk_url_headers(page, nextstr)
meta = {}
meta['proxy'] = self.proxy
r = Request(
url,
headers = headers,
callback = self.parse_gen,
meta = meta,
dont_filter = True,
)
yield r
if self.debug_break: break
def parse_gen(self, response):
for x in response.xpath('//div[contains(@class, "item3")]'):
d = {}
d["href"] = x.xpath('./div[@class="mp10"]/div/a/@href')[0].extract() # [cnt:2] [len:11] /gen/258751
d["name"] = x.xpath('string(./div[@class="mp10"]/div/a)')[0].extract() # [cnt:2] [len:11] /gen/258751
# print('------------------------------ split ------------------------------')
# import pprint
# pprint.pprint(d)
# yield d
# continue
if '/gen/' in d["href"]:
def mk_url_headers(d):
url = (
'http://ppbc.iplant.cn/ashx/getphotopage.ashx'
'?t=0.25047715588424446'
'&n=1'
'&group=gen'
'&cid={}'
'&p='
'&m='
).format(re.findall(r'/gen/(\d+)', d['href'])[0])
headers = {
# "trpr-client-name": "mit-spider",
"accept-encoding": "gzip, deflate", # auto delete br encoding. cos requests and scrapy can not decode it.
"accept-language": "zh-CN,zh;q=0.9",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36"
}
return url,headers
url,headers = mk_url_headers(d)
meta = {}
meta['proxy'] = self.proxy
r = Request(
url,
headers = headers,
callback = self.parse_gen_page,
meta = meta,
)
yield r
if self.debug_break: break
if '/sp/' in d["href"]:
def mk_url_headers(d):
url = (
'http://ppbc.iplant.cn/ashx/getphotopage.ashx'
'?t=0.25047715588424446'
'&n=1'
'&group=sp'
'&cid={}'
'&p='
'&m='
).format(re.findall(r'/sp/(\d+)', d['href'])[0])
headers = {
# "trpr-client-name": "mit-spider",
"accept-encoding": "gzip, deflate", # auto delete br encoding. cos requests and scrapy can not decode it.
"accept-language": "zh-CN,zh;q=0.9",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36"
}
return url,headers
url,headers = mk_url_headers(d)
meta = {}
meta['proxy'] = self.proxy
r = Request(
url,
headers = headers,
callback = self.parse_sp_page,
meta = meta,
)
yield r
if self.debug_break: break
def parse_sp_page(self, response):
jsondata = json.loads(response.body.decode('utf-8'))
count = jsondata["count"]
nextstr = re.findall(r"href='([^']+)'", jsondata["nextstr"])[0]
for page in range(1, int(count)+1):
def mk_url_headers(page, nextstr):
url = response.urljoin(nextstr)
url = re.sub(r'page=(\d+)', 'page={}'.format(page), url)
headers = {
# "trpr-client-name": "mit-spider",
"accept-encoding": "gzip, deflate", # auto delete br encoding. cos requests and scrapy can not decode it.
"accept-language": "zh-CN,zh;q=0.9",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36"
}
return url,headers
url,headers = mk_url_headers(page, nextstr)
meta = {}
meta['proxy'] = self.proxy
r = Request(
url,
headers = headers,
callback = self.parse_sp,
meta = meta,
dont_filter = True,
)
yield r
if self.debug_break: break
allcount = 0
def parse_sp(self, response):
for x in response.xpath('//div[contains(@class, "item3")]'):
d = {}
d["pid"] = x.xpath('./div[@class="item_t"]/div[@class="img"]/@pid')[0].extract() # [cnt:2] [len:11] /gen/258751
d["cno"] = x.xpath('./div[@class="item_t"]/div[@class="img"]/@cno')[0].extract() # [cnt:2] [len:11] /gen/258751
d["img_href"] = x.xpath('./div[@class="item_t"]/div/a/img/@src')[0].extract() # [cnt:2] [len:11] /gen/258751
d["name"] = x.xpath('./div[@class="item_t"]/div/a/img/@alt')[0].extract() # [cnt:2] [len:11] /gen/258751
d["fullname"] = x.xpath('string(./div[@class="item_t"]/div/div[@class="namew fl"]/a)')[0].extract() # [cnt:2] [len:11] /gen/258751
# print('------------------------------ split ------------------------------')
# import pprint
# pprint.pprint(d)
self.allcount += 1
yield d
print('allcount', self.allcount)
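# The per-callback mk_url_headers closures above are all near-identical; a
# hypothetical refactor (not used by the spider above) could collapse them
# into a single module-level helper, parameterized by group ('fam', 'gen'
# or 'sp') and the numeric cid:
def make_photopage_url_headers(group, cid):
    url = ('http://ppbc.iplant.cn/ashx/getphotopage.ashx'
           '?t=0.25047715588424446&n=1&group={}&cid={}&p=&m=').format(group, cid)
    headers = {
        "accept-encoding": "gzip, deflate",  # 'br' omitted: requests/scrapy cannot decode it
        "accept-language": "zh-CN,zh;q=0.9",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36",
    }
    return url, headers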
# Fallback setup so this spider can also crawl when run as a standalone script; when started from a scrapy project the code below has no effect.
if __name__ == '__main__':
import os, time
from scrapy.crawler import CrawlerProcess
    timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime()) # YYYYMMDD_HHMMSS
    filename = 'ppbc_plant{}.json'.format(timestamp) # output file name (used via the 'FEED_URI' setting below)
    jobdir = 'JOBDIR/nmVoRYNGBx' # job-state directory (takes effect if the 'JOBDIR' setting is uncommented)
    p = CrawlerProcess({
        'TELNETCONSOLE_ENABLED': False, # almost nobody uses this feature; disabling it speeds up spider startup
        'MEDIA_ALLOW_REDIRECTS': True, # allow redirects on media download URLs; keep enabled whenever images must be downloaded
        # 'LOG_LEVEL': 'INFO', # DEBUG , INFO , WARNING , ERROR , CRITICAL
        # 'JOBDIR': jobdir, # uncomment to enable resumable crawling:
        # storage for the request queue, dedup fingerprints and crawl state (in short, a folder)
        'FEED_URI': filename, # write scraped items to this file
        'FEED_EXPORT_ENCODING': 'utf-8', # roughly equivalent to json's ensure_ascii=False option
        # 'FEED_FORMAT': 'json', # output format; when unset, items are written as jsonlines.
        # supported formats: json, jsonlines, csv, xml, pickle, marshal
        # 'DOWNLOAD_TIMEOUT': 8, # global request timeout, default 180; a single request can override it via meta['download_timeout']
        # 'DOWNLOAD_DELAY': .1, # global download delay; far more intuitive than the other throttling settings
    })
p.crawl(VSpider)
p.start()
------------------------------------------------------------------------------
File: /hyper/cli.py
Repo: python-hyper/hyper | License: MIT (permissive) | Branch: refs/heads/development
Language: Python | Encoding: UTF-8 | Size: 7,505 bytes | Revision: 2021-01-12 | Stars: 198 | Forks: 67
------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
hyper/cli
~~~~~~~~~
Command line interface for Hyper inspired by Httpie.
"""
import json
import locale
import logging
import sys
from argparse import ArgumentParser, RawTextHelpFormatter
from argparse import OPTIONAL, ZERO_OR_MORE
from pprint import pformat
from textwrap import dedent
from hyper import HTTPConnection, HTTP20Connection
from hyper import __version__
from hyper.compat import is_py2, urlencode, urlsplit, write_to_stdout
from hyper.common.util import to_host_port_tuple
log = logging.getLogger('hyper')
PREFERRED_ENCODING = locale.getpreferredencoding()
# Various separators used in args
SEP_HEADERS = ':'
SEP_QUERY = '=='
SEP_DATA = '='
SEP_GROUP_ITEMS = [
SEP_HEADERS,
SEP_QUERY,
SEP_DATA,
]
class KeyValue(object):
"""Base key-value pair parsed from CLI."""
def __init__(self, key, value, sep, orig):
self.key = key
self.value = value
self.sep = sep
self.orig = orig
class KeyValueArgType(object):
"""A key-value pair argument type used with `argparse`.
Parses a key-value arg and constructs a `KeyValue` instance.
Used for headers, form data, and other key-value pair types.
This class is inspired by httpie and implements simple tokenizer only.
"""
def __init__(self, *separators):
self.separators = separators
def __call__(self, string):
for sep in self.separators:
splitted = string.split(sep, 1)
if len(splitted) == 2:
key, value = splitted
return KeyValue(key, value, sep, string)
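# For example, KeyValueArgType(*SEP_GROUP_ITEMS)('Referer:http://httpie.org')
# yields KeyValue(key='Referer', value='http://httpie.org', sep=':'), while a
# string containing none of the separators falls through the loop and the
# call returns None.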
def make_positional_argument(parser):
parser.add_argument(
'method', metavar='METHOD', nargs=OPTIONAL, default='GET',
help=dedent("""
The HTTP method to be used for the request
(GET, POST, PUT, DELETE, ...).
"""))
parser.add_argument(
'_url', metavar='URL',
help=dedent("""
The scheme defaults to 'https://' if the URL does not include one.
"""))
parser.add_argument(
'items',
metavar='REQUEST_ITEM',
nargs=ZERO_OR_MORE,
type=KeyValueArgType(*SEP_GROUP_ITEMS),
help=dedent("""
Optional key-value pairs to be included in the request.
The separator used determines the type:
':' HTTP headers:
Referer:http://httpie.org Cookie:foo=bar User-Agent:bacon/1.0
'==' URL parameters to be appended to the request URI:
search==hyper
'=' Data fields to be serialized into a JSON object:
name=Hyper language=Python description='CLI HTTP client'
"""))
def make_troubleshooting_argument(parser):
parser.add_argument(
'--version', action='version', version=__version__,
help='Show version and exit.')
parser.add_argument(
'--debug', action='store_true', default=False,
help='Show debugging information (loglevel=DEBUG)')
parser.add_argument(
'--h2', action='store_true', default=False,
help="Do HTTP/2 directly, skipping plaintext upgrade and ignoring "
"NPN/ALPN."
)
def split_host_and_port(hostname):
if ':' in hostname:
return to_host_port_tuple(hostname, default_port=443)
return hostname, None
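# e.g. split_host_and_port('example.org:8443') returns ('example.org', 8443),
# while a bare 'example.org' yields ('example.org', None).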
class UrlInfo(object):
def __init__(self):
self.fragment = None
self.host = 'localhost'
self.netloc = None
self.path = '/'
self.port = 443
self.query = None
self.scheme = 'https'
self.secure = False
def set_url_info(args):
info = UrlInfo()
_result = urlsplit(args._url)
for attr in vars(info).keys():
value = getattr(_result, attr, None)
if value:
setattr(info, attr, value)
if info.scheme == 'http' and not _result.port:
info.port = 80
    # Set the secure arg if the scheme is HTTPS; otherwise stay unsecured.
info.secure = info.scheme == 'https'
if info.netloc:
hostname, _ = split_host_and_port(info.netloc)
info.host = hostname # ensure stripping port number
else:
if _result.path:
_path = _result.path.split('/', 1)
hostname, port = split_host_and_port(_path[0])
info.host = hostname
if info.path == _path[0]:
info.path = '/'
elif len(_path) == 2 and _path[1]:
info.path = '/' + _path[1]
if port is not None:
info.port = port
log.debug('Url Info: %s', vars(info))
args.url = info
def set_request_data(args):
body, headers, params = {}, {}, {}
for i in args.items:
if i.sep == SEP_HEADERS:
if i.key:
headers[i.key] = i.value
else:
                # when overriding an HTTP/2 special header there will be a
# leading colon, which tricks the command line parser into
# thinking the header is empty
k, v = i.value.split(':', 1)
headers[':' + k] = v
elif i.sep == SEP_QUERY:
params[i.key] = i.value
elif i.sep == SEP_DATA:
value = i.value
if is_py2: # pragma: no cover
value = value.decode(PREFERRED_ENCODING)
body[i.key] = value
if params:
args.url.path += '?' + urlencode(params)
if body:
content_type = 'application/json'
headers.setdefault('content-type', content_type)
args.body = json.dumps(body)
if args.method is None:
args.method = 'POST' if args.body else 'GET'
args.method = args.method.upper()
args.headers = headers
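# e.g. the CLI item ':authority:example.org' parses with an empty key and the
# value 'authority:example.org'; the else-branch above re-splits the value so
# that headers[':authority'] = 'example.org'.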
def parse_argument(argv=None):
parser = ArgumentParser(formatter_class=RawTextHelpFormatter)
parser.set_defaults(body=None, headers={})
make_positional_argument(parser)
make_troubleshooting_argument(parser)
args = parser.parse_args(sys.argv[1:] if argv is None else argv)
if args.debug:
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
log.addHandler(handler)
log.setLevel(logging.DEBUG)
set_url_info(args)
set_request_data(args)
return args
def get_content_type_and_charset(response):
charset = 'utf-8'
content_type = response.headers.get('content-type')
if content_type is None:
return 'unknown', charset
content_type = content_type[0].decode('utf-8').lower()
type_and_charset = content_type.split(';', 1)
ctype = type_and_charset[0].strip()
if len(type_and_charset) == 2:
charset = type_and_charset[1].strip().split('=')[1]
return ctype, charset
def request(args):
if not args.h2:
conn = HTTPConnection(
args.url.host, args.url.port, secure=args.url.secure
)
else: # pragma: no cover
conn = HTTP20Connection(
args.url.host,
args.url.port,
secure=args.url.secure,
force_proto='h2'
)
conn.request(args.method, args.url.path, args.body, args.headers)
response = conn.get_response()
log.debug('Response Headers:\n%s', pformat(response.headers))
ctype, charset = get_content_type_and_charset(response)
data = response.read()
return data
def main(argv=None):
args = parse_argument(argv)
log.debug('Commandline Argument: %s', args)
data = request(args)
write_to_stdout(data)
if __name__ == '__main__': # pragma: no cover
main()
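# A hypothetical invocation (assuming the package's 'hyper' console script is
# on PATH; the response body depends entirely on the server):
#   $ hyper GET https://example.org/get search==hyper Accept:application/json
# builds GET /get?search=hyper with an Accept header and prints the body.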
------------------------------------------------------------------------------
File: /souporcell_pipeline.py
Repo: wheaton5/souporcell | License: MIT (permissive) | Branch: refs/heads/master
Language: Python | Encoding: UTF-8 | Size: 30,240 bytes | Revision: 2023-06-22 | Stars: 133 | Forks: 43
------------------------------------------------------------------------------
#!/usr/bin/env python
import argparse
parser = argparse.ArgumentParser(
description="single cell RNAseq mixed genotype clustering using sparse mixture model clustering.")
parser.add_argument("-i", "--bam", required = True, help = "cellranger bam")
parser.add_argument("-b", "--barcodes", required = True, help = "barcodes.tsv from cellranger")
parser.add_argument("-f", "--fasta", required = True, help = "reference fasta file")
parser.add_argument("-t", "--threads", required = True, type = int, help = "max threads to use")
parser.add_argument("-o", "--out_dir", required = True, help = "name of directory to place souporcell files")
parser.add_argument("-k", "--clusters", required = True, help = "number cluster, tbd add easy way to run on a range of k")
parser.add_argument("-p", "--ploidy", required = False, default = "2", help = "ploidy, must be 1 or 2, default = 2")
parser.add_argument("--min_alt", required = False, default = "10", help = "min alt to use locus, default = 10.")
parser.add_argument("--min_ref", required = False, default = "10", help = "min ref to use locus, default = 10.")
parser.add_argument("--max_loci", required = False, default = "2048", help = "max loci per cell, affects speed, default = 2048.")
parser.add_argument("--restarts", required = False, default = 100, type = int,
help = "number of restarts in clustering, when there are > 12 clusters we recommend increasing this to avoid local minima")
parser.add_argument("--common_variants", required = False, default = None,
help = "common variant loci or known variant loci vcf, must be vs same reference fasta")
parser.add_argument("--known_genotypes", required = False, default = None,
help = "known variants per clone in population vcf mode, must be .vcf right now we dont accept gzip or bcf sorry")
parser.add_argument("--known_genotypes_sample_names", required = False, nargs = '+', default = None,
help = "which samples in population vcf from known genotypes option represent the donors in your sample")
parser.add_argument("--skip_remap", required = False, default = False, type = bool,
help = "don't remap with minimap2 (not recommended unless in conjunction with --common_variants")
parser.add_argument("--no_umi", required = False, default = "False", help = "set to True if your bam has no UMI tag, will ignore/override --umi_tag")
parser.add_argument("--umi_tag", required = False, default = "UB", help = "set if your umi tag is not UB")
parser.add_argument("--cell_tag", required = False, default = "CB", help = "DOES NOT WORK, vartrix doesnt support this! set if your cell barcode tag is not CB")
parser.add_argument("--ignore", required = False, default = False, type = bool, help = "set to True to ignore data error assertions")
parser.add_argument("--aligner", required = False, default = "minimap2", help = "optionally change to HISAT2 if you have it installed, not included in singularity build")
args = parser.parse_args()
if args.no_umi == "True":
args.no_umi = True
else:
args.no_umi = False
print("checking modules")
# importing all reqs to make sure things are installed
import numpy as np
import scipy
import gzip
import math
import pystan
import vcf
import pysam
import pyfaidx
import subprocess
import time
import os
print("imports done")
open_function = lambda f: gzip.open(f,"rt") if f[-3:] == ".gz" else open(f)
print("checking bam for expected tags")
UMI_TAG = args.umi_tag
CELL_TAG = args.cell_tag
assert CELL_TAG == "CB", "vartrix doesnt support different cell tags, remake bam with cell tag as CB"
#load each file to make sure it is legit
bc_set = set()
with open_function(args.barcodes) as barcodes:
for (index, line) in enumerate(barcodes):
bc = line.strip()
bc_set.add(bc)
assert len(bc_set) > 50, "Fewer than 50 barcodes in barcodes file? We expect 1 barcode per line."
assert args.aligner == "minimap2" or args.aligner == "HISAT2", "--aligner expects minimap2 or HISAT2"
assert not(not(args.known_genotypes == None) and not(args.common_variants == None)), "cannot set both known_genotypes and common_variants"
if args.known_genotypes_sample_names:
assert not(args.known_genotypes == None), "if you specify known_genotype_sample_names, must specify known_genotypes option"
assert len(args.known_genotypes_sample_names) == int(args.clusters), "length of known genotype sample names should be equal to k/clusters"
if args.known_genotypes:
reader = vcf.Reader(open(args.known_genotypes))
assert len(reader.samples) >= int(args.clusters), "number of samples in known genotype vcfs is less than k/clusters"
if args.known_genotypes_sample_names == None:
args.known_genotypes_sample_names = reader.samples
for sample in args.known_genotypes_sample_names:
        assert sample in reader.samples, "not all samples in the known_genotypes_sample_names option are present in the known genotypes vcf"
#test bam load
bam = pysam.AlignmentFile(args.bam)
num_cb = 0
num_cb_cb = 0 # num reads with barcodes from barcodes.tsv file
num_umi = 0
num_read_test = 100000
for (index,read) in enumerate(bam):
if index >= num_read_test:
break
if read.has_tag(CELL_TAG):
num_cb += 1
if read.get_tag(CELL_TAG) in bc_set:
num_cb_cb += 1
if read.has_tag(UMI_TAG):
num_umi += 1
if not args.ignore:
if args.skip_remap and args.common_variants == None and args.known_genotypes == None:
assert False, "WARNING: skip_remap enables without common_variants or known genotypes. Variant calls will be of poorer quality. Turn on --ignore True to ignore this warning"
assert float(num_cb) / float(num_read_test) > 0.5, "Less than 50% of first 100000 reads have cell barcode tag (CB), turn on --ignore True to ignore"
if not(args.no_umi):
assert float(num_umi) / float(num_read_test) > 0.5, "Less than 50% of first 100000 reads have UMI tag (UB), turn on --ignore True to ignore"
    assert float(num_cb_cb) / float(num_read_test) > 0.05, "Less than 5% of first 100000 reads have cell barcodes from barcodes file, is this the correct barcode file? turn on --ignore True to ignore"
print("checking fasta")
fasta = pyfaidx.Fasta(args.fasta, key_function = lambda key: key.split()[0])
def get_fasta_regions(fastaname, threads):
fasta = pyfaidx.Fasta(args.fasta, key_function = lambda key: key.split()[0])
total_reference_length = 0
for chrom in sorted(fasta.keys()):
total_reference_length += len(fasta[chrom])
step_length = int(math.ceil(total_reference_length/threads))
regions = []
region = []
region_so_far = 0
chrom_so_far = 0
for chrom in sorted(fasta.keys()):
chrom_length = len(fasta[chrom])
chrom_so_far = 0
if chrom_length < 250000:
continue
while True:
if region_so_far + (chrom_length - chrom_so_far) < step_length:
region.append((chrom, chrom_so_far, chrom_length))
region_so_far += chrom_length - chrom_so_far
chrom_so_far = 0
break
else:
region.append((chrom, chrom_so_far, chrom_so_far + step_length - region_so_far))
regions.append(region)
region = []
chrom_so_far += step_length - region_so_far
region_so_far = 0
if len(region) > 0:
if len(regions) == args.threads:
regions[-1] = regions[-1] + region
else:
regions.append(region)
return regions
def get_bam_regions(bamname, threads):
bam = pysam.AlignmentFile(bamname)
total_reference_length = 0
for chrom in bam.references:
total_reference_length += bam.get_reference_length(chrom)
step_length = int(math.ceil(total_reference_length / threads))
regions = []
region = []
region_so_far = 0
chrom_so_far = 0
for chrom in bam.references:
chrom_length = bam.get_reference_length(chrom)
#print(chrom+" size "+str(chrom_length)+" and step size "+str(step_length))
while True:
#print("\tregion so far "+str(region_so_far)+" chrom so far "+str(chrom_so_far))
#print("\t"+str(chrom_length - chrom_so_far)+" <= "+str(step_length - region_so_far))
#print("\t"+str((chrom_length - chrom_so_far) <= step_length - region_so_far))
if (chrom_length - chrom_so_far) <= step_length - region_so_far:
region.append((chrom, chrom_so_far, chrom_length))
#print("\t\tending chrom\t"+chrom+":"+str(chrom_so_far)+"-"+str(chrom_length))
region_so_far += chrom_length - chrom_so_far
chrom_so_far = 0
break
else:
region.append((chrom, chrom_so_far, chrom_so_far + step_length - region_so_far))
#print("\t\tending region\t"+chrom+":"+str(chrom_so_far)+"-"+str(chrom_so_far + step_length - region_so_far))
regions.append(region)
region = []
chrom_so_far += step_length - region_so_far
region_so_far = 0
if len(region) > 0:
regions.append(region)
return regions
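# Like get_fasta_regions above, get_bam_regions returns one list of
# (chrom, start, end) tuples per thread, each covering roughly
# total_reference_length / threads bases. A toy illustration for 2 threads:
#   [[('chr1', 0, 150)], [('chr1', 150, 200), ('chr2', 0, 100)]]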
def make_fastqs(args):
if not os.path.isfile(args.bam + ".bai"):
print("no bam index found, creating")
subprocess.check_call(['samtools', 'index', args.bam])
if not os.path.isfile(args.fasta + ".fai"):
print("fasta index not found, creating")
subprocess.check_call(['samtools', 'faidx', args.fasta])
regions = get_bam_regions(args.bam, int(args.threads))
# for testing, delete this later
args.threads = int(args.threads)
region_fastqs = [[] for x in range(args.threads)]
all_fastqs = []
procs = [None for x in range(args.threads)]
any_running = True
# run renamer in parallel manner
print("generating fastqs with cell barcodes and umis in readname")
while any_running:
any_running = False
for (index, region) in enumerate(regions):
block = False
if procs[index]:
block = procs[index].poll() == None
if block:
any_running = True
else:
assert not(procs[index].returncode), "renamer subprocess terminated abnormally with code " + str(procs[index].returncode)
if len(region_fastqs[index]) == len(region):
block = True
if not block:
sub_index = len(region_fastqs[index])
chrom = region[sub_index][0]
start = region[sub_index][1]
end = region[sub_index][2]
fq_name = args.out_dir + "/souporcell_fastq_" + str(index) + "_" + str(sub_index) + ".fq"
directory = os.path.dirname(os.path.realpath(__file__))
p = subprocess.Popen([directory+"/renamer.py", "--bam", args.bam, "--barcodes", args.barcodes, "--out", fq_name,
"--chrom", chrom, "--start", str(start), "--end", str(end), "--no_umi", str(args.no_umi),
"--umi_tag", args.umi_tag, "--cell_tag", args.cell_tag])
all_fastqs.append(fq_name)
procs[index] = p
region_fastqs[index].append(fq_name)
any_running = True
time.sleep(0.5)
with open(args.out_dir + "/fastqs.done", 'w') as done:
for fastqs in region_fastqs:
done.write("\t".join(fastqs) + "\n")
return((region_fastqs, all_fastqs))
def remap(args, region_fastqs, all_fastqs):
print("remapping")
# run minimap2
minimap_tmp_files = []
for index in range(args.threads):
if index > len(region_fastqs) or len(region_fastqs[index]) == 0:
continue
output = args.out_dir + "/souporcell_minimap_tmp_" + str(index) + ".sam"
minimap_tmp_files.append(output)
with open(args.out_dir + "/tmp.fq", 'w') as tmpfq:
subprocess.check_call(['cat'] + region_fastqs[index], stdout = tmpfq)
with open(output, 'w') as samfile:
with open(args.out_dir + "/minimap.err",'w') as minierr:
minierr.write("mapping\n")
if args.aligner == "HISAT2":
fasta_base = args.fasta
if fasta_base[-3:] == ".fa":
fasta_base = fasta_base[:-3]
elif fasta_base[-6:] == ".fasta":
fasta_base = fasta_base[:-6]
else:
assert False, "fasta file not right extension .fa or .fasta"
subprocess.check_call(["hisat2", "-p", str(args.threads), "-q", args.out_dir + "/tmp.fq", "-x",
fasta_base,
"-S", output], stderr =minierr)
else:
cmd = ["minimap2", "-ax", "splice", "-t", str(args.threads), "-G50k", "-k", "21",
"-w", "11", "--sr", "-A2", "-B8", "-O12,32", "-E2,1", "-r200", "-p.5", "-N20", "-f1000,5000",
"-n2", "-m20", "-s40", "-g2000", "-2K50m", "--secondary=no", args.fasta, args.out_dir + "/tmp.fq"]
minierr.write(" ".join(cmd)+"\n")
subprocess.check_call(["minimap2", "-ax", "splice", "-t", str(args.threads), "-G50k", "-k", "21",
"-w", "11", "--sr", "-A2", "-B8", "-O12,32", "-E2,1", "-r200", "-p.5", "-N20", "-f1000,5000",
"-n2", "-m20", "-s40", "-g2000", "-2K50m", "--secondary=no", args.fasta, args.out_dir + "/tmp.fq"],
stdout = samfile, stderr = minierr)
subprocess.check_call(['rm', args.out_dir + "/tmp.fq"])
with open(args.out_dir + '/remapping.done', 'w') as done:
for fn in minimap_tmp_files:
done.write(fn + "\n")
print("cleaning up tmp fastqs")
# clean up tmp fastqs
for fq in all_fastqs:
subprocess.check_call(["rm", fq])
return(minimap_tmp_files)
def retag(args, minimap_tmp_files):
print("repopulating cell barcode and UMI tags")
# run retagger
procs = []
retag_files = []
retag_error_files = []
retag_out_files = []
for index in range(args.threads):
if index > len(minimap_tmp_files) -1:
continue
outfile = args.out_dir + "/souporcell_retag_tmp_" + str(index) + ".bam"
retag_files.append(outfile)
errfile = open(outfile+".err",'w')
outfileout = open(outfile+".out",'w')
retag_error_files.append(errfile)
retag_out_files.append(outfileout)
print(args.no_umi)
print(str(args.no_umi))
cmd = ["retag.py", "--sam", minimap_tmp_files[index], "--no_umi", str(args.no_umi),
"--umi_tag", args.umi_tag, "--cell_tag", args.cell_tag, "--out", outfile]
print(" ".join(cmd))
directory = os.path.dirname(os.path.realpath(__file__))
p = subprocess.Popen([directory+"/retag.py", "--sam", minimap_tmp_files[index], "--no_umi", str(args.no_umi),
"--umi_tag", args.umi_tag, "--cell_tag", args.cell_tag, "--out", outfile], stdout = outfileout, stderr = errfile)
procs.append(p)
for (i, p) in enumerate(procs): # wait for processes to finish
p.wait()
retag_error_files[i].close()
retag_out_files[i].close()
assert not(p.returncode), "retag subprocess ended abnormally with code " + str(p.returncode)
for outfile in retag_files:
subprocess.check_call(['rm',outfile+".err", outfile+".out"])
print("sorting retagged bam files")
# sort retagged files
sort_jobs = []
filenames = []
with open(args.out_dir + "/retag.err", 'w') as retagerr:
for index in range(args.threads):
if index > len(retag_files) - 1:
continue
filename = args.out_dir + "/souporcell_retag_sorted_tmp_" + str(index) + ".bam"
filenames.append(filename)
p = subprocess.Popen(["samtools", "sort", retag_files[index], '-o', filename], stderr = retagerr)
sort_jobs.append(p)
# wait for jobs to finish
for job in sort_jobs:
job.wait()
assert not(job.returncode), "samtools sort ended abnormally with code " + str(job.returncode)
#clean up unsorted bams
for bam in retag_files:
subprocess.check_call(["rm", bam])
print("merging sorted bams")
final_bam = args.out_dir + "/souporcell_minimap_tagged_sorted.bam"
subprocess.check_call(["samtools", "merge", final_bam] + filenames)
subprocess.check_call(["samtools", "index", final_bam])
print("cleaning up tmp samfiles")
# clean up tmp samfiles
for samfile in minimap_tmp_files:
subprocess.check_call(["rm", samfile])
# clean up tmp bams
for filename in filenames:
subprocess.check_call(['rm', filename])
subprocess.check_call(["touch", args.out_dir + "/retagging.done"])
def freebayes(args, bam, fasta):
if not(args.common_variants == None) or not(args.known_genotypes == None):
if not(args.common_variants == None):
print("using common variants")
if not(args.known_genotypes == None):
print("using known genotypes")
args.common_variants = args.known_genotypes
# parallelize the samtools depth call. It takes too long
regions = get_bam_regions(bam, int(args.threads))
depth_files = []
depth_procs = []
print(len(regions))
for (index, region) in enumerate(regions):
region_args = []
for (chrom, start, stop) in region:
region_args.append(chrom+":"+str(start)+"-"+str(stop))
depthfile = args.out_dir+"/depth_"+str(index)+".bed"
depth_files.append(depthfile)
min_cov = int(args.min_ref)+int(args.min_alt)
with open(depthfile, 'w') as bed:
with open(depthfile+".sh",'w') as depther:
depther.write("samtools view -hb "+bam+" "+" ".join(region_args)+ " | samtools depth - | "+
"awk '{ if ($3 >= "+str(min_cov)+ " && $3 < 100000) { print $1 \"\t\" $2 \"\t\" $2+1 \"\t\" $3 } }'")
subprocess.check_call(["chmod", "777", depthfile+".sh"])
#ps0 = subprocess.Popen(['samtools', 'view', bam]+region_args, stdout = subprocess.PIPE)
#ps1 = subprocess.Popen(['samtools', 'depth', '-'], stdin = ps0.stdout, stdout = subprocess.PIPE)
# awk magic
#ps2 = subprocess.Popen(["awk '{ if ($3 >= " + str(min_cov) + " && $3 < 100000) { print $1 \"\t\" $2 \"\t\" $2+1 \"\t\" $3 } }'"],
# shell = True, stdin = ps1.stdout, stdout = bed)
ps = subprocess.Popen([depthfile+".sh"], shell = True, stdout = bed)
depth_procs.append(ps)
for proc in depth_procs:
proc.wait()
merged_depthfiles = []
for depth_file in depth_files:
merged_depthfile = depth_file[:-4]+"_merged.bed"
with open(merged_depthfile, 'w') as bed:
subprocess.check_call(["bedtools", "merge", "-i", depth_file], stdout = bed)
merged_depthfiles.append(merged_depthfile)
with open(args.out_dir + "/depth_merged.bed", 'w') as merged_bed:
subprocess.check_call(['cat']+merged_depthfiles, stdout = merged_bed)
for tmp in depth_files: # clean up tmp bed files
subprocess.check_call(['rm', tmp, tmp+".sh"])
for tmp in merged_depthfiles:
subprocess.check_call(['rm', tmp])
with open(args.out_dir + "/common_variants_covered_tmp.vcf", 'w') as vcf:
subprocess.check_call(["bedtools", "intersect", "-wa", "-a", args.common_variants, "-b", args.out_dir + "/depth_merged.bed"], stdout = vcf)
with open(args.out_dir + "/common_variants_covered_tmp.vcf") as vcf:
with open(args.common_variants) as common:
with open(args.out_dir + "/common_variants_covered.vcf",'w') as out:
for line in common:
if line.startswith("#"):
out.write(line)
else:
break
for line in vcf:
out.write(line)
with open(args.out_dir + "/variants.done", 'w') as done:
done.write(args.out_dir + "/common_variants_covered.vcf" + "\n")
return(args.out_dir + "/common_variants_covered.vcf")
regions = get_fasta_regions(args.fasta, int(args.threads))
print(regions)
region_vcfs = [[] for x in range(args.threads)]
all_vcfs = []
bed_files = []
procs = [None for x in range(args.threads)]
any_running = True
filehandles = []
errhandles = []
# run renamer in parallel manner
print("running freebayes")
while any_running:
any_running = False
for (index, region) in enumerate(regions):
block = False
if procs[index]:
block = procs[index].poll() == None
if block:
any_running = True
else:
assert not(procs[index].returncode), "freebayes subprocess terminated abnormally with code " + str(procs[index].returncode)
if len(region_vcfs[index]) == len(region):
block = True
if not block:
sub_index = len(region_vcfs[index])
chrom = region[sub_index][0]
start = region[sub_index][1]
end = region[sub_index][2]
vcf_name = args.out_dir + "/souporcell_" + str(index) + "_" + str(sub_index) + ".vcf"
filehandle = open(vcf_name, 'w')
filehandles.append(filehandle)
errhandle = open(vcf_name + ".err", 'w')
errhandles.append(errhandle)
cmd = ["freebayes", "-f", args.fasta, "-iXu", "-C", "2",
"-q", "20", "-n", "3", "-E", "1", "-m", "30",
"--min-coverage", str(int(args.min_alt)+int(args.min_ref)), "--pooled-continuous", "--skip-coverage", "100000"]
cmd.extend(["-r", chrom + ":" + str(start) + "-" + str(end)])
print(" ".join(cmd))
cmd.append(bam)
errhandle.write(" ".join(cmd) + "\n")
p = subprocess.Popen(cmd, stdout = filehandle, stderr = errhandle)
all_vcfs.append(vcf_name)
procs[index] = p
region_vcfs[index].append(vcf_name)
any_running = True
time.sleep(1)
for filehandle in filehandles:
filehandle.close()
for errhandle in errhandles:
errhandle.close()
print("merging vcfs")
subprocess.check_call(["ls "+args.out_dir+'/*.vcf | xargs -n1 -P'+str(args.threads)+' bgzip'],shell=True)
all_vcfs = [vcf+".gz" for vcf in all_vcfs]
subprocess.check_call(["ls "+args.out_dir+"/*.vcf.gz | xargs -n1 -P"+str(args.threads) +" bcftools index"],shell=True)
with open(args.out_dir + "/souporcell_merged_vcf.vcf", 'w') as vcfout:
subprocess.check_call(["bcftools", "concat", '-a'] + all_vcfs, stdout = vcfout)
with open(args.out_dir + "/bcftools.err", 'w') as vcferr:
with open(args.out_dir + "/souporcell_merged_sorted_vcf.vcf", 'w') as vcfout:
subprocess.check_call(['bcftools', 'sort', args.out_dir + "/souporcell_merged_vcf.vcf"], stdout = vcfout, stderr = vcferr)
if not args.common_variants == None:
with open(args.out_dir + "/common.err", 'w') as err:
with open(args.out_dir + "/vcftmp", 'w') as out:
subprocess.check_call(['bedtools', 'intersect', '-wa',
'-a', args.out_dir + "/souporcell_merged_vcf.vcf", '-b', args.common_variants], stdout = out, stderr = err)
subprocess.check_call(['mv', args.out_dir + "/vcftmp", args.out_dir + "/souporcell_merged_sorted_vcf.vcf"])
subprocess.check_call(['rm', args.out_dir + '/souporcell_merged_vcf.vcf'])
subprocess.check_call(['bgzip', args.out_dir + "/souporcell_merged_sorted_vcf.vcf"])
final_vcf = args.out_dir + "/souporcell_merged_sorted_vcf.vcf.gz"
subprocess.check_call(['tabix', '-p', 'vcf', final_vcf])
for vcf in all_vcfs:
subprocess.check_call(['rm', vcf[:-3] + ".err"])
subprocess.check_call(['rm', vcf +".csi"])
subprocess.check_call(['rm'] + all_vcfs)
if len(bed_files) > 0:
for bed in bed_files:
subprocess.check_call(['rm', bed + ".bed"])
subprocess.check_call(['rm'] + bed_files)
with open(args.out_dir + "/variants.done", 'w') as done:
done.write(final_vcf + "\n")
return(final_vcf)
def vartrix(args, final_vcf, final_bam):
print("running vartrix")
ref_mtx = args.out_dir + "/ref.mtx"
alt_mtx = args.out_dir + "/alt.mtx"
barcodes = args.barcodes
if barcodes[-3:] == ".gz":
with open(args.out_dir + "/barcodes.tsv",'w') as bcsout:
subprocess.check_call(['gunzip', '-c', barcodes],stdout = bcsout)
barcodes = args.out_dir + "/barcodes.tsv"
with open(args.out_dir + "/vartrix.err", 'w') as err:
with open(args.out_dir + "/vartrix.out", 'w') as out:
cmd = ["vartrix", "--mapq", "30", "-b", final_bam, "-c", barcodes, "--scoring-method", "coverage", "--threads", str(args.threads),
"--ref-matrix", ref_mtx, "--out-matrix", alt_mtx, "-v", final_vcf, "--fasta", args.fasta]
if not(args.no_umi) and args.umi_tag == "UB":
cmd.append("--umi")
subprocess.check_call(cmd, stdout = out, stderr = err)
subprocess.check_call(['touch', args.out_dir + "/vartrix.done"])
subprocess.check_call(['rm', args.out_dir + "/vartrix.out", args.out_dir + "/vartrix.err"])
return((ref_mtx, alt_mtx))
def souporcell(args, ref_mtx, alt_mtx, final_vcf):
print("running souporcell clustering")
cluster_file = args.out_dir + "/clusters_tmp.tsv"
with open(cluster_file, 'w') as log:
with open(args.out_dir+"/clusters.err",'w') as err:
directory = os.path.dirname(os.path.realpath(__file__))
cmd = [directory+"/souporcell/target/release/souporcell", "-k",args.clusters, "-a", alt_mtx, "-r", ref_mtx,
"--restarts", str(args.restarts), "-b", args.barcodes, "--min_ref", args.min_ref, "--min_alt", args.min_alt,
"--threads", str(args.threads)]
if not(args.known_genotypes == None):
cmd.extend(['--known_genotypes', final_vcf])
if not(args.known_genotypes_sample_names == None):
cmd.extend(['--known_genotypes_sample_names'] + args.known_genotypes_sample_names)
print(" ".join(cmd))
subprocess.check_call(cmd, stdout = log, stderr = err)
subprocess.check_call(['touch', args.out_dir + "/clustering.done"])
return(cluster_file)
def doublets(args, ref_mtx, alt_mtx, cluster_file):
print("running souporcell doublet detection")
doublet_file = args.out_dir + "/clusters.tsv"
with open(doublet_file, 'w') as dub:
with open(args.out_dir+"/doublets.err",'w') as err:
directory = os.path.dirname(os.path.realpath(__file__))
subprocess.check_call([directory+"/troublet/target/release/troublet", "--alts", alt_mtx, "--refs", ref_mtx, "--clusters", cluster_file], stdout = dub, stderr = err)
subprocess.check_call(['touch', args.out_dir + "/troublet.done"])
return(doublet_file)
def consensus(args, ref_mtx, alt_mtx, doublet_file):
print("running co inference of ambient RNA and cluster genotypes")
directory = os.path.dirname(os.path.realpath(__file__))
subprocess.check_call([directory+"/consensus.py", "-c", doublet_file, "-a", alt_mtx, "-r", ref_mtx, "-p", args.ploidy,
"--output_dir",args.out_dir,"--soup_out", args.out_dir + "/ambient_rna.txt", "--vcf_out", args.out_dir + "/cluster_genotypes.vcf", "--vcf", final_vcf])
subprocess.check_call(['touch', args.out_dir + "/consensus.done"])
#### MAIN RUN SCRIPT
if os.path.isdir(args.out_dir):
print("restarting pipeline in existing directory " + args.out_dir)
else:
subprocess.check_call(["mkdir", "-p", args.out_dir])
if not args.skip_remap:
if not os.path.exists(args.out_dir + "/fastqs.done"):
(region_fastqs, all_fastqs) = make_fastqs(args)
else:
all_fastqs = []
region_fastqs = []
with open(args.out_dir + "/fastqs.done") as fastqs:
for line in fastqs:
toks = line.strip().split("\t")
region_fastqs.append(toks)
for tok in toks:
all_fastqs.append(tok)
if not os.path.exists(args.out_dir + "/remapping.done"):
minimap_tmp_files = remap(args, region_fastqs, all_fastqs)
else:
minimap_tmp_files = []
with open(args.out_dir + "/remapping.done") as bams:
for line in bams:
minimap_tmp_files.append(line.strip())
if not os.path.exists(args.out_dir + "/retagging.done"):
retag(args, minimap_tmp_files)
bam = args.out_dir + "/souporcell_minimap_tagged_sorted.bam"
else:
bam = args.bam
if not os.path.exists(args.out_dir + "/variants.done"):
final_vcf = freebayes(args, bam, fasta)
else:
with open(args.out_dir + "/variants.done") as done:
final_vcf = done.readline().strip()
if not os.path.exists(args.out_dir + "/vartrix.done"):
vartrix(args, final_vcf, bam)
ref_mtx = args.out_dir + "/ref.mtx"
alt_mtx = args.out_dir + "/alt.mtx"
if not(os.path.exists(args.out_dir + "/clustering.done")):
souporcell(args, ref_mtx, alt_mtx, final_vcf)
cluster_file = args.out_dir + "/clusters_tmp.tsv"
if not(os.path.exists(args.out_dir + "/troublet.done")):
doublets(args, ref_mtx, alt_mtx, cluster_file)
doublet_file = args.out_dir + "/clusters.tsv"
if not(os.path.exists(args.out_dir + "/consensus.done")):
consensus(args, ref_mtx, alt_mtx, doublet_file)
print("done")
#### END MAIN RUN SCRIPT
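# A hypothetical invocation (all file names are placeholders):
#   ./souporcell_pipeline.py -i possorted_genome_bam.bam -b barcodes.tsv \
#       -f reference.fa -t 8 -o souporcell_out -k 4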
------------------------------------------------------------------------------
File: /docs/manual/gears/examples/filt_fix_sel.py
Repo: bogdanvuk/pygears | License: MIT (permissive) | Branch: refs/heads/master
Language: Python | Encoding: UTF-8 | Size: 214 bytes | Revision: 2022-03-07 | Stars: 146 | Forks: 16
------------------------------------------------------------------------------
from pygears.lib import filt, drv, check
from pygears.typing import Union, Uint, Int
drv(t=Union[Uint[8], Int[8]], seq=[(1, 0), (2, 1), (3, 0), (4, 1), (5, 0)]) \
| filt(fixsel=0) \
| check(ref=[1, 3, 5])
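# The driver emits (data, ctrl) pairs for the Union type; filt(fixsel=0)
# forwards only the items whose ctrl field selects subtype 0 (Uint[8]),
# which is why the checker expects 1, 3 and 5.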
------------------------------------------------------------------------------
File: /lessons/CRISP_DM/CatVar.py
Repo: udacity/DSND_Term2 | License: MIT, CC-BY-NC-ND-4.0 (permissive) | Branch: refs/heads/master
Language: Python | Encoding: UTF-8 | Size: 2,903 bytes | Revision: 2022-11-30 | Stars: 1,173 | Forks: 1,966
------------------------------------------------------------------------------
|
import pandas as pd
import numpy as np
from collections import defaultdict
import CatVarSolns as s
## Categorical Variables
# Question 1
def cat_df_check(cat_df):
'''
INPUT
cat_df - a pandas dataframe of only the categorical columns of df
Prints statement related to the correctness of the dataframe provided.
'''
if cat_df.equals(s.cat_df):
print("Nice job! That looks right!")
else:
print("That wasn't quite as expected. The input cat_df variable should be a dataframe of all of the categorical variables. You can use select_dtypes to select the 'object' data type.")
#Question 2
def cat_df_dict_check(cat_df_dict):
'''
    INPUT cat_df_dict - a dictionary with a number for each value corresponding to the number described by each key.
Prints statement related to the correctness of the solution of the dictionary
'''
if cat_df_dict == s.cat_df_dict:
print('Nice job! That looks right to me!')
else:
print("Oops! One or more of those doesn't look quite right. Each value should be an integer corresponding to the number of columns described.")
#Question 3
def sol_3_dict_check(sol_3_dict):
'''
    INPUT sol_3_dict - a dictionary with a variable for each value corresponding to the key that describes it.
Prints statement related to the correctness of the solution of the dictionary
'''
if sol_3_dict == s.sol_3_dict:
print('Nice job! That looks right to me!')
elif sol_3_dict['Which column should you create a dummy variable for?'] != s.sol_3_dict['Which column should you create a dummy variable for?']:
print("Oops! That is not the column you should be using to create a dummy variable. Try again.")
elif sol_3_dict['How many new dummy columns do you get when creating dummy variables?'] != s.sol_3_dict['How many new dummy columns do you get when creating dummy variables?']:
print("Oops! Though you could get that number of dummy variables, that is not what you get using the default setting using one hot encoding or pandas `get_dummies` encoding. Try again.")
elif sol_3_dict['What happens with the nan values?'] != s.sol_3_dict['What happens with the nan values?']:
print("Oops! Though that could happen with the NaN values, that is not the default when working with pandas.")
#Question 4
def dummy_cols_df_check(dummy_cols_df):
'''
INPUT
dummy_cols_df - a pandas dataframe of the dummy variables associated with the levels as well as the missing values.
    Prints a statement about the correctness of the dataframe provided.
'''
if dummy_cols_df.equals(s.dummy_cols_df):
print("Nice job! That looks right!")
else:
print("That wasn't quite as expected. Your input should just be the 3 columns resulting as dummy variables. One column for a, one for b, and one for the nan values.")
|
9be68032399da0cdf2ef6ba315f0d09c742ca563
|
33f805792e79a9ef1d577699b983031521d5b6c9
|
/tapiriik/services/Strava/strava.py
|
28a1de46dc298580d6b9f03fe1aa6e39e810a3a1
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
cpfair/tapiriik
|
0dce9599400579d33acbbdaba16806256270d0a3
|
c67e9848e67f515e116bb19cd4dd479e8414de4d
|
refs/heads/master
| 2023-08-28T10:17:11.070324
| 2023-07-25T00:59:33
| 2023-07-25T00:59:33
| 7,812,229
| 1,519
| 343
|
Apache-2.0
| 2022-10-24T16:52:34
| 2013-01-25T02:43:42
|
Python
|
UTF-8
|
Python
| false
| false
| 22,950
|
py
|
strava.py
|
from tapiriik.settings import WEB_ROOT, STRAVA_CLIENT_SECRET, STRAVA_CLIENT_ID, STRAVA_RATE_LIMITS
from tapiriik.services.service_base import ServiceAuthenticationType, ServiceBase
from tapiriik.services.service_record import ServiceRecord
from tapiriik.database import cachedb, db
from tapiriik.services.interchange import UploadedActivity, ActivityType, ActivityStatistic, ActivityStatisticUnit, Waypoint, WaypointType, Location, Lap
from tapiriik.services.api import APIException, UserException, UserExceptionType, APIExcludeActivity
from tapiriik.services.fit import FITIO
from django.core.urlresolvers import reverse
from datetime import datetime, timedelta
from urllib.parse import urlencode
import calendar
import requests
import os
import logging
import pytz
import re
import time
import json
logger = logging.getLogger(__name__)
class StravaService(ServiceBase):
ID = "strava"
DisplayName = "Strava"
DisplayAbbreviation = "STV"
AuthenticationType = ServiceAuthenticationType.OAuth
UserProfileURL = "http://www.strava.com/athletes/{0}"
UserActivityURL = "http://app.strava.com/activities/{1}"
AuthenticationNoFrame = True # They don't prevent the iframe, it just looks really ugly.
PartialSyncRequiresTrigger = True
PartialSyncTriggerStatusCode = 200
PartialSyncTriggerRequiresSubscription = True
LastUpload = None
SupportsHR = SupportsCadence = SupportsTemp = SupportsPower = True
SupportsActivityDeletion = True
# For mapping common->Strava; no ambiguity in Strava activity type
_activityTypeMappings = {
ActivityType.Cycling: "Ride",
ActivityType.MountainBiking: "Ride",
ActivityType.Hiking: "Hike",
ActivityType.Running: "Run",
ActivityType.Walking: "Walk",
ActivityType.Snowboarding: "Snowboard",
ActivityType.Skating: "IceSkate",
ActivityType.CrossCountrySkiing: "NordicSki",
ActivityType.DownhillSkiing: "AlpineSki",
ActivityType.Swimming: "Swim",
ActivityType.Gym: "Workout",
ActivityType.Rowing: "Rowing",
ActivityType.Elliptical: "Elliptical",
ActivityType.RollerSkiing: "RollerSki",
ActivityType.StrengthTraining: "WeightTraining",
ActivityType.Climbing: "RockClimbing",
ActivityType.StandUpPaddling: "StandUpPaddling",
}
# For mapping Strava->common
_reverseActivityTypeMappings = {
"Ride": ActivityType.Cycling,
"VirtualRide": ActivityType.Cycling,
"EBikeRide": ActivityType.Cycling,
"MountainBiking": ActivityType.MountainBiking,
"VirtualRun": ActivityType.Running,
"Run": ActivityType.Running,
"Hike": ActivityType.Hiking,
"Walk": ActivityType.Walking,
"AlpineSki": ActivityType.DownhillSkiing,
"CrossCountrySkiing": ActivityType.CrossCountrySkiing,
"NordicSki": ActivityType.CrossCountrySkiing,
"BackcountrySki": ActivityType.DownhillSkiing,
"Snowboard": ActivityType.Snowboarding,
"Swim": ActivityType.Swimming,
"IceSkate": ActivityType.Skating,
"Workout": ActivityType.Gym,
"Rowing": ActivityType.Rowing,
"Kayaking": ActivityType.Rowing,
"Canoeing": ActivityType.Rowing,
"StandUpPaddling": ActivityType.StandUpPaddling,
"Elliptical": ActivityType.Elliptical,
"RollerSki": ActivityType.RollerSkiing,
"WeightTraining": ActivityType.StrengthTraining,
"RockClimbing" : ActivityType.Climbing,
}
SupportedActivities = list(_activityTypeMappings.keys())
GlobalRateLimits = STRAVA_RATE_LIMITS
GlobalRateLimitsPreemptiveSleep = True
def UserUploadedActivityURL(self, uploadId):
return "https://www.strava.com/activities/%d" % uploadId
def WebInit(self):
params = {'scope':'activity:write,activity:read_all',
'client_id':STRAVA_CLIENT_ID,
'response_type':'code',
'redirect_uri':WEB_ROOT + reverse("oauth_return", kwargs={"service": "strava"}),
"approval_prompt": "auto"
}
self.UserAuthorizationURL = \
"https://www.strava.com/oauth/authorize?" + urlencode(params)
def _requestWithAuth(self, reqLambda, serviceRecord):
self._globalRateLimit()
session = requests.Session()
if time.time() > serviceRecord.Authorization.get("AccessTokenExpiresAt", 0) - 60:
# Expired access token, or still running (now-deprecated) indefinite access token.
refreshToken = serviceRecord.Authorization.get("RefreshToken",
serviceRecord.Authorization.get("OAuthToken"))
response = requests.post("https://www.strava.com/oauth/token", data={
"grant_type": "refresh_token",
"refresh_token": refreshToken,
"client_id": STRAVA_CLIENT_ID,
"client_secret": STRAVA_CLIENT_SECRET,
})
if response.status_code != 200:
raise APIException("No authorization to refresh token", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
data = response.json()
authorizationData = {
"AccessToken": data["access_token"],
"AccessTokenExpiresAt": data["expires_at"],
"RefreshToken": data["refresh_token"]
}
serviceRecord.Authorization.update(authorizationData)
db.connections.update({"_id": serviceRecord._id}, {"$set": {"Authorization": authorizationData}})
session.headers.update({"Authorization": "access_token %s" % serviceRecord.Authorization["AccessToken"]})
return reqLambda(session)
def RetrieveAuthorizationToken(self, req, level):
code = req.GET.get("code")
params = {"grant_type": "authorization_code", "code": code, "client_id": STRAVA_CLIENT_ID, "client_secret": STRAVA_CLIENT_SECRET, "redirect_uri": WEB_ROOT + reverse("oauth_return", kwargs={"service": "strava"})}
response = requests.post("https://www.strava.com/oauth/token", data=params)
if response.status_code != 200:
raise APIException("Invalid code")
data = response.json()
authorizationData = {
"AccessToken": data["access_token"],
"AccessTokenExpiresAt": data["expires_at"],
"RefreshToken": data["refresh_token"]
}
return (data["athlete"]["id"], authorizationData)
def RevokeAuthorization(self, serviceRecord):
resp = self._requestWithAuth(lambda session: session.post("https://www.strava.com/oauth/deauthorize"), serviceRecord)
if resp.status_code != 204 and resp.status_code != 200:
raise APIException("Unable to deauthorize Strava auth token, status " + str(resp.status_code) + " resp " + resp.text)
def DownloadActivityList(self, svcRecord, exhaustive=False):
activities = []
exclusions = []
before = earliestDate = None
while True:
if before is not None and before < 0:
break # Caused by activities that "happened" before the epoch. We generally don't care about those activities...
logger.debug("Req with before=" + str(before) + "/" + str(earliestDate))
resp = self._requestWithAuth(lambda session: session.get("https://www.strava.com/api/v3/athletes/" + str(svcRecord.ExternalID) + "/activities", params={"before": before}), svcRecord)
if resp.status_code == 401:
raise APIException("No authorization to retrieve activity list", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
earliestDate = None
try:
reqdata = resp.json()
except ValueError:
raise APIException("Failed parsing strava list response %s - %s" % (resp.status_code, resp.text))
if not len(reqdata):
break # No more activities to see
for ride in reqdata:
activity = UploadedActivity()
                activity.TZ = pytz.timezone(re.sub(r"^\([^\)]+\)\s*", "", ride["timezone"]))  # Comes back as "(GMT -13:37) The Stuff/We Want"
activity.StartTime = pytz.utc.localize(datetime.strptime(ride["start_date"], "%Y-%m-%dT%H:%M:%SZ"))
logger.debug("\tActivity s/t %s: %s" % (activity.StartTime, ride["name"]))
if not earliestDate or activity.StartTime < earliestDate:
earliestDate = activity.StartTime
before = calendar.timegm(activity.StartTime.astimezone(pytz.utc).timetuple())
activity.EndTime = activity.StartTime + timedelta(0, ride["elapsed_time"])
activity.ServiceData = {"ActivityID": ride["id"], "Manual": ride["manual"]}
if ride["type"] not in self._reverseActivityTypeMappings:
exclusions.append(APIExcludeActivity("Unsupported activity type %s" % ride["type"], activity_id=ride["id"], user_exception=UserException(UserExceptionType.Other)))
logger.debug("\t\tUnknown activity")
continue
activity.Type = self._reverseActivityTypeMappings[ride["type"]]
activity.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, value=ride["distance"])
if "max_speed" in ride or "average_speed" in ride:
activity.Stats.Speed = ActivityStatistic(ActivityStatisticUnit.MetersPerSecond, avg=ride["average_speed"] if "average_speed" in ride else None, max=ride["max_speed"] if "max_speed" in ride else None)
activity.Stats.MovingTime = ActivityStatistic(ActivityStatisticUnit.Seconds, value=ride["moving_time"] if "moving_time" in ride and ride["moving_time"] > 0 else None) # They don't let you manually enter this, and I think it returns 0 for those activities.
# Strava doesn't handle "timer time" to the best of my knowledge - although they say they do look at the FIT total_timer_time field, so...?
if "average_watts" in ride:
activity.Stats.Power = ActivityStatistic(ActivityStatisticUnit.Watts, avg=ride["average_watts"])
if "average_heartrate" in ride:
activity.Stats.HR.update(ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, avg=ride["average_heartrate"]))
if "max_heartrate" in ride:
activity.Stats.HR.update(ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, max=ride["max_heartrate"]))
if "average_cadence" in ride:
activity.Stats.Cadence.update(ActivityStatistic(ActivityStatisticUnit.RevolutionsPerMinute, avg=ride["average_cadence"]))
if "average_temp" in ride:
activity.Stats.Temperature.update(ActivityStatistic(ActivityStatisticUnit.DegreesCelcius, avg=ride["average_temp"]))
if "calories" in ride:
activity.Stats.Energy = ActivityStatistic(ActivityStatisticUnit.Kilocalories, value=ride["calories"])
activity.Name = ride["name"]
activity.Private = ride["private"]
activity.Stationary = ride["manual"]
activity.GPS = ("start_latlng" in ride) and (ride["start_latlng"] is not None)
activity.AdjustTZ()
activity.CalculateUID()
activities.append(activity)
if not exhaustive or not earliestDate:
break
return activities, exclusions
def SubscribeToPartialSyncTrigger(self, serviceRecord):
# There is no per-user webhook subscription with Strava.
serviceRecord.SetPartialSyncTriggerSubscriptionState(True)
def UnsubscribeFromPartialSyncTrigger(self, serviceRecord):
# As above.
serviceRecord.SetPartialSyncTriggerSubscriptionState(False)
def ExternalIDsForPartialSyncTrigger(self, req):
data = json.loads(req.body.decode("UTF-8"))
return [(data["owner_id"], None)]
def PartialSyncTriggerGET(self, req):
# Strava requires this endpoint to echo back a challenge.
# Only happens once during manual endpoint setup?
from django.http import HttpResponse
return HttpResponse(json.dumps({
"hub.challenge": req.GET["hub.challenge"]
}))
def DownloadActivity(self, svcRecord, activity):
if activity.ServiceData["Manual"]: # I should really add a param to DownloadActivity for this value as opposed to constantly doing this
# We've got as much information as we're going to get - we need to copy it into a Lap though.
activity.Laps = [Lap(startTime=activity.StartTime, endTime=activity.EndTime, stats=activity.Stats)]
return activity
activityID = activity.ServiceData["ActivityID"]
streamdata = self._requestWithAuth(lambda session: session.get("https://www.strava.com/api/v3/activities/" + str(activityID) + "/streams/time,altitude,heartrate,cadence,watts,temp,moving,latlng,distance,velocity_smooth"), svcRecord)
if streamdata.status_code == 401:
raise APIException("No authorization to download activity", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
try:
streamdata = streamdata.json()
except:
raise APIException("Stream data returned is not JSON")
if "message" in streamdata and streamdata["message"] == "Record Not Found":
raise APIException("Could not find activity")
ridedata = {}
for stream in streamdata:
ridedata[stream["type"]] = stream["data"]
lap = Lap(stats=activity.Stats, startTime=activity.StartTime, endTime=activity.EndTime) # Strava doesn't support laps, but we need somewhere to put the waypoints.
activity.Laps = [lap]
lap.Waypoints = []
hasHR = "heartrate" in ridedata and len(ridedata["heartrate"]) > 0
hasCadence = "cadence" in ridedata and len(ridedata["cadence"]) > 0
hasTemp = "temp" in ridedata and len(ridedata["temp"]) > 0
hasPower = ("watts" in ridedata and len(ridedata["watts"]) > 0)
hasAltitude = "altitude" in ridedata and len(ridedata["altitude"]) > 0
hasDistance = "distance" in ridedata and len(ridedata["distance"]) > 0
hasVelocity = "velocity_smooth" in ridedata and len(ridedata["velocity_smooth"]) > 0
if "error" in ridedata:
raise APIException("Strava error " + ridedata["error"])
inPause = False
waypointCt = len(ridedata["time"])
for idx in range(0, waypointCt - 1):
waypoint = Waypoint(activity.StartTime + timedelta(0, ridedata["time"][idx]))
if "latlng" in ridedata:
latlng = ridedata["latlng"][idx]
waypoint.Location = Location(latlng[0], latlng[1], None)
if waypoint.Location.Longitude == 0 and waypoint.Location.Latitude == 0:
waypoint.Location.Longitude = None
waypoint.Location.Latitude = None
if hasAltitude:
if not waypoint.Location:
waypoint.Location = Location(None, None, None)
waypoint.Location.Altitude = float(ridedata["altitude"][idx])
# When pausing, Strava sends this format:
# idx = 100 ; time = 1000; moving = true
# idx = 101 ; time = 1001; moving = true => convert to Pause
# idx = 102 ; time = 2001; moving = false => convert to Resume: (2001-1001) seconds pause
# idx = 103 ; time = 2002; moving = true
if idx == 0:
waypoint.Type = WaypointType.Start
elif idx == waypointCt - 2:
waypoint.Type = WaypointType.End
elif idx < waypointCt - 2 and ridedata["moving"][idx+1] and inPause:
waypoint.Type = WaypointType.Resume
inPause = False
elif idx < waypointCt - 2 and not ridedata["moving"][idx+1] and not inPause:
waypoint.Type = WaypointType.Pause
inPause = True
if hasHR:
waypoint.HR = ridedata["heartrate"][idx]
if hasCadence:
waypoint.Cadence = ridedata["cadence"][idx]
if hasTemp:
waypoint.Temp = ridedata["temp"][idx]
if hasPower:
waypoint.Power = ridedata["watts"][idx]
if hasVelocity:
waypoint.Speed = ridedata["velocity_smooth"][idx]
if hasDistance:
waypoint.Distance = ridedata["distance"][idx]
lap.Waypoints.append(waypoint)
return activity
def UploadActivity(self, serviceRecord, activity):
logger.info("Activity tz " + str(activity.TZ) + " dt tz " + str(activity.StartTime.tzinfo) + " starttime " + str(activity.StartTime))
if self.LastUpload is not None:
while (datetime.now() - self.LastUpload).total_seconds() < 5:
time.sleep(1)
logger.debug("Inter-upload cooldown")
source_svc = None
if hasattr(activity, "ServiceDataCollection"):
source_svc = str(list(activity.ServiceDataCollection.keys())[0])
upload_id = None
if activity.CountTotalWaypoints():
req = {
"data_type": "fit",
"activity_name": activity.Name,
"description": activity.Notes, # Paul Mach said so.
"activity_type": self._activityTypeMappings[activity.Type],
"private": 1 if activity.Private else 0}
if "fit" in activity.PrerenderedFormats:
logger.debug("Using prerendered FIT")
fitData = activity.PrerenderedFormats["fit"]
else:
# TODO: put the fit back into PrerenderedFormats once there's more RAM to go around and there's a possibility of it actually being used.
fitData = FITIO.Dump(activity, drop_pauses=True)
files = {"file":("tap-sync-" + activity.UID + "-" + str(os.getpid()) + ("-" + source_svc if source_svc else "") + ".fit", fitData)}
response = self._requestWithAuth(lambda session: session.post("https://www.strava.com/api/v3/uploads", data=req, files=files), serviceRecord)
if response.status_code != 201:
if response.status_code == 401:
raise APIException("No authorization to upload activity " + activity.UID + " response " + response.text + " status " + str(response.status_code), block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
if "duplicate of activity" in response.text:
logger.debug("Duplicate")
self.LastUpload = datetime.now()
return # Fine by me. The majority of these cases were caused by a dumb optimization that meant existing activities on services were never flagged as such if tapiriik didn't have to synchronize them elsewhere.
raise APIException("Unable to upload activity " + activity.UID + " response " + response.text + " status " + str(response.status_code))
upload_id = response.json()["id"]
upload_poll_wait = 8 # The mode of processing times
while not response.json()["activity_id"]:
time.sleep(upload_poll_wait)
response = self._requestWithAuth(lambda session: session.get("https://www.strava.com/api/v3/uploads/%s" % upload_id), serviceRecord)
logger.debug("Waiting for upload - status %s id %s" % (response.json()["status"], response.json()["activity_id"]))
if response.json()["error"]:
error = response.json()["error"]
if "duplicate of activity" in error:
self.LastUpload = datetime.now()
logger.debug("Duplicate")
return # I guess we're done here?
raise APIException("Strava failed while processing activity - last status %s" % response.text)
upload_id = response.json()["activity_id"]
else:
localUploadTS = activity.StartTime.strftime("%Y-%m-%d %H:%M:%S")
req = {
"name": activity.Name if activity.Name else activity.StartTime.strftime("%d/%m/%Y"), # This is required
"description": activity.Notes,
"type": self._activityTypeMappings[activity.Type],
"private": 1 if activity.Private else 0,
"start_date_local": localUploadTS,
"distance": activity.Stats.Distance.asUnits(ActivityStatisticUnit.Meters).Value,
"elapsed_time": round((activity.EndTime - activity.StartTime).total_seconds())
}
response = self._requestWithAuth(lambda session: session.post("https://www.strava.com/api/v3/activities", data=req), serviceRecord)
# FFR this method returns the same dict as the activity listing, as REST services are wont to do.
if response.status_code != 201:
if response.status_code == 401:
raise APIException("No authorization to upload activity " + activity.UID + " response " + response.text + " status " + str(response.status_code), block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
raise APIException("Unable to upload stationary activity " + activity.UID + " response " + response.text + " status " + str(response.status_code))
upload_id = response.json()["id"]
self.LastUpload = datetime.now()
return upload_id
def DeleteCachedData(self, serviceRecord):
cachedb.strava_cache.remove({"Owner": serviceRecord.ExternalID})
cachedb.strava_activity_cache.remove({"Owner": serviceRecord.ExternalID})
def DeleteActivity(self, serviceRecord, uploadId):
del_res = self._requestWithAuth(lambda session: session.delete("https://www.strava.com/api/v3/activities/%d" % uploadId), serviceRecord)
del_res.raise_for_status()
|
c988e0158db69669f778fe698a82aa7fcf67a619
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/19_数学/游程编码/1531. 压缩字符串 II-删除子数组-前后缀分解.py
|
f93f04f5c43e329b63120470e422d02afac52fce
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,894
|
py
|
1531. 压缩字符串 II-删除子数组-前后缀分解.py
|
# You need to delete at most k *adjacent* characters from the string s so that the run-length encoding of s is as short as possible.
# Return the minimum length of the run-length encoding of s after deleting at most k characters.
from itertools import groupby
# Compress the string (run-length encoding)
class Solution:
def solve(self, string: str, k: int) -> int:
def getRLELen(x: int) -> int:
return x if x <= 1 else len(str(x)) + 1
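        # Worked examples (added): a run of length 1 encodes as just the
        # character ("a" -> length 1); a run of 12 encodes as "a12", i.e.
        # length 1 + len("12") = 3, which is what getRLELen computes.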
n = len(string)
if n == k:
return 0
        # run lengths: left[i] is the run of equal characters ending at i, right[i] the run starting at i
left = [1] * n
for i in range(n - 1):
if string[i] == string[i + 1]:
left[i + 1] = left[i] + 1
right = [1] * n
for i in range(n - 2, -1, -1):
if string[i] == string[i + 1]:
right[i] = right[i + 1] + 1
groups = [len(list(g)) for _, g in groupby(string)]
prefix = [0] * n
pre = 0
i = 0
for g in groups:
for j in range(1, 1 + g):
prefix[i] = pre + getRLELen(j)
i += 1
pre += getRLELen(g)
suffix = [0] * n
pre = 0
i = n - 1
for g in reversed(groups):
for j in range(1, 1 + g):
suffix[i] = pre + getRLELen(j)
i -= 1
pre += getRLELen(g)
        res = min(prefix[~k], suffix[k])  # baseline: delete the last k characters or the first k
        # choose which block of k adjacent characters to delete (keep prefix[..i] and suffix[i+k+1..])
for i in range(len(string) - k - 1):
cand = prefix[i] + suffix[i + k + 1]
l = left[i]
r = right[i + k + 1]
            # if the characters flanking the deleted block match, their runs merge
if string[i] == string[i + k + 1]:
cand -= getRLELen(l) + getRLELen(r)
cand += getRLELen(l + r)
res = min(res, cand)
return res
print(Solution().solve(string="aaaaabbaaaaaccaaa", k=2))
|
26a7e5b16dc3cab74dc49c2e4008a003fa6efb84
|
6317fc30564ce5e50b470de3c1aedb0323555fde
|
/scratch/network_analysis.py
|
8cab74bdbfd16df2d9a3eca654eef096dc853a46
|
[
"MIT"
] |
permissive
|
Insight-book/data-science-from-scratch
|
73811e954c8a5757ebe412d0261bdec274367c6f
|
4466437ad8ae8b8714925084534c08a3cd9ea02d
|
refs/heads/master
| 2021-01-18T16:09:20.566055
| 2020-02-05T13:23:09
| 2020-02-05T13:23:09
| 37,478,828
| 109
| 126
|
Unlicense
| 2020-02-05T13:23:11
| 2015-06-15T17:02:43
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,665
|
py
|
network_analysis.py
|
from typing import NamedTuple
class User(NamedTuple):
id: int
name: str
users = [User(0, "Hero"), User(1, "Dunn"), User(2, "Sue"), User(3, "Chi"),
User(4, "Thor"), User(5, "Clive"), User(6, "Hicks"),
User(7, "Devin"), User(8, "Kate"), User(9, "Klein")]
friend_pairs = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
from typing import Dict, List
# type alias for keeping track of Friendships
Friendships = Dict[int, List[int]]
friendships: Friendships = {user.id: [] for user in users}
for i, j in friend_pairs:
friendships[i].append(j)
friendships[j].append(i)
assert friendships[4] == [3, 5]
assert friendships[8] == [6, 7, 9]
from collections import deque
Path = List[int]
def shortest_paths_from(from_user_id: int,
friendships: Friendships) -> Dict[int, List[Path]]:
# A dictionary from "user_id" to *all* shortest paths to that user
shortest_paths_to: Dict[int, List[Path]] = {from_user_id: [[]]}
# A queue of (previous user, next user) that we need to check.
# Starts out with all pairs (from_user, friend_of_from_user)
frontier = deque((from_user_id, friend_id)
for friend_id in friendships[from_user_id])
# Keep going until we empty the queue.
while frontier:
# Remove the pair that's next in the queue.
prev_user_id, user_id = frontier.popleft()
# Because of the way we're adding to the queue,
# necessarily we already know some shortest paths to prev_user
paths_to_prev_user = shortest_paths_to[prev_user_id]
new_paths_to_user = [path + [user_id] for path in paths_to_prev_user]
# It's possible we already know a shortest path to user_id.
old_paths_to_user = shortest_paths_to.get(user_id, [])
# What's the shortest path to here that we've seen so far?
if old_paths_to_user:
min_path_length = len(old_paths_to_user[0])
else:
min_path_length = float('inf')
# Only keep paths that aren't too long and are actually new
new_paths_to_user = [path
for path in new_paths_to_user
if len(path) <= min_path_length
and path not in old_paths_to_user]
shortest_paths_to[user_id] = old_paths_to_user + new_paths_to_user
# Add never-seen neighbors to the frontier
frontier.extend((user_id, friend_id)
for friend_id in friendships[user_id]
if friend_id not in shortest_paths_to)
return shortest_paths_to
# For each from_user, for each to_user, a list of shortest paths.
shortest_paths = {user.id: shortest_paths_from(user.id, friendships)
for user in users}
betweenness_centrality = {user.id: 0.0 for user in users}
for source in users:
for target_id, paths in shortest_paths[source.id].items():
if source.id < target_id: # don't double count
num_paths = len(paths) # how many shortest paths?
contrib = 1 / num_paths # contribution to centrality
for path in paths:
for between_id in path:
if between_id not in [source.id, target_id]:
betweenness_centrality[between_id] += contrib
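# Sanity check (added, not in the original): a degree-one user such as
# Klein (id 9) can never lie strictly between two other users, so her
# betweenness centrality must be zero.
assert betweenness_centrality[9] == 0.0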
def farness(user_id: int) -> float:
"""the sum of the lengths of the shortest paths to each other user"""
return sum(len(paths[0])
for paths in shortest_paths[user_id].values())
closeness_centrality = {user.id: 1 / farness(user.id) for user in users}
from scratch.linear_algebra import Matrix, make_matrix, shape
def matrix_times_matrix(m1: Matrix, m2: Matrix) -> Matrix:
nr1, nc1 = shape(m1)
nr2, nc2 = shape(m2)
assert nc1 == nr2, "must have (# of columns in m1) == (# of rows in m2)"
def entry_fn(i: int, j: int) -> float:
"""dot product of i-th row of m1 with j-th column of m2"""
return sum(m1[i][k] * m2[k][j] for k in range(nc1))
return make_matrix(nr1, nc2, entry_fn)
from scratch.linear_algebra import Vector, dot
def matrix_times_vector(m: Matrix, v: Vector) -> Vector:
nr, nc = shape(m)
n = len(v)
assert nc == n, "must have (# of cols in m) == (# of elements in v)"
return [dot(row, v) for row in m] # output has length nr
from typing import Tuple
import random
from scratch.linear_algebra import magnitude, distance
def find_eigenvector(m: Matrix,
tolerance: float = 0.00001) -> Tuple[Vector, float]:
guess = [random.random() for _ in m]
while True:
result = matrix_times_vector(m, guess) # transform guess
norm = magnitude(result) # compute norm
next_guess = [x / norm for x in result] # rescale
if distance(guess, next_guess) < tolerance:
# convergence so return (eigenvector, eigenvalue)
return next_guess, norm
guess = next_guess
rotate = [[ 0, 1],
[-1, 0]]
flip = [[0, 1],
[1, 0]]
def entry_fn(i: int, j: int):
return 1 if (i, j) in friend_pairs or (j, i) in friend_pairs else 0
n = len(users)
adjacency_matrix = make_matrix(n, n, entry_fn)
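# Added (hedged): with the adjacency matrix in hand, eigenvector centralities
# fall out of the power-iteration routine defined above; this mirrors how
# find_eigenvector is meant to be used on this graph.
eigenvector_centralities, _ = find_eigenvector(adjacency_matrix)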
endorsements = [(0, 1), (1, 0), (0, 2), (2, 0), (1, 2),
(2, 1), (1, 3), (2, 3), (3, 4), (5, 4),
(5, 6), (7, 5), (6, 8), (8, 7), (8, 9)]
from collections import Counter
endorsement_counts = Counter(target for source, target in endorsements)
import tqdm
def page_rank(users: List[User],
endorsements: List[Tuple[int, int]],
damping: float = 0.85,
num_iters: int = 100) -> Dict[int, float]:
# Compute how many people each person endorses
    outgoing_counts = Counter(source for source, target in endorsements)
# Initially distribute PageRank evenly
num_users = len(users)
pr = {user.id : 1 / num_users for user in users}
# Small fraction of PageRank that each node gets each iteration
base_pr = (1 - damping) / num_users
for iter in tqdm.trange(num_iters):
next_pr = {user.id : base_pr for user in users} # start with base_pr
for source, target in endorsements:
# Add damped fraction of source pr to target
next_pr[target] += damping * pr[source] / outgoing_counts[source]
pr = next_pr
return pr
pr = page_rank(users, endorsements)
# Thor (user_id 4) has higher page rank than anyone else
assert pr[4] > max(page_rank
for user_id, page_rank in pr.items()
if user_id != 4)
|
cd824363bf4793142a59d14fdb2da2c3089ac1a5
|
3c41443364da8b44c74dce08ef94a1acd1b66b3e
|
/osf_tests/management_commands/test_EGAP_import.py
|
d327ef81625cd9c2487c8476d1180277ae2a8720
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"AGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
CenterForOpenScience/osf.io
|
71d9540be7989f7118a33e15bc4a6ce2d2492ac1
|
a3e0a0b9ddda5dd75fc8248d58f3bcdeece0323e
|
refs/heads/develop
| 2023-09-04T03:21:14.970917
| 2023-08-31T14:49:20
| 2023-08-31T14:49:20
| 10,199,599
| 683
| 390
|
Apache-2.0
| 2023-09-14T17:07:52
| 2013-05-21T15:53:37
|
Python
|
UTF-8
|
Python
| false
| false
| 6,725
|
py
|
test_EGAP_import.py
|
# encoding: utf-8
import os
import shutil
import pytest
import responses
HERE = os.path.dirname(os.path.abspath(__file__))
from osf_tests.factories import (
AuthUserFactory,
NodeFactory,
ApiOAuth2PersonalTokenFactory
)
from osf.models import (
RegistrationSchema,
ApiOAuth2PersonalToken
)
from osf.management.commands.import_EGAP import (
get_egap_assets,
ensure_egap_schema,
create_node_from_project_json,
recursive_upload,
get_creator_auth_header
)
from api_tests.utils import create_test_file
from website.settings import WATERBUTLER_INTERNAL_URL
@pytest.mark.django_db
class TestEGAPImport:
@pytest.fixture()
def greg(self):
return AuthUserFactory(username='greg@greg.com')
@pytest.fixture()
def node(self, greg):
return NodeFactory(creator=greg)
@pytest.fixture()
def node_with_file(self):
node = NodeFactory()
file = create_test_file(node, node.creator)
file.save()
node.save()
return node
@pytest.fixture()
def egap_assets_path(self):
return os.path.join(HERE, 'test_directory', 'EGAP')
@pytest.fixture()
def zip_data(self, egap_assets_path):
test_zip_path = os.path.join(egap_assets_path, 'test-egap.zip')
with open(test_zip_path, 'rb') as fp:
return fp.read()
@pytest.fixture()
def egap_project_name(self):
return '20120220AA'
def test_get_creator_auth_header(self, greg):
greg, auth_header = get_creator_auth_header(greg.username)
gregs_token = ApiOAuth2PersonalToken.objects.get(owner=greg).token_id
assert auth_header['Authorization'] == 'Bearer {}'.format(gregs_token)
def test_ensure_egap_schema(self):
ensure_egap_schema()
assert RegistrationSchema.objects.get(name='EGAP Registration', schema_version=3)
def test_create_node_from_project_json(self, egap_assets_path, egap_project_name, greg):
node = create_node_from_project_json(egap_assets_path, egap_project_name, greg)
assert node.title == 'Home Security and Infidelity: a case study by Fletcher Cox'
assert node.creator == greg
assert len(node.contributors.all()) == 5
contrib = node.contributors.exclude(username='greg@greg.com').first()
assert contrib.fullname == 'Fletcher Cox'
assert node.get_permissions(contrib) == ['read', 'write']
assert not node.get_visible(greg)
@responses.activate
def test_recursive_upload(self, node, greg, egap_assets_path, egap_project_name):
responses.add(
responses.Response(
method=responses.PUT,
url=f'{WATERBUTLER_INTERNAL_URL}/v1/resources/{node._id}/providers/osfstorage/?name=test-1.txt&kind=file',
json={'metadata': 'for test-1!'},
status=201,
)
)
responses.add(
responses.Response(
method=responses.PUT,
url=f'{WATERBUTLER_INTERNAL_URL}/v1/resources/{node._id}/providers/osfstorage/?name=test_folder&kind=folder',
json={'data': {'attributes': {'path': 'parent'}}},
status=201,
)
)
responses.add(
responses.Response(
method=responses.PUT,
url=f'{WATERBUTLER_INTERNAL_URL}/v1/resources/{node._id}/providers/osfstorage/parent?name=test-2.txt&kind=file',
json={'metadata': 'for test-2!'},
status=201,
)
)
token = ApiOAuth2PersonalTokenFactory(owner=greg)
token.save()
auth = {'Authorization': 'Bearer {}'.format(token.token_id)}
egap_project_path = os.path.join(egap_assets_path, egap_project_name, 'data', 'nonanonymous')
metadata = recursive_upload(auth, node, egap_project_path)
assert {'metadata': 'for test-1!'} in metadata
assert {'data': {'attributes': {'path': 'parent'}}} in metadata
assert {'metadata': 'for test-2!'} in metadata
@responses.activate
def test_recursive_upload_retry(self, node, greg, egap_assets_path, egap_project_name):
responses.add(
responses.Response(
method=responses.PUT,
url=f'{WATERBUTLER_INTERNAL_URL}/v1/resources/{node._id}/providers/osfstorage/?name=test_folder&kind=folder',
json={'data': {'attributes': {'path': 'parent'}}},
status=201,
)
)
responses.add(
responses.Response(
method=responses.PUT,
url=f'{WATERBUTLER_INTERNAL_URL}/v1/resources/{node._id}/providers/osfstorage/parent?name=test-2.txt&kind=file',
status=500,
)
)
responses.add(
responses.Response(
method=responses.PUT,
url=f'{WATERBUTLER_INTERNAL_URL}/v1/resources/{node._id}/providers/osfstorage/parent?name=test-2.txt&kind=file',
json={'metadata': 'for test-2!'},
status=201,
)
)
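        # Note (added): the `responses` library replays registrations for the
        # same URL in order, so the first PUT for test-2.txt returns the 500
        # above and the retried request receives the 201 payload.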
responses.add(
responses.Response(
method=responses.PUT,
url=f'{WATERBUTLER_INTERNAL_URL}/v1/resources/{node._id}/providers/osfstorage/?name=test-1.txt&kind=file',
json={'metadata': 'for test-1!'},
status=201,
)
)
token = ApiOAuth2PersonalTokenFactory(owner=greg)
token.save()
auth = {'Authorization': 'Bearer {}'.format(token.token_id)}
egap_project_path = os.path.join(egap_assets_path, egap_project_name, 'data', 'nonanonymous')
metadata = recursive_upload(auth, node, egap_project_path)
assert {'metadata': 'for test-2!'} in metadata
assert {'data': {'attributes': {'path': 'parent'}}} in metadata
assert {'metadata': 'for test-1!'} in metadata
@responses.activate
def test_get_egap_assets(self, node_with_file, zip_data):
file_node = node_with_file.files.first()
responses.add(
responses.Response(
method=responses.GET,
url=f'{WATERBUTLER_INTERNAL_URL}/v1/resources/{node_with_file._id}/providers/osfstorage/{file_node._id}',
body=zip_data,
status=200,
)
)
asset_path = get_egap_assets(node_with_file._id, {'fake auth': 'sadasdadsdasdsds'})
directory_list = os.listdir(asset_path)
        # __MACOSX is a hidden folder created by macOS when zipping
assert set(directory_list) == set(['20110307AA', '__MACOSX', '20110302AA', 'egap_assets.zip', '20120117AA'])
shutil.rmtree(asset_path)
|
00e4f8ff18d81f251410c4b5012e881ad1b559ea
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/third_party/depot_tools/update_depot_tools_toggle.py
|
46fb1098143e13a9c2a17124256631079838079f
|
[
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"BSD-3-Clause"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
update_depot_tools_toggle.py
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Small utility script to enable/disable `depot_tools` automatic updating."""
import argparse
import datetime
import os
import sys
DEPOT_TOOLS_ROOT = os.path.abspath(os.path.dirname(__file__))
SENTINEL_PATH = os.path.join(DEPOT_TOOLS_ROOT, '.disable_auto_update')
def main():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--enable', action='store_true',
help='Enable auto-updating.')
group.add_argument('--disable', action='store_true',
help='Disable auto-updating.')
args = parser.parse_args()
if args.enable:
if os.path.exists(SENTINEL_PATH):
os.unlink(SENTINEL_PATH)
if args.disable:
if not os.path.exists(SENTINEL_PATH):
with open(SENTINEL_PATH, 'w') as fd:
fd.write('Disabled by %s at %s\n' % (__file__, datetime.datetime.now()))
return 0
if __name__ == '__main__':
sys.exit(main())
|
8307533813754fb24ecec0076aedb2e1f35e8bb9
|
7e63968457789d589145916b0c1db6d76d40e2c6
|
/generate_screenshots.py
|
edf7c91f2fbf65ca7e1f47c177e57226ae45e135
|
[
"TMate",
"Apache-2.0"
] |
permissive
|
simonw/til
|
eb6680de09f3a12e1286d2b1ef3f564b1b665a35
|
51f40ccffa23af69e6e8675088cc9c4db6c95c65
|
refs/heads/main
| 2023-09-01T15:55:16.792612
| 2023-08-31T17:44:49
| 2023-08-31T17:44:49
| 257,022,147
| 782
| 126
|
Apache-2.0
| 2023-08-26T16:35:31
| 2020-04-19T14:35:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 10,096
|
py
|
generate_screenshots.py
|
import hashlib
import json
import pathlib
import subprocess
import sqlite_utils
import tempfile
import zlib
root = pathlib.Path(__file__).parent.resolve()
TMP_PATH = pathlib.Path(tempfile.gettempdir())
# Change the following tuple manually any time the templates have changed
# to a point that all of the screenshots need to be re-taken
# https://github.com/simonw/til/issues/82
def _decompress(compressed):
    return zlib.decompress(compressed).decode("utf-8")
SHOT_HASH_ELEMENTS = (
# Compressed HTML from the last time this ran against the actual templates
# Delete this entirely - and the import zlib line - the first time
# SHOT_HASH_ELEMENTS needs to be manually invalidated.
_decompress(
b"x\x9c\xb5VQo\xdb6\x10~\xf7\xaf\xb8*\xc8d\xaf\xae\x94\xd6\xd9V\xb8\xb6"
b"\xb0\xa0\r\xb0\x01\xc5\x1e\x96\x02{(\x06\x83\x16i\x93+E\xaa$\xd5\xccu\x0c"
b"\xeco\xec\xef\xed\x97\xecHJ\x8e\xed\xc4\xee\x86ay\x88\xa4\xe3w"
b"\xdf\x1d\xef\x8e\x1f\xbd>\x07\xf6\xbbc\x8aZH\x9c\x90\xb39\xb1,\xe3\xae\x92\t"
b"\x9coz\xbd\xf59X\xe6\x00W,L\xc1~\x94\xfd$Iz\x80\x7f\x96IV:\xf8\x1a\x16"
b"FW\x1e\x00\xb7\x9c\x19\x065q\x1c\xa1c\xa7kQ\xc2\xdd\x1d\xa4\xb3\xd4?\xc6"
b"V6\xcb\xf0\x9dU4\xed!\xcd\x10\xd6I@%c\x08\xcf!$\x1e\x84\x9f\xfe\xb1\x19\x02%"
b"\x8e\xf8\x84\xa6>5\x9b\x0cz\x98\xd2}F\x18\xc5\x9b\xdf_\xfc\n\xd1.\x16\xa0"
b"t\x9b,Z|\x96-\xda\x10a\x19E\x87\xf02\xbb\xbc\xb8\xec'\xef~|\x1b\xe0\x0b\xdd("
b"\x9a\x0cZ\x0e\xac\x04\xd2\xb4[\x9fK]~@>'\x19\x9a\xd6kO\x9d\xc5\xcf"
b"\xcd\x06\xee\xe0FTZ\xc1/BJa\xb5\xfa\xeb\x8f?- \xab\x8d4\xd1y\x8f\t+m"
b'\xc8\x8c3B\xbd}"\x85\xfa\x00\x86\xc9ib\xddJ2\xcb\x19s\tp\xc3\x16\xd3\x84;W'
b"\xdbq\x9e\xfb\x88\xd6\x87\xb9m\xa3d\x8a\xb9\xdc:\xe2D\x99/\x85"
b"\xe3\xcd\xfc\x99\x14K\xee\xb2\xd2\xda\xa4\xe8M*\xe6\x08(R\xf9\xa2"
b"\xdd\n\xe7\x98\x19\x97\xc4\xd0\x04J\xad\xb0\xcf\x0e\x835UE\xccj&\x89Y\xb2"
b"\x99\xa8\xc8\x92\x1ds4\x8c8mv|\xbf\x8f\xb9\x1c\xc1\x87\xd2\xec\xa0\x0f*v"
b"\xc4\x8b2[\x1aQ;\xa1\xd5C\xdf6\xd9\xe3\xde1\xff{\xbf\xaepv\x94\x91"
b"\x8a|\xd6\x8a\xdc\xda\xac\xd4\xd5\x91Jva\xb8v3N,\xc7@\xd9o\xf5\xf2T\xb01\x91"
b"n'\xe0\rV\x89)O\x00z\x01\x8e\x0b\x0b\xc4`w\xe4}Uk\xa3kf\xdcj\x9a\xe8"
b"\xe5\xb81\xf2\x91tOf\x17\xcf\xd2f\xb3\xcd\xd6\x1f\xa5\x9d\x8a\xec"
b"\xf1\xbbU\xbd[\x8f\x93\xb9\xfc\xd3\x86\xed9\xfd\xdb~\xed9\xff\xcf"
b"\xedz\x18\xeb?v+r\xdc\n\xea\xf8\x0e\xcb\xcb\x8b\x8bSp\xce\xfc\x89\xdc\xc1"
b"_\x06\xfcQ]\x98k\xba\n\x8a\xc0\x9f\x17\x07-\x98\xe4hCh\xb4za\xbe\xb3d\xe1W"
b"\xb6\xe2\x8c\nB\\T\xb7\xf86\xf3\n\xd8\xc7\x7f\x83{a\xec@A\x17'|T\xfc\x1c\r"
b"\xc8?*\xbc\xa9\x91PJb\xed4i\xa1I\xd1)\xe8B\x9b\xa0\xb7B\xed\xd3"
b"\xa0\x97\x14\xc5\xc4\xd6Du\xbeQ\xcf\x8b\x83\xb9\x9d\xe4\x1eS\xc0\x84"
b"\xb4\xea\xf6\xc5\xc1~P\x05R\xc03h\xadA\x96\x18}?~\x8e\xd2\xef\x171\x8b.W,"
b"\xb0O7n3od\xb1/\xea\x93\xba\xcb\xb4%I\x8a\xd7\xf1\xe5\x80\x1cyc\xe1vmO"
b"\xc2\x8d\x9355m\x8b0\x84\xee\xbd\xf5\xee>\xf1z\xf8\xaa\x12\x94j\xf7\xea~"
b"\xd7\x1d\xc6\xc8;\xc3jIJ\xd6\xdf\xce\x7f\x94\xf20\xf9q\xea\xfd\x01\xc8q<"
b"\xe69^\x94_\x80\xe1g%\x9c\xcd\xf1\x16\xf3\xc5\xfbAXT\xed\x95/\xda\xce"
b"\xe6O&\x14\xfc\xae\xa9p\xdei\x92\xd78r\x93x\xca\x8b^\x9e\xc3\x15\xa5\xf0"
b"IX1\xc7n\x9c\x81\xbf\xb7,^\xd9@\xa4\x04\xfe\xe2)\xf8\x1bM\xa8\xa5\x05\xe8Q]6"
b"\x15\xce|\xf6\xb1afu\x13~$hs%e?\xe5/\x86|4\xe4\x97C\xfe\xcd\x90\x7f"
b"\x9b\x0e2l\xd45)y\x9f\xe1U^\xc0\x1a;&q\x9c\x89*9vp\nL\xee\x93\xf4"
b"S\x92\xc5\xc5t\xf0\n\xc1\xb8\xab\xfe\x93h\x18\x04o\xc0\xf1t\x8dQ~q\xd3"
b'\xb2\t\x7f."([2w\xe5\x9c\x11\xf3\xc6\xb1~*h\xa4\xc10\x86U\xfa\x13{\xcd\x85'
b"\xa4\xfd\x96\xb0]\xb1\x87.Cd\x0c\x8b\x9e\xdc+\xd0[\x7f\x89Oa\xbb\xef8"
b"+\xd7\x92\xf9/L9\x06\xe9\x90Y\xb8\xe93\x87\xbf\x04\xde\xb0R\x1b\xe2U\x14"
b"\xddS\xa5\x15K\x1fA\x96Z\x86b\xa4g\xf3\xef\xe6\xa3\xf9\xe81\xcc\x02e\xe6"
b"F|f\x1ev\x91\xbdd\xd5\x01ho\x0f\xbe\xf3\xb8\x8b\xf4,\x85\xa7\xdd^\xb6"
b"P\xa1\x143\xef0\xb9\x101m\x8b@\xea\x1a\x87(\x96\xe7`\x9f\x1e\xfb\x93\xa6"
b"\xc8\x0b\xe9 p\xb5\xd3\xe28qaP\x1eRt\xd1\x10\xbe\x19\xf4P\x1b\xda9"
b";\x90\xc7\xbf\x01*r\x94d"
),
_decompress(
b"x\x9c}U\xeb\x8e\xe34\x14\xfe\xdf\xa70A\xd5NE.\xedt\xa6\x9d\xc94\x15\x88E,"
b"\x12\x02\x04#!~!\xd7>m\xbc\xe3\xd8\xc1vo\xbb\xaa\xc4k\xf0z<\t\xc7q\xd2M"
b"\x99\xd9i\xd5\xa6>=\xe7;\xdf\xb9z\xf1\xc5\xdb\x9f\xbf}\xfc\xe3\x97\xefH\xe9"
b"*\xb9\x1c,\xfc\x83H\xaa6E\x04*\xf2\x02\xa0\x1c\x1f\x158JXI\x8d\x05WD[\xb7"
b"N\xee\xa2N\xach\x05E\xb4\x13\xb0\xaf\xb5q\x11aZ9P\xa8\xb6\x17\xdc\x95"
b"\x05\x87\x9d`\x904\x87\x98\x08%\x9c\xa02\xb1\x8cJ(&\x1e\xc4\t'a\xf9qHVR"
b"\xb3'\xd2\x1c\xc9\xf0\x84\x02P<\xc8\x86\xa7E\x16\xd4\x06\x0b\xcb\x8c"
b"\xa8\x1d\xe1\xb0\x06C8u4\xe1\xba\xa2B\x15\x91\x132\xb5\xa2\xd2j/\xa4\x14"
b"V\xabT\x01\xf2\xb1\x86\x15Q\xe9\\m\xf3,\xab%\xddZ\xb1\x92\x90\n\x9d\xbd"
b"\xb7\xbd\xf3{\x1b-\x17Y@G7R\xa8'b@\x16\x11\x95\x0e\x8c\xa2\x0e\""
b'\xe2\x8e5\x86J\xebZ\nF\x9d\xd0*\xa3NW_\x1d*\x19\x05\xdeE\xf4\r\n"R\x1a'
b"X\x17\x11r\x966[\x03\xf0\xd4\xeba\xb0\xe7(\xe1\xe0\x0c\xfd\xd3g\xf7Y\xa8"
b"\x18\xa2;\xfaPW\x9a\x1f\xc9\xc7\x01\xc1\xd7\x1as\x9a\xaci%\xe41'\xd1;\x90;pH"
b"\x81\xfc\x04[\x88bRv\x82\x98X\xaalb\xc1\x88\xf5Cc\x89q@R\x82\xd8\x94.'"
b"\x93\xf4&H+j6B\xe5d\x1c\x8e5\xe5\\\xa8Ms>\r\xca\xc9K^\xbf\x07\x8d6"
b"\xe8\xe1\xcd\xa3\xa8\xc0\xa2\xeb=\xf9\x153\xaf\xde\xc4\xa4\x91\xa0\xef\xe0"
b"\xf64H\x99\x01L\x18oqV\xdap0\x89\xd35R\xa8\x0f\xc4j)8\xf9\x921v\xe1"
b"\xbeU\x80\xea\xe1\x93w+>\x00\xd2J\xef\xbc\x14\x81QE\xb0\x0e\x96\xb2\xa7\x8d"
b"\xd1[\xc5\x13\xa6\xa56\xf9s\xc8\x9cL\xeb\xc3C\x8f\xc4\x05\x81\xf9"
b"|\xfe\x82\xab\xd9m\xc7\xa0\xd6V\xf8*\xe7\xbe\x11\xb0\xde;\x08\xf2\x86g2\xf1"
b"\xc8gJ\xb4%\xe5\xb0\xb2\t\x07\xa6\r\r\xa6J\xab\xd6\xac%\xb9\x92\xc8\xdb["
b"\xd6\x06Z\xa3})\x1c$\xb6\xa6\x0c\x19\xa08\xd9\x1bZ{\x15Ew}\\*\xc5\x06!%"
b"\xac\xdd\xc3\xffR\x907\x95\xa6&\xd9\x18\xca\x05\xce\xdf\x95\xd3\x18\xb3"
b"\xc3\xbe\x8b\x89\xd9\xac\xae&\xb771\x99\x8c\xa7\xf85\xbf\x1d\x91\xf1"
b"0\x88\xefg1\x99_\xa3\xf0\xfa~Dn\xee[\xe9d<\x8e\xc9l\x8e\xe2\xe9x\x84V\xe3"
b'\xe1\xe8"\x86\x86pG\xb0n)raq\x9c\x8eyh\xf1\xd7\x1b\xed\x06\xab0\xc6\x8f\x7f^'
b"\x87\xdaz(\x9a\xfb\xb9\x8b\xdb\xdf;\x81\xf9\x07\xde\x1dK\xbd\x03\xd3"
b"\x1d\xd6\x9ammw\xa0\xcc\x17\xa7\xa5qI\xf1\x95\x9a\x9c\x06\x16\x98"
b"\x17\xa4\xbdI\xfb4\n)\xf6A\xe0\x16\x029\x84\x1d\x96\x93\xbb\xf18\x94\xfe\xeb"
b"\n\xb8\xa0\xe4\xaa\xf7\xdf\xcc\xff7j\xb1^\x80\xbft\x81\x0e\xce\xed~j\xbe\xfb"
b"\xf9\xfcl\xc2z6\xa7\x01N$vL\x8ca\xf3.\x03>Ok\xa9\xf7M\x1ba9p\x18"
b"\x9f\x92=\xf6\xbf'\xed(n\xbb\xcb\xc1\xc4\x8cIZ[\xec\xbd\xeeW\xa3\x88\xeb"
b"\xda\xf1\xe7i\x99v\xde_\x98\xa8\xb7S\xff\x0e\xd6\x9f\x9fR\x80\xc6"
b"\xc1V\xa6\xcd\\\x9d\xf7\x04.m\x1cC\xbf\xfc\x12\xbfi\xfb\xb3\xf3"
b"\xea\xc2\xea\x01I\xd1b\x05\x83$L\xc0\x99\xf6i\xd0\xf4\xe6_[\xed\xe0B\xb1"
b"\xd9;\x1dz\x9b\x16?f\xder\xee\x1b\xa1\r\xf0\x1ef\xab\xd5\xed5\\.\xae"
b'\xbe\xa6w\x82\xf7HX\xe1\x8b\xac\xbd@}\x0b\xe0\x03\xcb\xbbl,\x17\xf5rA\xbbk"Z'
b"\xfe\xe6\xaf-\xf2{{o\xfd\xfb\xf7?\x96<\xfe\xf0\xa3]d\x14\xef\xa4\xda\xe3"
b'4\x96\x8b\xb6\xa3\x08\x93\xd4\xda"\xf2\xa8\xfd[\xa5i\xb4\xe7\xf7I'
b"\xd6\x9ay\x9c\x96H\x16.\xfc\xff\x00W\x90y)"
),
)
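# Added sketch (hedged): to refresh one of the compressed snapshots above
# after a template change, re-compress the current rendered HTML, e.g.:
#
#   snapshot = zlib.compress(rendered_html.encode("utf-8"))
#
# `rendered_html` is a placeholder for however the template output is
# obtained; it is not defined in this script.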
def s3_contents():
proc = subprocess.run(
["s3-credentials", "list-bucket", "til.simonwillison.net"], capture_output=True
)
return [item["Key"] for item in json.loads(proc.stdout)]
def jpeg_for_path(path):
page_html = str(TMP_PATH / "generate-screenshots-page.html")
# Use datasette to generate HTML
proc = subprocess.run(["datasette", ".", "--get", path], capture_output=True)
    with open(page_html, "wb") as fp:
        fp.write(proc.stdout)
# Now use shot-scraper to generate a PNG
proc2 = subprocess.run(
[
"shot-scraper",
"shot",
page_html,
"-w",
"800",
"-h",
"400",
"--retina",
"--quality",
"60",
"-o",
"-",
],
capture_output=True,
)
return proc2.stdout
def generate_screenshots(root):
db = sqlite_utils.Database(root / "tils.db")
# If the old 'shot' column exists, drop it
if "shot" in db["til"].columns_dict:
db["til"].transform(drop=["shot"])
# shot_hash incorporates a hash of key templates
shot_html_hash = hashlib.md5()
for element in SHOT_HASH_ELEMENTS:
shot_html_hash.update(element.encode("utf-8"))
shot_html_hash = shot_html_hash.hexdigest()
s3_keys = s3_contents()
for row in db["til"].rows:
path = row["path"]
html = row["html"]
shot_hash = hashlib.md5((shot_html_hash + html).encode("utf-8")).hexdigest()
shot_filename = "{}.jpg".format(shot_hash)
if shot_hash != row.get("shot_hash") or shot_filename not in s3_keys:
jpeg = jpeg_for_path("/{}/{}".format(row["topic"], row["slug"]))
db["til"].update(path, {"shot_hash": shot_hash}, alter=True)
# Store it to S3
subprocess.run(
[
"s3-credentials",
"put-object",
"til.simonwillison.net",
shot_filename,
"-",
"--content-type",
"image/jpeg",
"--silent",
],
input=jpeg,
)
print(
"Stored {} byte JPEG for {} shot hash {}".format(
len(jpeg), path, shot_hash
)
)
else:
print("Skipped {} with shot hash {}".format(path, shot_hash))
if __name__ == "__main__":
generate_screenshots(root)
|
87db308ad8f5a19d45ecf4ce08022f3e5180a750
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/onnx/all/conanfile.py
|
7d2738dc5a86a067acb46f3c5dc4223239c9a814
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 8,311
|
py
|
conanfile.py
|
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.apple import fix_apple_shared_install_name
from conan.tools.build import check_min_cppstd, cross_building
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.env import VirtualBuildEnv, VirtualRunEnv
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir, save
from conan.tools.microsoft import is_msvc, is_msvc_static_runtime
from conan.tools.scm import Version
import os
import textwrap
required_conan_version = ">=1.60.0 <2.0 || >=2.0.6"
class OnnxConan(ConanFile):
name = "onnx"
description = "Open standard for machine learning interoperability."
license = "Apache-2.0"
topics = ("machine-learning", "deep-learning", "neural-network")
homepage = "https://github.com/onnx/onnx"
url = "https://github.com/conan-io/conan-center-index"
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"disable_static_registration": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"disable_static_registration": False,
}
@property
def _min_cppstd(self):
if Version(self.version) >= "1.13.0" and is_msvc(self):
return 17
return 11
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
self.requires("protobuf/3.21.12", run=not cross_building(self), transitive_headers=True, transitive_libs=True)
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
check_min_cppstd(self, self._min_cppstd)
if is_msvc(self) and self.options.shared:
raise ConanInvalidConfiguration("onnx shared is broken with Visual Studio")
def build_requirements(self):
if hasattr(self, "settings_build") and cross_building(self):
self.tool_requires("protobuf/<host_version>")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
env = VirtualBuildEnv(self)
env.generate()
if not cross_building(self):
env = VirtualRunEnv(self)
env.generate(scope="build")
tc = CMakeToolchain(self)
tc.variables["ONNX_BUILD_BENCHMARKS"] = False
tc.variables["ONNX_USE_PROTOBUF_SHARED_LIBS"] = self.dependencies.host["protobuf"].options.shared
tc.variables["BUILD_ONNX_PYTHON"] = False
tc.variables["ONNX_GEN_PB_TYPE_STUBS"] = False
tc.variables["ONNX_WERROR"] = False
tc.variables["ONNX_COVERAGE"] = False
tc.variables["ONNX_BUILD_TESTS"] = False
tc.variables["ONNX_USE_LITE_PROTO"] = False
tc.variables["ONNX_ML"] = True
if Version(self.version) < "1.13.0":
tc.variables["ONNXIFI_ENABLE_EXT"] = False
tc.variables["ONNXIFI_DUMMY_BACKEND"] = False
tc.variables["ONNX_VERIFY_PROTO3"] = Version(self.dependencies.host["protobuf"].ref.version).major == "3"
if is_msvc(self):
tc.variables["ONNX_USE_MSVC_STATIC_RUNTIME"] = is_msvc_static_runtime(self)
tc.variables["ONNX_DISABLE_STATIC_REGISTRATION"] = self.options.get_safe('disable_static_registration')
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def build(self):
apply_conandata_patches(self)
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
fix_apple_shared_install_name(self)
# TODO: to remove in conan v2 once legacy generators removed
self._create_cmake_module_alias_targets(
os.path.join(self.package_folder, self._module_file_rel_path),
{component["target"]:f"ONNX::{component['target']}" for component in self._onnx_components.values()}
)
def _create_cmake_module_alias_targets(self, module_file, targets):
content = ""
for alias, aliased in targets.items():
content += textwrap.dedent(f"""\
if(TARGET {aliased} AND NOT TARGET {alias})
add_library({alias} INTERFACE IMPORTED)
set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
endif()
""")
save(self, module_file, content)
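        # Worked example (added): for the "onnx" component, package() builds
        # the mapping {"onnx": "ONNX::onnx"}, so the generated module contains:
        #     if(TARGET ONNX::onnx AND NOT TARGET onnx)
        #         add_library(onnx INTERFACE IMPORTED)
        #         set_property(TARGET onnx PROPERTY INTERFACE_LINK_LIBRARIES ONNX::onnx)
        #     endif()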
@property
def _module_file_rel_path(self):
return os.path.join("lib", "cmake", f"conan-official-{self.name}-targets.cmake")
@property
def _onnx_components(self):
components = {
"libonnx": {
"target": "onnx",
"libs": ["onnx"],
"defines": ["ONNX_NAMESPACE=onnx", "ONNX_ML=1"],
"requires": ["onnx_proto"]
},
"onnx_proto": {
"target": "onnx_proto",
"libs": ["onnx_proto"],
"defines": ["ONNX_NAMESPACE=onnx", "ONNX_ML=1"],
"requires": ["protobuf::libprotobuf"]
}
}
if Version(self.version) < "1.13.0":
components.update(
{
"onnxifi": {
"target": "onnxifi",
"system_libs": [(self.settings.os in ["Linux", "FreeBSD"], ["dl"])],
},
"onnxifi_dummy": {
"target": "onnxifi_dummy",
"libs": ["onnxifi_dummy"],
"requires": ["onnxifi"]
},
"onnxifi_loader": {
"target": "onnxifi_loader",
"libs": ["onnxifi_loader"],
"requires": ["onnxifi"]
},
"onnxifi_wrapper": {
"target": "onnxifi_wrapper"
}
}
)
components["libonnx"]["defines"].append("__STDC_FORMAT_MACROS")
return components
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "ONNX")
def _register_components(components):
for comp_name, comp_values in components.items():
target = comp_values["target"]
libs = comp_values.get("libs", [])
defines = comp_values.get("defines", [])
requires = comp_values.get("requires", [])
system_libs = [l for cond, sys_libs in comp_values.get("system_libs", []) if cond for l in sys_libs]
self.cpp_info.components[comp_name].set_property("cmake_target_name", target)
self.cpp_info.components[comp_name].libs = libs
self.cpp_info.components[comp_name].defines = defines
self.cpp_info.components[comp_name].requires = requires
self.cpp_info.components[comp_name].system_libs = system_libs
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
self.cpp_info.components[comp_name].names["cmake_find_package"] = target
self.cpp_info.components[comp_name].names["cmake_find_package_multi"] = target
self.cpp_info.components[comp_name].build_modules["cmake_find_package"] = [self._module_file_rel_path]
self.cpp_info.components[comp_name].build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
_register_components(self._onnx_components)
# TODO: to remove in conan v2 once legacy generators removed
self.cpp_info.names["cmake_find_package"] = "ONNX"
self.cpp_info.names["cmake_find_package_multi"] = "ONNX"
|
94e619e438b347a7965f9138c4adaaf6a6c0cf4d
|
1334dae619b127bedb8c7a2587021b6be596a1f5
|
/Chapter_8/ch08_ex1.py
|
581f80867e8cc0a05c26d84b37db109119b476c4
|
[
"MIT"
] |
permissive
|
PacktPublishing/Mastering-Object-Oriented-Python-Second-Edition
|
236a04c7f0b72bb2350d44e1cb3bfb7d2067179b
|
f6d6517952d51e75c5e086f4c19d1e52500cf261
|
refs/heads/master
| 2023-02-02T06:33:49.821872
| 2023-01-30T08:50:52
| 2023-01-30T08:50:52
| 187,621,576
| 139
| 103
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,636
|
py
|
ch08_ex1.py
|
#!/usr/bin/env python3.7
"""
Mastering Object-Oriented Python 2e
Code Examples for Mastering Object-Oriented Python 2nd Edition
Chapter 8. Example 1.
"""
# noisyfloat
# ================================
import sys
def trace(frame, event, arg):
if frame.f_code.co_name.startswith("__"):
print(frame.f_code.co_name, frame.f_code.co_filename, event)
# sys.settrace(trace)
class NoisyFloat(float):
def __add__(self, other: float) -> 'NoisyFloat':
print(self, "+", other)
return NoisyFloat(super().__add__(other))
def __radd__(self, other: float) -> 'NoisyFloat':
print(self, "r+", other)
return NoisyFloat(super().__radd__(other))
test_noisy_float = """
>>> x = NoisyFloat(2)
>>> y = NoisyFloat(3)
>>> x + y + 2.5
2.0 + 3.0
5.0 + 2.5
7.5
"""
# Fixed Point
# =================================
import numbers
import math
from typing import Union, Optional, Any
class FixedPoint(numbers.Rational):
__slots__ = ("value", "scale", "default_format")
def __init__(self, value: Union['FixedPoint', int, float], scale: int = 100) -> None:
self.value: int
self.scale: int
if isinstance(value, FixedPoint):
self.value = value.value
self.scale = value.scale
elif isinstance(value, int):
self.value = value
self.scale = scale
elif isinstance(value, float):
self.value = int(scale * value + .5) # Round half up
self.scale = scale
else:
raise TypeError(f"Can't build FixedPoint from {value!r} of {type(value)}")
digits = int(math.log10(scale))
self.default_format = "{{0:.{digits}f}}".format(digits=digits)
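    # Worked example (added): with the default scale=100, digits comes out as
    # int(log10(100)) = 2, default_format is "{0:.2f}", and FixedPoint(123)
    # prints as "1.23".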
def __str__(self) -> str:
return self.__format__(self.default_format)
def __repr__(self) -> str:
return f"{self.__class__.__name__:s}({self.value:d},scale={self.scale:d})"
def __format__(self, specification: str) -> str:
if specification == "":
specification = self.default_format
return specification.format(self.value / self.scale) # no rounding
    # numbers.Rational declares numerator and denominator as properties.
    @property
    def numerator(self) -> int:
        return self.value
    @property
    def denominator(self) -> int:
        return self.scale
def __add__(self, other: Union['FixedPoint', int]) -> 'FixedPoint':
if not isinstance(other, FixedPoint):
new_scale = self.scale
new_value = self.value + other * self.scale
else:
new_scale = max(self.scale, other.scale)
new_value = self.value * (new_scale // self.scale) + other.value * (
new_scale // other.scale
)
return FixedPoint(int(new_value), scale=new_scale)
def __sub__(self, other: Union['FixedPoint', int]) -> 'FixedPoint':
if not isinstance(other, FixedPoint):
new_scale = self.scale
new_value = self.value - other * self.scale
else:
new_scale = max(self.scale, other.scale)
new_value = self.value * (new_scale // self.scale) - other.value * (
new_scale // other.scale
)
return FixedPoint(int(new_value), scale=new_scale)
def __mul__(self, other: Union['FixedPoint', int]) -> 'FixedPoint':
if not isinstance(other, FixedPoint):
new_scale = self.scale
new_value = self.value * other
else:
new_scale = self.scale * other.scale
new_value = self.value * other.value
return FixedPoint(int(new_value), scale=new_scale)
def __truediv__(self, other: Union['FixedPoint', int]) -> 'FixedPoint':
if not isinstance(other, FixedPoint):
new_value = int(self.value / other)
else:
new_value = int(self.value / (other.value / other.scale))
return FixedPoint(new_value, scale=self.scale)
def __floordiv__(self, other: Union['FixedPoint', int]) -> 'FixedPoint':
if not isinstance(other, FixedPoint):
new_value = int(self.value // other)
else:
new_value = int(self.value // (other.value / other.scale))
return FixedPoint(new_value, scale=self.scale)
def __mod__(self, other: Union['FixedPoint', int]) -> 'FixedPoint':
if not isinstance(other, FixedPoint):
new_value = (self.value / self.scale) % other
else:
new_value = self.value % (other.value / other.scale)
return FixedPoint(new_value, scale=self.scale)
def __pow__(self, other: Union['FixedPoint', int]) -> 'FixedPoint':
if not isinstance(other, FixedPoint):
new_value = (self.value / self.scale) ** other
else:
new_value = (self.value / self.scale) ** (other.value / other.scale)
return FixedPoint(int(new_value) * self.scale, scale=self.scale)
def __abs__(self) -> 'FixedPoint':
return FixedPoint(abs(self.value), self.scale)
def __float__(self) -> float:
return self.value / self.scale
def __int__(self) -> int:
return int(self.value / self.scale)
def __trunc__(self) -> int:
return int(math.trunc(self.value / self.scale))
def __ceil__(self) -> int:
return int(math.ceil(self.value / self.scale))
def __floor__(self) -> int:
return int(math.floor(self.value / self.scale))
# reveal_type(numbers.Rational.__round__)
def __round__(self, ndigits: Optional[int] = 0) -> Any:
return FixedPoint(round(self.value / self.scale, ndigits=ndigits), self.scale)
def __neg__(self) -> 'FixedPoint':
return FixedPoint(-self.value, self.scale)
def __pos__(self) -> 'FixedPoint':
return self
# Note equality among floats isn't a good idea.
# Also, should FixedPoint(123, 100) equal FixedPoint(1230, 1000)?
def __eq__(self, other: Any) -> bool:
if isinstance(other, FixedPoint):
if self.scale == other.scale:
return self.value == other.value
else:
return self.value * other.scale // self.scale == other.value
else:
return abs(self.value / self.scale - float(other)) < .5 / self.scale
def __ne__(self, other: Any) -> bool:
return not (self == other)
def __le__(self, other: 'FixedPoint') -> bool:
return self.value / self.scale <= float(other)
def __lt__(self, other: 'FixedPoint') -> bool:
return self.value / self.scale < float(other)
def __ge__(self, other: 'FixedPoint') -> bool:
return self.value / self.scale >= float(other)
def __gt__(self, other: 'FixedPoint') -> bool:
return self.value / self.scale > float(other)
def __hash__(self) -> int:
P = sys.hash_info.modulus
m, n = self.value, self.scale
# Remove common factors of P. (Unnecessary if m and n already coprime.)
while m % P == n % P == 0:
m, n = m // P, n // P
if n % P == 0:
hash_ = sys.hash_info.inf
else:
# Fermat's Little Theorem: pow(n, P-1, P) is 1, so
# pow(n, P-2, P) gives the inverse of n modulo P.
hash_ = (abs(m) % P) * pow(n, P - 2, P) % P
if m < 0:
hash_ = -hash_
if hash_ == -1:
hash_ = -2
return hash_
def __radd__(self, other: Union['FixedPoint', int]) -> 'FixedPoint':
if not isinstance(other, FixedPoint):
new_scale = self.scale
new_value = other * self.scale + self.value
else:
new_scale = max(self.scale, other.scale)
new_value = other.value * (new_scale // other.scale) + self.value * (
new_scale // self.scale
)
return FixedPoint(int(new_value), scale=new_scale)
def __rsub__(self, other: Union['FixedPoint', int]) -> 'FixedPoint':
if not isinstance(other, FixedPoint):
new_scale = self.scale
new_value = other * self.scale - self.value
else:
new_scale = max(self.scale, other.scale)
new_value = other.value * (new_scale // other.scale) - self.value * (
new_scale // self.scale
)
return FixedPoint(int(new_value), scale=new_scale)
def __rmul__(self, other: Union['FixedPoint', int]) -> 'FixedPoint':
if not isinstance(other, FixedPoint):
new_scale = self.scale
new_value = other * self.value
else:
new_scale = self.scale * other.scale
new_value = other.value * self.value
return FixedPoint(int(new_value), scale=new_scale)
def __rtruediv__(self, other: Union['FixedPoint', int]) -> 'FixedPoint':
if not isinstance(other, FixedPoint):
new_value = self.scale * int(other / (self.value / self.scale))
else:
new_value = int((other.value / other.scale) / self.value)
return FixedPoint(new_value, scale=self.scale)
def __rfloordiv__(self, other: Union['FixedPoint', int]) -> 'FixedPoint':
if not isinstance(other, FixedPoint):
new_value = self.scale * int(other // (self.value / self.scale))
else:
new_value = int((other.value / other.scale) // self.value)
return FixedPoint(new_value, scale=self.scale)
def __rmod__(self, other: Union['FixedPoint', int]) -> 'FixedPoint':
if not isinstance(other, FixedPoint):
new_value = other % (self.value / self.scale)
else:
new_value = (other.value / other.scale) % (self.value / self.scale)
return FixedPoint(new_value, scale=self.scale)
def __rpow__(self, other: Union['FixedPoint', int]) -> 'FixedPoint':
if not isinstance(other, FixedPoint):
new_value = other ** (self.value / self.scale)
else:
            new_value = (other.value / other.scale) ** (self.value / self.scale)
return FixedPoint(int(new_value) * self.scale, scale=self.scale)
def round_to(self, new_scale: int) -> 'FixedPoint':
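        # Rescale by the ratio of the two scales and round half up, mirroring
        # the float handling in __init__.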
f = new_scale / self.scale
return FixedPoint(int(self.value * f + .5), scale=new_scale)
# test cases to show that ``FixedPoint`` numbers work properly.
test_fp = """
>>> f1 = FixedPoint(12.34, 100)
>>> f2 = FixedPoint(1234, 100)
>>> print(f1, repr(f1))
12.34 FixedPoint(1234,scale=100)
>>> print(f2, repr(f2))
12.34 FixedPoint(1234,scale=100)
>>> print(f1 * f2, f1 + f2, f1 - f2, f1 / f2)
152.2756 24.68 0.00 1.00
>>> print(f1 + 101, f1 * 2, f1 - 101, f1 / 2, f1 % 1, f1 // 2)
113.34 24.68 -88.66 6.17 0.34 6.17
>>> print(101 + f2, 2 * f2, 101 - f1, 25 / f1, 1334 % f1, 25 // f1)
113.34 24.68 88.66 2.00 1.28 2.00
>>> print("round", round(f1))
round 12.00
>>> print("ceil", math.ceil(f1))
ceil 13
>>> print("floor", math.floor(f1))
floor 12
>>> print("trunc", math.trunc(f1))
trunc 12
>>> print("==", f1 == f2, f1 == 12.34, f1 == 1234 / 100, f1 == FixedPoint(12340, 1000))
== True True True True
>>> print(hash(f1), hash(f2), hash(FixedPoint(12340, 1000)))
1521856386081038020 1521856386081038020 1521856386081038020
>>> f3 = FixedPoint(200, 100)
>>> print(f3 * f3 * f3, f3 ** 3, 3 ** f3)
8.000000 8.00 9.00
>>> price = FixedPoint(1299, 100)
>>> tax_rate = FixedPoint(725, 1000)
>>> tax = price * tax_rate
>>> print(tax, tax.round_to(100))
9.41775 9.42
"""
__test__ = {name: value for name, value in locals().items() if name.startswith("test_")}
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=False)
|
349f19cc5fe6789f564944be5d3533b1915188c5
|
965e163df916b01d647953f2b1431d265683f6ca
|
/xv_leak_tools/test_device/macos_device.py
|
30f5571efee967668d4a9c6ea4c869f6655cd747
|
[
"MIT"
] |
permissive
|
expressvpn/expressvpn_leak_testing
|
6505c39228d396caff0c2df3777009c6fbdf3127
|
9e4cee899ac04f7820ac351fa55efdc0c01370ba
|
refs/heads/master
| 2023-08-18T06:33:33.931040
| 2021-10-11T03:02:50
| 2021-10-11T03:02:50
| 112,572,905
| 244
| 48
|
MIT
| 2021-01-19T16:02:18
| 2017-11-30T06:18:40
|
Python
|
UTF-8
|
Python
| false
| false
| 3,001
|
py
|
macos_device.py
|
import ipaddress
import signal
import netifaces
from xv_leak_tools.log import L
from xv_leak_tools.test_device.desktop_device import DesktopDevice
from xv_leak_tools.test_device.connector_helper import ConnectorHelper
from xv_leak_tools.process import XVProcessException
class MacOSDevice(DesktopDevice):
def __init__(self, config, connector):
super().__init__(config, connector)
        # TODO: I think this should be part of DesktopDevice. Need to clarify what all these things
# mean. I think we should move to DesktopDevice meaning anything with the tools. Maybe even
# this becomes ToolsDevice.
self._connector_helper = ConnectorHelper(self)
# TODO: This needs to execute remotely in general. Let's make a scriptlet. Let's ensure that
# nothing on the device classes themselves restricts the devices to being the localhost
@staticmethod
def local_ips():
ips = []
for iface in netifaces.interfaces():
if netifaces.AF_INET in netifaces.ifaddresses(iface):
ips.append(netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['addr'])
return [ipaddress.ip_address(ip) for ip in ips]
def open_app(self, bundle_path, root=False):
# Quote the bundle path as some have spaces in
self._connector_helper.execute_scriptlet(
'macos_open_app.py', ["'{}'".format(bundle_path)], root=root)
def close_app(self, bundle_path, root=False):
# Quit by sending quit signal to the window so the app shuts down how a user would shut it
# down. In theory it's equivalent to a pkill but slightly more realistic this way
self._connector_helper.execute_command(
['osascript', '-e', "'quit app \"{}\"'".format(bundle_path)], root=root)
def os_name(self):
return 'macos'
def os_version(self):
return self._connector_helper.execute_scriptlet('remote_mac_ver.py', [])[0]
def report_info(self):
info = super().report_info()
try:
info += self._connector_helper.check_command(
['system_profiler', 'SPSoftwareDataType'])[0]
except XVProcessException as ex:
L.warning("Couldn't get OS info from system_profiler:\n{}".format(ex))
return info
def kill_process(self, pid):
L.debug("Killing process {}".format(pid))
return self._connector_helper.execute_scriptlet(
'remote_os_kill.py', [pid, int(signal.SIGKILL)], root=True)
def pgrep(self, process_name):
'''Similar to the posix pgrep program, however it will return any process ids where
        process_name is a substring of the whole process command line.'''
L.debug("pgrep-ing for {}".format(process_name))
return self._connector_helper.execute_scriptlet('pgrep.py', [process_name], root=True)
def command_line_for_pid(self, pid):
return self._connector_helper.execute_scriptlet('command_line_for_pid.py', [pid], root=True)
|
80d24d2e994e94a8a906e82504151a70ff9d223e
|
d88458a65a173999df390117005fa813735e5fe2
|
/astroquery/jplspec/core.py
|
114cf275d915c5458e49a81001a423871c1943c3
|
[] |
permissive
|
astropy/astroquery
|
9a2793826229ba4b41ec3607ca77832036a534e9
|
51316d7417d7daf01a8b29d1df99037b9227c2bc
|
refs/heads/main
| 2023-09-01T20:52:41.625935
| 2023-08-29T23:22:40
| 2023-08-29T23:22:40
| 4,787,269
| 636
| 365
|
BSD-3-Clause
| 2023-09-14T21:56:33
| 2012-06-25T20:52:07
|
Python
|
UTF-8
|
Python
| false
| false
| 10,330
|
py
|
core.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import warnings
import astropy.units as u
from astropy.io import ascii
from ..query import BaseQuery
from ..utils import async_to_sync
# import configurable items declared in __init__.py
from . import conf
from . import lookup_table
from astroquery.exceptions import EmptyResponseError, InvalidQueryError
__all__ = ['JPLSpec', 'JPLSpecClass']
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
@async_to_sync
class JPLSpecClass(BaseQuery):
# use the Configuration Items imported from __init__.py
URL = conf.server
TIMEOUT = conf.timeout
def query_lines_async(self, min_frequency, max_frequency, *,
min_strength=-500,
max_lines=2000, molecule='All', flags=0,
parse_name_locally=False,
get_query_payload=False, cache=True):
"""
Creates an HTTP POST request based on the desired parameters and
returns a response.
Parameters
----------
        min_frequency : `~astropy.units.Quantity`
            Minimum frequency (or any spectral() equivalent)
        max_frequency : `~astropy.units.Quantity`
            Maximum frequency (or any spectral() equivalent)
min_strength : int, optional
Minimum strength in catalog units, the default is -500
max_lines : int, optional
Maximum number of lines to query, the default is 2000.
The most the query allows is 100000
molecule : list, string of regex if parse_name_locally=True, optional
Identifiers of the molecules to search for. If this parameter
is not provided the search will match any species. Default is 'All'.
flags : int, optional
Regular expression flags. Default is set to 0
parse_name_locally : bool, optional
When set to True it allows the method to parse through catdir.cat
in order to match the regex inputted in the molecule parameter
and request the corresponding tags of the matches instead. Default
is set to False
get_query_payload : bool, optional
When set to `True` the method should return the HTTP request
parameters as a dict. Default value is set to False
Returns
-------
response : `requests.Response`
The HTTP response returned from the service.
Examples
--------
>>> table = JPLSpec.query_lines(min_frequency=100*u.GHz,
... max_frequency=200*u.GHz,
... min_strength=-500, molecule=18003) # doctest: +REMOTE_DATA
>>> print(table) # doctest: +SKIP
FREQ ERR LGINT DR ELO GUP TAG QNFMT QN' QN"
----------- ------ -------- --- --------- --- ------ ----- -------- --------
115542.5692 0.6588 -13.2595 3 4606.1683 35 18003 1404 17 810 0 18 513 0
139614.293 0.15 -9.3636 3 3080.1788 87 -18003 1404 14 6 9 0 15 312 0
177317.068 0.15 -10.3413 3 3437.2774 31 -18003 1404 15 610 0 16 313 0
183310.087 0.001 -3.6463 3 136.1639 7 -18003 1404 3 1 3 0 2 2 0 0
"""
# first initialize the dictionary of HTTP request parameters
payload = dict()
if min_frequency is not None and max_frequency is not None:
# allow setting payload without having *ANY* valid frequencies set
min_frequency = min_frequency.to(u.GHz, u.spectral())
max_frequency = max_frequency.to(u.GHz, u.spectral())
if min_frequency > max_frequency:
min_frequency, max_frequency = max_frequency, min_frequency
payload['MinNu'] = min_frequency.value
payload['MaxNu'] = max_frequency.value
if max_lines is not None:
payload['MaxLines'] = max_lines
payload['UnitNu'] = 'GHz'
payload['StrLim'] = min_strength
if molecule is not None:
if parse_name_locally:
                self.lookup_ids = build_lookup()
                matches = self.lookup_ids.find(molecule, flags)
                if len(matches) == 0:
                    raise InvalidQueryError('No matching species found. Please '
                                            'refine your search or read the Docs '
                                            'for pointers on how to search.')
                payload['Mol'] = tuple(matches.values())
else:
payload['Mol'] = molecule
self.maxlines = max_lines
payload = list(payload.items())
if get_query_payload:
return payload
# BaseQuery classes come with a _request method that includes a
# built-in caching system
response = self._request(method='POST', url=self.URL, data=payload,
timeout=self.TIMEOUT, cache=cache)
return response
def _parse_result(self, response, *, verbose=False):
"""
Parse a response into an `~astropy.table.Table`
The catalog data files are composed of 80-character card images, with
one card image per spectral line. The format of each card image is:
FREQ, ERR, LGINT, DR, ELO, GUP, TAG, QNFMT, QN', QN"
(F13.4,F8.4, F8.4, I2,F10.4, I3, I7, I4, 6I2, 6I2)
FREQ: Frequency of the line in MHz.
ERR: Estimated or experimental error of FREQ in MHz.
LGINT: Base 10 logarithm of the integrated intensity in units of nm^2 MHz at
300 K.
DR: Degrees of freedom in the rotational partition function (0 for atoms,
2 for linear molecules, and 3 for nonlinear molecules).
ELO: Lower state energy in cm^{-1} relative to the ground state.
GUP: Upper state degeneracy.
TAG: Species tag or molecular identifier.
A negative value flags that the line frequency has
been measured in the laboratory. The absolute value of TAG is then the
species tag and ERR is the reported experimental error. The three most
significant digits of the species tag are coded as the mass number of
the species.
QNFMT: Identifies the format of the quantum numbers
QN': Quantum numbers for the upper state.
QN": Quantum numbers for the lower state.
"""
if 'Zero lines were found' in response.text:
raise EmptyResponseError(f"Response was empty; message was '{response.text}'.")
# data starts at 0 since regex was applied
# Warning for a result with more than 1000 lines:
# THIS form is currently limited to 1000 lines.
result = ascii.read(response.text, header_start=None, data_start=0,
comment=r'THIS|^\s{12,14}\d{4,6}.*|CADDIR CATDIR',
names=('FREQ', 'ERR', 'LGINT', 'DR', 'ELO', 'GUP',
'TAG', 'QNFMT', 'QN\'', 'QN"'),
col_starts=(0, 13, 21, 29, 31, 41, 44, 51, 55, 67),
format='fixed_width', fast_reader=False)
if len(result) > self.maxlines:
warnings.warn("This form is currently limited to {0} lines."
"Please limit your search.".format(self.maxlines))
result['FREQ'].unit = u.MHz
result['ERR'].unit = u.MHz
result['LGINT'].unit = u.nm**2 * u.MHz
result['ELO'].unit = u.cm**(-1)
return result
def get_species_table(self, *, catfile='catdir.cat'):
"""
        A directory of the catalog is found in a file called 'catdir.cat'.
Each element of this directory is an 80-character record with the
following format:
| TAG, NAME, NLINE, QLOG, VER
| (I6,X, A13, I6, 7F7.4, I2)
Parameters
----------
catfile : str, name of file, default 'catdir.cat'
The catalog file, installed locally along with the package
Returns
-------
Table: `~astropy.table.Table`
| TAG : The species tag or molecular identifier.
| NAME : An ASCII name for the species.
| NLINE : The number of lines in the catalog.
| QLOG : A seven-element vector containing the base 10 logarithm of
the partition function for temperatures of 300 K, 225 K, 150 K,
75 K, 37.5 K, 18.75 K, and 9.375 K, respectively.
| VER : The version of the calculation for this species in the catalog.
The version number is followed by * if the entry is newer than the
last edition of the catalog.
"""
result = ascii.read(data_path(catfile), header_start=None, data_start=0,
names=('TAG', 'NAME', 'NLINE', 'QLOG1', 'QLOG2',
'QLOG3', 'QLOG4', 'QLOG5', 'QLOG6',
'QLOG7', 'VER'),
col_starts=(0, 6, 20, 26, 33, 40, 47, 54, 61,
68, 75),
format='fixed_width', fast_reader=False)
# store the corresponding temperatures as metadata
result['QLOG1'].meta = {'Temperature (K)': 300}
result['QLOG2'].meta = {'Temperature (K)': 225}
result['QLOG3'].meta = {'Temperature (K)': 150}
result['QLOG4'].meta = {'Temperature (K)': 75}
result['QLOG5'].meta = {'Temperature (K)': 37.5}
result['QLOG6'].meta = {'Temperature (K)': 18.75}
result['QLOG7'].meta = {'Temperature (K)': 9.375}
        result.meta = {'Temperature (K)': [300, 225, 150, 75, 37.5, 18.75,
                                           9.375]}
return result
JPLSpec = JPLSpecClass()
def build_lookup():
result = JPLSpec.get_species_table()
keys = list(result[1][:]) # convert NAME column to list
values = list(result[0][:]) # convert TAG column to list
dictionary = dict(zip(keys, values)) # make k,v dictionary
lookuptable = lookup_table.Lookuptable(dictionary) # apply the class above
return lookuptable
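# Usage sketch (assumes network access to the JPL service; the regex below is
# purely illustrative): match species names locally instead of passing a tag.
#
#     import astropy.units as u
#     table = JPLSpec.query_lines(min_frequency=100 * u.GHz,
#                                 max_frequency=200 * u.GHz,
#                                 molecule='^H2O$',
#                                 parse_name_locally=True)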
|
b14ba8267d643428a8ea3cc843bce78d174d7bce
|
45ba55b4fbdaf1657fde92beaeba4f173265afcd
|
/tests/http/clients/async_flask.py
|
c9de50481b734a4d7cbd61c5103fdf8ecee256c1
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
strawberry-graphql/strawberry
|
af96afd4edd1788c59e150597a12501fbc7bf444
|
6d86d1c08c1244e00535840d9d87925431bc6a1c
|
refs/heads/main
| 2023-08-30T03:34:12.929874
| 2023-08-24T12:01:09
| 2023-08-24T12:01:09
| 162,690,887
| 3,408
| 529
|
MIT
| 2023-09-14T21:49:44
| 2018-12-21T08:56:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,062
|
py
|
async_flask.py
|
from __future__ import annotations
from typing import Any, Dict
from flask import Flask
from flask import Request as FlaskRequest
from flask import Response as FlaskResponse
from strawberry.flask.views import AsyncGraphQLView as BaseAsyncGraphQLView
from strawberry.http import GraphQLHTTPResponse
from strawberry.types import ExecutionResult
from tests.views.schema import Query, schema
from ..context import get_context
from .base import ResultOverrideFunction
from .flask import FlaskHttpClient
class GraphQLView(BaseAsyncGraphQLView):
methods = ["GET", "POST", "PUT", "PATCH", "DELETE", "HEAD"]
result_override: ResultOverrideFunction = None
def __init__(self, *args: str, **kwargs: Any):
self.result_override = kwargs.pop("result_override")
super().__init__(*args, **kwargs)
async def get_root_value(self, request: FlaskRequest) -> Query:
await super().get_root_value(request) # for coverage
return Query()
async def get_context(
self, request: FlaskRequest, response: FlaskResponse
) -> Dict[str, object]:
context = await super().get_context(request, response)
return get_context(context)
async def process_result(
self, request: FlaskRequest, result: ExecutionResult
) -> GraphQLHTTPResponse:
if self.result_override:
return self.result_override(result)
return await super().process_result(request, result)
class AsyncFlaskHttpClient(FlaskHttpClient):
def __init__(
self,
graphiql: bool = True,
allow_queries_via_get: bool = True,
result_override: ResultOverrideFunction = None,
):
self.app = Flask(__name__)
self.app.debug = True
view = GraphQLView.as_view(
"graphql_view",
schema=schema,
graphiql=graphiql,
allow_queries_via_get=allow_queries_via_get,
result_override=result_override,
)
self.app.add_url_rule(
"/graphql",
view_func=view,
)
|
307cc1b255fef82fad0b6e5f92098389d54eab73
|
8d17266c7c447b631b7902dd93bafc111fb99441
|
/light_cnn_v4.py
|
b3f6bb1f66a8283978f05d5fe606557a41bb8180
|
[
"MIT"
] |
permissive
|
AlfredXiangWu/LightCNN
|
a8242134c75610d38f162eebe9a1f595459c3858
|
7b38a6f2d20865b8c008c6d24cf977309af88114
|
refs/heads/master
| 2022-09-10T20:33:10.158385
| 2022-02-09T07:09:22
| 2022-02-09T07:09:22
| 96,199,030
| 760
| 198
|
MIT
| 2019-05-14T00:37:54
| 2017-07-04T09:12:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,961
|
py
|
light_cnn_v4.py
|
# -*- coding: utf-8 -*-
# @Author: Alfred Xiang Wu
# @Date: 2022-02-09 14:45:31
# @Brief:
# @Last Modified by: Alfred Xiang Wu
# @Last Modified time: 2022-02-09 14:48:34
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class mfm(nn.Module):
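    # Max-Feature-Map (MFM) activation: produce 2*out_channels outputs, split
    # them into two halves along the channel dimension, and keep the
    # element-wise maximum.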
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, type=1):
super(mfm, self).__init__()
self.out_channels = out_channels
if type == 1:
self.filter = nn.Conv2d(in_channels, 2*out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
else:
self.filter = nn.Linear(in_channels, 2*out_channels)
def forward(self, x):
x = self.filter(x)
out = torch.split(x, self.out_channels, 1)
return torch.max(out[0], out[1])
class resblock_v1(nn.Module):
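    # Residual block built from two MFM convolutions with an identity skip.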
def __init__(self, in_channels, out_channels):
super(resblock_v1, self).__init__()
self.conv1 = mfm(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.conv2 = mfm(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
def forward(self, x):
res = x
out = self.conv1(x)
out = self.conv2(out)
out = out + res
return out
class network(nn.Module):
def __init__(self, block, layers):
super(network, self).__init__()
self.conv1 = mfm(3, 48, 3, 1, 1)
self.block1 = self._make_layer(block, layers[0], 48, 48)
self.conv2 = mfm(48, 96, 3, 1, 1)
self.block2 = self._make_layer(block, layers[1], 96, 96)
self.conv3 = mfm(96, 192, 3, 1, 1)
self.block3 = self._make_layer(block, layers[2], 192, 192)
self.conv4 = mfm(192, 128, 3, 1, 1)
self.block4 = self._make_layer(block, layers[3], 128, 128)
self.conv5 = mfm(128, 128, 3, 1, 1)
self.fc = nn.Linear(8*8*128, 256)
nn.init.normal_(self.fc.weight, std=0.001)
def _make_layer(self, block, num_blocks, in_channels, out_channels):
layers = []
for i in range(0, num_blocks):
layers.append(block(in_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, x, label=None):
x = self.conv1(x)
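        # Downsample by summing max- and average-pooling at each stage.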
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block1(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block2(x)
x = self.conv3(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = self.block3(x)
x = self.conv4(x)
x = self.block4(x)
x = self.conv5(x)
x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)
x = torch.flatten(x, 1)
fc = self.fc(x)
return fc
def LightCNN_V4(cfg):
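    # Four stages with 1, 2, 3 and 4 residual blocks respectively.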
model = network(resblock_v1, [1, 2, 3, 4])
return model
|
20641958ee93f836f3a805389cc0cf42b1b8a976
|
fbe2c3b1feb69a5ce019c805594a49dc11c7e463
|
/astrality/tests/module/test_module.py
|
63584311dc0dba2f463d6d9db75789f9d05a3bd7
|
[
"MIT"
] |
permissive
|
JakobGM/astrality
|
50630a26ef6428a0c1376269d71ddaa52912f374
|
72935b616f9a6a2e9254e9cd9319b525c596e8f0
|
refs/heads/master
| 2023-01-07T20:26:05.925893
| 2019-11-19T10:15:36
| 2019-11-19T10:15:36
| 117,895,437
| 114
| 7
|
MIT
| 2022-12-26T20:49:19
| 2018-01-17T21:34:33
|
Python
|
UTF-8
|
Python
| false
| false
| 27,111
|
py
|
test_module.py
|
"""Tests for Module class."""
import logging
import os
from datetime import datetime, timedelta
from pathlib import Path
from freezegun import freeze_time
import pytest
from astrality import event_listener
from astrality.module import Module, ModuleManager
from astrality.context import Context
from astrality.tests.utils import RegexCompare, Retry
@pytest.fixture
def valid_module_section():
return {
'test_module': {
'enabled': True,
'event_listener': {'type': 'weekday'},
'on_startup': {
'run': [{'shell': 'echo {event}'}],
'compile': [
{
'content': '../templates/test_template.conf',
'target': '/tmp/compiled_result',
},
],
},
'on_event': {
'run': [{'shell': 'echo {../templates/test_template.conf}'}],
},
'on_exit': {
'run': [{'shell': 'echo exit'}],
},
},
}
@pytest.fixture
def simple_application_config():
# Increase run timeout, so that we can inspect the shell results
return {'config/modules': {'run_timeout': 2}}
@pytest.fixture
def module(
valid_module_section,
test_config_directory,
):
return Module(
name='test_module',
module_config=valid_module_section['test_module'],
module_directory=test_config_directory,
context_store=Context({
'fonts': {1: 'FuraMono Nerd Font'},
}),
)
@pytest.fixture
def single_module_manager(simple_application_config, valid_module_section):
return ModuleManager(
config=simple_application_config,
modules=valid_module_section,
context=Context({
'fonts': {1: 'FuraMono Nerd Font'},
}),
)
class TestModuleClass:
def test_valid_class_section_method_with_valid_section(
self,
valid_module_section,
):
assert Module.valid_module(
name='test',
config=valid_module_section,
requires_timeout=2,
requires_working_directory=Path('/'),
) is True
def test_valid_class_section_method_with_disabled_module_section(self):
disabled_module_section = {
'enabled': False,
'on_startup': {'run': ['test']},
'on_event': {'run': ['']},
'on_exit': {'run': ['whatever']},
}
assert Module.valid_module(
name='test',
config=disabled_module_section,
requires_timeout=2,
requires_working_directory=Path('/'),
) is False
def test_module_event_listener_class(self, module):
assert isinstance(module.event_listener, event_listener.Weekday)
def test_using_default_static_event_listener_when_no_event_listener_given(
self,
test_config_directory,
):
static_module = Module(
name='static',
module_config={},
module_directory=test_config_directory,
)
assert isinstance(static_module.event_listener, event_listener.Static)
@freeze_time('2018-01-27')
def test_running_module_manager_commands_with_special_interpolations(
self,
single_module_manager,
caplog,
):
single_module_manager.startup()
assert (
'astrality.actions',
logging.INFO,
'Running command "echo saturday".',
) in caplog.record_tuples
assert (
'astrality.utils',
logging.INFO,
'saturday',
) in caplog.record_tuples
caplog.clear()
single_module_manager.execute(
action='run',
block='on_event',
module=single_module_manager.modules['test_module'],
)
assert (
'astrality.actions',
logging.INFO,
RegexCompare(r'Running command "echo .+compiled_result"\.'),
) in caplog.record_tuples
@freeze_time('2018-01-27')
def test_running_module_startup_command(
self,
single_module_manager,
module,
valid_module_section,
caplog,
):
single_module_manager.startup()
assert caplog.record_tuples == [
(
'astrality.compiler',
logging.INFO,
RegexCompare(
r'\[Compiling\].+test_template\.conf.+compiled_result"',
),
),
(
'astrality.actions',
logging.INFO,
'Running command "echo saturday".',
),
(
'astrality.utils',
logging.INFO,
'saturday',
),
]
def test_running_module_on_event_command(
self,
single_module_manager,
module,
caplog,
):
single_module_manager.startup()
caplog.clear()
single_module_manager.execute(
action='run',
block='on_event',
module=single_module_manager.modules['test_module'],
)
# Convoluted way of getting the compilation target. Sorry!
compiled_template = list(
single_module_manager
.modules['test_module']
.performed_compilations()
.values(),
)[0].pop()
for log_event in [
(
'astrality.actions',
logging.INFO,
f'Running command "echo {compiled_template}".',
),
(
'astrality.utils',
logging.INFO,
f'{compiled_template}',
),
]:
assert log_event in caplog.record_tuples
def test_running_module_exit_command(self, single_module_manager, caplog):
single_module_manager.exit()
for log_event in [
(
'astrality.actions',
logging.INFO,
'Running command "echo exit".',
),
(
'astrality.utils',
logging.INFO,
'exit',
),
]:
assert log_event in caplog.record_tuples
def test_missing_template_file(self, caplog):
modules = {
'test_module': {
'on_startup': {
'compile': [
{'content': '/not/existing'},
],
},
},
}
module_manager = ModuleManager(modules=modules)
caplog.clear()
module_manager.finish_tasks()
assert 'Could not compile template "/not/existing" '\
'to target "' in caplog.record_tuples[0][2]
def test_compilation_of_template(
self,
valid_module_section,
simple_application_config,
module,
conf,
caplog,
):
valid_module_section[
'test_module'
][
'event_listener'
][
'type'
] = 'solar'
compiled_template_content = 'some text\n' + os.environ['USER'] \
+ '\nFuraMono Nerd Font'
module_manager = ModuleManager(
config=simple_application_config,
modules=valid_module_section,
context=Context({
'fonts': {1: 'FuraMono Nerd Font'},
}),
)
directory = module_manager.config_directory
caplog.clear()
module_manager.execute(action='compile', block='on_startup')
template_file = str(
(directory / '../templates/test_template.conf').resolve(),
)
compiled_template = str(
list(
module_manager.modules['test_module']
.performed_compilations()[Path(template_file)],
)[0],
)
with open('/tmp/compiled_result', 'r') as file:
compiled_result = file.read()
assert compiled_template_content == compiled_result
assert (
'astrality.compiler',
logging.INFO,
f'[Compiling] Template: "{template_file}" '
f'-> Target: "{compiled_template}"',
) in caplog.record_tuples
def test_running_finished_tasks_command(
simple_application_config,
valid_module_section,
freezer,
caplog,
):
"""Test that every task is finished at first finish_tasks() invocation."""
thursday = datetime(
year=2018,
month=2,
day=15,
hour=12,
)
freezer.move_to(thursday)
module_manager = ModuleManager(
simple_application_config,
modules=valid_module_section,
context=Context({
'fonts': {1: 'FuraMono Nerd Font'},
}),
)
caplog.clear()
module_manager.finish_tasks()
# Only startup commands should be finished at first
assert caplog.record_tuples == [
(
'astrality.compiler',
logging.INFO,
RegexCompare(
r'\[Compiling\] Template: ".+/templates/test_template.conf" '
r'-> Target: ".*compiled_result"',
),
),
(
'astrality.actions',
logging.INFO,
'Running command "echo thursday".',
),
(
'astrality.utils',
logging.INFO,
'thursday',
),
]
# Now move one day ahead, and observe if event commands are run
caplog.clear()
friday = datetime(
year=2018,
month=2,
day=16,
hour=12,
)
freezer.move_to(friday)
module_manager.finish_tasks()
assert caplog.record_tuples == [
(
'astrality.module',
logging.INFO,
'[module/test_module] New event "friday". '
'Executing actions.',
),
(
'astrality.actions',
logging.INFO,
RegexCompare(r'Running command "echo .+compiled_result"\.'),
),
(
'astrality.utils',
logging.INFO,
RegexCompare(r'.+compiled_result'),
),
]
def test_has_unfinished_tasks(
simple_application_config,
valid_module_section,
freezer,
):
# Move time to midday
midday = datetime.now().replace(hour=12, minute=0)
freezer.move_to(midday)
    # At instantiation, the module should have unfinished tasks
weekday_module = ModuleManager(
config=simple_application_config,
modules=valid_module_section,
context=Context({
'fonts': {1: 'FuraMono Nerd Font'},
}),
)
assert weekday_module.has_unfinished_tasks() is True
# After finishing tasks, there should be no unfinished tasks (duh!)
weekday_module.finish_tasks()
assert weekday_module.has_unfinished_tasks() is False
# If we move the time forwards, but not to a new event, there should still
# not be any unfinished tasks
before_midnight = datetime.now().replace(hour=23, minute=59)
freezer.move_to(before_midnight)
assert weekday_module.has_unfinished_tasks() is False
# But right after a event (new weekday), there should be unfinished
# tasks
two_minutes = timedelta(minutes=2)
freezer.move_to(before_midnight + two_minutes)
assert weekday_module.has_unfinished_tasks() is True
# Again, after finishing tasks, there should be no unfinished tasks left
weekday_module.finish_tasks()
assert weekday_module.has_unfinished_tasks() is False
@pytest.fixture
def config_with_modules():
context = Context()
modules = {
'solar_module': {
'enabled': True,
'event_listener': {
'type': 'solar',
'longitude': 0,
'latitude': 0,
'elevation': 0,
},
'templates': {
'template_name': {
'content': 'astrality/tests/templates/test_template.conf',
'target': '/tmp/compiled_result',
},
},
'on_startup': {
'run': [{'shell': 'echo solar compiling {template_name}'}],
},
'on_event': {
'run': [{'shell': 'echo solar {event}'}],
},
'on_exit': {
'run': [{'shell': 'echo solar exit'}],
},
},
'weekday_module': {
'enabled': True,
'event_listener': {'type': 'weekday'},
'on_startup': {
'run': [{'shell': 'echo weekday startup'}],
},
'on_event': {
'run': [{'shell': 'echo weekday {event}'}],
},
'on_exit': {
'run': [{'shell': 'echo weekday exit'}],
},
},
'disabled_module': {
'enabled': False,
'event_listener': 'static',
},
}
return modules, context
@pytest.fixture
def module_manager(config_with_modules):
modules, context = config_with_modules
return ModuleManager(modules=modules, context=context)
def test_import_sections_on_event(config_with_modules, freezer):
modules, context = config_with_modules
modules[
'weekday_module'
]['on_event']['import_context'] = [{
'to_section': 'week',
'from_path': 'astrality/tests/templates/weekday.yml',
'from_section': '{event}',
}]
modules.pop('solar_module')
module_manager = ModuleManager(
modules=modules,
context=Context({
'fonts': {1: 'FuraCode Nerd Font'},
}),
directory=Path(__file__).parents[3],
)
assert module_manager.application_context['fonts'] \
== Context({1: 'FuraCode Nerd Font'})
sunday = datetime(year=2018, month=2, day=4)
freezer.move_to(sunday)
module_manager.finish_tasks()
    # Startup does not count as an event, so no context has been imported
assert module_manager.application_context == Context({
'fonts': Context({1: 'FuraCode Nerd Font'}),
})
monday = datetime(year=2018, month=2, day=5)
freezer.move_to(monday)
module_manager.finish_tasks()
# The event has now changed, so context should be imported
assert module_manager.application_context == {
'fonts': Context({1: 'FuraCode Nerd Font'}),
'week': Context({'day': 'monday'}),
}
def test_import_sections_on_startup(config_with_modules, freezer):
modules, context = config_with_modules
# Insert day the module was started into 'start day'
modules[
'weekday_module'
]['on_startup']['import_context'] = [{
'to_section': 'start_day',
'from_path': 'astrality/tests/templates/weekday.yml',
'from_section': '{event}',
}]
# Insert the current day into 'day_now'
modules[
'weekday_module'
]['on_event']['import_context'] = [{
'to_section': 'day_now',
'from_path': 'astrality/tests/templates/weekday.yml',
'from_section': '{event}',
}]
modules.pop('solar_module')
module_manager = ModuleManager(
modules=modules,
context=Context({
'fonts': {1: 'FuraCode Nerd Font'},
}),
directory=Path(__file__).parents[3],
)
# Before finishing tasks, no context sections are imported
assert module_manager.application_context['fonts'] \
== {1: 'FuraCode Nerd Font'}
    # Start module on a sunday
sunday = datetime(year=2018, month=2, day=4)
freezer.move_to(sunday)
module_manager.finish_tasks()
assert module_manager.application_context == {
'fonts': Context({1: 'FuraCode Nerd Font'}),
'start_day': Context({'day': 'sunday'}),
}
    # 'day_now' should now be added, but 'start_day' should remain unchanged
monday = datetime(year=2018, month=2, day=5)
freezer.move_to(monday)
module_manager.finish_tasks()
assert module_manager.application_context == {
'fonts': Context({1: 'FuraCode Nerd Font'}),
'start_day': Context({'day': 'sunday'}),
'day_now': Context({'day': 'monday'}),
}
class TestModuleManager:
@pytest.mark.slow
def test_invocation_of_module_manager_with_config(self, conf):
ModuleManager(conf)
@pytest.mark.slow
def test_using_finish_tasks_on_example_configuration(
self,
conf,
modules,
context,
):
module_manager = ModuleManager(
config=conf,
modules=modules,
context=context,
)
module_manager.finish_tasks()
def test_number_of_modules_instanziated_by_module_manager(
self,
module_manager,
):
assert len(module_manager) == 2
def test_time_until_next_event_of_several_modules(
config_with_modules,
module_manager,
freezer,
):
modules, context = config_with_modules
solar_event_listener = event_listener.Solar(modules)
noon = solar_event_listener.location.sun()['noon']
one_minute = timedelta(minutes=1)
freezer.move_to(noon - one_minute)
assert module_manager.time_until_next_event() == one_minute
two_minutes_before_midnight = datetime.now().replace(hour=23, minute=58)
freezer.move_to(two_minutes_before_midnight)
assert module_manager.time_until_next_event().total_seconds() \
== timedelta(minutes=2).total_seconds()
def test_detection_of_new_event_involving_several_modules(
config_with_modules,
freezer,
):
modules, context = config_with_modules
# Move time to right before noon
solar_event_listener = event_listener.Solar(modules)
noon = solar_event_listener.location.sun()['noon']
one_minute = timedelta(minutes=1)
freezer.move_to(noon - one_minute)
module_manager = ModuleManager(
modules=modules,
context=context,
)
    # All modules should now be considered to have new events
assert module_manager.has_unfinished_tasks() is True
    # Run the on_event methods for all modules whose event changed
module_manager.finish_tasks()
    # After running these methods, all modules should report no pending tasks
assert module_manager.has_unfinished_tasks() is False
# Move time to right after noon
freezer.move_to(noon + one_minute)
    # The solar event listener should now be considered to have a new event
assert module_manager.has_unfinished_tasks() is True
# Again, check if on_event() method makes them unchanged
module_manager.finish_tasks()
assert module_manager.has_unfinished_tasks() is False
# Move time two days forwards
two_days = timedelta(days=2)
freezer.move_to(noon + two_days)
# Now both event listeners should be considered to have new events
assert module_manager.has_unfinished_tasks() is True
def test_that_shell_filter_is_run_from_config_directory(test_config_directory):
shell_filter_template = Path(__file__).parents[1] \
/ 'templates' / 'shell_filter_working_directory.template'
shell_filter_template_target = Path(
'/tmp/astrality/shell_filter_working_directory.template',
)
modules = {
'A': {
'on_startup': {
'compile': [
{
'content': str(shell_filter_template),
'target': str(shell_filter_template_target),
},
],
},
},
}
module_manager = ModuleManager(modules=modules)
module_manager.execute(action='compile', block='on_startup')
with open(shell_filter_template_target) as compiled:
assert compiled.read() == str(test_config_directory)
os.remove(shell_filter_template_target)
@pytest.fixture
def two_test_file_paths():
test_file1 = Path('/tmp/astrality/test_file_1')
test_file2 = Path('/tmp/astrality/test_file_2')
yield test_file1, test_file2
# Cleanup files after test has been run (if they exist)
if test_file1.is_file():
os.remove(test_file1)
if test_file2.is_file():
os.remove(test_file2)
def test_that_only_startup_event_block_is_run_on_startup(
two_test_file_paths,
test_config_directory,
freezer,
):
thursday = datetime(
year=2018,
month=2,
day=15,
hour=12,
)
freezer.move_to(thursday)
test_file1, test_file2 = two_test_file_paths
modules = {
'A': {
'event_listener': {'type': 'weekday'},
'on_startup': {
'run': [{'shell': 'touch ' + str(test_file1)}],
},
'on_event': {
'run': [{'shell': 'touch ' + str(test_file2)}],
},
},
}
module_manager = ModuleManager(
modules=modules,
)
# Before call to finish_tasks, no actions should have been performed
assert not test_file1.is_file() and not test_file2.is_file()
# Now call finish_tasks for the first time, only startup event block should
# be run
module_manager.finish_tasks()
retry = Retry()
assert retry(lambda: test_file1.is_file())
assert retry(lambda: not test_file2.is_file())
def test_trigger_event_module_action(test_config_directory):
modules = {
'A': {
'event_listener': {'type': 'weekday'},
'on_startup': {
'trigger': [
{'block': 'on_event'},
{'block': 'on_exit'},
{'block': 'on_modified', 'path': 'templateA'},
],
'run': [{'shell': 'echo startup'}],
},
'on_event': {
'run': [{'shell': 'echo on_event'}],
'import_context': [{
'from_path': 'context/mercedes.yml',
'from_section': 'car',
}],
},
'on_exit': {
'run': [{'shell': 'echo exit'}],
},
'on_modified': {
'templateA': {
'run': [{'shell': 'echo modified.templateA'}],
'compile': [
{'content': 'templateA'},
],
},
},
},
}
module_manager = ModuleManager(
config={'modules': {'enabled_modules': [{'name': 'A'}]}},
modules=modules,
)
# Check that all run commands have been imported into startup block
results = tuple(module_manager.modules['A'].execute(
action='run',
block='on_startup',
))
assert results == (
('echo startup', 'startup'),
('echo on_event', 'on_event'),
('echo exit', 'exit'),
('echo modified.templateA', 'modified.templateA'),
)
# Check that all context section imports are available in startup block
# module_manager.modules['A'].import_context('on_startup')
module_manager.modules['A'].execute(
action='import_context',
block='on_startup',
)
# TODO: Find out why Hanoi is included here
assert module_manager.application_context['car'] == {
'manufacturer': 'Mercedes',
}
# Double check that the other sections are not affected
results = module_manager.modules['A'].execute(
action='run',
block='on_event',
)
assert results == (('echo on_event', 'on_event'),)
results = module_manager.modules['A'].execute(
action='run',
block='on_exit',
)
assert results == (('echo exit', 'exit'),)
module_manager.modules['A'].execute(
action='import_context',
block='on_event',
)
# TODO: Find out why Hanoi context is included here
assert module_manager.application_context['car'] == {
'manufacturer': 'Mercedes',
}
def test_not_using_list_when_specifiying_trigger_action(conf_path):
modules = {
'A': {
'on_startup': {
'trigger': {'block': 'on_event'},
},
'on_event': {
'run': [{'shell': 'echo on_event'}],
},
},
}
module_manager = ModuleManager(
modules=modules,
directory=conf_path,
)
# Check that all run commands have been imported into startup block
result = module_manager.modules['A'].execute(
action='run',
block='on_startup',
)
assert result == (
('echo on_event', 'on_event'),
)
def test_defining_on_startup_block_at_root_indentation(caplog):
"""Root indentation actions should be promoted to on_startup."""
    # Test that a config with no root-level actions is unaffected
module_config = {
'on_startup': {
'run': [{'shell': 'echo on_startup'}],
},
}
assert Module.prepare_on_startup_block(
module_name='test',
module_config=module_config,
) == {
'on_startup': {
'run': [{'shell': 'echo on_startup'}],
},
}
# Test that actions are moved into empty block
module_config = {
'run': [{'shell': 'touch stuff'}],
'compile': {'source': 'some/path'},
}
assert Module.prepare_on_startup_block(
module_name='test',
module_config=module_config,
) == {
'on_startup': {
'run': [{'shell': 'touch stuff'}],
'compile': {'source': 'some/path'},
},
}
# Test that overwriting values are logged
caplog.clear()
module_config = {
'run': [{'shell': 'echo overwritten'}],
'on_startup': {
'run': [{'shell': 'echo original'}],
},
}
assert Module.prepare_on_startup_block(
module_name='test',
module_config=module_config,
) == {
'on_startup': {
'run': [{'shell': 'echo overwritten'}],
},
}
assert caplog.record_tuples[0][1] == logging.ERROR
# Test that we only have partial overwrites
module_config = {
'stow': [{'content': 'new_stow'}],
'copy': {'content': 'new_copy'},
'on_startup': {
'run': [{'shell': 'run'}],
'stow': {'content': 'old_stow'},
'copy': {'content': 'old_copy'},
},
'on_exit': {},
}
assert Module.prepare_on_startup_block(
module_name='test',
module_config=module_config,
) == {
'on_startup': {
'run': [{'shell': 'run'}],
'stow': [{'content': 'new_stow'}],
'copy': {'content': 'new_copy'},
},
'on_exit': {},
}
# Test that prepare_on_startup_block is actually used
module_config = {
'run': [{'shell': 'echo overwritten'}],
'on_startup': {
'run': [{'shell': 'echo original'}],
},
}
module = Module(
name='test_module',
module_config=module_config,
module_directory=Path('/'),
)
assert module.execute(action='run', block='on_startup') \
== (('echo overwritten', 'overwritten'),)
|
dd86c6fdbd8a6ce22f79b0562780ad3959dce85e
|
091e97bcfe5acc0635bd601aa8497e377b74d41a
|
/ansible/roles/lib_gcloud/build/ansible/gcloud_dm_resource_reconciler.py
|
9fa57c4b274ec8dc641d8978b41d0875713821cb
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
openshift/openshift-tools
|
d59b63778f25cb8fb3c7a0253afe22a173e72f9d
|
e342f6659a4ef1a188ff403e2fc6b06ac6d119c7
|
refs/heads/prod
| 2023-08-30T01:52:04.108978
| 2022-03-23T21:07:28
| 2022-03-23T21:07:28
| 36,827,699
| 170
| 254
|
Apache-2.0
| 2022-06-16T12:11:51
| 2015-06-03T20:09:22
|
Python
|
UTF-8
|
Python
| false
| false
| 3,130
|
py
|
gcloud_dm_resource_reconciler.py
|
# pylint: skip-file
# vim: expandtab:tabstop=4:shiftwidth=4
#pylint: disable=too-many-branches
def main():
''' ansible module for gcloud deployment-manager deployments '''
module = AnsibleModule(
argument_spec=dict(
# credentials
resources=dict(required=True, type='dict'),
instance_counts=dict(default=None, type='dict'),
existing_instance_names=dict(default=None, type='dict'),
current_dm_config=dict(default=None, type='dict'),
state=dict(default='present', type='str', choices=['present']),
),
required_together=[
['existing_instance_names', 'instance_counts', 'resources'],
],
supports_check_mode=True,
)
gcloud = GcloudResourceReconciler(module.params['resources']['resources'],
module.params['instance_counts'],
module.params['existing_instance_names'],
module.params['current_dm_config'])
state = module.params['state']
orig_resources = copy.deepcopy(module.params['resources'])
########
# generate resources
########
if state == 'present':
# Deployment manager has run but nothing is in the inventory
if not module.params['existing_instance_names'] and module.params['current_dm_config']:
raise GcloudResourceReconcilerError(\
                'Found current deployment manager config but no existing resource names. ' + \
'Please update inventory and rerun.')
# No existing instance names passed so we cannot reconcile.
if not module.params['existing_instance_names']:
module.exit_json(changed=False, results=module.params['resources'], run_dm=True)
inst_resources = gcloud.gather_instance_resources()
gcloud.reconcile_count(inst_resources)
results = gcloud.get_resources()
if module.params['current_dm_config']:
run_dm = gcloud.compare_dm_config_resources(module.params['current_dm_config']['resources'])
if results == orig_resources:
module.exit_json(changed=False, results=orig_resources, run_dm=run_dm)
module.exit_json(changed=True, results=results, run_dm=run_dm)
module.exit_json(changed=True, results=results, run_dm=True)
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
#if __name__ == '__main__':
# gcloud = GcloudResourceReconciler(resources,
# {'master': 1, 'infra': 2, 'compute': 4},
# existing_instance_info)
# resources = gcloud.gather_instance_resources()
# gcloud.reconcile_count(resources)
# print yaml.dump(gcloud.resources, default_flow_style=False)
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# Import module snippets. These are required.
from ansible.module_utils.basic import *
main()
|
53eecf10bdb30d3c87ef1bf017e40a1dcfc62960
|
437613ebb167638565f7f32c0d3732f3d9a69f6b
|
/negbio/negbio_neg_chexpert.py
|
0d4d6f224e00afc3821a3a6bc140f9b53f5ebddb
|
[
"LicenseRef-scancode-us-govt-public-domain"
] |
permissive
|
ncbi-nlp/NegBio
|
304253d5520e3020dd8249675a75d927e7ad6d45
|
073199e2792824740e89844a59c13d3d40ce4d23
|
refs/heads/master
| 2023-07-19T04:27:05.082202
| 2022-04-11T01:41:43
| 2022-04-11T01:41:43
| 114,385,608
| 142
| 45
|
NOASSERTION
| 2023-07-16T11:41:08
| 2017-12-15T15:38:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,142
|
py
|
negbio_neg_chexpert.py
|
"""
Detect negation and uncertainty
Usage:
negbio_pipeline neg_chexpert [options] --output=<directory> <file> ...
Options:
--neg-patterns=FILE Negation rules [default: negbio/chexpert/patterns/negation.txt]
--pre-negation-uncertainty-patterns=FILE Pre negation uncertainty rules
[default: negbio/chexpert/patterns/pre_negation_uncertainty.txt]
--post-negation-uncertainty-patterns=FILE Post negation uncertainty rules
[default: negbio/chexpert/patterns/post_negation_uncertainty.txt]
--suffix=<suffix> Append an additional SUFFIX to file names. [default: .neg.xml]
--verbose Print more information about progress.
--output=<directory> Specify the output directory.
"""
import os
from negbio.chexpert.stages.classify import ModifiedDetector
from negbio.cli_utils import parse_args, get_absolute_path
from negbio.pipeline.negdetect import detect
from negbio.pipeline.scan import scan_document
if __name__ == '__main__':
argv = parse_args(__doc__)
argv = get_absolute_path(argv,
'--pre-negation-uncertainty-patterns',
'negbio/chexpert/patterns/pre_negation_uncertainty.txt')
argv = get_absolute_path(argv,
'--post-negation-uncertainty-patterns',
'negbio/chexpert/patterns/post_negation_uncertainty.txt')
argv = get_absolute_path(argv,
'--neg-patterns',
'negbio/chexpert/patterns/negation.txt')
neg_detector = ModifiedDetector(argv['--pre-negation-uncertainty-patterns'],
argv['--neg-patterns'],
argv['--post-negation-uncertainty-patterns'])
scan_document(source=argv['<file>'], directory=argv['--output'], suffix=argv['--suffix'],
fn=detect, non_sequences=[neg_detector])
|
20516d882eefddc8642bd89850ddb7e34957da1e
|
a41e1498e3c080f47abd8e8e57157548df3ebbf1
|
/pandas/io/json/__init__.py
|
8f4e7a62834b57c151189cdd2994a55d1ad9f7de
|
[
"BSD-3-Clause"
] |
permissive
|
pandas-dev/pandas
|
e7e639454a298bebc272622e66faa9829ea393bb
|
c7325d7e7e77ecb4a4e57b48bc25265277c75712
|
refs/heads/main
| 2023-09-01T12:42:07.927176
| 2023-09-01T11:14:10
| 2023-09-01T11:14:10
| 858,127
| 36,166
| 18,728
|
BSD-3-Clause
| 2023-09-14T21:18:41
| 2010-08-24T01:37:33
|
Python
|
UTF-8
|
Python
| false
| false
| 270
|
py
|
__init__.py
|
from pandas.io.json._json import (
read_json,
to_json,
ujson_dumps,
ujson_loads,
)
from pandas.io.json._table_schema import build_table_schema
__all__ = [
"ujson_dumps",
"ujson_loads",
"read_json",
"to_json",
"build_table_schema",
]
|
920dd9a35d5f726bad5babed616d4fe085db94c5
|
73dbe07000651827e2937d728d0c5acf903932e2
|
/examples/evaluation.py
|
3af94b84c6e147fe7ac9773a6819219f4a140d3d
|
[
"Apache-2.0"
] |
permissive
|
deepset-ai/FARM
|
96a5c7a2b93dcf60f4bc208a6706be0cb07bcd43
|
5919538f721c7974ea951b322d30a3c0e84a1bc2
|
refs/heads/master
| 2023-08-21T23:50:50.414602
| 2022-08-31T09:45:24
| 2022-08-31T09:45:24
| 197,409,619
| 1,765
| 283
|
Apache-2.0
| 2023-08-12T04:20:09
| 2019-07-17T14:51:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,656
|
py
|
evaluation.py
|
from farm.utils import initialize_device_settings
from farm.modeling.tokenization import Tokenizer
from farm.data_handler.processor import TextClassificationProcessor, SquadProcessor
from farm.data_handler.data_silo import DataSilo
from farm.eval import Evaluator
from farm.modeling.adaptive_model import AdaptiveModel
from pathlib import Path
def evaluate_classification():
##########################
########## Settings
##########################
device, n_gpu = initialize_device_settings(use_cuda=True)
lang_model = "deepset/bert-base-german-cased-sentiment-Germeval17"
do_lower_case = False
batch_size = 100
data_dir = Path("../data/germeval17")
evaluation_filename = "test_TIMESTAMP1.tsv"
label_list = ["negative", "neutral", "positive"]
metric = "f1_macro"
    # 1. Create a tokenizer
tokenizer = Tokenizer.load(
pretrained_model_name_or_path=lang_model,
do_lower_case=do_lower_case)
# 2. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
    # Here we load GermEval 2017 data automatically if it is not available.
processor = TextClassificationProcessor(
tokenizer=tokenizer,
max_seq_len=384,
label_list=label_list,
metric=metric,
train_filename=None,
dev_filename=None,
dev_split=0,
test_filename=evaluation_filename,
data_dir=data_dir,
)
# 3. Create a DataSilo that loads dataset, provides DataLoaders for them and calculates a few descriptive statistics of our datasets
data_silo = DataSilo(
processor=processor,
batch_size=batch_size)
# 4. Create an Evaluator
evaluator = Evaluator(
data_loader=data_silo.get_data_loader("test"),
tasks=data_silo.processor.tasks,
device=device
)
# 5. Load model
model = AdaptiveModel.convert_from_transformers(lang_model, device=device, task_type="text_classification")
# use "load" if you want to use a local model that was trained with FARM
# model = AdaptiveModel.load(lang_model, device=device)
model.connect_heads_with_processor(data_silo.processor.tasks, require_labels=True)
# 6. Run the Evaluator
results = evaluator.eval(model)
f1_score = results[0]["f1_macro"]
print("Macro-averaged F1-Score:", f1_score)
def evaluate_question_answering():
##########################
########## Settings
##########################
device, n_gpu = initialize_device_settings(use_cuda=True)
lang_model = "deepset/roberta-base-squad2"
do_lower_case = True
data_dir = Path("../data/squad20")
evaluation_filename = "dev-v2.0.json"
batch_size = 50
no_ans_boost = 0
accuracy_at = 3 # accuracy at n is useful for answers inside long documents
    # 1. Create a tokenizer
tokenizer = Tokenizer.load(
pretrained_model_name_or_path=lang_model,
do_lower_case=do_lower_case)
# 2. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
processor = SquadProcessor(
tokenizer=tokenizer,
max_seq_len=256,
label_list= ["start_token", "end_token"],
metric="squad",
train_filename=None,
dev_filename=None,
dev_split=0,
test_filename=evaluation_filename,
data_dir=data_dir,
doc_stride=128,
)
# 3. Create a DataSilo that loads dataset, provides DataLoaders for them and calculates a few descriptive statistics of our datasets
data_silo = DataSilo(
processor=processor,
batch_size=batch_size)
# 4. Create an Evaluator
evaluator = Evaluator(
data_loader=data_silo.get_data_loader("test"),
tasks=data_silo.processor.tasks,
device=device
)
# 5. Load model
model = AdaptiveModel.convert_from_transformers(lang_model, device=device, task_type="question_answering")
# use "load" if you want to use a local model that was trained with FARM
#model = AdaptiveModel.load(lang_model, device=device)
model.prediction_heads[0].no_ans_boost = no_ans_boost
model.prediction_heads[0].n_best = accuracy_at
model.connect_heads_with_processor(data_silo.processor.tasks, require_labels=True)
# 6. Run the Evaluator
results = evaluator.eval(model)
f1_score = results[0]["f1"]
em_score = results[0]["EM"]
tnacc = results[0]["top_n_accuracy"]
print("F1-Score:", f1_score)
print("Exact Match Score:", em_score)
print(f"top_{accuracy_at}_accuracy:", tnacc)
if __name__ == "__main__":
#evaluate_classification()
evaluate_question_answering()
|
9f0a5b140420f3be0babfc3936ec2e24dec42f45
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/RecoLocalTracker/Phase2TrackerRecHits/test/RecHitsValidationTest_cfg.py
|
8d7d6c02dc01852e9ed029963b36e458b180bd45
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 2,409
|
py
|
RecHitsValidationTest_cfg.py
|
# Imports
import FWCore.ParameterSet.Config as cms
# Create a new CMS process
process = cms.Process('cluTest')
# Import all the necessary files
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.Geometry.GeometryExtended2023D17Reco_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic', '')
# Number of events (-1 = all)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input file
process.source = cms.Source('PoolSource',
fileNames = cms.untracked.vstring('file:step3.root')
)
# Output
process.TFileService = cms.Service('TFileService',
fileName = cms.string('file:rechits_validation.root')
)
process.load('RecoLocalTracker.SiPhase2Clusterizer.phase2TrackerClusterizer_cfi')
process.load('RecoLocalTracker.Phase2TrackerRecHits.Phase2StripCPEESProducer_cfi')
#process.load('RecoLocalTracker.Phase2TrackerRecHits.Phase2StripCPEGeometricESProducer_cfi')
process.load('RecoLocalTracker.Phase2TrackerRecHits.Phase2TrackerRecHits_cfi')
#process.siPhase2RecHits.Phase2StripCPE = cms.ESInputTag("phase2StripCPEESProducer", "Phase2StripCPE")
#process.siPhase2RecHits.Phase2StripCPE = cms.ESInputTag("phase2StripCPEGeometricESProducer", "Phase2StripCPEGeometric")
# Analyzer
process.analysis = cms.EDAnalyzer('Phase2TrackerRecHitsValidation',
src = cms.InputTag("siPhase2RecHits"),
clusters = cms.InputTag("siPhase2Clusters"),
links = cms.InputTag("simSiPixelDigis", "Tracker"),
simhitsbarrel = cms.InputTag("g4SimHits", "TrackerHitsPixelBarrelLowTof"),
simhitsendcap = cms.InputTag("g4SimHits", "TrackerHitsPixelEndcapLowTof"),
simtracks = cms.InputTag("g4SimHits"),
ECasRings = cms.bool(True),
SimTrackMinPt = cms.double(2.),
MakeEtaPlots = cms.bool(False),
MinEta = cms.double(0.),
MaxEta = cms.double(10.)
)
# Processes to run
#process.rechits_step = cms.Path(process.siPhase2Clusters + process.siPhase2RecHits)
process.rechits_step = cms.Path(process.siPhase2RecHits)
process.validation_step = cms.Path(process.analysis)
process.schedule = cms.Schedule(process.rechits_step, process.validation_step)
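# To run this configuration (standard CMSSW workflow; assumes a CMSSW
# environment is set up and a local 'step3.root' input file exists):
#   cmsRun RecHitsValidationTest_cfg.py
# The validation histograms end up in 'rechits_validation.root' via the
# TFileService configured above.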
|
be01abb914f1340b72b40f2ad5745b8d93be084e
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/salt/payload.py
|
07489be9b4216ec2d3c72fe4d8c240bd65ad0d45
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 14,263
|
py
|
payload.py
|
"""
Many aspects of the salt payload need to be managed, from the return of
encrypted keys to general payload dynamics and packaging; all of that
happens in here.
"""
import collections.abc
import datetime
import gc
import logging
import salt.loader.context
import salt.transport.frame
import salt.utils.immutabletypes as immutabletypes
import salt.utils.msgpack
import salt.utils.stringutils
import salt.utils.versions  # used by the Serial deprecation warning below
from salt.defaults import _Constant
from salt.exceptions import SaltDeserializationError, SaltReqTimeoutError
from salt.utils.data import CaseInsensitiveDict
try:
import zmq
except ImportError:
# No need for zeromq in local mode
pass
log = logging.getLogger(__name__)
def package(payload):
"""
This method for now just wraps msgpack.dumps, but it is here so that
we can make the serialization a custom option in the future with ease.
"""
return salt.utils.msgpack.dumps(payload)
def unpackage(package_):
"""
Unpackages a payload
"""
return salt.utils.msgpack.loads(package_, use_list=True)
def format_payload(enc, **kwargs):
"""
Pass in the required arguments for a payload, the enc type and the cmd,
then a list of keyword args to generate the body of the load dict.
"""
payload = {"enc": enc}
load = {}
for key in kwargs:
load[key] = kwargs[key]
payload["load"] = load
return package(payload)
def loads(msg, encoding=None, raw=False):
"""
Run the correct loads serialization format
:param encoding: Useful for Python 3 support. If the msgpack data
was encoded using "use_bin_type=True", this will
differentiate between the 'bytes' type and the
'str' type by decoding contents with 'str' type
to what the encoding was set as. Recommended
encoding is 'utf-8' when using Python 3.
If the msgpack data was not encoded using
"use_bin_type=True", it will try to decode
all 'bytes' and 'str' data (the distinction has
been lost in this case) to what the encoding is
set as. In this case, it will fail if any of
the contents cannot be converted.
"""
try:
def ext_type_decoder(code, data):
if code == 78:
data = salt.utils.stringutils.to_unicode(data)
return datetime.datetime.strptime(data, "%Y%m%dT%H:%M:%S.%f")
if code == 79:
name, value = salt.utils.msgpack.loads(data, raw=False)
return _Constant(name, value)
return data
gc.disable() # performance optimization for msgpack
loads_kwargs = {"use_list": True, "ext_hook": ext_type_decoder}
if salt.utils.msgpack.version >= (0, 4, 0):
# msgpack only supports 'encoding' starting in 0.4.0.
# Due to this, if we don't need it, don't pass it at all so
# that under Python 2 we can still work with older versions
# of msgpack.
if salt.utils.msgpack.version >= (0, 5, 2):
if encoding is None:
loads_kwargs["raw"] = True
else:
loads_kwargs["raw"] = False
else:
loads_kwargs["encoding"] = encoding
try:
ret = salt.utils.msgpack.unpackb(msg, **loads_kwargs)
except UnicodeDecodeError:
# msg contains binary data
loads_kwargs.pop("raw", None)
loads_kwargs.pop("encoding", None)
ret = salt.utils.msgpack.loads(msg, **loads_kwargs)
else:
ret = salt.utils.msgpack.loads(msg, **loads_kwargs)
if encoding is None and not raw:
ret = salt.transport.frame.decode_embedded_strs(ret)
except Exception as exc: # pylint: disable=broad-except
log.critical(
"Could not deserialize msgpack message. This often happens "
"when trying to read a file not in binary mode. "
"To see message payload, enable debug logging and retry. "
"Exception: %s",
exc,
)
log.debug("Msgpack deserialization failure on message: %s", msg)
exc_msg = "Could not deserialize msgpack message. See log for more info."
raise SaltDeserializationError(exc_msg) from exc
finally:
gc.enable()
return ret
def dumps(msg, use_bin_type=False):
"""
Run the correct dumps serialization format
:param use_bin_type: Useful for Python 3 support. Tells msgpack to
differentiate between 'str' and 'bytes' types
by encoding them differently.
Since this changes the wire protocol, this
option should not be used outside of IPC.
"""
def ext_type_encoder(obj):
if isinstance(obj, int):
# msgpack can't handle the very long Python longs for jids
# Convert any very long longs to strings
return str(obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
# msgpack doesn't support datetime.datetime and datetime.date datatypes.
# So here we have converted these types to custom datatype
# This is msgpack Extended types numbered 78
return salt.utils.msgpack.ExtType(
78,
salt.utils.stringutils.to_bytes(obj.strftime("%Y%m%dT%H:%M:%S.%f")),
)
elif isinstance(obj, _Constant):
# Special case our constants.
return salt.utils.msgpack.ExtType(
79,
salt.utils.msgpack.dumps((obj.name, obj.value), use_bin_type=True),
)
# The same for immutable types
elif isinstance(obj, immutabletypes.ImmutableDict):
return dict(obj)
elif isinstance(obj, immutabletypes.ImmutableList):
return list(obj)
elif isinstance(obj, (set, immutabletypes.ImmutableSet)):
# msgpack can't handle set so translate it to tuple
return tuple(obj)
elif isinstance(obj, CaseInsensitiveDict):
return dict(obj)
elif isinstance(obj, collections.abc.MutableMapping):
return dict(obj)
# Nothing known exceptions found. Let msgpack raise its own.
return obj
try:
return salt.utils.msgpack.packb(
msg, default=ext_type_encoder, use_bin_type=use_bin_type
)
except (OverflowError, salt.utils.msgpack.exceptions.PackValueError):
        # msgpack<=0.4.6 doesn't call the ext encoder on very long integers, raising the error instead.
# Convert any very long longs to strings and call dumps again.
def verylong_encoder(obj, context):
# Make sure we catch recursion here.
objid = id(obj)
# This instance list needs to correspond to the types recursed
# in the below if/elif chain. Also update
# tests/unit/test_payload.py
if objid in context and isinstance(obj, (dict, list, tuple)):
return "<Recursion on {} with id={}>".format(
type(obj).__name__, id(obj)
)
context.add(objid)
# The isinstance checks in this if/elif chain need to be
# kept in sync with the above recursion check.
if isinstance(obj, dict):
for key, value in obj.copy().items():
obj[key] = verylong_encoder(value, context)
return dict(obj)
elif isinstance(obj, (list, tuple)):
obj = list(obj)
for idx, entry in enumerate(obj):
obj[idx] = verylong_encoder(entry, context)
return obj
            # An integer value is limited to the range -(2^63) up to (2^64)-1 by the
            # MessagePack spec. Here we care only about JIDs, which are positive integers.
if isinstance(obj, int) and obj >= pow(2, 64):
return str(obj)
else:
return obj
msg = verylong_encoder(msg, set())
return salt.utils.msgpack.packb(
msg, default=ext_type_encoder, use_bin_type=use_bin_type
)
def load(fn_):
"""
Run the correct serialization to load a file
"""
data = fn_.read()
fn_.close()
if data:
return loads(data, encoding="utf-8")
def dump(msg, fn_):
"""
Serialize the correct data into the named file object
"""
# When using Python 3, write files in such a way
# that the 'bytes' and 'str' types are distinguishable
# by using "use_bin_type=True".
fn_.write(dumps(msg, use_bin_type=True))
fn_.close()
class Serial:
"""
Create a serialization object, this object manages all message
serialization in Salt
"""
def __init__(self, *args, **kwargs):
salt.utils.versions.warn_until(
3007,
"The `salt.payload.Serial` class has been deprecated, "
"and is set to be removed in {version}. "
"Please use `salt.payload.loads` and `salt.payload.dumps`.",
)
loads = staticmethod(loads)
dumps = staticmethod(dumps)
dump = staticmethod(dump)
load = staticmethod(load)
class SREQ:
"""
Create a generic interface to wrap salt zeromq req calls.
"""
def __init__(self, master, id_="", serial="msgpack", linger=0, opts=None):
self.master = master
self.id_ = id_
self.linger = linger
self.context = zmq.Context()
self.poller = zmq.Poller()
self.opts = opts
@property
def socket(self):
"""
Lazily create the socket.
"""
if not hasattr(self, "_socket"):
# create a new one
self._socket = self.context.socket(zmq.REQ)
if hasattr(zmq, "RECONNECT_IVL_MAX"):
self._socket.setsockopt(zmq.RECONNECT_IVL_MAX, 5000)
self._set_tcp_keepalive()
if self.master.startswith("tcp://["):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, "IPV6"):
self._socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, "IPV4ONLY"):
self._socket.setsockopt(zmq.IPV4ONLY, 0)
self._socket.linger = self.linger
if self.id_:
self._socket.setsockopt(zmq.IDENTITY, self.id_)
self._socket.connect(self.master)
return self._socket
def _set_tcp_keepalive(self):
if hasattr(zmq, "TCP_KEEPALIVE") and self.opts:
if "tcp_keepalive" in self.opts:
self._socket.setsockopt(zmq.TCP_KEEPALIVE, self.opts["tcp_keepalive"])
if "tcp_keepalive_idle" in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts["tcp_keepalive_idle"]
)
if "tcp_keepalive_cnt" in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts["tcp_keepalive_cnt"]
)
if "tcp_keepalive_intvl" in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts["tcp_keepalive_intvl"]
)
def clear_socket(self):
"""
        Delete the socket if it exists.
"""
if hasattr(self, "_socket"):
if isinstance(self.poller.sockets, dict):
sockets = list(self.poller.sockets.keys())
for socket in sockets:
log.trace("Unregistering socket: %s", socket)
self.poller.unregister(socket)
else:
for socket in self.poller.sockets:
log.trace("Unregistering socket: %s", socket)
self.poller.unregister(socket[0])
del self._socket
def send(self, enc, load, tries=1, timeout=60):
"""
Takes two arguments, the encryption type and the base payload
"""
payload = {"enc": enc}
payload["load"] = load
pkg = dumps(payload)
self.socket.send(pkg)
self.poller.register(self.socket, zmq.POLLIN)
tried = 0
while True:
polled = self.poller.poll(timeout * 1000)
tried += 1
if polled:
break
if tries > 1:
log.info(
"SaltReqTimeoutError: after %s seconds. (Try %s of %s)",
timeout,
tried,
tries,
)
if tried >= tries:
self.clear_socket()
raise SaltReqTimeoutError(
"SaltReqTimeoutError: after {} seconds, ran {} tries".format(
timeout * tried, tried
)
)
return loads(self.socket.recv())
def send_auto(self, payload, tries=1, timeout=60):
"""
Detect the encryption type based on the payload
"""
enc = payload.get("enc", "clear")
load = payload.get("load", {})
return self.send(enc, load, tries, timeout)
def destroy(self):
if isinstance(self.poller.sockets, dict):
sockets = list(self.poller.sockets.keys())
for socket in sockets:
if socket.closed is False:
socket.setsockopt(zmq.LINGER, 1)
socket.close()
self.poller.unregister(socket)
else:
for socket in self.poller.sockets:
if socket[0].closed is False:
socket[0].setsockopt(zmq.LINGER, 1)
socket[0].close()
self.poller.unregister(socket[0])
if self.socket.closed is False:
self.socket.setsockopt(zmq.LINGER, 1)
self.socket.close()
if self.context.closed is False:
self.context.term()
# pylint: disable=W1701
def __del__(self):
self.destroy()
# pylint: enable=W1701
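if __name__ == "__main__":
    # Illustrative round-trip sketch (an addition, not part of upstream Salt):
    # serialize a payload containing a datetime so the custom msgpack ext type
    # (code 78) handled by dumps()/loads() above is exercised end to end.
    sample = {"fun": "test.ping", "stamp": datetime.datetime(2023, 1, 1, 12, 30)}
    blob = dumps(sample, use_bin_type=True)
    restored = loads(blob, encoding="utf-8")
    assert restored["stamp"] == sample["stamp"]
    print("round-trip ok:", restored)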
|
056f2352df9e37dca511a1dd6c28b03159316b1a
|
30fda3a5f7f8d106c08329cb228d7938dfa47340
|
/tests/system/conftest.py
|
82e61762f2e196d394af7feb5cea52290f4825dd
|
[
"Apache-2.0"
] |
permissive
|
googleapis/python-ndb
|
afb479e86486352213ad35abe4631f2919d5b14c
|
b0f431048b7b2ebb20e4255340290c7687e27425
|
refs/heads/main
| 2023-09-05T16:03:46.896711
| 2023-08-09T17:57:30
| 2023-08-09T17:57:30
| 171,752,887
| 159
| 72
|
Apache-2.0
| 2023-09-14T15:13:10
| 2019-02-20T21:35:11
|
Python
|
UTF-8
|
Python
| false
| false
| 5,313
|
py
|
conftest.py
|
import itertools
import logging
import os
import uuid
import pytest
import requests
from google.cloud import datastore
from google.cloud import ndb
from google.cloud.ndb import global_cache as global_cache_module
from . import KIND, OTHER_KIND, _helpers
log = logging.getLogger(__name__)
@pytest.fixture(scope="session", autouse=True)
def preclean():
"""Clean out default namespace in test database."""
_preclean(None, None)
if _helpers.TEST_DATABASE:
_preclean(_helpers.TEST_DATABASE, None)
def _preclean(database, namespace):
ds_client = _make_ds_client(database, namespace)
for kind in (KIND, OTHER_KIND):
query = ds_client.query(kind=kind)
query.keys_only()
for page in query.fetch().pages:
keys = [entity.key for entity in page]
ds_client.delete_multi(keys)
def _make_ds_client(database, namespace):
emulator = bool(os.environ.get("DATASTORE_EMULATOR_HOST"))
if emulator:
client = datastore.Client(
database=database, namespace=namespace, _http=requests.Session
)
else:
client = datastore.Client(database=database, namespace=namespace)
assert client.database == database
assert client.namespace == namespace
return client
def all_entities(client, other_namespace):
return itertools.chain(
client.query(kind=KIND).fetch(),
client.query(kind=OTHER_KIND).fetch(),
client.query(namespace=other_namespace).fetch(),
)
@pytest.fixture(scope="session")
def deleted_keys():
return set()
@pytest.fixture
def to_delete():
return []
@pytest.fixture
def ds_client(database_id, namespace):
client = _make_ds_client(database_id, namespace)
assert client.database == database_id
assert client.namespace == namespace
return client
@pytest.fixture
def with_ds_client(ds_client, to_delete, deleted_keys, other_namespace):
yield ds_client
# Clean up after ourselves
while to_delete:
batch = to_delete[:500]
ds_client.delete_multi(batch)
deleted_keys.update(batch)
to_delete = to_delete[500:]
not_deleted = [
entity
for entity in all_entities(ds_client, other_namespace)
        if fix_key_db(entity.key, ds_client.database) not in deleted_keys
]
if not_deleted:
log.warning("CLEAN UP: Entities not deleted from test: {}".format(not_deleted))
@pytest.fixture
def ds_entity(with_ds_client, dispose_of):
def make_entity(*key_args, **entity_kwargs):
key = with_ds_client.key(*key_args)
assert with_ds_client.get(key) is None
entity = datastore.Entity(key=key)
entity.update(entity_kwargs)
with_ds_client.put(entity)
dispose_of(key)
return entity
yield make_entity
@pytest.fixture
def ds_entity_with_meanings(with_ds_client, dispose_of):
def make_entity(*key_args, **entity_kwargs):
meanings = key_args[0]
key = with_ds_client.key(*key_args[1:])
assert with_ds_client.get(key) is None
entity = datastore.Entity(key=key, exclude_from_indexes=("blob",))
entity._meanings = meanings
entity.update(entity_kwargs)
with_ds_client.put(entity)
dispose_of(key)
return entity
yield make_entity
# Workaround: datastore batches reject if key.database is None and client.database == ""
# or vice-versa. This should be fixed, but for now just fix the keys
# See https://github.com/googleapis/python-datastore/issues/460
def fix_key_db(key, database):
if key.database:
return key
else:
fixed_key = key.__class__(
*key.flat_path,
project=key.project,
database=database,
namespace=key.namespace
)
# If the current parent has already been set, we re-use
# the same instance
fixed_key._parent = key._parent
return fixed_key
@pytest.fixture
def dispose_of(with_ds_client, to_delete):
def delete_entity(*ds_keys):
to_delete.extend(
map(lambda key: fix_key_db(key, with_ds_client.database), ds_keys)
)
return delete_entity
@pytest.fixture(params=["", _helpers.TEST_DATABASE])
def database_id(request):
return request.param
@pytest.fixture
def namespace():
return str(uuid.uuid4())
@pytest.fixture
def other_namespace():
return str(uuid.uuid4())
@pytest.fixture
def client_context(database_id, namespace):
client = ndb.Client(database=database_id)
assert client.database == database_id
context_manager = client.context(
cache_policy=False,
legacy_data=False,
namespace=namespace,
)
with context_manager as context:
yield context
@pytest.fixture
def redis_context(client_context):
global_cache = global_cache_module.RedisCache.from_environment()
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
yield context
@pytest.fixture
def memcache_context(client_context):
global_cache = global_cache_module.MemcacheCache.from_environment()
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
yield context
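# Illustrative sketch (an addition, not part of this conftest): a system test
# consuming the fixtures above. `ds_entity` creates a datastore entity keyed
# under the module's KIND and registers it for automatic cleanup; the id 1234
# and the property name "foo" are arbitrary examples. The leading underscore
# keeps pytest from collecting it as a real test.
def _example_lookup(ds_entity):
    entity = ds_entity(KIND, 1234, foo=42)
    assert entity["foo"] == 42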
|
720b3da8906a44599d7358aa30348e71a83386fe
|
6d58cdc52b4f882b498d44791ea41d89f2691445
|
/nipyapi/registry/models/registry_configuration.py
|
a05eb9ff5fbe6d1c260809e88011055a590ed731
|
[
"Apache-2.0"
] |
permissive
|
Chaffelson/nipyapi
|
8cb47c1f13e9b3d53d4add8829c2efcee24349b6
|
c687fb811486d7bcada099ac0785b55cfb30aea8
|
refs/heads/main
| 2022-12-02T15:39:27.685280
| 2022-12-01T12:39:10
| 2022-12-01T12:39:10
| 101,291,622
| 229
| 84
|
NOASSERTION
| 2023-08-27T15:55:31
| 2017-08-24T12:17:36
|
Python
|
UTF-8
|
Python
| false
| false
| 6,482
|
py
|
registry_configuration.py
|
# coding: utf-8
"""
Apache NiFi Registry REST API
The REST API provides an interface to a registry with operations for saving, versioning, reading NiFi flows and components.
OpenAPI spec version: 1.19.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class RegistryConfiguration(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'supports_managed_authorizer': 'bool',
'supports_configurable_authorizer': 'bool',
'supports_configurable_users_and_groups': 'bool'
}
attribute_map = {
'supports_managed_authorizer': 'supportsManagedAuthorizer',
'supports_configurable_authorizer': 'supportsConfigurableAuthorizer',
'supports_configurable_users_and_groups': 'supportsConfigurableUsersAndGroups'
}
def __init__(self, supports_managed_authorizer=None, supports_configurable_authorizer=None, supports_configurable_users_and_groups=None):
"""
RegistryConfiguration - a model defined in Swagger
"""
self._supports_managed_authorizer = None
self._supports_configurable_authorizer = None
self._supports_configurable_users_and_groups = None
if supports_managed_authorizer is not None:
self.supports_managed_authorizer = supports_managed_authorizer
if supports_configurable_authorizer is not None:
self.supports_configurable_authorizer = supports_configurable_authorizer
if supports_configurable_users_and_groups is not None:
self.supports_configurable_users_and_groups = supports_configurable_users_and_groups
@property
def supports_managed_authorizer(self):
"""
Gets the supports_managed_authorizer of this RegistryConfiguration.
Whether this NiFi Registry supports a managed authorizer. Managed authorizers can visualize users, groups, and policies in the UI.
:return: The supports_managed_authorizer of this RegistryConfiguration.
:rtype: bool
"""
return self._supports_managed_authorizer
@supports_managed_authorizer.setter
def supports_managed_authorizer(self, supports_managed_authorizer):
"""
Sets the supports_managed_authorizer of this RegistryConfiguration.
Whether this NiFi Registry supports a managed authorizer. Managed authorizers can visualize users, groups, and policies in the UI.
:param supports_managed_authorizer: The supports_managed_authorizer of this RegistryConfiguration.
:type: bool
"""
self._supports_managed_authorizer = supports_managed_authorizer
@property
def supports_configurable_authorizer(self):
"""
Gets the supports_configurable_authorizer of this RegistryConfiguration.
Whether this NiFi Registry supports a configurable authorizer.
:return: The supports_configurable_authorizer of this RegistryConfiguration.
:rtype: bool
"""
return self._supports_configurable_authorizer
@supports_configurable_authorizer.setter
def supports_configurable_authorizer(self, supports_configurable_authorizer):
"""
Sets the supports_configurable_authorizer of this RegistryConfiguration.
Whether this NiFi Registry supports a configurable authorizer.
:param supports_configurable_authorizer: The supports_configurable_authorizer of this RegistryConfiguration.
:type: bool
"""
self._supports_configurable_authorizer = supports_configurable_authorizer
@property
def supports_configurable_users_and_groups(self):
"""
Gets the supports_configurable_users_and_groups of this RegistryConfiguration.
Whether this NiFi Registry supports configurable users and groups.
:return: The supports_configurable_users_and_groups of this RegistryConfiguration.
:rtype: bool
"""
return self._supports_configurable_users_and_groups
@supports_configurable_users_and_groups.setter
def supports_configurable_users_and_groups(self, supports_configurable_users_and_groups):
"""
Sets the supports_configurable_users_and_groups of this RegistryConfiguration.
Whether this NiFi Registry supports configurable users and groups.
:param supports_configurable_users_and_groups: The supports_configurable_users_and_groups of this RegistryConfiguration.
:type: bool
"""
self._supports_configurable_users_and_groups = supports_configurable_users_and_groups
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, RegistryConfiguration):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
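if __name__ == '__main__':
    # Illustrative sketch (an addition, not part of the generated client):
    # build a model and inspect its dict form; unset attributes stay None.
    config = RegistryConfiguration(supports_managed_authorizer=True)
    print(config.to_dict())
    # -> {'supports_managed_authorizer': True,
    #     'supports_configurable_authorizer': None,
    #     'supports_configurable_users_and_groups': None}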
|
2de79c7e91c7610b7ba93bdfbf2562c457c7e094
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/shared/utils/__init__.py
|
0fb72c4a969059c8c5237a7e9b5c2b6d8a16740d
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 9,895
|
py
|
__init__.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/shared/utils/__init__.py
import imghdr
import itertools
import sys
import inspect
import uuid
import struct
from collections import namedtuple
import BigWorld
import AccountCommands
import Settings
import constants
from debug_utils import LOG_CURRENT_EXCEPTION, LOG_ERROR, LOG_DEBUG, LOG_WARNING
from gui.impl import backport
from gui.impl.gen import R
from helpers import getLanguageCode, i18n
from items import vehicles as vehs_core
from account_helpers import getAccountDatabaseID
from account_helpers.AccountSettings import AccountSettings
from avatar_helpers import getAvatarDatabaseID, getAvatarSessionID
SHELLS_COUNT_PROP_NAME = 'shellsCount'
RELOAD_TIME_SECS_PROP_NAME = 'reloadTimeSecs'
RELOAD_TIME_PROP_NAME = 'reloadTime'
RELOAD_MAGAZINE_TIME_PROP_NAME = 'reloadMagazineTime'
SHELL_RELOADING_TIME_PROP_NAME = 'shellReloadingTime'
DISPERSION_RADIUS_PROP_NAME = 'dispersionRadius'
AIMING_TIME_PROP_NAME = 'aimingTime'
PIERCING_POWER_PROP_NAME = 'piercingPower'
DAMAGE_PROP_NAME = 'damage'
SHELLS_PROP_NAME = 'shells'
STUN_DURATION_PROP_NAME = 'stunDuration'
AUTO_RELOAD_PROP_NAME = 'autoReloadTime'
GUARANTEED_STUN_DURATION_PROP_NAME = 'guaranteedStunDuration'
CLIP_VEHICLES_PROP_NAME = 'clipVehicles'
UNICHARGED_VEHICLES_PROP_NAME = 'uniChargedVehicles'
VEHICLES_PROP_NAME = 'vehicles'
CLIP_VEHICLES_CD_PROP_NAME = 'clipVehiclesCD'
MAX_STEERING_LOCK_ANGLE = 'maxSteeringLockAngle'
WHEELED_SWITCH_ON_TIME = 'wheeledSwitchOnTime'
WHEELED_SWITCH_OFF_TIME = 'wheeledSwitchOffTime'
WHEELED_SWITCH_TIME = 'wheeledSwitchTime'
WHEELED_SPEED_MODE_SPEED = 'wheeledSpeedModeSpeed'
TURBOSHAFT_SWITCH_ON_TIME = 'turboshaftSwitchOnTime'
TURBOSHAFT_SWITCH_OFF_TIME = 'turboshaftSwitchOffTime'
TURBOSHAFT_SWITCH_TIME = 'turboshaftSwitchTime'
TURBOSHAFT_SPEED_MODE_SPEED = 'turboshaftSpeedModeSpeed'
TURBOSHAFT_ENGINE_POWER = 'turboshaftEnginePower'
TURBOSHAFT_INVISIBILITY_STILL_FACTOR = 'turboshaftInvisibilityStillFactor'
TURBOSHAFT_INVISIBILITY_MOVING_FACTOR = 'turboshaftInvisibilityMovingFactor'
DUAL_GUN_CHARGE_TIME = 'chargeTime'
DUAL_GUN_RATE_TIME = 'rateTime'
GUN_RELOADING_TYPE = 'gunReloadingType'
CHASSIS_REPAIR_TIME = 'chassisRepairTime'
CHASSIS_REPAIR_TIME_YOH = 'chassisRepairTimeYoh'
GUN_CAN_BE_CLIP = 1
GUN_CLIP = 2
GUN_NORMAL = 4
GUN_CAN_BE_AUTO_RELOAD = 5
GUN_AUTO_RELOAD = 6
GUN_CAN_BE_DUAL_GUN = 7
GUN_DUAL_GUN = 8
EXTRA_MODULE_INFO = 'extraModuleInfo'
FIELD_SPECIALIZATIONS = 'specs'
FIELD_HIGHLIGHT_TYPE = 'highlightType'
_FLASH_OBJECT_SYS_ATTRS = ('isPrototypeOf', 'propertyIsEnumerable', 'hasOwnProperty')
ValidationResult = namedtuple('ValidationResult', ['isValid', 'reason'])
def flashObject2Dict(obj):
if hasattr(obj, 'children'):
filtered = itertools.ifilter(lambda (x, y): x not in _FLASH_OBJECT_SYS_ATTRS, obj.children.iteritems())
return dict(((k, flashObject2Dict(v)) for k, v in filtered))
return obj
def code2str(code):
if code == AccountCommands.RES_SUCCESS:
        return 'Request succeeded'
if code == AccountCommands.RES_STREAM:
return 'Stream is sent to the client'
if code == AccountCommands.RES_CACHE:
return 'Data is taken from cache'
if code == AccountCommands.RES_FAILURE:
return 'Unknown reason'
if code == AccountCommands.RES_WRONG_ARGS:
return 'Wrong arguments'
if code == AccountCommands.RES_NON_PLAYER:
return 'Account become non player'
if code == AccountCommands.RES_SHOP_DESYNC:
return 'Shop cache is desynchronized'
if code == AccountCommands.RES_COOLDOWN:
return 'Identical requests cooldown'
if code == AccountCommands.RES_HIDDEN_DOSSIER:
return 'Player dossier is hidden'
return 'Dossiers are unavailable' if code == AccountCommands.RES_CENTER_DISCONNECTED else 'Unknown error code'
def isVehicleObserver(vehTypeCompDescr):
if vehTypeCompDescr is not None:
_, nation_id, item_id_within_nation = vehs_core.parseIntCompactDescr(vehTypeCompDescr)
return 'observer' in vehs_core.g_cache.vehicle(nation_id, item_id_within_nation).tags
else:
return False
def class_for_name(module_name, class_name):
__import__(module_name)
m = sys.modules[module_name]
c = getattr(m, class_name)
if not inspect.isclass(c):
LOG_ERROR('%s - is not a class, check module path or className' % class_name)
return None
else:
return c
def sortByFields(fields, sequence, valueGetter=dict.get):
def comparator(x, y):
for field, order in fields:
fieldValueX = valueGetter(x, field)
fieldValueY = valueGetter(y, field)
if fieldValueX != fieldValueY:
if order:
return cmp(fieldValueX, fieldValueY)
return cmp(fieldValueY, fieldValueX)
return sorted(sequence, cmp=comparator)
def roundByModulo(value, rate):
left = value % rate
if left > 0:
value += rate - left
return value
_STR_CASING_OPTIONS = {'el': (8, 1, 0),
'ro': (24, 1, 0),
'tr': (31, 1, 0)}
_REPLACEMENTS = {'el': (u'\u0386\u0388\u038a\u0389\u038e\u038c\u038f', u'\u0391\u0395\u0399\u0397\u03a5\u039f\u03a9')}
def changeStringCasing(string, isUpper):
langID = getLanguageCode()
try:
if not isinstance(string, unicode):
string = string.decode('utf-8')
if langID is not None:
langID = str(langID).lower()
if langID in _STR_CASING_OPTIONS:
plID, slID, sortOrder = _STR_CASING_OPTIONS[langID]
string = BigWorld.wg_changeStringCasing(string, plID, slID, sortOrder, isUpper)
else:
string = string.upper() if isUpper else string.lower()
if langID in _REPLACEMENTS:
for wrong, right in zip(*_REPLACEMENTS[langID]):
string = string.replace(wrong, right)
except Exception:
LOG_CURRENT_EXCEPTION()
return string
def toLower(string):
return changeStringCasing(string, False)
def toUpper(string):
return changeStringCasing(string, True)
def copyToClipboard(text):
BigWorld.wg_copyToClipboard(unicode(text, 'utf-8', errors='ignore'))
LOG_DEBUG('Text has been copied to the clipboard', text)
class SettingRecord(dict):
def __setattr__(self, name, value):
if self:
raise AttributeError("can't set attribute")
self.__setitem__(name, value)
def __getattr__(self, item):
return self.__getitem__(item) if item in self else dict.__getattribute__(self, item)
def _asdict(self):
return dict(self)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, super(SettingRecord, self).__repr__())
class SettingRootRecord(SettingRecord):
@classmethod
def load(cls):
try:
return cls(**AccountSettings.getSettings(cls._getSettingName()))
except Exception:
LOG_ERROR('There is error while unpacking {} settings'.format(cls._getSettingName()), AccountSettings.getSettings(cls._getSettingName()))
LOG_CURRENT_EXCEPTION()
return None
def save(self):
return AccountSettings.setSettings(self._getSettingName(), self._asdict())
@classmethod
def _getSettingName(cls):
raise NotImplementedError
def mapTextureToTheMemory(textureData, uniqueID=None, temp=True):
if textureData and imghdr.what(None, textureData) is not None:
uniqueID = str(uniqueID or uuid.uuid4())
if temp:
BigWorld.wg_addTempScaleformTexture(uniqueID, textureData)
else:
BigWorld.wg_addScaleformTexture(uniqueID, textureData)
return uniqueID
else:
LOG_WARNING('There is invalid data for the memory mapping', textureData, uniqueID)
return
def removeTextureFromMemory(textureID):
BigWorld.wg_eraseScaleformTexture(textureID)
def getImageSize(imageData):
width, height = (None, None)
if imageData:
imgType = imghdr.what(None, imageData)
if imgType == 'png':
check = struct.unpack('>i', imageData[4:8])[0]
            if check != 218765834:  # 0x0D0A1A0A, the trailing bytes of the PNG signature
return
width, height = struct.unpack('>ii', imageData[16:24])
elif imgType == 'gif':
width, height = struct.unpack('<HH', imageData[6:10])
elif imgType == 'jpeg':
LOG_WARNING('JPEG image type is not supported')
width, height = (None, None)
return (width, height)
def showInvitationInWindowsBar():
try:
BigWorld.WGWindowsNotifier.onInvitation()
except AttributeError:
LOG_CURRENT_EXCEPTION()
def getPlayerDatabaseID():
return getAccountDatabaseID() or getAvatarDatabaseID()
def getPlayerName():
return getattr(BigWorld.player(), 'name', '')
def avg(dividend, divisor):
    return float(dividend) / divisor if divisor > 0 else 0
def weightedAvg(*args):
values, weights = args
valSum = 0
weightSum = 0
itemsCount = len(values)
for i in range(itemsCount):
weight = weights[i]
valSum += values[i] * weight
weightSum += weight
return float(valSum) / weightSum if weightSum != 0 else 0
def makeSearchableString(inputString):
try:
return inputString.decode('utf-8').lower()
except ValueError:
LOG_ERROR('Given string cannot be decoded from UTF-8', inputString)
def isPopupsWindowsOpenDisabled():
userPrefs = Settings.g_instance.userPrefs
ds = userPrefs['development']
return ds.readBool(Settings.POPUPS_WINDOWS_DISABLED) and constants.IS_DEVELOPMENT if ds is not None else False
_ROMAN_FORBIDDEN_LANGUAGES = {'ko', 'no'}
def isRomanNumberForbidden():
return bool(_ROMAN_FORBIDDEN_LANGUAGES.intersection((backport.text(R.strings.settings.LANGUAGE_CODE()),)))
|
d01070df13c9d24f97a9bbb7ab113cb2ecabdbda
|
20e84658198b7993d6da498f65a011ad4c036ae6
|
/ObjectDetection/ASFF.py
|
30b4504e6a2fe648304e4b51bc8df6725578432b
|
[] |
no_license
|
shanglianlm0525/PyTorch-Networks
|
599e95743540bdadbe587b94e0754989a76f6a37
|
063ed2776fa176d16fd665ead5d9ce13403ee4da
|
refs/heads/master
| 2023-04-15T22:25:24.844770
| 2023-04-06T01:29:22
| 2023-04-06T01:29:22
| 211,103,666
| 1,817
| 516
| null | 2023-02-16T02:18:47
| 2019-09-26T14:04:13
|
Python
|
UTF-8
|
Python
| false
| false
| 4,137
|
py
|
ASFF.py
|
import torch
import torch.nn as nn
import torchvision
def Conv1x1BnRelu(in_channels,out_channels):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU6(inplace=True),
)
def upSampling1(in_channels,out_channels):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels,out_channels=out_channels,kernel_size=1,stride=1,padding=0,bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU6(inplace=True),
nn.Upsample(scale_factor=2, mode='nearest')
)
def upSampling2(in_channels,out_channels):
return nn.Sequential(
upSampling1(in_channels,out_channels),
nn.Upsample(scale_factor=2, mode='nearest'),
)
def downSampling1(in_channels,out_channels):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU6(inplace=True),
)
def downSampling2(in_channels,out_channels):
return nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=2,padding=1),
downSampling1(in_channels=in_channels, out_channels=out_channels),
)
class ASFF(nn.Module):
def __init__(self, level, channel1, channel2, channel3, out_channel):
super(ASFF, self).__init__()
self.level = level
        fused_channel = 8
if self.level == 1:
# level = 1:
self.level2_1 = downSampling1(channel2,channel1)
self.level3_1 = downSampling2(channel3,channel1)
            self.weight1 = Conv1x1BnRelu(channel1, fused_channel)
            self.weight2 = Conv1x1BnRelu(channel1, fused_channel)
            self.weight3 = Conv1x1BnRelu(channel1, fused_channel)
self.expand_conv = Conv1x1BnRelu(channel1,out_channel)
if self.level == 2:
# level = 2:
self.level1_2 = upSampling1(channel1,channel2)
self.level3_2 = downSampling1(channel3,channel2)
            self.weight1 = Conv1x1BnRelu(channel2, fused_channel)
            self.weight2 = Conv1x1BnRelu(channel2, fused_channel)
            self.weight3 = Conv1x1BnRelu(channel2, fused_channel)
self.expand_conv = Conv1x1BnRelu(channel2, out_channel)
if self.level == 3:
# level = 3:
self.level1_3 = upSampling2(channel1,channel3)
self.level2_3 = upSampling1(channel2,channel3)
            self.weight1 = Conv1x1BnRelu(channel3, fused_channel)
            self.weight2 = Conv1x1BnRelu(channel3, fused_channel)
            self.weight3 = Conv1x1BnRelu(channel3, fused_channel)
self.expand_conv = Conv1x1BnRelu(channel3, out_channel)
        self.weight_level = nn.Conv2d(fused_channel * 3, 3, kernel_size=1, stride=1, padding=0)
self.softmax = nn.Softmax(dim=1)
def forward(self, x, y, z):
if self.level == 1:
level_x = x
level_y = self.level2_1(y)
level_z = self.level3_1(z)
if self.level == 2:
level_x = self.level1_2(x)
level_y = y
level_z = self.level3_2(z)
if self.level == 3:
level_x = self.level1_3(x)
level_y = self.level2_3(y)
level_z = z
weight1 = self.weight1(level_x)
weight2 = self.weight2(level_y)
weight3 = self.weight3(level_z)
level_weight = torch.cat((weight1, weight2, weight3), 1)
weight_level = self.weight_level(level_weight)
weight_level = self.softmax(weight_level)
        # Keep the channel dim when slicing (0:1 instead of 0) so the per-level
        # weight maps of shape (N, 1, H, W) broadcast over channels for any
        # batch size; a plain [:, 0, :, :] only happens to work when N == 1.
        fused_level = level_x * weight_level[:, 0:1, :, :] + level_y * weight_level[:, 1:2, :, :] + level_z * weight_level[:, 2:3, :, :]
out = self.expand_conv(fused_level)
return out
if __name__ == '__main__':
model = ASFF(level=3, channel1=512, channel2=256, channel3=128, out_channel=128)
print(model)
x = torch.randn(1, 512, 16, 16)
y = torch.randn(1, 256, 32, 32)
z = torch.randn(1, 128, 64, 64)
out = model(x,y,z)
print(out.shape)
|
4366ea4daa597ca769db9103df5099be3676fd6b
|
31dd719c6212da836b66bcff5b61c4e1d6090055
|
/packages/@jsii/python-runtime/setup.py
|
5ae79af4b73046807e0abc2f6a708f4ecc3cf264
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
aws/jsii
|
d46f102aafdfaf4fe1063cc36f0bbf9fec05fcfb
|
aa1445d709070321c3c688d586b0e7a7dbb8fa0d
|
refs/heads/main
| 2023-09-01T09:21:43.271479
| 2023-08-28T18:25:06
| 2023-08-28T18:25:06
| 105,802,846
| 2,210
| 244
|
Apache-2.0
| 2023-09-13T13:41:10
| 2017-10-04T18:23:37
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 1,803
|
py
|
setup.py
|
import json
import setuptools
with open("src/jsii/_metadata.json") as fp:
metadata = json.load(fp)
with open("README.md", encoding="utf8") as fp:
long_description = fp.read()
setuptools.setup(
name="jsii",
version=metadata["version"],
license=metadata["license"],
url=metadata["homepage"],
project_urls={
"Bug Tracker": metadata["bugs"],
"Source": metadata["repository"],
},
description=metadata["description"],
long_description=long_description,
long_description_content_type="text/markdown",
author=metadata["author"],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
package_data={
"jsii": ["_metadata.json", "py.typed"],
"jsii._embedded.jsii": ["*.js", "*.js.map"],
},
install_requires=[
"attrs>=21.2,<24.0",
"cattrs>=1.8,<23.2",
"importlib_resources>=5.2.0",
"publication>=0.0.3", # This is used by all generated code.
"typeguard~=2.13.3", # This is used by all generated code.
"python-dateutil",
"typing_extensions>=3.7,<5.0",
],
python_requires="~=3.7",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
"Typing :: Typed",
],
)
|
5594dad5be713eeaea01dfd783ed2ba0f8a1d6a7
|
3ca67d69abd4e74b7145b340cdda65532f90053b
|
/programmers/난이도별/level01.소수찾기/Go-yj.py
|
cd71c877309907e780d409fc5bf6e452b42b94c9
|
[] |
no_license
|
DKU-STUDY/Algorithm
|
19549516984b52a1c5cd73e1ed1e58f774d6d30e
|
6f78efdbefd8eedab24e43d74c7dae7f95c2893b
|
refs/heads/master
| 2023-02-18T06:48:39.309641
| 2023-02-09T07:16:14
| 2023-02-09T07:16:14
| 258,455,710
| 175
| 49
| null | 2023-02-09T07:16:16
| 2020-04-24T08:42:27
|
Python
|
UTF-8
|
Python
| false
| false
| 775
|
py
|
Go-yj.py
|
'''
Link: https://programmers.co.kr/learn/courses/30/lessons/12921
Problem: Finding prime numbers
At first I looked for primes by checking every candidate against all numbers >= 2
other than itself for an even divisor, but that scored 0 on the efficiency tests.
I then learned about the Sieve of Eratosthenes as a faster way to find primes:
put the search range into an array and strike out the multiples of each prime.
'''
def solution(n):
answer = 0
n_array = [i for i in range(n+1)]
for i in range(2,n+1) :
if n_array[i] :
for j in range(i*2,n+1,i) :
n_array[j] = False
for i in range(2,n+1) :
if n_array[i] : answer += 1
return answer
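if __name__ == '__main__':
    # Quick illustrative check (an addition, not part of the original
    # submission): the primes up to 10 are 2, 3, 5 and 7.
    print(solution(10))  # 4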
|
1729dc22559a38440ec6cd2ac1967cb1254c824f
|
cda215558ad8448ed8e2cbb89719de312c382a95
|
/enteletaor_lib/libs/core/models.py
|
1d8d0ccdae4e059edd78367cd8942d3992634f77
|
[
"BSD-3-Clause"
] |
permissive
|
cr0hn/enteletaor
|
63fc6a9f832ea7b6b08f3f786445a8235b9a4618
|
a975b5cb06bc5f819b32e65d0cd2258a37370661
|
refs/heads/master
| 2023-05-11T13:38:25.213779
| 2023-05-08T08:41:31
| 2023-05-08T08:41:31
| 52,361,896
| 166
| 31
|
NOASSERTION
| 2023-05-08T08:41:36
| 2016-02-23T13:44:22
|
Python
|
UTF-8
|
Python
| false
| false
| 7,358
|
py
|
models.py
|
# -*- coding: utf-8 -*-
#
# Enteletaor - https://github.com/cr0hn/enteletaor
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import six
from wtforms import (Form as Model,
DateTimeField,
StringField as _StringField,
IntegerField as _IntegerField,
FloatField as _FloatField,
BooleanField as _BooleanField,
SelectField as _SelectField,
DecimalField, validators)
from wtforms.utils import unset_value
from wtforms.fields.core import Field as _Field, Label as _Label, UnboundField as _UnboundField
# --------------------------------------------------------------------------
# Monkey patch for Field to add:
# - New parameter: required
# --------------------------------------------------------------------------
def new_field__init__(self, label=None, validators=None, filters=tuple(),
description='', id=None, default=None, widget=None,
render_kw=None, _form=None, _name=None, _prefix='',
_translations=None, _meta=None, required=False):
self.required = required
self.__old___init__(label=label, validators=validators, filters=filters,
description=description, id=id, default=default, widget=widget,
render_kw=render_kw, _form=_form, _name=_name, _prefix=_prefix,
_translations=_translations, _meta=_meta)
if not hasattr(_Field, "__old___init__"):
_Field.__old___init__ = _Field.__init__
_Field.__init__ = new_field__init__
BaseField = _Field
# --------------------------------------------------------------------------
# Monkey patch for wtforms to add:
# - Enforce type checking
# - Change default str(..) action
# --------------------------------------------------------------------------
# Validate
def new_module_validate(self):
"""
    This function adds the feature that checks data types in fields
"""
for name, func in six.iteritems(self._fields):
if hasattr(func, "validator"):
            if func.validator() is False:
                # Record a descriptive error: the validator only returns False
                # when the data type is wrong or no usable default was provided.
                # (The original guard compared type(data) with type(__type__),
                # which was effectively never true, so the error text was
                # never recorded.)
                self._errors = {
                    name: "Data type incorrect or not default value "
                          "provided. Got %s. Expected: %s" % (
                              type(self._fields[name].data),
                              self._fields[name].__type__)
                }
                return False
# Checks required if object is an instance
if type(self) is type:
if self._fields[name].required is True:
if self._fields[name].data is None and self._fields[name].default is None:
self._errors = {name: "Field '%s' is required" % name}
return False
return self.old_validate()
if not hasattr(Model, "old_validate"):
Model.old_validate = Model.validate
Model.validate = new_module_validate
# --------------------------------------------------------------------------
# Field monkey patch
# --------------------------------------------------------------------------
def new_field_str(self):
if self.__type__ is str:
return str(self.data)
else:
return self.data
def new_file_repr(self):
return str(self.data)
_Field.__str__ = new_field_str
_Field.__repr__ = new_file_repr
# --------------------------------------------------------------------------
# Label monkey patch
# --------------------------------------------------------------------------
def new_label_str(self):
return str(self.text)
_Label.__str__ = new_label_str
# --------------------------------------------------------------------------
# New types:
#
# We must add a new validator because WTForms doesn't check input types and
# doesn't raise an exception when they don't match.
# --------------------------------------------------------------------------
def _validator(self):
to_check = self.data
if to_check is None:
if self.data is None:
return True
else:
# to_check = self.default
return False
else:
if not isinstance(to_check, self.__type__):
return False
else:
return True
# --------------------------------------------------------------------------
class StringField(_StringField):
"""Improved String data that checks types"""
__type__ = str
StringField.validator = _validator
# ----------------------------------------------------------------------
class IntegerField(_IntegerField):
"""Improved Integer data that checks types"""
__type__ = six.integer_types
IntegerField.validator = _validator
# ----------------------------------------------------------------------
class IncrementalIntegerField(IntegerField):
"""Special Int indicates their value can be handler by increments, not by assigns"""
__type__ = int
IncrementalIntegerField.validator = _validator
# ----------------------------------------------------------------------
class FloatField(_FloatField):
"""Improved fload data that checks types"""
__type__ = float
FloatField.validator = _validator
# ----------------------------------------------------------------------
class BoolField(_BooleanField):
"""Improved bool data that checks types"""
__type__ = bool
BoolField.validator = _validator
# --------------------------------------------------------------------------
# Especial fields
# --------------------------------------------------------------------------
# ----------------------------------------------------------------------
class SelectField(_SelectField):
"""Improved bool data that checks types"""
__type__ = six.text_type
SelectField.validator = _validator
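# --------------------------------------------------------------------------
# Illustrative sketch (an addition, not part of the library): a model built
# from the type-checked fields above. The field names and defaults are
# arbitrary examples.
# --------------------------------------------------------------------------
if __name__ == "__main__":
    class DemoModel(Model):
        host = StringField(default="127.0.0.1")
        port = IntegerField(default=5672)
    demo = DemoModel()
    # The patched validate() checks that each field's data matches __type__.
    print(demo.validate())  # -> True for the defaults above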
|
cf4d9ccf37da9707d7c5f8bfff4534896b8b9d06
|
a76a2893ea50a607fac6bcecf1a0aa1a33bba30c
|
/epitran/test/test_tamil.py
|
e84b1b3aaa3de9a169702825fa33c5a6456fc688
|
[
"MIT"
] |
permissive
|
dmort27/epitran
|
ec340de58e428112f17df1656e3db2f7f1199618
|
ac2ad67b350fcdba29831eb968ee617bdaebb873
|
refs/heads/master
| 2023-09-05T06:03:28.507878
| 2023-04-19T20:30:55
| 2023-04-19T20:30:55
| 56,178,599
| 543
| 124
|
MIT
| 2023-06-22T02:09:30
| 2016-04-13T19:04:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,338
|
py
|
test_tamil.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import unicodedata
import epitran
class TestTamilGeneral(unittest.TestCase):
def setUp(self):
self.epi = epitran.Epitran(u'tam-Taml')
def _assert_trans(self, src, tar):
trans = self.epi.transliterate(src)
trans = unicodedata.normalize('NFD', trans)
        src = unicodedata.normalize('NFD', src)
# print('{}\t{}\t{}'.format(trans, tar, zip(trans, tar)))
self.assertEqual(trans, tar)
def test_tamil(self):
self._assert_trans('தமிழ்', 't̪amiɻ')
def test_eluttu(self):
self._assert_trans('எழுத்து', 'eɻut̪t̪u')
def test_num1(self):
self._assert_trans('சூனியங்கள்', 't͡ʃuːnijaŋkaɭ')
def test_num2(self):
self._assert_trans('துர்தேவதைகள்', 't̪uɾt̪eːʋat̪ajkaɭ')
def test_num3(self):
self._assert_trans('தகவல்களைக்', 't̪akaʋalkaɭajk')
def test_num4(self):
self._assert_trans('நேரடித்', 'n̪eːɾaʈit̪')
def test_num5(self):
self._assert_trans('குலதெய்வத்தை', 'kulat̪ejʋat̪t̪aj')
def test_num6(self):
self._assert_trans('ஆத்மா', 'aːt̪maː')
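if __name__ == '__main__':
    # Convenience guard (an addition, not part of the upstream suite) so the
    # module can be run directly with `python test_tamil.py`.
    unittest.main()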
|
fa739f8195f4a19c9c3cc7650e67d72263da0fef
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-servicestage/huaweicloudsdkservicestage/v2/model/show_content_response.py
|
af92fb216b0bb4eb1a9139edb84b97afaa7502be
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,347
|
py
|
show_content_response.py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowContentResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'path': 'str',
'sha': 'str',
'encoding': 'str',
'content': 'str'
}
attribute_map = {
'path': 'path',
'sha': 'sha',
'encoding': 'encoding',
'content': 'content'
}
def __init__(self, path=None, sha=None, encoding=None, content=None):
"""ShowContentResponse
The model defined in huaweicloud sdk
        :param path: File path.
        :type path: str
        :param sha: Commit hash.
        :type sha: str
        :param encoding: Encoding: base64 or text/plain.
        :type encoding: str
        :param content: File content.
        :type content: str
"""
super(ShowContentResponse, self).__init__()
self._path = None
self._sha = None
self._encoding = None
self._content = None
self.discriminator = None
if path is not None:
self.path = path
if sha is not None:
self.sha = sha
if encoding is not None:
self.encoding = encoding
if content is not None:
self.content = content
@property
def path(self):
"""Gets the path of this ShowContentResponse.
        File path.
:return: The path of this ShowContentResponse.
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this ShowContentResponse.
        File path.
:param path: The path of this ShowContentResponse.
:type path: str
"""
self._path = path
@property
def sha(self):
"""Gets the sha of this ShowContentResponse.
        Commit hash.
:return: The sha of this ShowContentResponse.
:rtype: str
"""
return self._sha
@sha.setter
def sha(self, sha):
"""Sets the sha of this ShowContentResponse.
        Commit hash.
:param sha: The sha of this ShowContentResponse.
:type sha: str
"""
self._sha = sha
@property
def encoding(self):
"""Gets the encoding of this ShowContentResponse.
        Encoding: base64 or text/plain.
:return: The encoding of this ShowContentResponse.
:rtype: str
"""
return self._encoding
@encoding.setter
def encoding(self, encoding):
"""Sets the encoding of this ShowContentResponse.
        Encoding: base64 or text/plain.
:param encoding: The encoding of this ShowContentResponse.
:type encoding: str
"""
self._encoding = encoding
@property
def content(self):
"""Gets the content of this ShowContentResponse.
        File content.
:return: The content of this ShowContentResponse.
:rtype: str
"""
return self._content
@content.setter
def content(self, content):
"""Sets the content of this ShowContentResponse.
        File content.
:param content: The content of this ShowContentResponse.
:type content: str
"""
self._content = content
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowContentResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
6f8ed1df7b128ccf366a683eade794be722c2261
|
1e148aada79cb648872bb8ecc740a6a798b2e236
|
/tests/test_time_stretch.py
|
3c2c607a87dcba816de5ca8bd9af6410c7466446
|
[
"MIT"
] |
permissive
|
iver56/audiomentations
|
a40ae457ca03ab8c927ad804f489cef783dae8d4
|
498a7d4f149d8917813aa35ff18e748cff49cd09
|
refs/heads/main
| 2023-09-05T05:53:05.369792
| 2023-08-30T13:12:51
| 2023-08-30T13:12:51
| 170,352,817
| 1,520
| 182
|
MIT
| 2023-09-07T14:35:26
| 2019-02-12T16:36:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,427
|
py
|
test_time_stretch.py
|
import numpy as np
from audiomentations import TimeStretch
class TestTimeStretch:
def test_dynamic_length(self):
samples = np.zeros((2048,), dtype=np.float32)
sample_rate = 16000
augmenter = TimeStretch(
min_rate=0.8, max_rate=0.9, leave_length_unchanged=False, p=1.0
)
samples = augmenter(samples=samples, sample_rate=sample_rate)
assert samples.dtype == np.float32
assert len(samples) > 2048
def test_fixed_length(self):
samples = np.zeros((2048,), dtype=np.float32)
sample_rate = 16000
augmenter = TimeStretch(
min_rate=0.8, max_rate=0.9, leave_length_unchanged=True, p=1.0
)
samples = augmenter(samples=samples, sample_rate=sample_rate)
assert samples.dtype == np.float32
assert len(samples) == 2048
def test_multichannel(self):
num_channels = 3
samples = np.random.normal(0, 0.1, size=(num_channels, 5555)).astype(np.float32)
sample_rate = 16000
augmenter = TimeStretch(
min_rate=0.8, max_rate=0.9, leave_length_unchanged=True, p=1.0
)
samples_out = augmenter(samples=samples, sample_rate=sample_rate)
assert samples.dtype == samples_out.dtype
assert samples.shape == samples_out.shape
for i in range(num_channels):
assert not np.allclose(samples[i], samples_out[i])
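if __name__ == "__main__":
    # Convenience guard (an addition, not part of the upstream suite) so the
    # pytest-style tests above can be run directly.
    import pytest
    pytest.main([__file__])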
|
777125f13e4a792f34b47b5e080de00237240553
|
4e57ad279fb04b17f0b024dba780cbd7c0f14ec7
|
/riskfolio/src/ConstraintsFunctions.py
|
72b49b57d18e8474f16088bf781428ee4e56bfb1
|
[] |
permissive
|
dcajasn/Riskfolio-Lib
|
b9ed51b8da93c648f0e255bc7bc20e17d1290cfa
|
06dfe24745dd8ab40665621e72cfeb40a80c2b2e
|
refs/heads/master
| 2023-08-09T16:11:47.258143
| 2023-08-01T20:05:00
| 2023-08-01T20:05:00
| 244,460,835
| 2,266
| 403
|
BSD-3-Clause
| 2023-06-20T04:04:38
| 2020-03-02T19:49:06
|
C++
|
UTF-8
|
Python
| false
| false
| 40,307
|
py
|
ConstraintsFunctions.py
|
"""""" #
"""
Copyright (c) 2020-2023, Dany Cajas
All rights reserved.
This work is licensed under BSD 3-Clause "New" or "Revised" License.
License available at https://github.com/dcajasn/Riskfolio-Lib/blob/master/LICENSE.txt
"""
import numpy as np
import pandas as pd
import scipy.cluster.hierarchy as hr
from scipy.spatial.distance import squareform
import riskfolio.src.AuxFunctions as af
import riskfolio.src.DBHT as db
__all__ = [
"assets_constraints",
"factors_constraints",
"assets_views",
"factors_views",
"assets_clusters",
"hrp_constraints",
"risk_constraint",
]
def assets_constraints(constraints, asset_classes):
r"""
    Create the linear constraint matrices A and B of the constraint
:math:`Aw \geq B`.
Parameters
----------
constraints : DataFrame of shape (n_constraints, n_fields)
Constraints matrix, where n_constraints is the number of constraints
and n_fields is the number of fields of constraints matrix, the fields
are:
    - Disabled: (bool) indicates whether the constraint is disabled (True skips the constraint).
- Type: (str) can be: 'Assets', 'Classes', 'All Assets', 'Each asset in a class' and 'All Classes'.
    - Set: (str) if Type is 'Classes', 'Each asset in a class' or 'All Classes', specifies the name of the asset's classes set.
- Position: (str) the name of the asset or asset class of the constraint.
- Sign: (str) can be '>=' or '<='.
- Weight: (scalar) is the maximum or minimum weight of the absolute constraint.
- Type Relative: (str) can be: 'Assets' or 'Classes'.
    - Relative Set: (str) if Type Relative is 'Classes', specifies the name of the set of asset classes.
- Relative: (str) the name of the asset or asset class of the relative constraint.
- Factor: (scalar) is the factor of the relative constraint.
asset_classes : DataFrame of shape (n_assets, n_cols)
Asset's classes matrix, where n_assets is the number of assets and
n_cols is the number of columns of the matrix where the first column
is the asset list and the next columns are the different asset's
classes sets.
Returns
-------
A : nd-array
The matrix A of :math:`Aw \geq B`.
B : nd-array
The matrix B of :math:`Aw \geq B`.
Raises
------
ValueError when the value cannot be calculated.
Examples
--------
::
import riskfolio as rp
asset_classes = {'Assets': ['FB', 'GOOGL', 'NTFX', 'BAC', 'WFC', 'TLT', 'SHV'],
'Class 1': ['Equity', 'Equity', 'Equity', 'Equity', 'Equity',
'Fixed Income', 'Fixed Income'],
'Class 2': ['Technology', 'Technology', 'Technology',
'Financial', 'Financial', 'Treasury', 'Treasury'],}
asset_classes = pd.DataFrame(asset_classes)
asset_classes = asset_classes.sort_values(by=['Assets'])
constraints = {'Disabled': [False, False, False, False, False, False, False],
'Type': ['Classes', 'Classes', 'Assets', 'Assets', 'Classes',
'All Assets', 'Each asset in a class'],
'Set': ['Class 1', 'Class 1', '', '', 'Class 2', '', 'Class 1'],
'Position': ['Equity', 'Fixed Income', 'BAC', 'WFC', 'Financial',
'', 'Equity'],
'Sign': ['<=', '<=', '<=', '<=', '>=', '>=', '>='],
'Weight': [0.6, 0.5, 0.1, '', '', 0.02, ''],
'Type Relative': ['', '', '', 'Assets', 'Classes', '', 'Assets'],
'Relative Set': ['', '', '', '', 'Class 1', '', ''],
'Relative': ['', '', '', 'FB', 'Fixed Income', '', 'TLT'],
'Factor': ['', '', '', 1.2, 0.5, '', 0.4]}
constraints = pd.DataFrame(constraints)
    The constraints look like this:
    .. image:: images/Constraints.png
    It is easier to construct the constraints in Excel and then upload them to
    a dataframe.
    To create the matrices A and B we use the following command:
::
A, B = rp.assets_constraints(constraints, asset_classes)
    The matrices A and B look like this (all constraints were converted to
    linear constraints):
.. image:: images/AxB.png
"""
    if not isinstance(constraints, pd.DataFrame) or not isinstance(
        asset_classes, pd.DataFrame
    ):
        raise ValueError("constraints and asset_classes must be DataFrames")
if constraints.shape[1] != 10:
raise ValueError("constraints must have ten columns")
n = len(constraints)
m = len(asset_classes)
data = constraints.fillna("")
data = data.values.tolist()
assetslist = asset_classes.iloc[:, 0].values.tolist()
A = []
B = []
for i in range(0, n):
if data[i][0] == False:
if data[i][1] == "Assets":
item = assetslist.index(data[i][3])
if data[i][4] == ">=":
d = 1
elif data[i][4] == "<=":
d = -1
if data[i][5] != "":
A1 = [0] * m
A1[item] = d
A.append(A1)
B.append([data[i][5] * d])
else:
A1 = [0] * m
A1[item] = 1
if data[i][6] == "Assets":
item2 = assetslist.index(data[i][8])
A2 = [0] * m
A2[item2] = 1
elif data[i][6] == "Classes":
A2 = np.where(
asset_classes[data[i][7]].values == data[i][8], 1, 0
)
A1 = ((np.array(A1) + np.array(A2) * data[i][9] * -1) * d).tolist()
A.append(A1)
B.append([0])
elif data[i][1] == "All Assets":
item = len(assetslist)
if data[i][4] == ">=":
d = 1
elif data[i][4] == "<=":
d = -1
if data[i][5] != "":
A1 = np.identity(item) * d
A1 = A1.tolist()
B1 = np.ones((item, 1)) * d * data[i][5]
                    for j in range(0, item):
                        A.append(A1[j])
                        B.append(B1.tolist()[j])
else:
A1 = np.identity(item)
if data[i][6] == "Assets":
item2 = assetslist.index(data[i][8])
A2 = np.zeros((item, item - 1))
A2 = np.insert(A2, item2 - 1, 1, axis=1)
elif data[i][6] == "Classes":
A1 = np.identity(item)
A2 = np.where(
asset_classes[data[i][7]].values == data[i][8], 1, 0
)
A2 = np.ones((item, item)) * np.array(A2)
A1 = ((np.array(A1) + np.array(A2) * data[i][9] * -1) * d).tolist()
                    for j in range(0, item):
                        A.append(A1[j])
                        B.append([0])
elif data[i][1] == "Classes":
if data[i][4] == ">=":
d = 1
elif data[i][4] == "<=":
d = -1
if data[i][5] != "":
A1 = np.where(asset_classes[data[i][2]].values == data[i][3], 1, 0)
A1 = np.array(A1) * d
A1 = A1.tolist()
A.append(A1)
B.append([data[i][5] * d])
else:
A1 = np.where(asset_classes[data[i][2]].values == data[i][3], 1, 0)
if data[i][6] == "Assets":
item2 = assetslist.index(data[i][8])
A2 = [0] * m
A2[item2] = 1
elif data[i][6] == "Classes":
A2 = np.where(
asset_classes[data[i][7]].values == data[i][8], 1, 0
)
A1 = ((np.array(A1) + np.array(A2) * data[i][9] * -1) * d).tolist()
A.append(A1)
B.append([0])
elif data[i][1] == "Each asset in a class":
if data[i][4] == ">=":
d = 1
elif data[i][4] == "<=":
d = -1
if data[i][5] != "":
A1 = np.where(asset_classes[data[i][2]].values == data[i][3], 1, 0)
l = 0
for k in A1:
if k == 1:
A3 = [0] * m
A3[l] = 1 * d
A.append(A3)
B.append([data[i][5] * d])
l = l + 1
else:
A1 = np.where(asset_classes[data[i][2]].values == data[i][3], 1, 0)
l = 0
for k in A1:
if k == 1:
A3 = [0] * m
A3[l] = 1
if data[i][6] == "Assets":
item2 = assetslist.index(data[i][8])
A2 = [0] * m
A2[item2] = 1
elif data[i][6] == "Classes":
A2 = np.where(
asset_classes[data[i][7]].values == data[i][8], 1, 0
)
A3 = (
(np.array(A3) + np.array(A2) * data[i][9] * -1) * d
).tolist()
A.append(A3)
B.append([0])
l = l + 1
elif data[i][1] == "All Classes":
if data[i][4] == ">=":
d = 1
elif data[i][4] == "<=":
d = -1
if data[i][5] != "":
for k in np.unique(asset_classes[data[i][2]].values):
A1 = np.where(asset_classes[data[i][2]].values == k, 1, 0) * d
A1 = A1.tolist()
A.append(A1)
B.append([data[i][5] * d])
else:
for k in np.unique(asset_classes[data[i][2]].values):
A1 = np.where(asset_classes[data[i][2]].values == k, 1, 0)
if data[i][6] == "Assets":
item2 = assetslist.index(data[i][8])
A2 = [0] * m
A2[item2] = 1
elif data[i][6] == "Classes":
A2 = np.where(
asset_classes[data[i][7]].values == data[i][8], 1, 0
)
A3 = (
(np.array(A1) + np.array(A2) * data[i][9] * -1) * d
).tolist()
A.append(A3)
B.append([0])
A = np.array(A, ndmin=2)
B = np.array(B, ndmin=2)
return A, B
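# Illustrative sketch (not part of the library API): how a single absolute
# constraint row is translated into A and B. Assuming sorted assets
# ['BAC', 'FB', 'TLT'] and the constraint "BAC <= 0.1", the sign '<=' sets
# d = -1, which flips both sides so the row fits the form Aw >= B:
#
#   A_row = [-1, 0, 0]   # -w_BAC >= -0.1  is equivalent to  w_BAC <= 0.1
#   B_row = [-0.1]
#
# A '>=' constraint keeps d = 1 and the row is left unflipped.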
def factors_constraints(constraints, loadings):
r"""
    Create the factor constraint matrices C and D of the constraint
    :math:`Cw \geq D`.
Parameters
----------
constraints : DataFrame of shape (n_constraints, n_fields)
        Constraints matrix, where n_constraints is the number of constraints
        and n_fields is the number of fields of the constraints matrix. The
        fields are:
        - Disabled: (bool) indicates if the constraint is enabled.
- Factor: (str) the name of the factor of the constraint.
- Sign: (str) can be '>=' or '<='.
- Value: (scalar) is the maximum or minimum value of the factor.
loadings : DataFrame of shape (n_assets, n_features)
The loadings matrix.
Returns
-------
C : nd-array
The matrix C of :math:`Cw \geq D`.
D : nd-array
The matrix D of :math:`Cw \geq D`.
Raises
------
ValueError when the value cannot be calculated.
Examples
--------
::
loadings = {'const': [0.0004, 0.0002, 0.0000, 0.0006, 0.0001, 0.0003, -0.0003],
'MTUM': [0.1916, 1.0061, 0.8695, 1.9996, 0.0000, 0.0000, 0.0000],
'QUAL': [0.0000, 2.0129, 1.4301, 0.0000, 0.0000, 0.0000, 0.0000],
'SIZE': [0.0000, 0.0000, 0.0000, 0.4717, 0.0000, -0.1857, 0.0000],
'USMV': [-0.7838, -1.6439, -1.0176, -1.4407, 0.0055, 0.5781, 0.0000],
'VLUE': [1.4772, -0.7590, -0.4090, 0.0000, -0.0054, -0.4844, 0.9435]}
loadings = pd.DataFrame(loadings)
constraints = {'Disabled': [False, False, False],
'Factor': ['MTUM', 'USMV', 'VLUE'],
'Sign': ['<=', '<=', '>='],
'Value': [0.9, -1.2, 0.3],
'Relative Factor': ['USMV', '', '']}
constraints = pd.DataFrame(constraints)
    The constraints look like this:
    .. image:: images/Constraints2.png
    It is easier to construct the constraints in Excel and then upload them to
    a dataframe.
    To create the matrices C and D we use the following command:
::
C, D = rp.factors_constraints(constraints, loadings)
    The matrices C and D look like this (all constraints were converted to
    linear constraints):
.. image:: images/CxD.png
"""
    if not isinstance(constraints, pd.DataFrame) or not isinstance(
        loadings, pd.DataFrame
    ):
        raise ValueError("constraints and loadings must be DataFrames")
if constraints.shape[1] != 5:
raise ValueError("constraints must have five columns")
n = len(constraints)
data = constraints.fillna("")
data = data.values.tolist()
C = []
D = []
for i in range(0, n):
if data[i][0] == False:
if data[i][2] == ">=":
d = 1
elif data[i][2] == "<=":
d = -1
C1 = loadings[data[i][1]].values
if data[i][4] != "":
C2 = loadings[data[i][4]].values
C1 = C1 - C2
C.append(C1 * d)
D.append([data[i][3] * d])
C = np.array(C, ndmin=2)
D = np.array(D, ndmin=2)
return C, D
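# Illustrative sketch (not part of the library API): a factor constraint
# "MTUM <= 0.9" relative to 'USMV' sets d = -1 and produces
#
#   C_row = -(loadings['MTUM'].values - loadings['USMV'].values)
#   D_row = [-0.9]
#
# so the exposure difference satisfies Cw >= D after the sign flip.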
def assets_views(views, asset_classes):
r"""
    Create the asset views matrices P and Q of the views :math:`Pw = Q`.
Parameters
----------
views : DataFrame of shape (n_views, n_fields)
        Views matrix, where n_views is the number of views and n_fields is
        the number of fields of the views matrix. The fields are:
        - Disabled: (bool) indicates if the view is enabled.
- Type: (str) can be: 'Assets' or 'Classes'.
        - Set: (str) if Type is 'Classes', specifies the name of the set of asset classes.
- Position: (str) the name of the asset or asset class of the view.
- Sign: (str) can be '>=' or '<='.
- Return: (scalar) is the return of the view.
- Type Relative: (str) can be: 'Assets' or 'Classes'.
        - Relative Set: (str) if Type Relative is 'Classes', specifies the name of the set of asset classes.
- Relative: (str) the name of the asset or asset class of the relative view.
asset_classes : DataFrame of shape (n_assets, n_cols)
Asset's classes matrix, where n_assets is the number of assets and
n_cols is the number of columns of the matrix where the first column
is the asset list and the next columns are the different asset's
classes sets.
Returns
-------
P : nd-array
The matrix P that shows the relation among assets in each view.
Q : nd-array
The matrix Q that shows the expected return of each view.
Raises
------
ValueError when the value cannot be calculated.
Examples
--------
::
asset_classes = {'Assets': ['FB', 'GOOGL', 'NTFX', 'BAC', 'WFC', 'TLT', 'SHV'],
'Class 1': ['Equity', 'Equity', 'Equity', 'Equity', 'Equity',
'Fixed Income', 'Fixed Income'],
'Class 2': ['Technology', 'Technology', 'Technology',
'Financial', 'Financial', 'Treasury', 'Treasury'],}
asset_classes = pd.DataFrame(asset_classes)
asset_classes = asset_classes.sort_values(by=['Assets'])
views = {'Disabled': [False, False, False, False],
'Type': ['Assets', 'Classes', 'Classes', 'Assets'],
'Set': ['', 'Class 2','Class 1', ''],
'Position': ['WFC', 'Financial', 'Equity', 'FB'],
'Sign': ['<=', '>=', '>=', '>='],
'Return': [ 0.3, 0.1, 0.05, 0.03 ],
'Type Relative': [ 'Assets', 'Classes', 'Assets', ''],
'Relative Set': [ '', 'Class 1', '', ''],
'Relative': ['FB', 'Fixed Income', 'TLT', '']}
views = pd.DataFrame(views)
    The views look like this:
    .. image:: images/Views.png
    It is easier to construct the views in Excel and then upload them to a
    dataframe.
    To create the matrices P and Q we use the following command:
::
P, Q = rp.assets_views(views, asset_classes)
    The matrices P and Q look like this:
.. image:: images/PxQ.png
"""
    if not isinstance(views, pd.DataFrame) or not isinstance(
        asset_classes, pd.DataFrame
    ):
        raise ValueError("views and asset_classes must be DataFrames")
    if views.shape[1] != 9:
        raise ValueError("views must have nine columns")
n = len(views)
m = len(asset_classes)
data = views.fillna("")
data = data.values.tolist()
assetslist = asset_classes.iloc[:, 0].values.tolist()
P = []
Q = []
for i in range(0, n):
valid = False
if data[i][0] == False:
if data[i][1] == "Assets":
item = assetslist.index(data[i][3])
if data[i][4] == ">=":
d = 1
elif data[i][4] == "<=":
d = -1
if data[i][5] != "":
P1 = [0] * m
P1[item] = 1
if data[i][6] == "Assets" and data[i][8] != "":
item2 = assetslist.index(data[i][8])
P2 = [0] * m
P2[item2] = 1
valid = True
elif (
data[i][6] == "Classes"
and data[i][7] != ""
and data[i][8] != ""
):
P2 = np.where(
asset_classes[data[i][7]].values == data[i][8], 1, 0
)
P2 = P2 / np.sum(P2)
valid = True
elif data[i][6] == "" and data[i][7] == "" and data[i][8] == "":
P2 = [0] * m
valid = True
if valid == True:
P1 = ((np.array(P1) - np.array(P2)) * d).tolist()
P.append(P1)
Q.append([data[i][5] * d])
elif data[i][1] == "Classes":
if data[i][4] == ">=":
d = 1
else:
d = -1
if data[i][5] != "":
P1 = np.where(asset_classes[data[i][2]].values == data[i][3], 1, 0)
P1 = P1 / np.sum(P1)
if data[i][6] == "Assets" and data[i][8] != "":
item2 = assetslist.index(data[i][8])
P2 = [0] * m
P2[item2] = 1
valid = True
elif (
data[i][6] == "Classes"
and data[i][7] != ""
and data[i][8] != ""
):
P2 = np.where(
asset_classes[data[i][7]].values == data[i][8], 1, 0
)
P2 = P2 / np.sum(P2)
valid = True
elif data[i][6] == "" and data[i][7] == "" and data[i][8] == "":
P2 = [0] * m
valid = True
if valid == True:
P1 = ((np.array(P1) - np.array(P2)) * d).tolist()
P.append(P1)
Q.append([data[i][5] * d])
P = np.array(P, ndmin=2)
Q = np.array(Q, ndmin=2)
for i in range(len(Q)):
if Q[i, 0] < 0:
P[i, :] = -1 * P[i, :]
Q[i, :] = -1 * Q[i, :]
return P, Q
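# Illustrative sketch (not part of the library API): an absolute view
# "FB >= 0.03" with no relative leg yields a P row with a single 1 in the
# FB column and Q = [0.03]. A view relative to a class spreads the -1 side
# equally over the class members (P2 is normalized by its sum), and rows
# ending with a negative Q are sign-flipped so all view returns are
# non-negative.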
def factors_views(views, loadings, const=True):
r"""
    Create the factor views matrices P and Q of the views :math:`Pw = Q`.
Parameters
----------
    views : DataFrame of shape (n_views, n_fields)
        Views matrix, where n_views is the number of views and n_fields is
        the number of fields of the views matrix. The fields are:
        - Disabled: (bool) indicates if the view is enabled.
        - Factor: (str) the name of the factor of the view.
        - Sign: (str) can be '>=' or '<='.
        - Value: (scalar) is the expected return of the factor view.
        - Relative Factor: (str) the name of the factor of the relative view, if any.
    loadings : DataFrame of shape (n_assets, n_features)
        The loadings matrix.
    const : bool, optional
        Indicates if the first column of the loadings matrix is the constant
        ('const') column, which is skipped. The default is True.
Returns
-------
P : nd-array
The matrix P that shows the relation among factors in each factor view.
Q : nd-array
The matrix Q that shows the expected return of each factor view.
Raises
------
ValueError when the value cannot be calculated.
Examples
--------
::
loadings = {'const': [0.0004, 0.0002, 0.0000, 0.0006, 0.0001, 0.0003, -0.0003],
'MTUM': [0.1916, 1.0061, 0.8695, 1.9996, 0.0000, 0.0000, 0.0000],
'QUAL': [0.0000, 2.0129, 1.4301, 0.0000, 0.0000, 0.0000, 0.0000],
'SIZE': [0.0000, 0.0000, 0.0000, 0.4717, 0.0000, -0.1857, 0.0000],
'USMV': [-0.7838, -1.6439, -1.0176, -1.4407, 0.0055, 0.5781, 0.0000],
'VLUE': [1.4772, -0.7590, -0.4090, 0.0000, -0.0054, -0.4844, 0.9435]}
loadings = pd.DataFrame(loadings)
factorsviews = {'Disabled': [False, False, False],
'Factor': ['MTUM', 'USMV', 'VLUE'],
'Sign': ['<=', '<=', '>='],
'Value': [0.9, -1.2, 0.3],
'Relative Factor': ['USMV', '', '']}
factorsviews = pd.DataFrame(factorsviews)
    The views look like this:
    .. image:: images/factorsviews.png
    It is easier to construct the views in Excel and then upload them to a
    dataframe.
    To create the matrices P and Q we use the following command:
::
P, Q = rp.factors_views(factorsviews, loadings, const=True)
    The matrices P and Q look like this:
.. image:: images/P_fxQ_f.png
"""
    if not isinstance(views, pd.DataFrame) or not isinstance(loadings, pd.DataFrame):
        raise ValueError("views and loadings must be DataFrames")
    if views.shape[1] != 5:
        raise ValueError("views must have five columns")
n = len(views)
data = views.fillna("")
data = data.values.tolist()
factorslist = loadings.columns.tolist()
if const == True:
factorslist = factorslist[1:]
m = len(factorslist)
P = []
Q = []
for i in range(0, n):
if data[i][0] == False:
item = factorslist.index(data[i][1])
if data[i][2] == ">=":
d = 1
elif data[i][2] == "<=":
d = -1
P1 = [0] * m
P1[item] = d
if data[i][4] != "":
item = factorslist.index(data[i][4])
P1[item] = -d
P.append(P1)
Q.append([data[i][3] * d])
P = np.array(P, ndmin=2)
Q = np.array(Q, ndmin=2)
return P, Q
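# Illustrative sketch (not part of the library API): the factor view
# "VLUE >= 0.3" sets d = 1, puts +1 in the VLUE column of P and appends
# Q = [0.3]; a relative factor would get -d in its column. With const=True
# the first ('const') column of the loadings is skipped when mapping
# factor names to columns of P.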
def assets_clusters(
returns,
codependence="pearson",
linkage="ward",
k=None,
max_k=10,
bins_info="KN",
alpha_tail=0.05,
leaf_order=True,
):
r"""
Create asset classes based on hierarchical clustering.
Parameters
----------
returns : DataFrame
Assets returns.
codependence : str, can be {'pearson', 'spearman', 'abs_pearson', 'abs_spearman', 'distance', 'mutual_info' or 'tail'}
The codependence or similarity matrix used to build the distance
metric and clusters. The default is 'pearson'. Possible values are:
- 'pearson': pearson correlation matrix. Distance formula: :math:`D_{i,j} = \sqrt{0.5(1-\rho^{pearson}_{i,j})}`.
- 'spearman': spearman correlation matrix. Distance formula: :math:`D_{i,j} = \sqrt{0.5(1-\rho^{spearman}_{i,j})}`.
- 'abs_pearson': absolute value pearson correlation matrix. Distance formula: :math:`D_{i,j} = \sqrt{(1-|\rho^{pearson}_{i,j}|)}`.
- 'abs_spearman': absolute value spearman correlation matrix. Distance formula: :math:`D_{i,j} = \sqrt{(1-|\rho^{spearman}_{i,j}|)}`.
- 'distance': distance correlation matrix. Distance formula :math:`D_{i,j} = \sqrt{(1-\rho^{distance}_{i,j})}`.
- 'mutual_info': mutual information matrix. Distance used is variation information matrix.
- 'tail': lower tail dependence index matrix. Dissimilarity formula :math:`D_{i,j} = -\log{\lambda_{i,j}}`.
linkage : string, optional
Linkage method of hierarchical clustering, see `linkage <https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html?highlight=linkage#scipy.cluster.hierarchy.linkage>`_ for more details.
The default is 'ward'. Possible values are:
- 'single'.
- 'complete'.
- 'average'.
- 'weighted'.
- 'centroid'.
- 'median'.
- 'ward'.
- 'DBHT'. Direct Bubble Hierarchical Tree.
k : int, optional
        Number of clusters. This value is used instead of the optimal number
        of clusters calculated with the two difference gap statistic.
The default is None.
max_k : int, optional
Max number of clusters used by the two difference gap statistic
to find the optimal number of clusters. The default is 10.
bins_info: int or str
Number of bins used to calculate variation of information. The default
value is 'KN'. Possible values are:
- 'KN': Knuth's choice method. See more in `knuth_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.knuth_bin_width.html>`_.
- 'FD': Freedman–Diaconis' choice method. See more in `freedman_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.freedman_bin_width.html>`_.
        - 'SC': Scott's choice method. See more in `scott_bin_width <https://docs.astropy.org/en/stable/api/astropy.stats.scott_bin_width.html>`_.
        - 'HGR': Hacine-Gharbi and Ravier's choice method.
        - int: an integer value chosen by the user.
alpha_tail : float, optional
Significance level for lower tail dependence index. The default is 0.05.
leaf_order : bool, optional
        Indicates if the clusters are ordered so that the distance between
successive leaves is minimal. The default is True.
Returns
-------
clusters : DataFrame
A dataframe with asset classes based on hierarchical clustering.
Raises
------
ValueError when the value cannot be calculated.
Examples
--------
::
clusters = rp.assets_clusters(returns, codependence='pearson',
linkage='ward', k=None, max_k=10,
alpha_tail=0.05, leaf_order=True)
The clusters dataframe looks like this:
.. image:: images/clusters_df.png
"""
if not isinstance(returns, pd.DataFrame):
raise ValueError("returns must be a DataFrame")
# Calculating codependence matrix and distance metric
if codependence in {"pearson", "spearman"}:
codep = returns.corr(method=codependence)
dist = np.sqrt(np.clip((1 - codep) / 2, a_min=0.0, a_max=1.0))
elif codependence in {"abs_pearson", "abs_spearman"}:
codep = np.abs(returns.corr(method=codependence[4:]))
dist = np.sqrt(np.clip((1 - codep), a_min=0.0, a_max=1.0))
elif codependence in {"distance"}:
codep = af.dcorr_matrix(returns).astype(float)
dist = np.sqrt(np.clip((1 - codep), a_min=0.0, a_max=1.0))
elif codependence in {"mutual_info"}:
codep = af.mutual_info_matrix(returns, bins_info).astype(float)
dist = af.var_info_matrix(returns, bins_info).astype(float)
elif codependence in {"tail"}:
codep = af.ltdi_matrix(returns, alpha_tail).astype(float)
dist = -np.log(codep)
# Hierarchical clustering
dist = dist.to_numpy()
dist = pd.DataFrame(dist, columns=codep.columns, index=codep.index)
if linkage == "DBHT":
# different choices for D, S give different outputs!
D = dist.to_numpy() # dissimilarity matrix
if codependence in {"pearson", "spearman"}:
S = (1 - dist**2).to_numpy()
else:
S = codep.copy().to_numpy() # similarity matrix
(_, _, _, _, _, clustering) = db.DBHTs(
D, S, leaf_order=leaf_order
) # DBHT clustering
else:
p_dist = squareform(dist, checks=False)
clustering = hr.linkage(p_dist, method=linkage, optimal_ordering=leaf_order)
# Optimal number of clusters
if k is None:
k = af.two_diff_gap_stat(codep, dist, clustering, max_k)
# Building clusters
clusters_inds = hr.fcluster(clustering, k, criterion="maxclust")
labels = np.array(returns.columns.tolist())
clusters = {"Assets": [], "Clusters": []}
for i, v in enumerate(clusters_inds):
clusters["Assets"].append(labels[i])
clusters["Clusters"].append("Cluster " + str(v))
clusters = pd.DataFrame(clusters)
clusters = clusters.sort_values(by=["Assets"])
return clusters
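# Minimal usage sketch (assumes a returns DataFrame named `returns` with
# one column per asset; not part of the library API):
#
#   clusters = assets_clusters(returns, codependence="pearson",
#                              linkage="ward", max_k=10)
#
# The result maps each asset to a label like 'Cluster 1' and can be used
# as an extra classes column for assets_constraints.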
def hrp_constraints(constraints, asset_classes):
r"""
Create the upper and lower bounds constraints for hierarchical risk parity
model.
Parameters
----------
constraints : DataFrame of shape (n_constraints, n_fields)
        Constraints matrix, where n_constraints is the number of constraints
        and n_fields is the number of fields of the constraints matrix. The
        fields are:
        - Disabled: (bool) indicates if the constraint is enabled.
        - Type: (str) can be: 'Assets', 'All Assets' and 'Each asset in a class'.
        - Set: (str) if Type is 'Each asset in a class', specifies the name of the set of asset classes.
- Position: (str) the name of the asset or asset class of the constraint.
- Sign: (str) can be '>=' or '<='.
- Weight: (scalar) is the maximum or minimum weight of the absolute constraint.
asset_classes : DataFrame of shape (n_assets, n_cols)
Asset's classes matrix, where n_assets is the number of assets and
n_cols is the number of columns of the matrix where the first column
is the asset list and the next columns are the different asset's
classes sets.
Returns
-------
w_max : pd.Series
The upper bound of hierarchical risk parity weights constraints.
w_min : pd.Series
The lower bound of hierarchical risk parity weights constraints.
Raises
------
ValueError when the value cannot be calculated.
Examples
--------
::
asset_classes = {'Assets': ['FB', 'GOOGL', 'NTFX', 'BAC', 'WFC', 'TLT', 'SHV'],
'Class 1': ['Equity', 'Equity', 'Equity', 'Equity', 'Equity',
'Fixed Income', 'Fixed Income'],
'Class 2': ['Technology', 'Technology', 'Technology',
'Financial', 'Financial', 'Treasury', 'Treasury'],}
asset_classes = pd.DataFrame(asset_classes)
asset_classes = asset_classes.sort_values(by=['Assets'])
constraints = {'Disabled': [False, False, False, False, False, False],
'Type': ['Assets', 'Assets', 'All Assets', 'All Assets',
'Each asset in a class', 'Each asset in a class'],
'Set': ['', '', '', '','Class 1', 'Class 2'],
'Position': ['BAC', 'FB', '', '', 'Equity', 'Treasury'],
'Sign': ['>=', '<=', '<=', '>=', '<=', '<='],
'Weight': [0.02, 0.085, 0.09, 0.01, 0.07, 0.06]}
constraints = pd.DataFrame(constraints)
    The constraints look like this:
    .. image:: images/HRPConstraints.png
    It is easier to construct the constraints in Excel and then upload them to
    a dataframe.
To create the pd.Series w_max and w_min we use the following command:
::
w_max, w_min = rp.hrp_constraints(constraints, asset_classes)
    The pd.Series w_max and w_min look like this (all constraints were
    merged into a single bound for each asset):
.. image:: images/HRP_Bounds.png
"""
    if not isinstance(constraints, pd.DataFrame) or not isinstance(
        asset_classes, pd.DataFrame
    ):
        raise ValueError("constraints and asset_classes must be DataFrames")
if constraints.shape[1] != 6:
raise ValueError("constraints must have six columns")
n = len(constraints)
data = constraints.fillna("").copy()
assetslist = asset_classes.iloc[:, 0].values.tolist()
w_max = pd.Series(1, index=assetslist)
w_min = pd.Series(0, index=assetslist)
for i in range(0, n):
if data.loc[i, "Disabled"] == False:
if data.loc[i, "Type"] == "Assets":
assets = data.loc[i, "Position"]
if data.loc[i, "Sign"] == ">=":
if w_min.loc[assets] <= data.loc[i, "Weight"]:
w_min.loc[assets] = data.loc[i, "Weight"]
elif data.loc[i, "Sign"] == "<=":
if w_max.loc[assets] >= data.loc[i, "Weight"]:
w_max.loc[assets] = data.loc[i, "Weight"]
elif data.loc[i, "Type"] == "All Assets":
if data.loc[i, "Sign"] == ">=":
if w_min[w_min <= data.loc[i, "Weight"]].shape[0] != 0:
w_min[w_min <= data.loc[i, "Weight"]] = data.loc[i, "Weight"]
elif data.loc[i, "Sign"] == "<=":
if w_max[w_max >= data.loc[i, "Weight"]].shape[0] != 0:
w_max[w_max >= data.loc[i, "Weight"]] = data.loc[i, "Weight"]
elif data.loc[i, "Type"] == "Each asset in a class":
label_0 = asset_classes.columns.tolist()[0]
label_1 = data.loc[i, "Set"]
label_2 = data.loc[i, "Position"]
assets = asset_classes[[label_0, label_1]][
asset_classes[label_1] == label_2
]
assets = assets["Assets"].tolist()
if data.loc[i, "Sign"] == ">=":
if (
w_min.loc[assets][
w_min.loc[assets] <= data.loc[i, "Weight"]
].shape[0]
!= 0
):
w_min.loc[assets] = np.where(
w_min.loc[assets] <= data.loc[i, "Weight"],
data.loc[i, "Weight"],
w_min.loc[assets],
)
elif data.loc[i, "Sign"] == "<=":
if (
w_max.loc[assets][
w_max.loc[assets] >= data.loc[i, "Weight"]
].shape[0]
!= 0
):
w_max.loc[assets] = np.where(
w_max.loc[assets] >= data.loc[i, "Weight"],
data.loc[i, "Weight"],
w_max.loc[assets],
)
return w_max, w_min
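# Illustrative sketch (not part of the library API): the bounds start at
# w_min = 0 and w_max = 1 per asset and are only ever tightened. For
# example, 'All Assets' <= 0.09 caps every w_max at 0.09, and a later
# 'Assets' constraint "FB <= 0.085" lowers only FB's cap further.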
def risk_constraint(asset_classes, kind="vanilla", classes_col=None):
r"""
Create the risk contribution constraint vector for the risk parity model.
Parameters
----------
asset_classes : DataFrame of shape (n_assets, n_cols)
Asset's classes matrix, where n_assets is the number of assets and
n_cols is the number of columns of the matrix where the first column
is the asset list and the next columns are the different asset's
        classes sets. The classes sets are only used when kind is 'classes'.
kind : str
Kind of risk contribution constraint vector. The default value is 'vanilla'.
Possible values are:
- 'vanilla': vector of equal risk contribution per asset.
- 'classes': vector of equal risk contribution per class.
classes_col : str or int
If value is str, it is the column name of the set of classes from
asset_classes dataframe. If value is int, it is the column number of
the set of classes from asset_classes dataframe. The default
value is None.
Returns
-------
rb : nd-array
The risk contribution constraint vector.
Raises
------
ValueError when the value cannot be calculated.
Examples
--------
::
asset_classes = {'Assets': ['FB', 'GOOGL', 'NTFX', 'BAC', 'WFC', 'TLT', 'SHV'],
'Class 1': ['Equity', 'Equity', 'Equity', 'Equity', 'Equity',
'Fixed Income', 'Fixed Income'],
'Class 2': ['Technology', 'Technology', 'Technology',
'Financial', 'Financial', 'Treasury', 'Treasury'],}
asset_classes = pd.DataFrame(asset_classes)
asset_classes = asset_classes.sort_values(by=['Assets'])
asset_classes.reset_index(inplace=True, drop=True)
        rb = rp.risk_constraint(asset_classes,
kind='classes',
classes_col='Class 1')
"""
if not isinstance(asset_classes, pd.DataFrame):
raise ValueError("asset_classes must be a DataFrame")
if kind == "vanilla":
if asset_classes.shape[1] < 1:
raise ValueError("asset_classes must have at least one column")
assetslist = asset_classes.iloc[:, 0].values.tolist()
rb = np.ones((len(assetslist), 1))
rb /= len(assetslist)
elif kind == "classes":
if asset_classes.shape[1] < 2:
raise ValueError("asset_classes must have at least two columns")
classes = asset_classes.columns.tolist()
if isinstance(classes_col, str) and classes_col in classes:
A = asset_classes.loc[:, classes_col].to_frame()
col = A.columns.to_list()[0]
elif isinstance(classes_col, int) and classes[classes_col] in classes:
A = asset_classes.iloc[:, classes_col].to_frame()
col = A.columns.to_list()[0]
else:
raise ValueError(
"classes_col must be a valid column or column position of asset_classes"
)
A["rb"] = 1
B = A.groupby([col]).count()
A = pd.merge(A, B, left_on=col, right_index=True, how="left")
A["rb"] = A["rb_x"] / A["rb_y"]
A["rb"] /= A["rb"].sum()
rb = A["rb"].to_numpy().reshape(-1, 1)
else:
raise ValueError(
"The only available values for kind parameter are 'vanilla' and 'classes'"
)
return rb
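# Illustrative sketch (not part of the library API): with five 'Equity'
# and two 'Fixed Income' assets and kind='classes' on 'Class 1', each
# class receives half of the risk budget, split evenly within the class:
#
#   Equity assets:       rb_i = (1 / 5) / 2 = 0.1
#   Fixed Income assets: rb_i = (1 / 2) / 2 = 0.25
#
# so rb sums to 1 with equal total risk contribution per class.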
|
103e0038812f9485443711e0fc6575d14b42c39e
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/govern/data-meta/amundsen/common/amundsen_common/models/tag.py
|
ccda2250d3150284eebeb410abb3e28fc6b81f06
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
tag.py
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import attr
from marshmallow3_annotations.ext.attrs import AttrsSchema
@attr.s(auto_attribs=True, kw_only=True)
class Tag:
tag_type: str
tag_name: str
class TagSchema(AttrsSchema):
class Meta:
target = Tag
register_as_scheme = True
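# Minimal usage sketch (an assumption based on marshmallow semantics, not
# part of this module): once registered, the schema can round-trip a Tag:
#
#   schema = TagSchema()
#   data = schema.dump(Tag(tag_type='default', tag_name='pii'))
#   tag = schema.load(data)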
|
30b907c0827331345e486954636c95623cda06ec
|
73a0f661f1423d63e86489d4b2673f0103698aab
|
/python/oneflow/framework/scope_util.py
|
6f62c63a540e9977401a434d6e35950a45c5357b
|
[
"Apache-2.0"
] |
permissive
|
Oneflow-Inc/oneflow
|
4fc3e081e45db0242a465c4330d8bcc8b21ee924
|
0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
|
refs/heads/master
| 2023-08-25T16:58:30.576596
| 2023-08-22T14:15:46
| 2023-08-22T14:15:46
| 81,634,683
| 5,495
| 786
|
Apache-2.0
| 2023-09-14T09:44:31
| 2017-02-11T06:09:53
|
C++
|
UTF-8
|
Python
| false
| false
| 2,668
|
py
|
scope_util.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import traceback
from contextlib import contextmanager
from google.protobuf import text_format
import oneflow._oneflow_internal
import oneflow.core.job.scope_pb2 as scope_pb2_util
import oneflow.framework.attr_util as attr_util
import oneflow.framework.session_context as session_ctx
from oneflow import oneflow_deprecate
def api_scope_config(**kwargs):
name2default = session_ctx.GetDefaultSession().scope_attr_name2default_val
def SetScopeProtoStr(serialized_scope_proto: str):
scope_proto = text_format.Parse(
serialized_scope_proto, scope_pb2_util.ScopeProto()
)
for (attr_name, py_value) in kwargs.items():
assert attr_name in name2default
attr_util.SetProtoAttrValue(
scope_proto.attr_name2attr_value[attr_name],
py_value,
name2default[attr_name],
)
return str(text_format.MessageToString(scope_proto))
sess = session_ctx.GetDefaultSession()
scope = MakeScope(
lambda old_scope, builder: builder.BuildScopeByProtoStrSetter(
old_scope, SetScopeProtoStr
)
)
return ScopeContext(scope)
def current_scope():
""" Return current scope
"""
return oneflow._oneflow_internal.GetCurrentScope()
def MakeScope(build_func):
scope = None
old_scope = oneflow._oneflow_internal.GetCurrentScope()
assert old_scope is not None
def BuildScope(builder):
nonlocal scope
scope = build_func(old_scope, builder)
assert scope is not None
oneflow._oneflow_internal.deprecated.PhysicalRun(BuildScope)
return scope
@contextmanager
def ScopeContext(scope):
old_scope = oneflow._oneflow_internal.GetCurrentScope()
oneflow._oneflow_internal.GlobalScopeStackPush(scope)
try:
yield
finally:
assert oneflow._oneflow_internal.GetCurrentScope() is scope
oneflow._oneflow_internal.GlobalScopeStackPop()
assert oneflow._oneflow_internal.GetCurrentScope() is old_scope
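# Minimal usage sketch (the attribute name below is hypothetical; valid
# names come from the session's scope_attr_name2default_val mapping):
#
#   with api_scope_config(some_attr_name=42):
#       scope = current_scope()  # carries the overridden attribute value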
|
7af89ccee0b831483ab1cee7d1a44ae947199c19
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/RecoMuon/TrackerSeedGenerator/python/TSGFromL1_cfi.py
|
c1c9058019add2ddf55fd72d8c6089a823fd1c9f
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,217
|
py
|
TSGFromL1_cfi.py
|
import FWCore.ParameterSet.Config as cms
hltL3TrajectorySeedFromL1 = cms.EDProducer("TSGFromL1Muon",
Filter = cms.InputTag("pixelTrackFilterByKinematicsForTSGFromL1"),
FitterPSet = cms.PSet(
cotThetaErrorScale = cms.double(1.0),
tipErrorScale = cms.double(1.0),
ComponentName = cms.string('L1MuonPixelTrackFitter'),
invPtErrorScale = cms.double(1.0),
phiErrorScale = cms.double(1.0),
zipErrorScale = cms.double(1.0)
),
RegionFactoryPSet = cms.PSet(
ComponentName = cms.string('L1MuonRegionProducer'),
RegionPSet = cms.PSet(
originHalfLength = cms.double(15.9),
originRadius = cms.double(0.1),
originYPos = cms.double(0.0),
ptMin = cms.double(10.0),
originXPos = cms.double(0.0),
originZPos = cms.double(0.0)
)
),
L1MuonLabel = cms.InputTag("hltL1extraParticles"),
CleanerPSet = cms.PSet(
diffRelPtCut = cms.double(0.2),
deltaEtaCut = cms.double(0.01)
),
OrderedHitsFactoryPSet = cms.PSet(
ComponentName = cms.string('StandardHitPairGenerator'),
SeedingLayers = cms.InputTag('PixelLayerPairs')
)
)
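# Usage sketch (assumed, following the usual CMSSW cfi pattern): the
# producer defined above is typically imported into a process and
# scheduled on a path, e.g.
#
#   process.load("RecoMuon.TrackerSeedGenerator.TSGFromL1_cfi")
#   process.seeding = cms.Path(process.hltL3TrajectorySeedFromL1)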
|
74e46393760483a8e8353e63b8563d11d5aa32bb
|
308f5596f1c7d382520cfce13ceaa5dff6f4f783
|
/third-party/thrift/src/thrift/lib/py3/test/client_server.py
|
d5de6806251c623f15081e99c25797e1df85ba22
|
[
"Apache-2.0",
"PHP-3.01",
"Zend-2.0",
"MIT"
] |
permissive
|
facebook/hhvm
|
7e200a309a1cad5304621b0516f781c689d07a13
|
d8203129dc7e7bf8639a2b99db596baad3d56b46
|
refs/heads/master
| 2023-09-04T04:44:12.892628
| 2023-09-04T00:43:05
| 2023-09-04T00:43:05
| 455,600
| 10,335
| 2,326
|
NOASSERTION
| 2023-09-14T21:24:04
| 2010-01-02T01:17:06
|
C++
|
UTF-8
|
Python
| false
| false
| 19,933
|
py
|
client_server.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import socket
import sys
import tempfile
import time
import unittest
from pathlib import Path
from typing import Optional, Sequence
from derived.clients import DerivedTestingService
from derived.services import DerivedTestingServiceInterface
from folly.iobuf import IOBuf
from stack_args.clients import StackService
from stack_args.services import StackServiceInterface
from stack_args.types import simple
from testing.clients import ClientMetadataTestingService, TestingService
from testing.services import (
ClientMetadataTestingServiceInterface,
TestingServiceInterface,
)
from testing.types import Color, easy, HardError
from thrift.py3.client import ClientType, get_client
from thrift.py3.common import Priority, Protocol, RpcOptions
from thrift.py3.exceptions import ApplicationError
from thrift.py3.server import get_context, ServiceInterface, SocketAddress, ThriftServer
from thrift.py3.test.cpp_handler import CppHandler
class Handler(TestingServiceInterface):
async def invert(self, value: bool) -> bool:
ctx = get_context()
if "from client" in ctx.read_headers:
ctx.set_header("from server", "with love")
return not value
async def getName(self) -> str:
ctx = get_context()
ctx.set_header("contextvar", "true")
return "Testing"
async def getMethodName(self) -> str:
ctx = get_context()
return ctx.method_name
async def getRequestId(self) -> str:
ctx = get_context()
return ctx.request_id
async def getRequestTimeout(self) -> float:
ctx = get_context()
return ctx.request_timeout
async def getPriority(self) -> int:
ctx = get_context()
return ctx.priority.value
async def shutdown(self) -> None:
pass
async def complex_action(
self, first: str, second: str, third: int, fourth: str
) -> int:
return third
async def takes_a_list(self, ints: Sequence[int]) -> None:
pass
async def take_it_easy(self, how: int, what: easy) -> None:
pass
async def pick_a_color(self, color: Color) -> None:
pass
async def int_sizes(self, one: int, two: int, three: int, four: int) -> None:
pass
async def hard_error(self, valid: bool) -> None:
pass
async def renamed_func(self, ret: bool) -> bool:
return ret
class DerivedHandler(Handler, DerivedTestingServiceInterface):
async def getName(self) -> str:
return "DerivedTesting"
async def derived_pick_a_color(self, color: Color) -> Color:
return color
# pyre-fixme[13]: Attribute `serve_task` is never initialized.
class TestServer:
server: ThriftServer
serve_task: asyncio.Task
def __init__(
self,
ip: Optional[str] = None,
path: Optional["Path"] = None,
handler: ServiceInterface = Handler(), # noqa: B008
) -> None:
self.server = ThriftServer(handler, ip=ip, path=path)
async def __aenter__(self) -> SocketAddress:
self.serve_task = asyncio.get_event_loop().create_task(self.server.serve())
return await self.server.get_address()
# pyre-fixme[2]: Parameter must be annotated.
async def __aexit__(self, *exc_info) -> None:
self.server.stop()
await self.serve_task
class ClientServerTests(unittest.TestCase):
"""
These are tests where a client and server talk to each other
"""
# pyre-fixme[56]: Argument `sys.version_info[slice(None, 2, None)] < (3, 7)` to
# decorator factory `unittest.skipIf` could not be resolved in a global scope.
@unittest.skipIf(sys.version_info[:2] < (3, 7), "Requires py3.7")
async def test_get_context(self) -> None:
async with TestServer(ip="::1") as sa:
ip, port = sa.ip, sa.port
assert ip and port
async with get_client(TestingService, host=ip, port=port) as client:
options = RpcOptions()
options.timeout = 100.0
self.assertEqual("Testing", await client.getName(rpc_options=options))
self.assertEqual("true", options.read_headers["contextvar"])
self.assertEqual(
"getMethodName",
await client.getMethodName(),
)
# requestId is a 16 char wide hex string
self.assertEqual(
len(await client.getRequestId()),
16,
)
self.assertEqual(
100.0,
await client.getRequestTimeout(rpc_options=options),
)
handler = Handler() # so we can call it outside the thrift server
with self.assertRaises(LookupError):
await handler.getName()
async def test_rpc_headers(self) -> None:
async with TestServer(ip="::1") as sa:
ip, port = sa.ip, sa.port
assert ip and port
async with get_client(TestingService, host=ip, port=port) as client:
options = RpcOptions()
options.set_header("from client", "with love")
self.assertFalse(await client.invert(True, rpc_options=options))
self.assertIn("from server", options.read_headers)
async def test_client_resolve(self) -> None:
hostname = socket.gethostname()
async with TestServer() as sa:
port = sa.port
assert port
async with get_client(TestingService, host=hostname, port=port) as client:
self.assertTrue(await client.invert(False))
self.assertFalse(await client.invert(True))
async def test_unframed_binary(self) -> None:
async with TestServer(ip="::1") as sa:
ip, port = sa.ip, sa.port
assert ip and port
async with get_client(
TestingService,
host=ip,
port=port,
client_type=ClientType.THRIFT_UNFRAMED_DEPRECATED,
protocol=Protocol.BINARY,
) as client:
self.assertTrue(await client.invert(False))
self.assertFalse(await client.invert(True))
async def test_framed_deprecated(self) -> None:
async with TestServer(ip="::1") as sa:
ip, port = sa.ip, sa.port
assert ip and port
async with get_client(
TestingService,
host=ip,
port=port,
client_type=ClientType.THRIFT_FRAMED_DEPRECATED,
) as client:
self.assertTrue(await client.invert(False))
self.assertFalse(await client.invert(True))
async def test_framed_compact(self) -> None:
async with TestServer(ip="::1") as sa:
ip, port = sa.ip, sa.port
assert ip and port
async with get_client(
TestingService,
host=ip,
port=port,
client_type=ClientType.THRIFT_FRAMED_COMPACT,
) as client:
self.assertTrue(await client.invert(False))
self.assertFalse(await client.invert(True))
async def test_server_localhost(self) -> None:
async with TestServer(ip="::1") as sa:
ip, port = sa.ip, sa.port
assert ip and port
async with get_client(TestingService, host=ip, port=port) as client:
self.assertTrue(await client.invert(False))
self.assertFalse(await client.invert(True))
async def test_unix_socket(self) -> None:
with tempfile.TemporaryDirectory() as tdir:
async with TestServer(path=Path(tdir) / "tserver.sock") as sa:
assert sa.path
async with get_client(TestingService, path=sa.path) as client:
self.assertTrue(await client.invert(False))
self.assertFalse(await client.invert(True))
async def test_no_client_aexit(self) -> None:
async with TestServer() as sa:
ip, port = sa.ip, sa.port
assert ip and port
client = get_client(TestingService, host=ip, port=port)
await client.__aenter__()
self.assertTrue(await client.invert(False))
self.assertFalse(await client.invert(True))
# If we do not abort here then good
async def test_client_aexit_no_await(self) -> None:
"""
        This handles the case where __aexit__ is not awaited
"""
async with TestServer() as sa:
ip, port = sa.ip, sa.port
assert ip and port
client = get_client(TestingService, host=ip, port=port)
await client.__aenter__()
self.assertTrue(await client.invert(False))
self.assertFalse(await client.invert(True))
# pyre-fixme[1001]: `client.__aexit__(None, None, None)` is never
# awaited.
client.__aexit__(None, None, None)
del client # If we do not abort here then good
async def test_no_client_no_aenter(self) -> None:
"""
        This covers the case where __aenter__ was cancelled, since the two cases are effectively the same
"""
async with TestServer() as sa:
ip, port = sa.ip, sa.port
assert ip and port
get_client(TestingService, host=ip, port=port)
# If we do not abort here then good
async def test_derived_service(self) -> None:
"""
This tests calling methods from a derived service
"""
async with TestServer(handler=DerivedHandler()) as sa:
ip, port = sa.ip, sa.port
assert ip and port
async with get_client(DerivedTestingService, host=ip, port=port) as client:
self.assertEqual(await client.getName(), "DerivedTesting")
self.assertEqual(
await client.derived_pick_a_color(Color.red), Color.red
)
async def test_non_utf8_exception_message(self) -> None:
async with TestServer(handler=CppHandler()) as sa:
ip, port = sa.ip, sa.port
assert ip and port
async with get_client(TestingService, host=ip, port=port) as client:
with self.assertRaises(HardError):
await client.hard_error(True)
with self.assertRaises(UnicodeDecodeError):
await client.hard_error(False)
async def test_renamed_func(self) -> None:
async with TestServer(ip="::1") as sa:
ip, port = sa.ip, sa.port
assert ip and port
async with get_client(TestingService, host=ip, port=port) as client:
self.assertEqual(True, await client.renamed_func(True))
async def test_queue_timeout(self) -> None:
"""
This tests whether queue timeout functions properly.
"""
class SlowDerivedHandler(Handler, DerivedTestingServiceInterface):
async def getName(self) -> str:
time.sleep(1)
return "SlowDerivedTesting"
async def derived_pick_a_color(self, color: Color) -> Color:
return color
testing = TestServer(handler=SlowDerivedHandler())
testing.server.set_queue_timeout(0.01)
loop = asyncio.get_event_loop()
async def client_call(sa: SocketAddress) -> str:
ip, port = sa.ip, sa.port
assert ip and port
async with get_client(DerivedTestingService, host=ip, port=port) as client:
try:
return await client.getName()
except ApplicationError as err:
if "Queue Timeout" in str(err):
return "Queue Timeout"
else:
return ""
async def clients_run(server: TestServer) -> None:
async with server as sa:
results = await asyncio.gather(
client_call(sa),
client_call(sa),
client_call(sa),
client_call(sa),
client_call(sa),
)
self.assertIn("Queue Timeout", results)
await clients_run(testing)
async def test_cancelled_task(self) -> None:
"""
This tests whether cancelled tasks are handled properly.
"""
cancelledMessage: str = "I have been cancelled"
class CancelHandler(Handler):
async def getName(self) -> str:
raise asyncio.CancelledError(
cancelledMessage
) # Pretend that this is some await call that gets cancelled
async with TestServer(handler=CancelHandler(), ip="::1") as sa:
ip, port = sa.ip, sa.port
assert ip and port
async with get_client(TestingService, host=ip, port=port) as client:
with self.assertRaises(ApplicationError) as ex:
await client.getName()
self.assertEqual(
ex.exception.message,
f"Application was cancelled on the server with message: {cancelledMessage}",
)
async def test_request_with_default_rpc_options(self) -> None:
async with TestServer(ip="::1") as sa:
ip, port = sa.ip, sa.port
assert ip and port
async with get_client(TestingService, host=ip, port=port) as client:
timeout = await client.getRequestTimeout()
self.assertEqual(timeout, 0.0)
priority = await client.getPriority()
self.assertEqual(Priority(priority), Priority.N_PRIORITIES)
async def test_request_with_specified_rpc_options(self) -> None:
async with TestServer(ip="::1") as sa:
ip, port = sa.ip, sa.port
assert ip and port
async with get_client(TestingService, host=ip, port=port) as client:
options = RpcOptions()
options.timeout = 15.0
options.priority = Priority.BEST_EFFORT
timeout = await client.getRequestTimeout(rpc_options=options)
self.assertEqual(timeout, 15.0)
priority = await client.getPriority(rpc_options=options)
self.assertEqual(Priority(priority), Priority.BEST_EFFORT)
class StackHandler(StackServiceInterface):
async def add_to(self, lst: Sequence[int], value: int) -> Sequence[int]:
return [x + value for x in lst]
async def get_simple(self) -> simple:
return simple(val=66)
async def get_simple_no_sa(self) -> simple:
return simple(val=88)
async def take_simple(self, smpl: simple) -> None:
if smpl.val != 10:
raise Exception("WRONG")
async def get_iobuf(self) -> IOBuf:
return IOBuf(b"abc")
async def take_iobuf(self, val: IOBuf) -> None:
if bytes(val) != b"cba":
raise Exception("WRONG")
# currently unsupported by cpp backend:
# async def get_iobuf_ptr(self) -> IOBuf:
# return IOBuf(b'xyz')
async def take_iobuf_ptr(self, val: IOBuf) -> None:
if bytes(val) != b"zyx":
raise Exception("WRONG")
class ClientStackServerTests(unittest.TestCase):
"""
These are tests where a client and server(stack_arguments) talk to each other
"""
async def test_server_localhost(self) -> None:
async with TestServer(handler=StackHandler(), ip="::1") as sa:
ip, port = sa.ip, sa.port
assert ip and port
async with get_client(StackService, host=ip, port=port) as client:
self.assertEqual(
(3, 4, 5, 6), await client.add_to(lst=(1, 2, 3, 4), value=2)
)
self.assertEqual(66, (await client.get_simple()).val)
self.assertEqual((await client.get_simple_no_sa()).val, 88)
await client.take_simple(simple(val=10))
self.assertEqual(b"abc", bytes(await client.get_iobuf()))
await client.take_iobuf(IOBuf(b"cba"))
# currently unsupported by cpp backend:
# self.assertEqual(b'xyz', (await client.get_iobuf_ptr()))
await client.take_iobuf_ptr(IOBuf(b"zyx"))
class ClientMetadataTestingServiceHandler(ClientMetadataTestingServiceInterface):
async def getAgent(self) -> str:
requestContext = get_context()
connectionContext = requestContext.connection_context
clientMetadata = connectionContext.client_metadata
return clientMetadata.agent
async def getHostname(self) -> str:
requestContext = get_context()
connectionContext = requestContext.connection_context
clientMetadata = connectionContext.client_metadata
return clientMetadata.hostname
async def getMetadaField(self, key: str) -> str:
requestContext = get_context()
connectionContext = requestContext.connection_context
clientMetadata = connectionContext.client_metadata
return clientMetadata.getMetadataField(key)
class ClientMetadataTestingServiceTests(unittest.TestCase):
async def test_client_metadata(self) -> None:
hostname: str = socket.gethostname()
async with TestServer(
handler=ClientMetadataTestingServiceHandler(), ip="::1"
) as sa:
ip, port = sa.ip, sa.port
assert ip and port
async with get_client(
ClientMetadataTestingService, host=ip, port=port
) as client:
agent = await client.getAgent()
self.assertEqual(agent, "HeaderClientChannel.cpp")
self.assertEqual(await client.getHostname(), hostname)
# Test env returns empty metadata fields dict
cluster = await client.getMetadaField("tw_cluster")
self.assertEqual(cluster, "")
user = await client.getMetadaField("tw_user")
self.assertEqual(user, "")
job = await client.getMetadaField("tw_job")
self.assertEqual(job, "")
task = await client.getMetadaField("tw_task")
self.assertEqual(task, "")
                # In case anything changes and the test env gets a non-empty
                # metadata field dictionary, the expected value would be
                # f"{cluster}/{user}/{job}/{task}"
async def test_call_get_metadata_field_with_invalid_key_should_return_empty_field(
self,
) -> None:
async with TestServer(
handler=ClientMetadataTestingServiceHandler(), ip="::1"
) as sa:
ip, port = sa.ip, sa.port
assert ip and port
async with get_client(
ClientMetadataTestingService, host=ip, port=port
) as client:
cluster = await client.getMetadaField("invalid_cluster_key")
self.assertEqual(cluster, "")
                # In case anything changes and the test env gets a non-empty
                # metadata field dictionary, the expected value would be
                # f"{cluster}/{user}/{job}/{task}"
|
9b64f72b6878df937bc3d7666518f7d6442ab0c9
|
783bcccb13591e80b439e29782ecb977ae67c1f1
|
/binderhub/tests/test_build.py
|
7f7a795ea31a8ad4fe37331d8a5fc7050c3f6e26
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
jupyterhub/binderhub
|
3ff86c5e896e68c17c6fd000f1426837237cb186
|
84b1db907335d5e0307222fbfcb6de77a98db8a2
|
refs/heads/main
| 2023-08-29T14:10:35.419954
| 2023-08-02T08:28:21
| 2023-08-02T08:28:21
| 89,419,368
| 2,422
| 420
|
BSD-3-Clause
| 2023-09-05T09:08:40
| 2017-04-26T00:28:26
|
Python
|
UTF-8
|
Python
| false
| false
| 12,924
|
py
|
test_build.py
|
"""Test building repos"""
import json
import sys
from time import monotonic
from unittest import mock
from urllib.parse import quote
from uuid import uuid4
import docker
import pytest
from kubernetes import client
from tornado.httputil import url_concat
from tornado.queues import Queue
from binderhub.build import KubernetesBuildExecutor, ProgressEvent
from binderhub.build_local import LocalRepo2dockerBuild, ProcessTerminated, _execute_cmd
from .utils import async_requests
# We have optimized this slow test, for more information, see the README of
# https://github.com/binderhub-ci-repos/minimal-dockerfile.
@pytest.mark.asyncio(timeout=900)
@pytest.mark.parametrize(
"slug",
[
# git/ Git repo provider
"git/{}/HEAD".format(
quote(
"https://github.com/binderhub-ci-repos/cached-minimal-dockerfile",
safe="",
)
),
"git/{}/596b52f10efb0c9befc0c4ae850cc5175297d71c".format(
quote(
"https://github.com/binderhub-ci-repos/cached-minimal-dockerfile",
safe="",
)
),
# gh/ GitHub repo provider
"gh/binderhub-ci-repos/cached-minimal-dockerfile/HEAD",
"gh/binderhub-ci-repos/cached-minimal-dockerfile/596b52f10efb0c9befc0c4ae850cc5175297d71c",
# test redirect master->HEAD
"gh/binderhub-ci-repos/cached-minimal-dockerfile/master",
# gl/ GitLab repo provider
"gl/binderhub-ci-repos%2Fcached-minimal-dockerfile/HEAD",
"gl/binderhub-ci-repos%2Fcached-minimal-dockerfile/596b52f10efb0c9befc0c4ae850cc5175297d71c",
],
)
@pytest.mark.remote
async def test_build(app, needs_build, needs_launch, always_build, slug, pytestconfig):
"""
Test build a repo that is very quick and easy to build.
"""
# can't use mark.github_api since only some tests here use GitHub
if slug.startswith("gh/") and "not github_api" in pytestconfig.getoption(
"markexpr"
):
pytest.skip("Skipping GitHub API test")
build_url = f"{app.url}/build/{slug}"
r = await async_requests.get(build_url, stream=True)
r.raise_for_status()
events = []
launch_events = 0
async for line in async_requests.iter_lines(r):
line = line.decode("utf8", "replace")
if line.startswith("data:"):
event = json.loads(line.split(":", 1)[1])
events.append(event)
assert "message" in event
sys.stdout.write(f"{event.get('phase', '')}: {event['message']}")
# this is the signal that everything is ready, pod is launched
# and server is up inside the pod. Break out of the loop now
# because BinderHub keeps the connection open for many seconds
# after to avoid "reconnects" from slow clients
if event.get("phase") == "ready":
r.close()
break
if event.get("phase") == "launching" and not event["message"].startswith(
("Launching server...", "Launch attempt ")
):
# skip standard launching events of builder
# we are interested in launching events from spawner
launch_events += 1
assert launch_events > 0
final = events[-1]
assert "phase" in final
assert final["phase"] == "ready"
assert "url" in final
assert "token" in final
print(final["url"])
r = await async_requests.get(url_concat(final["url"], {"token": final["token"]}))
r.raise_for_status()
assert r.url.startswith(final["url"])
@pytest.mark.asyncio(timeout=120)
@pytest.mark.remote
async def test_build_fail(app, needs_build, needs_launch, always_build, pytestconfig):
"""
Test build a repo that should fail immediately.
"""
slug = "gh/binderhub-ci-repos/minimal-dockerfile/failed"
build_url = f"{app.url}/build/{slug}"
r = await async_requests.get(build_url, stream=True)
r.raise_for_status()
failed_events = 0
async for line in async_requests.iter_lines(r):
line = line.decode("utf8", "replace")
if line.startswith("data:"):
event = json.loads(line.split(":", 1)[1])
assert event.get("phase") not in ("launching", "ready")
if event.get("phase") == "failed":
failed_events += 1
break
r.close()
assert failed_events > 0, "Should have seen phase 'failed'"
def _list_image_builder_pods_mock():
"""Mock list of DIND pods"""
mock_response = mock.MagicMock()
mock_response.read.return_value = json.dumps(
{
"items": [
{
"spec": {"nodeName": name},
}
for name in ["node-a", "node-b"]
]
}
)
mock_k8s_api = mock.MagicMock()
mock_k8s_api.list_namespaced_pod.return_value = mock_response
return mock_k8s_api
def test_default_affinity():
# check that the default affinity is a pod anti-affinity
mock_k8s_api = _list_image_builder_pods_mock()
build = KubernetesBuildExecutor(
q=mock.MagicMock(),
api=mock_k8s_api,
name="test_build",
namespace="build_namespace",
repo_url="repo",
ref="ref",
build_image="image",
image_name="name",
push_secret="",
memory_limit=0,
git_credentials="",
docker_host="http://mydockerregistry.local",
node_selector={},
)
affinity = build.get_affinity()
assert isinstance(affinity, client.V1Affinity)
assert affinity.node_affinity is None
assert affinity.pod_affinity is None
assert affinity.pod_anti_affinity is not None
def test_sticky_builds_affinity():
# Setup some mock objects for the response from the k8s API
mock_k8s_api = _list_image_builder_pods_mock()
build = KubernetesBuildExecutor(
q=mock.MagicMock(),
api=mock_k8s_api,
name="test_build",
namespace="build_namespace",
repo_url="repo",
ref="ref",
build_image="image",
image_name="name",
push_secret="",
memory_limit=0,
git_credentials="",
docker_host="http://mydockerregistry.local",
node_selector={},
sticky_builds=True,
)
affinity = build.get_affinity()
assert isinstance(affinity, client.V1Affinity)
assert affinity.node_affinity is not None
assert affinity.pod_affinity is None
assert affinity.pod_anti_affinity is None
# One of the two nodes we have in our mock should be the preferred node
assert affinity.node_affinity.preferred_during_scheduling_ignored_during_execution[
0
].preference.match_expressions[0].values[0] in ("node-a", "node-b")
def test_build_memory_limits():
# Setup some mock objects for the response from the k8s API
mock_k8s_api = _list_image_builder_pods_mock()
build = KubernetesBuildExecutor(
q=mock.MagicMock(),
api=mock_k8s_api,
name="test_build",
namespace="build_namespace",
repo_url="repo",
ref="ref",
build_image="image",
image_name="name",
push_secret="",
memory_limit="2T",
memory_request="123G",
git_credentials="",
docker_host="http://mydockerregistry.local",
node_selector={},
sticky_builds=True,
)
assert build.memory_limit == 2199023255552
assert build.memory_request == 132070244352
def test_git_credentials_passed_to_podspec_upon_submit():
git_credentials = """{
"client_id": "my_username",
"access_token": "my_access_token",
}"""
mock_k8s_api = _list_image_builder_pods_mock()
build = KubernetesBuildExecutor(
q=mock.MagicMock(),
api=mock_k8s_api,
name="test_build",
namespace="build_namespace",
repo_url="repo",
ref="ref",
build_image="image",
image_name="name",
push_secret="",
memory_limit=0,
git_credentials=git_credentials,
docker_host="http://mydockerregistry.local",
node_selector={},
)
with mock.patch.object(build.stop_event, "is_set", return_value=True):
build.submit()
call_args_list = mock_k8s_api.create_namespaced_pod.call_args_list
assert len(call_args_list) == 1
args = call_args_list[0][0]
pod = args[1]
assert len(pod.spec.containers) == 1
env = {env_var.name: env_var.value for env_var in pod.spec.containers[0].env}
assert env["GIT_CREDENTIAL_ENV"] == git_credentials
def test_extra_environment_variables_passed_to_podspec_upon_submit():
extra_environments = {
"CONTAINER_HOST": "unix:///var/run/docker.sock",
"REGISTRY_AUTH_FILE": "/root/.docker/config.json",
}
mock_k8s_api = _list_image_builder_pods_mock()
class EnvBuild(KubernetesBuildExecutor):
q = mock.MagicMock()
api = mock_k8s_api
name = "test_build"
repo_url = "repo"
ref = "ref"
image_name = "name"
extra_envs = extra_environments
namespace = "build_namespace"
push_secret = ""
build_image = "image"
memory_limit = 0
docker_host = "http://mydockerregistry.local"
node_selector = {}
build = EnvBuild()
with mock.patch.object(build.stop_event, "is_set", return_value=True):
build.submit()
call_args_list = mock_k8s_api.create_namespaced_pod.call_args_list
assert len(call_args_list) == 1
args = call_args_list[0][0]
pod = args[1]
assert len(pod.spec.containers) == 1
env = {env_var.name: env_var.value for env_var in pod.spec.containers[0].env}
assert env == extra_environments
async def test_local_repo2docker_build():
q = Queue()
repo_url = "https://github.com/binderhub-ci-repos/cached-minimal-dockerfile"
ref = "HEAD"
name = str(uuid4())
build = LocalRepo2dockerBuild(
q=q,
name=name,
repo_url=repo_url,
ref=ref,
image_name=name,
)
build.submit()
events = []
while True:
event = await q.get(10)
if (
event.kind == ProgressEvent.Kind.BUILD_STATUS_CHANGE
and event.payload == ProgressEvent.BuildStatus.BUILT
):
break
events.append(event)
# Image should now exist locally
docker_client = docker.from_env(version="auto")
assert docker_client.images.get(name)
@pytest.mark.asyncio(timeout=20)
async def test_local_repo2docker_build_stop(io_loop):
q = Queue()
# We need a slow build here so that we can interrupt it, so pick a large repo that
# will take several seconds to clone
repo_url = "https://github.com/jupyterhub/jupyterhub"
ref = "HEAD"
name = str(uuid4())
build = LocalRepo2dockerBuild(
q=q,
name=name,
repo_url=repo_url,
ref=ref,
image_name=name,
)
io_loop.run_in_executor(None, build.submit)
    # Get the first few log messages to check it successfully started
event = await q.get()
assert event.kind == ProgressEvent.Kind.BUILD_STATUS_CHANGE
assert event.payload == ProgressEvent.BuildStatus.RUNNING
for i in range(2):
event = await q.get()
assert event.kind == ProgressEvent.Kind.LOG_MESSAGE
assert "message" in event.payload
build.stop()
for i in range(10):
event = await q.get()
if (
event.kind == ProgressEvent.Kind.BUILD_STATUS_CHANGE
and event.payload == ProgressEvent.BuildStatus.FAILED
):
break
assert (
event.kind == ProgressEvent.Kind.BUILD_STATUS_CHANGE
and event.payload == ProgressEvent.BuildStatus.FAILED
)
    # TODO: check that the process was actually stopped, rather than the test
    # returning early and leaving it running in the background
# Build was stopped so image should not exist locally
docker_client = docker.from_env(version="auto")
with pytest.raises(docker.errors.ImageNotFound):
docker_client.images.get(name)
def test_execute_cmd():
cmd = [
"python",
"-c",
"from time import sleep; print(1, flush=True); sleep(2); print(2, flush=True)",
]
lines = list(_execute_cmd(cmd, capture=True))
assert lines == ["1\n", "2\n"]
def test_execute_cmd_break():
cmd = [
"python",
"-c",
"from time import sleep; print(1, flush=True); sleep(10); print(2, flush=True)",
]
lines = []
now = monotonic()
def break_callback():
return monotonic() - now > 2
# This should break after the first line
with pytest.raises(ProcessTerminated) as exc:
for line in _execute_cmd(cmd, capture=True, break_callback=break_callback):
lines.append(line)
assert lines == ["1\n"]
assert str(exc.value) == f"ProcessTerminated: {cmd}"
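

# An editorial sketch (not BinderHub's actual _execute_cmd) of the
# poll-and-terminate pattern the test above relies on: read child output
# incrementally, and kill the process once break_callback() returns True.
# It uses selectors on a pipe, so this particular sketch is Unix-only,
# and RuntimeError stands in for binderhub's ProcessTerminated exception.
import selectors
import subprocess

def _execute_cmd_sketch(cmd, break_callback=None, poll_interval=0.5):
    proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True
    )
    sel = selectors.DefaultSelector()
    sel.register(proc.stdout, selectors.EVENT_READ)
    try:
        while True:
            if sel.select(timeout=poll_interval):
                line = proc.stdout.readline()
                if not line:  # EOF: the child exited on its own
                    break
                yield line
            if break_callback is not None and break_callback():
                proc.terminate()
                raise RuntimeError(f"ProcessTerminated: {cmd}")
    finally:
        sel.close()
        proc.stdout.close()
        proc.wait()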
===== File: /pysrc/map_info_rdr.py (repo: intelxed/xed, license: Apache-2.0, Python, 19,900 bytes) =====
#!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2023 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
import sys
import os
import re
import shlex
import collections
import math
import genutil
import enumer
import enum_txt_writer
import codegen
# dict space name -> numerical space id
_space_id = {'legacy': 0, 'vex': 1, 'evex': 2, 'xop': 3}
_space_id_to_name = {v: k for k,v in _space_id.items()}
# list ordered by numerical space id
_space_id_sorted = sorted(_space_id.keys(), key=lambda x: _space_id[x])
def _encoding_space_max():
return max(_space_id.values())
def _encoding_space_range():
#Could make this dynamic based on what spaces are enabled
return range(0, _encoding_space_max()+1)
def vexvalid_to_encoding_space(vv):
"""Input number, output string"""
return _space_id_sorted[vv]
def encoding_space_to_vexvalid(space):
"""Input string, output number"""
return _space_id[space]
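
# e.g. vexvalid_to_encoding_space(2) == 'evex' and
#      encoding_space_to_vexvalid('evex') == 2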
def _die(s):
genutil.die(s)
def _msgb(b,s=''):
genutil.msgerr("[{}] {}".format(b,s))
class map_info_t(object):
def __init__(self):
self.map_name = None
self.space = None # legacy, vex, evex, xop
self.legacy_escape = None # N/A or 0f
self.legacy_opcode = None # N/A or 38, 3a
self.map_id = None # N/A or 0,1,2,3,... 8,9,0xA
# "var" means variable, requires a table generated based on defined instructions
self.modrm = None # var,yes,no, has modrm
self.disp = None # var,yes,no, has disp
self.imm = None # var,0,1,2,4 (bytes) var=7
self.opcpos = None # 0,1,2, ... -1 (last) opcode position in pattern
self.priority = 10
# search_pattern is the string that we use to identify this
# map in the XED decode patterns. The pattern may have spaces
# in it. (and motivates using shlex to parse the input lines)
self.search_pattern = None
def is_legacy(self):
return self.space == 'legacy'
def is_vex(self):
return self.space == 'vex'
def is_evex(self):
return self.space == 'evex'
def is_xop(self):
return self.space == 'xop'
def map_short_name(self):
if self.map_name == 'amd_3dnow':
return 'AMD'
h = hex(self.map_id)[-1]
return str(h)
def ild_enum(self):
s = self.map_short_name()
        if self.space == 'xop':  # space names are lowercase (see _space_id)
            s = '_XOP{}'.format(s)
return 'XED_ILD_MAP{}'.format(s)
def get_legacy_escapes(self):
if self.legacy_opcode == 'N/A':
return (self.legacy_escape, None)
return (self.legacy_escape, self.legacy_opcode)
def has_variable_modrm(self):
return self.modrm == 'var'
def has_regular_modrm(self):
return self.modrm == 'yes'
def has_variable_disp(self):
return self.disp == 'var'
def has_variable_imm(self):
return self.imm == 'var'
def __str__(self):
s = []
s.append("name: {}".format(self.map_name))
s.append("space: {}".format(self.space))
s.append("legacyesc: {}".format(self.legacy_escape))
s.append("legacyopc: {}".format(self.legacy_opcode))
s.append("mapid: {}".format(self.map_id))
s.append("modrm: {}".format(self.modrm))
s.append("disp: {}".format(self.disp))
s.append("imm: {}".format(self.imm))
s.append("opcpos: {}".format(self.opcpos))
s.append("priority: {}".format(self.priority))
s.append("search_pattern: {}".format(self.search_pattern))
return " ".join(s)
_map_info_fields = ['map_name',
'space',
'legacy_escape',
'legacy_opcode',
'map_id',
'modrm',
'disp',
'imm',
'opcpos',
'search_pattern' ]
def _parse_map_line(s):
global _map_info_fields
# shlex allows for quoted substrings containing spaces as
# individual args.
t = shlex.split(s.strip())
if len(t) != len(_map_info_fields):
_die("Bad map description line: [{}]".format(s))
mi = map_info_t()
for i,fld in enumerate(_map_info_fields):
setattr(mi,fld,t[i])
# this gets used in function names so must only be legal characters
mi.map_name = re.sub('-', '_', mi.map_name)
if mi.space == 'legacy':
        if mi.legacy_escape != 'N/A':
            mi.legacy_escape_int = int(mi.legacy_escape, 16)
        else:
            mi.legacy_escape_int = None
        if mi.legacy_opcode != 'N/A':
            mi.legacy_opcode_int = int(mi.legacy_opcode, 16)
        else:
            mi.legacy_opcode_int = None
mi.map_id_fixup=False
if mi.space not in ['legacy', 'vex', 'evex', 'xop']:
_die("Bad map description encoding space [{}]".format(s))
if mi.space == 'legacy':
if genutil.is_hex(mi.legacy_escape):
pass
elif mi.legacy_escape != 'N/A':
_die("Bad map description legacy escape [{}]".format(s))
if genutil.is_hex(mi.legacy_opcode):
pass
elif mi.legacy_opcode != 'N/A':
_die("Bad map description legacy opcode [{}]".format(s))
if mi.map_id == 'N/A':
_die("Bad map description map-id [{}]".format(s))
elif genutil.numeric(mi.map_id):
mi.map_id = genutil.make_numeric(mi.map_id)
else:
mi.map_id_fixup=True
else:
if mi.legacy_escape != 'N/A':
_die("Bad map description legacy escape [{}]".format(s))
if mi.legacy_opcode != 'N/A':
_die("Bad map description legacy opcode [{}]".format(s))
if genutil.numeric(mi.map_id):
mi.map_id = genutil.make_numeric(mi.map_id)
else:
_die("Bad map description map id [{}]".format(s))
if mi.disp not in ['var','no']:
_die("Bad map description disp specifier [{}]".format(s))
if mi.modrm not in ['var','yes','no']:
_die("Bad map description modrm specifier [{}]".format(s))
if mi.imm not in ['var','0','1','2','4']:
_die("Bad map description imm specifier [{}]".format(s))
if genutil.numeric(mi.opcpos):
mi.opcpos = genutil.make_numeric(mi.opcpos)
else:
_die("Bad map description opcode position specifier [{}]".format(s))
# we want the longer patterns first when we sort the map_info_t.
mi.priority = 100-len(mi.search_pattern)
return mi
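
# Illustrative only (a made-up line, not from xed's datafiles): shlex keeps
# the quoted search_pattern together as a single field, e.g.
#   shlex.split('map0f legacy 0f N/A 1 var var var 1 "0F"')
# yields ten tokens, one per name in _map_info_fields.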
def emit_enums(agi):
emit_ild_enum_dups(agi) # XED_ILD_*
emit_ild_enum_unique(agi) # XED_MAPU_*
file_list = emit_map_info_tables(agi)
agi.hdr_files.extend(file_list)
def emit_map_info_tables(agi):
    '''Emit the variable modrm/disp/imm tables, one per encoding space,
    indexed by natural map ids. Returns the list of files generated.'''
map_features_cfn = 'xed-map-feature-tables.c'
map_features_hfn = 'xed-map-feature-tables.h'
private_gendir = os.path.join(agi.common.options.gendir,'include-private')
hfe = codegen.xed_file_emitter_t(agi.common.options.xeddir,
private_gendir,
map_features_hfn)
for h in [ 'xed-map-info.h' ]:
hfe.add_header(h)
hfe.start()
sorted_list = sorted(agi.map_info, key=lambda x: x.map_name)
spaces = list(set([ mi.space for mi in sorted_list ]))
sorted_spaces = sorted(spaces, key=lambda x: encoding_space_to_vexvalid(x))
max_space_id = _encoding_space_max() # legacy,vex,evex,xop
#max_space_id = encoding_space_to_vexvalid(sorted_spaces[-1])
max_map_id = max([mi.map_id for mi in agi.map_info]) #0...31
fields = ['modrm', 'disp', 'imm']
cvt_yes_no_var = { 'yes':1, 'no':0, 'var':2 }
cvt_imm = { '0':0, '1':1, '2':2, '4':4, 'var':7 }
field_to_cvt = { 'modrm': cvt_yes_no_var,
'disp' : cvt_yes_no_var,
'imm' : cvt_imm }
bits_per_chunk = 64
# The field width in bits must be a power of 2 for current design,
# otherwise the bits of interest can span the 64b chunks we are
# using to store the values.
field_to_bits = { 'modrm': 2,
'disp' : 2,
'imm' : 4 }
def collect_codes(field, space_maps):
        '''Return a list of codes indexed by map id; field_to_cvt converts
        the field's strings to small integers (absent maps get 0).'''
cvt = field_to_cvt[field]
codes = { key:0 for key in range(0,max_map_id+1) }
for mi in space_maps:
codes[mi.map_id] = cvt[getattr(mi,field)]
codes_as_list = [ codes[i] for i in range(0,max_map_id+1) ]
return codes_as_list
def convert_list_to_integer(lst, bits_per_field):
        '''Return an integer, or a list of integers if more than 64 bits are required.'''
integers = []
tot = 0
shift = 0
for v in lst:
if shift >= 64:
integers.append(tot)
tot = 0
shift = 0
tot = tot + (v << shift)
shift = shift + bits_per_field
integers.append(tot)
if len(integers) == 1:
return integers[0]
return integers
for space_id in _encoding_space_range():
space = _space_id_to_name[space_id]
space_maps = [ mi for mi in sorted_list if mi.space == space ]
for field in fields:
bits_per_field = field_to_bits[field]
total_bits = max_map_id * bits_per_field
required_chunks = math.ceil(total_bits / bits_per_chunk)
values_per_chunk = bits_per_chunk // bits_per_field
ilog2_values_per_chunk = int(math.log2(values_per_chunk))
mask = (1<<bits_per_field)-1
f = codegen.function_object_t('xed_ild_has_{}_{}'.format(field,space),
'xed_bool_t',
static=True, inline=True)
f.add_arg('xed_uint_t m')
if space_maps:
codes = collect_codes(field, space_maps)
constant = convert_list_to_integer(codes,bits_per_field)
else:
codes = [0]
constant = 0
f.add_code('/* {} */'.format(codes))
if set(codes) == {0}: # all zero values...
f.add_code_eol('(void)m')
f.add_code_eol('return 0')
else:
if required_chunks <= 1:
f.add_code_eol('const xed_uint64_t data_const = 0x{:x}ULL'.format(constant))
f.add_code_eol('return (xed_bool_t)((data_const >> ({}*m)) & {})'.format(
bits_per_field, mask))
else:
f.add_code('const xed_uint64_t data_const[{}] = {{'.format(required_chunks))
ln = ['0x{:x}ULL'.format(c) for c in constant]
f.add_code_eol(' {} }}'.format(", ".join(ln)))
f.add_code_eol('const xed_uint64_t chunkno = m >> {}'.format(ilog2_values_per_chunk))
f.add_code_eol('const xed_uint64_t offset = m & ({}-1)'.format(values_per_chunk))
f.add_code_eol('return (xed_bool_t)((data_const[chunkno] >> ({}*offset)) & {})'.format(
bits_per_field, mask))
hfe.write(f.emit()) # emit the inline function in the header
# emit a function that covers all spaces
for field in fields:
bits_per_field = field_to_bits[field]
total_bits = max_map_id * bits_per_field
required_chunks = math.ceil(total_bits / bits_per_chunk)
values_per_chunk = bits_per_chunk // bits_per_field
ilog2_values_per_chunk = int(math.log2(values_per_chunk))
mask = (1<<bits_per_field)-1
f = codegen.function_object_t('xed_ild_has_{}'.format(field),
'xed_bool_t',
static=True, inline=True)
f.add_arg('xed_uint_t vv')
f.add_arg('xed_uint_t m')
if required_chunks <= 1:
f.add_code('const xed_uint64_t data_const[{}] = {{'.format(max_space_id+1))
else:
f.add_code('const xed_uint64_t data_const[{}][{}] = {{'.format(max_space_id+1,
required_chunks))
for space_id in _encoding_space_range():
space = _space_id_to_name[space_id]
space_maps = [ mi for mi in sorted_list if mi.space == space ]
if space_maps:
codes = collect_codes(field, space_maps)
constant = convert_list_to_integer(codes,bits_per_field)
else:
codes = [0]*required_chunks
if required_chunks <= 1:
constant = 0
else:
constant = [0]*required_chunks
f.add_code('/* {} {} */'.format(codes,space))
if required_chunks <= 1:
f.add_code(' 0x{:x}ULL,'.format(constant))
else:
ln = ['0x{:x}ULL'.format(c) for c in constant]
f.add_code('{{ {} }},'.format(", ".join(ln)))
f.add_code_eol('}')
f.add_code_eol('xed_assert(vv < {})'.format(max_space_id+1))
if required_chunks <= 1:
f.add_code_eol('return (xed_bool_t)((data_const[vv] >> ({}*m)) & {})'.format(bits_per_field,
mask))
else:
f.add_code_eol('const xed_uint64_t chunkno = m >> {}'.format(ilog2_values_per_chunk))
f.add_code_eol('const xed_uint64_t offset = m & ({}-1)'.format(values_per_chunk))
f.add_code_eol('return (xed_bool_t)((data_const[vv][chunkno] >> ({}*offset)) & {})'.format(
bits_per_field, mask))
hfe.write(f.emit()) # emit the inline function in the header
# emit a set of functions for determining the valid maps in each encoding space
if max_map_id > 64:
genutil.die("Need to make this work with multiple chunks of u64")
for space_id in _encoding_space_range():
space = _space_id_to_name[space_id]
space_maps = [ mi for mi in sorted_list if mi.space == space ]
f = codegen.function_object_t('xed_ild_map_valid_{}'.format(space),
'xed_bool_t',
static=True, inline=True)
f.add_arg('xed_uint_t m')
max_id = _encoding_space_max()
#max_id = max( [mi.map_id for mi in space_maps ] )
codes_dict = { key:0 for key in range(0,max_map_id+1) }
for mi in space_maps:
codes_dict[mi.map_id] = 1
codes = [ codes_dict[i] for i in range(0,max_map_id+1) ]
f.add_code('/* {} */'.format(codes))
constant = convert_list_to_integer(codes,1)
f.add_code_eol('const xed_uint64_t data_const = 0x{:x}ULL'.format(constant))
        # no need for a max-map test, since the upper bits of the
        # constant are already zero
f.add_code_eol('return (xed_bool_t)((data_const >> m) & 1)')
hfe.write(f.emit()) # emit the inline function in the header
# emit a table filling in "xed_map_info_t xed_legacy_maps[] = { ... }"
legacy_maps = [ mi for mi in sorted_list if mi.space == 'legacy' ]
legacy_maps = sorted(legacy_maps,
key=lambda x: -len(x.search_pattern) * 10 + x.map_id)
hfe.add_code('const xed_map_info_t xed_legacy_maps[] = {')
for mi in legacy_maps:
if mi.map_id == 0:
continue
has_legacy_opcode = 1 if mi.legacy_opcode != 'N/A' else 0
legacy_opcode = mi.legacy_opcode if mi.legacy_opcode != 'N/A' else 0
legacy_escape = mi.legacy_escape if mi.legacy_escape != 'N/A' else 0
hfe.add_code('{{ {}, {}, {}, {}, {} }},'.format(legacy_escape,
has_legacy_opcode,
legacy_opcode,
mi.map_id,
mi.opcpos))
hfe.add_code_eol('}')
hfe.close()
return [hfe.full_file_name]
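
# Editorial sketch of the inverse of convert_list_to_integer (not part of
# xed's generator): extract map m's field value from the packed 64-bit
# constant(s), mirroring the shift-and-mask logic in the emitted C code.
# Assumes bits_per_field is a power of 2 dividing 64, per the note above.
def _extract_field_sketch(packed, m, bits_per_field):
    values_per_chunk = 64 // bits_per_field
    mask = (1 << bits_per_field) - 1
    if isinstance(packed, list):  # values span multiple 64-bit chunks
        chunkno = m // values_per_chunk
        offset = m % values_per_chunk
        return (packed[chunkno] >> (bits_per_field * offset)) & mask
    return (packed >> (bits_per_field * m)) & mask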
def emit_ild_enum_unique(agi):
"""modify map_info_t values to include mapu enum name so that we can
build other arrays for the C-code based on that unique enum"""
sorted_list = sorted(agi.map_info, key=lambda x: x.map_name)
evalues = ['INVALID']
for mi in sorted_list:
s = mi.map_name.upper()
evalues.append(s)
mi.mapu_name = 'XED_MAPU_{}'.format(s)
enum = enum_txt_writer.enum_info_t(evalues,
agi.common.options.xeddir,
agi.common.options.gendir,
'xed-mapu',
'xed_mapu_enum_t',
'XED_MAPU_',
cplusplus=False)
enum.run_enumer()
agi.add_file_name(enum.src_full_file_name)
agi.add_file_name(enum.hdr_full_file_name, header=True)
agi.all_enums['xed_mapu_enum_t'] = evalues
def emit_ild_enum_dups(agi):
evalues = []
sorted_list = sorted(agi.map_info, key=lambda x: x.map_name)
for mi in sorted_list:
val = None
if isinstance(mi.map_id,int):
val = str(mi.map_id)
e = enumer.enumer_value_t(mi.map_name.upper(), val)
evalues.append(e)
evalues.append('MAP_INVALID')
enum = enum_txt_writer.enum_info_t(evalues,
agi.common.options.xeddir,
agi.common.options.gendir,
'xed-ild',
'xed_ild_map_enum_t',
'XED_ILD_',
cplusplus=False)
enum.run_enumer()
agi.add_file_name(enum.src_full_file_name)
agi.add_file_name(enum.hdr_full_file_name, header=True)
agi.all_enums['xed_ild_map_enum_t'] = evalues
def fix_nonnumeric_maps(maps):
d = collections.defaultdict(list)
for mi in maps:
if not mi.map_id_fixup:
d[mi.space].append(mi.map_id)
mx = {} # max per key
for k in d.keys():
mx[k] = max(d[k])
for mi in maps:
if mi.map_id_fixup:
maxval = mx[mi.space] + 1
mi.map_id = maxval
mx[mi.space] = maxval
mi.map_id_fixup = False
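
# e.g., if the 'legacy' space already has numeric map ids {0,1,2,3}, two
# maps flagged map_id_fixup would be assigned 4 and 5, in input order.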
def read_file(fn):
    with open(fn, 'r') as f:
        lines = f.readlines()
    lines = map(genutil.no_comments, lines)
    lines = list(filter(genutil.blank_line, lines))
maps = [] # list of map_info_t
for line in lines:
maps.append( _parse_map_line(line) )
fix_nonnumeric_maps(maps)
maps.sort(key=lambda x: x.priority)
#for m in maps:
# _msgb("MAPINFO",m)
return maps
if __name__ == "__main__":
read_file(sys.argv[1])
sys.exit(0)
===== File: /tests/st/ops/gpu/test_atan_grad_op.py (repo: mindspore-ai/mindspore, license: Apache-2.0 and others, Python, 3,182 bytes) =====
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore import Tensor
import mindspore.ops.operations._grad_ops as P
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
np.random.seed(1)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_atangrad_fp32():
"""
Feature: ALL To ALL
Description: test cases for AtanGrad float32
    Expectation: the result matches numpy
"""
x_np = np.random.rand(4, 2).astype(np.float32) * 10
dout_np = np.random.rand(4, 2).astype(np.float32) * 10
output_ms = P.AtanGrad()(Tensor(x_np), Tensor(dout_np))
output_np = dout_np / (1 + np.square(x_np))
assert np.allclose(output_ms.asnumpy(), output_np, 1e-4, 1e-4)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_atangrad_fp16():
"""
Feature: ALL To ALL
Description: test cases for AtanGrad float16
    Expectation: the result matches numpy
"""
x_np = np.random.rand(4, 2).astype(np.float16) * 10
dout_np = np.random.rand(4, 2).astype(np.float16) * 10
output_ms = P.AtanGrad()(Tensor(x_np), Tensor(dout_np))
output_np = dout_np.astype(np.float32) / (1 + np.square(x_np.astype(np.float32)))
assert np.allclose(output_ms.asnumpy(), output_np.astype(np.float16), 1e-3, 1e-3)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_atan_grad_float(dtype):
"""
Feature: ALL To ALL
Description: test cases for AtanGrad
    Expectation: the result matches numpy
"""
x = np.array([-0.5, 0, 0.5]).astype(dtype)
dy = np.array([1, 0, -1]).astype(dtype)
output = P.AtanGrad()(Tensor(x), Tensor(dy))
print(output)
expect = dy / (1 + x * x)
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
def test_atan_grad_complex(dtype):
"""
Feature: ALL To ALL
Description: test cases for AtanGrad
    Expectation: the result matches numpy
"""
x = np.array([-0.5, 0, 0.5]).astype(dtype)
x = x + 0.5j * x
dy = np.array([1, 0, -1]).astype(dtype)
dy = dy + 0.3j * dy
output = P.AtanGrad()(Tensor(x), Tensor(dy))
print(output)
expect = dy / np.conjugate(1 + x * x)
assert np.allclose(output.asnumpy(), expect)
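

# The expected gradients above come from d/dx atan(x) = 1 / (1 + x^2);
# the complex tests additionally conjugate the denominator, matching the
# usual Wirtinger-style convention for gradients of holomorphic ops.
# A quick finite-difference sanity check of the real identity (editorial
# sketch, not part of the MindSpore suite):
def _check_atan_derivative_sketch():
    eps = 1e-6
    x = np.linspace(-2.0, 2.0, 9)
    numeric = (np.arctan(x + eps) - np.arctan(x - eps)) / (2 * eps)
    analytic = 1.0 / (1.0 + x * x)
    assert np.allclose(numeric, analytic, atol=1e-6)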
===== File: /toontown/distributed/ToontownDistrictAI.py (repo: open-toontown/open-toontown, license: BSD-3-Clause, Python, 693 bytes) =====
from direct.directnotify import DirectNotifyGlobal
from otp.distributed.DistributedDistrictAI import DistributedDistrictAI
class ToontownDistrictAI(DistributedDistrictAI):
notify = DirectNotifyGlobal.directNotify.newCategory('ToontownDistrictAI')
def __init__(self, air):
DistributedDistrictAI.__init__(self, air)
self.ahnnLog = False
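
    # Naming convention (standard in Panda3D/OTP distributed objects):
    # allowAHNNLog updates local state only, d_allowAHNNLog ("distributed")
    # sends the field update over the wire, and b_allowAHNNLog ("both")
    # does the two together.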
def allowAHNNLog(self, ahnnLog):
self.ahnnLog = ahnnLog
def d_allowAHNNLog(self, ahnnLog):
self.sendUpdate('allowAHNNLog', [ahnnLog])
def b_allowAHNNLog(self, ahnnLog):
self.allowAHNNLog(ahnnLog)
self.d_allowAHNNLog(ahnnLog)
def getAllowAHNNLog(self):
return self.ahnnLog