| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3 to 239 |
| max_stars_repo_name | string | lengths 5 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_path | string | lengths 3 to 239 |
| max_issues_repo_name | string | lengths 5 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_path | string | lengths 3 to 239 |
| max_forks_repo_name | string | lengths 5 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| content | string | lengths 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
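For orientation, a single row of this table can be pictured as a flat Python record. The field names come from the header above and the values are copied from the first row below; the content string is truncated here for brevity, and the max_issues_* / max_forks_* fields follow the same pattern as the max_stars_* ones.

```python
record = {
    "hexsha": "4a001b72e15408b387fd344059eb6c6600a0146a",
    "size": 783,
    "ext": "py",
    "lang": "Python",
    "max_stars_repo_path": "aliyun/log/cursor_response.py",
    "max_stars_repo_name": "SeraphLiu/aliyun-log-sdk-python",
    "max_stars_repo_head_hexsha": "35f608bd6de9f5ed7a89c40288c550cfc3bea8ba",
    "max_stars_repo_licenses": ["BSD-3-Clause"],
    "max_stars_count": None,  # ⌀ / null in the table
    # ... max_issues_* and max_forks_* fields omitted, same shape as above ...
    "content": "#!/usr/bin/env python\n#encoding: utf-8\n...",  # full source text
    "avg_line_length": 23.727273,
    "max_line_length": 65,
    "alphanum_fraction": 0.671775,
}
```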

hexsha: 4a001b72e15408b387fd344059eb6c6600a0146a | size: 783 | ext: py | lang: Python
max_stars_repo: aliyun/log/cursor_response.py @ SeraphLiu/aliyun-log-sdk-python (head: 35f608bd6de9f5ed7a89c40288c550cfc3bea8ba, licenses: ["BSD-3-Clause"], stars: null, star events: null to null)
max_issues_repo: aliyun/log/cursor_response.py @ SeraphLiu/aliyun-log-sdk-python (head: 35f608bd6de9f5ed7a89c40288c550cfc3bea8ba, licenses: ["BSD-3-Clause"], issues: null, issue events: null to null)
max_forks_repo: aliyun/log/cursor_response.py @ SeraphLiu/aliyun-log-sdk-python (head: 35f608bd6de9f5ed7a89c40288c550cfc3bea8ba, licenses: ["BSD-3-Clause"], forks: null, fork events: null to null)
content:
#!/usr/bin/env python
#encoding: utf-8
# Copyright (C) Alibaba Cloud Computing
# All rights reserved.
from aliyun.log.util import Util
from logresponse import LogResponse
class GetCursorResponse(LogResponse) :
""" The response of the get_cursor API from log.
:type header: dict
:param header: ListShardResponse HTTP response header
:type resp: dict
:param resp: the HTTP response body
"""
def __init__(self, resp, header):
LogResponse.__init__(self, header)
self.cursor = Util.convert_unicode_to_str(resp["cursor"])
def get_cursor(self) :
return self.cursor
def log_print(self):
print 'GetCursorResponse'
print 'headers:', self.get_all_headers()
print 'cursor:', self.cursor
avg_line_length: 23.727273 | max_line_length: 65 | alphanum_fraction: 0.671775
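The file above is Python 2 style (print statements, an implicit relative import, a space before the colon in the class header), so it will not run under Python 3. A minimal sketch of the same wrapper in Python 3 syntax, assuming the same module layout as the original:

```python
from aliyun.log.util import Util
from .logresponse import LogResponse  # explicit relative import


class GetCursorResponse(LogResponse):
    """Response of the get_cursor API from log."""

    def __init__(self, resp, header):
        LogResponse.__init__(self, header)
        self.cursor = Util.convert_unicode_to_str(resp["cursor"])

    def get_cursor(self):
        return self.cursor

    def log_print(self):
        # print is a function in Python 3
        print('GetCursorResponse')
        print('headers:', self.get_all_headers())
        print('cursor:', self.cursor)
```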

hexsha: 4a001c7df6570cc3fdafc78459e0473d52136932 | size: 1,549 | ext: py | lang: Python
max_stars_repo: python/__init__.py @ DalavanCloud/coda (head: 3c05f8360446a51157f2d86d2bd1c42e1764503c, licenses: ["BSD-3-Clause"], stars: 1, star events: 2019-02-08T02:06:13.000Z to 2019-02-08T02:06:13.000Z)
max_issues_repo: python/__init__.py @ DalavanCloud/coda (head: 3c05f8360446a51157f2d86d2bd1c42e1764503c, licenses: ["BSD-3-Clause"], issues: null, issue events: null to null)
max_forks_repo: python/__init__.py @ DalavanCloud/coda (head: 3c05f8360446a51157f2d86d2bd1c42e1764503c, licenses: ["BSD-3-Clause"], forks: null, fork events: null to null)
content:
"""
Copyright (C) 2007-2019 S[&]T, The Netherlands.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from .codapython import *
avg_line_length: 46.939394 | max_line_length: 75 | alphanum_fraction: 0.801162

hexsha: 4a001ddc10550e6ed4bbc2d5b256cdc7552146a7 | size: 658 | ext: py | lang: Python
max_stars_repo: testnet/prolog/builtin/__init__.py @ ForoughA/CORGI (head: c28ecd0e0375569f9f05e94e6ae5b7a994caacf5, licenses: ["MIT"], stars: 22, star events: 2020-06-18T23:36:29.000Z to 2021-12-11T12:39:51.000Z)
max_issues_repo: prolog/builtin/__init__.py @ cosmoharrigan/pyrolog (head: b250e3ec0109049dea09419f2ad6a8ed14d92ff0, licenses: ["MIT"], issues: 1, issue events: 2020-06-27T01:57:12.000Z to 2020-10-05T03:54:44.000Z)
max_forks_repo: prolog/builtin/__init__.py @ leonweber/spyrolog (head: 31834f64a7f4bc67e8aad5174ed71f3b7af9bc09, licenses: ["MIT"], forks: 5, fork events: 2019-08-08T04:58:53.000Z to 2021-11-13T08:06:29.000Z)
content:
# imports to register builtins
import prolog.builtin.allsolution
import prolog.builtin.arithmeticbuiltin
import prolog.builtin.atomconstruction
import prolog.builtin.control
import prolog.builtin.database
import prolog.builtin.exception
import prolog.builtin.formatting
import prolog.builtin.metacall
#import prolog.builtin.parseraccess
#import prolog.builtin.statistics
import prolog.builtin.source
import prolog.builtin.termconstruction
import prolog.builtin.unify
import prolog.builtin.numberchars
import prolog.builtin.modules
import prolog.builtin.streams
import prolog.builtin.term_variables
import prolog.builtin.attvars
import prolog.builtin.tracing
avg_line_length: 31.333333 | max_line_length: 39 | alphanum_fraction: 0.87234
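This __init__.py relies on import side effects: each imported module registers its builtins as it is loaded. A small self-contained sketch of that register-on-import pattern; the registry and decorator names here are illustrative, not necessarily pyrolog's actual API:

```python
# A shared registry that builtin modules populate when they are imported.
BUILTINS = {}


def expose_builtin(name):
    """Decorator that records an implementation under its Prolog name."""
    def register(fn):
        BUILTINS[name] = fn  # side effect: runs at import time
        return fn
    return register


# A builtin module (something like prolog/builtin/control.py) would then contain:
@expose_builtin("true/0")
def impl_true(engine):
    return True

# After "import prolog.builtin.control", BUILTINS holds {"true/0": impl_true},
# which is why the package __init__ only needs the bare import statements above.
```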

hexsha: 4a001de983de2ce2e455f49178d24ace0a2c36d9 | size: 2,045 | ext: py | lang: Python
max_stars_repo: test/test_db_executer.py @ tcristi74/yamsleu_server (head: d16b039d4c64a7092a559a77555e37afa6cbe8d4, licenses: ["MIT"], stars: 1, star events: 2020-09-28T06:46:16.000Z to 2020-09-28T06:46:16.000Z)
max_issues_repo: test/test_db_executer.py @ tcristi74/yamsleu_server (head: d16b039d4c64a7092a559a77555e37afa6cbe8d4, licenses: ["MIT"], issues: null, issue events: null to null)
max_forks_repo: test/test_db_executer.py @ tcristi74/yamsleu_server (head: d16b039d4c64a7092a559a77555e37afa6cbe8d4, licenses: ["MIT"], forks: null, fork events: null to null)
content:
import logging
import logging.config
import random
from dotenv import load_dotenv
import sys
import os
import pathlib
# sys.path.append(os.path.join(
# str(pathlib.Path(os.path.abspath(__file__)).parent.parent), "data"))
sys.path.append(str(pathlib.Path(os.path.abspath(__file__)).parent.parent))
from data.db_executer import DbExecuter # pylint: disable=F0401
from data.db_config import DbConfig # pylint: disable=F0401
import pytest
@pytest.fixture()
def db():
print("setup")
db_config_instance = DbConfig()
db_exec = DbExecuter(db_config_instance)
yield db_exec
print("teardown")
class TestDbExecuter():
def test_nsert_one_record(self,db):
# insert test
email_id = f"test_{random.randint(10000,300000)}"
obj = {"first_name" : "Oce_test", "last_name": "Tudose", "email_address" : email_id, "skill_level":4}
res = db.insert_one_record(obj,"users")
assert res[1]==None
res = db.get_query("select id from public.users where email_address= %s limit 10",tuple([email_id]))
assert res[1]==None
pytest.user_id= res[0][0][0]
assert pytest.user_id>0
def test_get_query(self,db):
res = db.get_query("select * from public.users where id= %s",tuple([pytest.user_id]))
assert res[1]==None
assert len(res[0])==1
def test_execute_query(self,db):
res = db.execute("delete from public.users where id= %s",tuple([pytest.user_id]))
return res
# if __name__ == "__main__":
# logging.basicConfig(filename='info.log', level=logging.INFO)
# logging.getLogger(__name__)
# load_dotenv()
# logging.debug("Start")
# db_config_instance = DbConfig()
# email_id = f"test_{random.randint(10000,300000)}"
# user_id = insert_one_record()
# assert (user_id>0),"user has not been created"
# ret = get_query()
# assert (ret[1]==None),f"error:{ret[1]}"
# ret = execute()
# assert (ret[1]==None),f"error:{ret[1]}"
# print ("db_executer = done")
avg_line_length: 26.558442 | max_line_length: 109 | alphanum_fraction: 0.655746

hexsha: 4a001ec2ed7d1402b8d886dd836d75ed1760888a | size: 418 | ext: py | lang: Python
max_stars_repo: data/secscan_model/__init__.py @ jakedt/quay (head: 424c1a19d744be444ed27aa1718fd74af311d863, licenses: ["Apache-2.0"], stars: 1, star events: 2020-10-16T19:30:41.000Z to 2020-10-16T19:30:41.000Z)
max_issues_repo: data/secscan_model/__init__.py @ jakedt/quay (head: 424c1a19d744be444ed27aa1718fd74af311d863, licenses: ["Apache-2.0"], issues: 15, issue events: 2020-06-18T15:32:06.000Z to 2022-03-03T23:06:24.000Z)
max_forks_repo: data/secscan_model/__init__.py @ jakedt/quay (head: 424c1a19d744be444ed27aa1718fd74af311d863, licenses: ["Apache-2.0"], forks: null, fork events: null to null)
content:
import os
import logging
from data.secscan_model.secscan_v2_model import V2SecurityScanner
logger = logging.getLogger(__name__)
class SecurityScannerModelProxy(object):
def configure(self, app, instance_keys, storage):
self._model = V2SecurityScanner(app, instance_keys, storage)
def __getattr__(self, attr):
return getattr(self._model, attr)
secscan_model = SecurityScannerModelProxy()
avg_line_length: 23.222222 | max_line_length: 68 | alphanum_fraction: 0.772727
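The proxy above defers attribute lookups to whatever scanner was bound by configure(). A self-contained toy sketch of that __getattr__ delegation pattern; the scanner class and method names here are stand-ins, not quay's API:

```python
class FakeScanner:
    def ping(self):
        return "pong"


class Proxy:
    def configure(self, model):
        self._model = model

    def __getattr__(self, attr):
        # __getattr__ only fires for attributes not found on Proxy itself,
        # so configure() and _model resolve normally; everything else is
        # forwarded to the configured model.
        return getattr(self._model, attr)


proxy = Proxy()
proxy.configure(FakeScanner())
print(proxy.ping())  # "pong", resolved through __getattr__
```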

hexsha: 4a001f271e56451604cfbddcdee834dde24e3251 | size: 413 | ext: py | lang: Python
max_stars_repo: roompotify_backend/roompotify_backend/wsgi.py @ AmirAbaskohi/Roompotify (head: 34d281df61b42906cd6843ad7a413752668bbaa5, licenses: ["Apache-2.0"], stars: null, star events: null to null)
max_issues_repo: roompotify_backend/roompotify_backend/wsgi.py @ AmirAbaskohi/Roompotify (head: 34d281df61b42906cd6843ad7a413752668bbaa5, licenses: ["Apache-2.0"], issues: null, issue events: null to null)
max_forks_repo: roompotify_backend/roompotify_backend/wsgi.py @ AmirAbaskohi/Roompotify (head: 34d281df61b42906cd6843ad7a413752668bbaa5, licenses: ["Apache-2.0"], forks: null, fork events: null to null)
content:
"""
WSGI config for roompotify_backend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'roompotify_backend.settings')
application = get_wsgi_application()
avg_line_length: 24.294118 | max_line_length: 78 | alphanum_fraction: 0.79661

hexsha: 4a001f4f7f1ade6347cd69b29ebd881dec024bd6 | size: 2,520 | ext: py | lang: Python
max_stars_repo: ScoringEngine/ScoringEngine/web/session_providers.py @ norserage/scoring (head: b3f5c199b64bb3a78342924349fe6a713257b1a0, licenses: ["MIT"], stars: 1, star events: 2020-07-29T16:12:02.000Z to 2020-07-29T16:12:02.000Z)
max_issues_repo: ScoringEngine/ScoringEngine/web/session_providers.py @ norserage/scoring (head: b3f5c199b64bb3a78342924349fe6a713257b1a0, licenses: ["MIT"], issues: null, issue events: null to null)
max_forks_repo: ScoringEngine/ScoringEngine/web/session_providers.py @ norserage/scoring (head: b3f5c199b64bb3a78342924349fe6a713257b1a0, licenses: ["MIT"], forks: null, fork events: null to null)
content:
import json
from datetime import timedelta
from flask.sessions import SessionInterface, SessionMixin
from redis import StrictRedis
from werkzeug.datastructures import CallbackDict
try:
from Crypto.Random import random
except:
print("WARNING USING SYSTEM RANDOM")
import random
from ScoringEngine.core import config
import string
class RedisSession(CallbackDict, SessionMixin):
def __init__(self, initial=None, sid=None, new=False):
def on_update(self):
self.modified = True
CallbackDict.__init__(self, initial, on_update)
self.sid = sid
self.new = new
self.modified = False
class RedisSessionInterface(SessionInterface):
serializer = json
session_class = RedisSession
def __init__(self, redis=None, prefix='session:'):
if redis is None:
redis = StrictRedis(config.get_item("session_redis"))
self.redis = redis
self.prefix = prefix
def generate_sid(self):
# return str(uuid4())
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(128))
def get_redis_expiration_time(self, app, session):
if session.permanent:
return app.permanent_session_lifetime
return timedelta(days=1)
def open_session(self, app, request):
sid = request.cookies.get(app.session_cookie_name)
if not sid:
sid = self.generate_sid()
return self.session_class(sid=sid, new=True)
val = self.redis.get(self.prefix + sid)
if val is not None:
data = self.serializer.loads(val.decode())
return self.session_class(data, sid=sid)
return self.session_class(sid=sid, new=True)
def save_session(self, app, session, response):
domain = self.get_cookie_domain(app)
if not session:
self.redis.delete(self.prefix + session.sid)
if session.modified:
response.delete_cookie(app.session_cookie_name,
domain=domain)
return
redis_exp = self.get_redis_expiration_time(app, session)
cookie_exp = self.get_expiration_time(app, session)
val = self.serializer.dumps(dict(session))
self.redis.setex(self.prefix + session.sid, int(redis_exp.total_seconds()), val)
response.set_cookie(app.session_cookie_name, session.sid,
expires=cookie_exp, httponly=True,
domain=domain)
avg_line_length: 34.054054 | max_line_length: 95 | alphanum_fraction: 0.649603
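A minimal sketch of wiring the session interface above into a Flask application. It assumes the RedisSessionInterface class from this file is importable (the import path is inferred from the repo path and may differ) and that a Redis server is reachable at the placeholder host:

```python
from flask import Flask, session
from redis import StrictRedis

from ScoringEngine.web.session_providers import RedisSessionInterface  # path assumed

app = Flask(__name__)
# Flask calls open_session/save_session on this object for every request.
app.session_interface = RedisSessionInterface(
    redis=StrictRedis(host="localhost", port=6379), prefix="session:"
)


@app.route("/")
def index():
    session["visits"] = session.get("visits", 0) + 1
    return f"visits: {session['visits']}"
```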

hexsha: 4a001f5363f17112c9c704f1879b5516cfbc3f42 | size: 51,443 | ext: py | lang: Python
max_stars_repo: sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/aio/operations_async/_express_route_circuits_operations_async.py @ LianwMS/azure-sdk-for-python (head: 612d7bca9de86ee1bd1fa59291d7bf897ba9213f, licenses: ["MIT"], stars: 2, star events: 2019-05-17T21:24:53.000Z to 2020-02-12T11:13:42.000Z)
max_issues_repo: sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/aio/operations_async/_express_route_circuits_operations_async.py @ LianwMS/azure-sdk-for-python (head: 612d7bca9de86ee1bd1fa59291d7bf897ba9213f, licenses: ["MIT"], issues: 15, issue events: 2019-07-12T18:18:04.000Z to 2019-07-25T20:55:51.000Z)
max_forks_repo: sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/aio/operations_async/_express_route_circuits_operations_async.py @ LianwMS/azure-sdk-for-python (head: 612d7bca9de86ee1bd1fa59291d7bf897ba9213f, licenses: ["MIT"], forks: 2, fork events: 2020-05-21T22:51:22.000Z to 2020-05-26T20:53:01.000Z)
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitsOperations:
"""ExpressRouteCircuitsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
**kwargs
) -> None:
"""Deletes the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: None, or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
async def get(
self,
resource_group_name: str,
circuit_name: str,
**kwargs
) -> "models.ExpressRouteCircuit":
"""Gets information about the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuit, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.ExpressRouteCircuit
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuit"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
parameters: "models.ExpressRouteCircuit",
**kwargs
) -> "models.ExpressRouteCircuit":
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuit"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ExpressRouteCircuit')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
parameters: "models.ExpressRouteCircuit",
**kwargs
) -> "models.ExpressRouteCircuit":
"""Creates or updates an express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param parameters: Parameters supplied to the create or update express route circuit operation.
:type parameters: ~azure.mgmt.network.v2019_04_01.models.ExpressRouteCircuit
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: ExpressRouteCircuit, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.ExpressRouteCircuit
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuit"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
circuit_name: str,
parameters: "models.TagsObject",
**kwargs
) -> "models.ExpressRouteCircuit":
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuit"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
circuit_name: str,
parameters: "models.TagsObject",
**kwargs
) -> "models.ExpressRouteCircuit":
"""Updates an express route circuit tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param parameters: Parameters supplied to update express route circuit tags.
:type parameters: ~azure.mgmt.network.v2019_04_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: ExpressRouteCircuit, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.ExpressRouteCircuit
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuit"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
async def _list_arp_table_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
device_path: str,
**kwargs
) -> "models.ExpressRouteCircuitsArpTableListResult":
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuitsArpTableListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self._list_arp_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_arp_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}'} # type: ignore
async def begin_list_arp_table(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
device_path: str,
**kwargs
) -> "models.ExpressRouteCircuitsArpTableListResult":
"""Gets the currently advertised ARP table associated with the express route circuit in a resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: ExpressRouteCircuitsArpTableListResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.ExpressRouteCircuitsArpTableListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuitsArpTableListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_arp_table_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
device_path=device_path,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_arp_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}'} # type: ignore
async def _list_routes_table_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
device_path: str,
**kwargs
) -> "models.ExpressRouteCircuitsRoutesTableListResult":
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuitsRoutesTableListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self._list_routes_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_routes_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}'} # type: ignore
async def begin_list_routes_table(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
device_path: str,
**kwargs
) -> "models.ExpressRouteCircuitsRoutesTableListResult":
"""Gets the currently advertised routes table associated with the express route circuit in a
resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: ExpressRouteCircuitsRoutesTableListResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.ExpressRouteCircuitsRoutesTableListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuitsRoutesTableListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_routes_table_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
device_path=device_path,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_routes_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}'} # type: ignore
async def _list_routes_table_summary_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
device_path: str,
**kwargs
) -> "models.ExpressRouteCircuitsRoutesTableSummaryListResult":
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuitsRoutesTableSummaryListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self._list_routes_table_summary_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_routes_table_summary_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'} # type: ignore
async def begin_list_routes_table_summary(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
device_path: str,
**kwargs
) -> "models.ExpressRouteCircuitsRoutesTableSummaryListResult":
"""Gets the currently advertised routes table summary associated with the express route circuit in
a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: ExpressRouteCircuitsRoutesTableSummaryListResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.ExpressRouteCircuitsRoutesTableSummaryListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuitsRoutesTableSummaryListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_routes_table_summary_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
device_path=device_path,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_routes_table_summary.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'} # type: ignore
async def get_stats(
self,
resource_group_name: str,
circuit_name: str,
**kwargs
) -> "models.ExpressRouteCircuitStats":
"""Gets all the stats from an express route circuit in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitStats, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.ExpressRouteCircuitStats
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuitStats"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self.get_stats.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitStats', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/stats'} # type: ignore
async def get_peering_stats(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs
) -> "models.ExpressRouteCircuitStats":
"""Gets all stats from an express route circuit in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitStats, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.ExpressRouteCircuitStats
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuitStats"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self.get_peering_stats.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitStats', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_peering_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/stats'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.ExpressRouteCircuitListResult"]:
"""Gets all the express route circuits in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.ExpressRouteCircuitListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuitListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["models.ExpressRouteCircuitListResult"]:
"""Gets all the express route circuits in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.ExpressRouteCircuitListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuitListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits'} # type: ignore
avg_line_length: 49.086832 | max_line_length: 261 | alphanum_fraction: 0.672239
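A hedged sketch of driving one of these long-running operations from the matching versioned async management client. The client import path mirrors this file's location in the SDK and may differ across azure-mgmt-network releases; the credential comes from azure-identity, and the subscription and resource names are placeholders:

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.v2019_04_01.aio import NetworkManagementClient  # path assumed


async def main():
    credential = DefaultAzureCredential()
    client = NetworkManagementClient(credential, "<subscription-id>")

    # begin_delete returns an AsyncLROPoller; result() waits for the LRO to finish.
    poller = await client.express_route_circuits.begin_delete(
        resource_group_name="my-rg", circuit_name="my-circuit"
    )
    await poller.result()

    await credential.close()


asyncio.run(main())
```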

hexsha: 4a001f7114106c6f5c3926edfcbc55e76814d449 | size: 988 | ext: py | lang: Python
max_stars_repo: apriori.py @ rodrigo-brito/co-change-analysis (head: 298bb5437371ab29fb94a9e2f9012d3a5cf033f7, licenses: ["MIT"], stars: 1, star events: 2019-04-15T22:27:52.000Z to 2019-04-15T22:27:52.000Z)
max_issues_repo: apriori.py @ rodrigo-brito/co-change-analysis (head: 298bb5437371ab29fb94a9e2f9012d3a5cf033f7, licenses: ["MIT"], issues: 1, issue events: 2019-05-09T01:55:12.000Z to 2019-05-09T02:14:41.000Z)
max_forks_repo: apriori.py @ rodrigo-brito/co-change-analysis (head: 298bb5437371ab29fb94a9e2f9012d3a5cf033f7, licenses: ["MIT"], forks: 2, fork events: 2019-05-09T01:41:29.000Z to 2019-06-12T18:59:45.000Z)
content:
from efficient_apriori import apriori
class Apriori:
def __init__(self, transactions, support=0.5, confidence=0.5, max_length=8):
self.transactions = transactions
self.support = support
self.confidence = confidence
self.max_length = max_length
def set_support(self, support):
self.support = support
def set_confidence(self, confidence):
self.confidence = confidence
def set_max_length(self, max_length):
self.max_length = max_length
def get_rules(self):
_, rules = apriori(self.transactions, min_support=self.support, min_confidence=self.confidence, max_length=self.max_length)
return rules
def get_rules_csv(self, limit):
for index, rule in enumerate(self.get_rules()):
if limit and index > limit:
break
print('%s,%s,"%.0f","%.3f"' % ('/'.join(rule.lhs), '/'.join(rule.rhs), rule.support*rule.num_transactions, rule.confidence))
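# Minimal usage sketch, assuming efficient_apriori is installed; the transaction
# tuples below are hypothetical placeholders for sets of co-changed files.
if __name__ == '__main__':
    sample_transactions = [
        ('a.java', 'b.java'),
        ('a.java', 'b.java', 'c.java'),
        ('a.java', 'c.java'),
    ]
    Apriori(sample_transactions, support=0.3, confidence=0.6).get_rules_csv(limit=10)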
| 35.285714
| 136
| 0.653846
|
4a0021e596243b2fdaf44abfdef7afa9da783bf1
| 3,358
|
py
|
Python
|
src/olympia/browse/urls.py
|
thundernest/addons-server
|
e7c4ce33fea6697c3327a07638062e0a311a93fa
|
[
"BSD-3-Clause"
] | 10
|
2018-08-16T04:55:06.000Z
|
2022-01-08T16:09:39.000Z
|
src/olympia/browse/urls.py
|
thundernest/addons-server
|
e7c4ce33fea6697c3327a07638062e0a311a93fa
|
[
"BSD-3-Clause"
] | 171
|
2018-05-20T00:27:59.000Z
|
2022-03-21T13:34:27.000Z
|
src/olympia/browse/urls.py
|
thundernest/addons-server
|
e7c4ce33fea6697c3327a07638062e0a311a93fa
|
[
"BSD-3-Clause"
] | 12
|
2018-08-01T16:46:09.000Z
|
2022-01-08T16:09:46.000Z
|
from django.conf.urls import include, url
from django.shortcuts import redirect
from olympia.amo.urlresolvers import reverse
from olympia.browse.feeds import (
ExtensionCategoriesRss, FeaturedRss, SearchToolsRss, ThemeCategoriesRss)
from . import views
impala_patterns = [
# TODO: Impalacize these views.
url('^extensions/(?P<category>[^/]+)/featured$',
views.legacy_creatured_redirect,
name='i_browse.creatured'),
url('^language-tools/(?P<category>[^/]+)?$', views.language_tools,
name='i_browse.language-tools'),
url('^search-tools/(?P<category>[^/]+)?$', views.search_tools,
name='i_browse.search-tools'),
]
urlpatterns = [
url('^i/', include(impala_patterns)),
url('^language-tools/(?P<category>[^/]+)?$', views.language_tools,
name='browse.language-tools'),
url('^dictionaries$',
lambda r: redirect(reverse('browse.language-tools'), permanent=True)),
url('^featured$',
lambda r: redirect(reverse('browse.extensions') + '?sort=featured',
permanent=True)),
# Full Themes are now Complete Themes.
url('^full-themes/(?P<category>[^ /]+)?$',
views.legacy_fulltheme_redirects),
# Personas are now Themes.
url('^personas/(?P<category>[^ /]+)?$',
views.legacy_theme_redirects),
url('^themes/(?:(?P<category>[^/]+)/)?$',
lambda r, category: redirect(
reverse('browse.static-themes',
kwargs=({'category': category} if category else {}))),
name='browse.personas'),
# Themes are now Complete Themes.
url('^themes/(?P<category_name>[^/]+)/format:rss$',
views.legacy_theme_redirects),
url('^complete-themes/(?P<category>[^/]+)?$', views.themes,
name='browse.themes'),
url('^complete-themes/(?:(?P<category_name>[^/]+)/)?format:rss$',
ThemeCategoriesRss(), name='browse.themes.rss'),
# This won't let you browse any themes but detail page needs the url.
url('^static-themes/(?:(?P<category>[^/]+)/)?$', views.staticthemes,
name='browse.static-themes'),
url('^extensions/(?:(?P<category>[^/]+)/)?$', views.extensions,
name='browse.extensions'),
# Creatured URLs now redirect to browse.extensions
url('^extensions/(?P<category>[^/]+)/featured$',
views.legacy_creatured_redirect),
url('^extensions/(?:(?P<category_name>[^/]+)/)?format:rss$',
ExtensionCategoriesRss(), name='browse.extensions.rss'),
url('^browse/type:7$',
lambda r: redirect("https://www.mozilla.org/plugincheck/",
permanent=True)),
url('^browse/type:(?P<type_>\d)(?:/cat:(?P<category>\d+))?'
'(?:/sort:(?P<sort>[^/]+))?(?:/format:(?P<format>[^/]+).*)?',
views.legacy_redirects),
url('^search-tools/(?:(?P<category>[^/]+)/)?format:rss$',
SearchToolsRss(), name='browse.search-tools.rss'),
url('^search-tools/(?P<category>[^/]+)?$', views.search_tools,
name='browse.search-tools'),
url('^featured/format:rss$', FeaturedRss(), name='browse.featured.rss'),
# The plugins page was moved to mozilla.org and so now it is just a
# redirect, per bug 775799.
url('^plugins$',
lambda r: redirect('http://www.mozilla.org/en-US/plugincheck/',
permanent=True)),
]
| 36.5
| 78
| 0.603038
|
4a0022bc3874b5fc037b4ee9a984dbc7020b1469
| 3,355
|
py
|
Python
|
xid.py
|
graham/python_xid
|
4746c669a94241121ee0ae59ee0cb664d460d7a4
|
[
"MIT"
] | 44
|
2017-02-09T15:38:07.000Z
|
2022-03-09T23:06:42.000Z
|
xid.py
|
graham/python_xid
|
4746c669a94241121ee0ae59ee0cb664d460d7a4
|
[
"MIT"
] | 6
|
2017-01-18T12:35:19.000Z
|
2022-01-21T23:01:25.000Z
|
xid.py
|
graham/python_xid
|
4746c669a94241121ee0ae59ee0cb664d460d7a4
|
[
"MIT"
] | 25
|
2017-04-03T11:16:46.000Z
|
2022-03-28T03:14:12.000Z
|
# All credit to github.com/rs
# almost a direct copy of https://github.com/rs/xid
# Changes to make more pythonic as needed.
import hashlib
import os
import platform
import time
import datetime
import threading
import base32hex
# MyPy imports
try:
from typing import List
except:
pass # ignore, we do not need the typing module
# Some Constants
trimLen = 20
encodedLen = 24
decodedLen = 14
rawLen = 12
class InvalidXid(Exception):
pass
def randInt():
# type: () -> int
buf = str(os.urandom(3))
buford = list(map(ord, buf))
return buford[0] << 16 | buford[1] << 8 | buford[2]
def realMachineID():
# type: () -> List[int]
try:
hostname = platform.node()
hw = hashlib.md5()
hw.update(hostname.encode('utf-8'))
val = str(hw.digest()[:3])
return list(map(ord, val))
except:
buf = os.urandom(3)
return list(map(ord, buf))
## Module level items
pid = os.getpid()
machineID = realMachineID()
lock = threading.Lock()
def generateNextId():
id = randInt()
while True:
new_id = id + 1
id += 1
yield new_id
objectIDGenerator = generateNextId()
def generate_new_xid():
# type: () -> List[int]
now = int(time.time())
id = [0] * rawLen
id[0] = (now >> 24) & 0xff
id[1] = (now >> 16) & 0xff
id[2] = (now >> 8) & 0xff
id[3] = (now) & 0xff
id[4] = machineID[0]
id[5] = machineID[1]
id[6] = machineID[2]
id[7] = (pid >> 8) & 0xff
id[8] = (pid) & 0xff
lock.acquire()
i = next(objectIDGenerator)
lock.release()
id[9] = (i >> 16) & 0xff
id[10] = (i >> 8) & 0xff
id[11] = (i) & 0xff
return id
class Xid(object):
def __init__(self, id=None):
# type: (List[int]) -> None
if id is None:
id = generate_new_xid()
self.value = id
def pid(self):
# type: () -> int
return (self.value[7] << 8 | self.value[8])
def counter(self):
# type: () -> int
return (self.value[9] << 16 |
self.value[10] << 8 |
self.value[11])
def machine(self):
# type: () -> str
return ''.join(map(chr, self.value[4:7]))
def datetime(self):
return datetime.datetime.fromtimestamp(self.time())
def time(self):
# type: () -> int
return (self.value[0] << 24 |
self.value[1] << 16 |
self.value[2] << 8 |
self.value[3])
def string(self):
# type: () -> str
byte_value = self.bytes()
return base32hex.b32encode(byte_value).lower()[:trimLen]
def bytes(self):
# type: () -> str
return ''.join(map(chr, self.value))
def __repr__(self):
return "<Xid '%s'>" % self.__str__()
def __str__(self):
return self.string()
def __lt__(self, arg):
# type: (Xid) -> bool
return self.string() < arg.string()
def __gt__(self, arg):
# type: (Xid) -> bool
return self.string() > arg.string()
@classmethod
def from_string(cls, s):
# type: (str) -> Xid
val = base32hex.b32decode(s.upper())
value_check = [0 <= x < 255 for x in val]
if not all(value_check):
raise InvalidXid(s)
return cls(val)
| 21.645161
| 64
| 0.531446
|
4a00232b07f33039247ebc1661c06211c115cd69
| 1,055
|
py
|
Python
|
run.py
|
giussepi/cyto_CRLM
|
4489d5d81c4270ec7b6048ceb2f2a02bfa699177
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
giussepi/cyto_CRLM
|
4489d5d81c4270ec7b6048ceb2f2a02bfa699177
|
[
"Apache-2.0"
] | 6
|
2020-03-24T18:11:41.000Z
|
2022-03-12T00:16:18.000Z
|
run.py
|
giussepi/cyto_CRLM
|
4489d5d81c4270ec7b6048ceb2f2a02bfa699177
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# * Copyright (c) 2009-2018. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from cytomine import CytomineJob
def main(argv):
with CytomineJob.from_cli(argv) as cj:
        # Implement your software here.
cj.job.update(statusComment="Finished.")
if __name__ == "__main__":
import sys
main(sys.argv[1:])
| 29.305556
| 76
| 0.725118
|
4a002340e413d41946c1567064c1f223e07bfc84
| 150
|
py
|
Python
|
tests/test_folders.py
|
pierre-chaville/automlk
|
61386beba62f72360e1f5f8d6bcce17df653e2e8
|
[
"MIT"
] | 16
|
2017-09-05T12:26:11.000Z
|
2019-10-26T22:55:41.000Z
|
tests/test_folders.py
|
pierre-chaville/automlk
|
61386beba62f72360e1f5f8d6bcce17df653e2e8
|
[
"MIT"
] | 1
|
2018-02-07T11:16:43.000Z
|
2018-02-07T11:16:43.000Z
|
tests/test_folders.py
|
pierre-chaville/automlk
|
61386beba62f72360e1f5f8d6bcce17df653e2e8
|
[
"MIT"
] | 8
|
2017-09-21T01:20:52.000Z
|
2021-01-21T10:03:34.000Z
|
from automlk.dataset import get_data_folder, get_dataset_list
print('data folder:', get_data_folder())
print('list of datasets:', get_dataset_list())
| 37.5
| 61
| 0.8
|
4a002384f92d0ddc0f73fce1d8221313aa5dc225
| 3,051
|
py
|
Python
|
app/recipe/tests/test_ingredients_api.py
|
varshven/recipe-app-api
|
b746fde4257e9697c5a51b487918fca4b26ae73b
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_ingredients_api.py
|
varshven/recipe-app-api
|
b746fde4257e9697c5a51b487918fca4b26ae73b
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_ingredients_api.py
|
varshven/recipe-app-api
|
b746fde4257e9697c5a51b487918fca4b26ae73b
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
"""Test the publicly available Ingredients API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login is required to access the endpoint"""
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
"""Test the private ingredients API"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@londonappdev.com',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredients_list(self):
"""Test retrieving a list of ingredients"""
Ingredient.objects.create(user=self.user, name='Kale')
Ingredient.objects.create(user=self.user, name='Salt')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_ingredients_limited_to_user(self):
"""Test that ingredients for the authenticated user are returned"""
user2 = get_user_model().objects.create_user(
'test1@londonappdev.com',
'testpass1'
)
Ingredient.objects.create(user=user2, name='Vinegar')
ingredient = Ingredient.objects.create(user=self.user, name='Turmeric')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successful(self):
"""Test that creating an Ingredient is successful"""
payload = {'name': 'Cabbage'}
res = self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name'],
).exists()
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
"""Test that creating an invalid ingredient fails"""
payload = {'name': ''}
res = self.client.post(INGREDIENTS_URL, payload)
# exists = Ingredient.objects.filter(
# self.user,
# name=payload['name'],
# ).exists()
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
# self.assertTrue(exists)
| 34.670455
| 79
| 0.66765
|
4a002433b20015e3226120905d3155b68a51ab67
| 1,321
|
py
|
Python
|
tests/extract_method/test_4.py
|
saeedshakeri/CodART
|
ce203b26883ae73d4665e552482d7afe115123a0
|
[
"MIT"
] | 18
|
2020-11-26T08:31:27.000Z
|
2022-03-28T07:35:41.000Z
|
tests/extract_method/test_4.py
|
MinaTahaei/CodART
|
5bc44560a4c502f85c33e29a9c0345273f8f064e
|
[
"MIT"
] | 82
|
2020-12-25T08:26:27.000Z
|
2022-03-25T06:11:36.000Z
|
tests/extract_method/test_4.py
|
MinaTahaei/CodART
|
5bc44560a4c502f85c33e29a9c0345273f8f064e
|
[
"MIT"
] | 59
|
2020-11-26T08:31:42.000Z
|
2022-02-04T11:09:03.000Z
|
"""
extracting lines containing access to global variables
test status: pass
"""
from refactorings.extract_method import extract_method
import os
import errno
def main():
base_dir = '/mnt/d/Sajad/Uni/Spring00/Compiler/CodART/'
if not os.path.exists(os.path.dirname(
base_dir + "tests/extract_method/out/benchmark_projects/ganttproject/biz.ganttproject.core/src/main/java/biz/ganttproject/core/calendar/WeekendCalendarImpl.java")):
try:
os.makedirs(os.path.dirname(
base_dir + "tests/extract_method/out/benchmark_projects/ganttproject/biz.ganttproject.core/src/main/java/biz/ganttproject/core/calendar/WeekendCalendarImpl.java"))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
_conf = {
'target_file': base_dir + "benchmark_projects/ganttproject/biz.ganttproject.core/src/main/java/biz/ganttproject/core/calendar/WeekendCalendarImpl.java",
'output_file': base_dir + "tests/extract_method/out/benchmark_projects/ganttproject/biz.ganttproject.core/src/main/java/biz/ganttproject/core/calendar/WeekendCalendarImpl.java",
'lines': [87,88],
'new_method_name': 'clearMap',
}
extract_method(_conf)
if __name__ == '__main__':
main()
| 41.28125
| 185
| 0.713853
|
4a0025126c812ea6047e5624826e86627c38247c
| 36,478
|
py
|
Python
|
homeassistant/components/light/__init__.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 5
|
2020-10-08T12:59:44.000Z
|
2021-12-28T06:46:25.000Z
|
homeassistant/components/light/__init__.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 75
|
2020-08-05T07:22:42.000Z
|
2022-03-23T21:54:57.000Z
|
homeassistant/components/light/__init__.py
|
winning1120xx/home-assistant
|
53d4c0ce2d374b5e97bbdc37742656c27adf8eea
|
[
"Apache-2.0"
] | 11
|
2020-12-16T13:48:14.000Z
|
2022-02-01T00:28:05.000Z
|
"""Provides functionality to interact with lights."""
from __future__ import annotations
from collections.abc import Iterable
import csv
import dataclasses
from datetime import timedelta
import logging
import os
from typing import cast, final
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import HomeAssistant, HomeAssistantError, callback
from homeassistant.helpers import config_validation as cv, entity_registry as er
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
make_entity_service_schema,
)
from homeassistant.helpers.entity import ToggleEntity, ToggleEntityDescription
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.loader import bind_hass
import homeassistant.util.color as color_util
# mypy: allow-untyped-defs, no-check-untyped-defs
DOMAIN = "light"
SCAN_INTERVAL = timedelta(seconds=30)
DATA_PROFILES = "light_profiles"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
# Bitfield of features supported by the light entity
SUPPORT_BRIGHTNESS = 1 # Deprecated, replaced by color modes
SUPPORT_COLOR_TEMP = 2 # Deprecated, replaced by color modes
SUPPORT_EFFECT = 4
SUPPORT_FLASH = 8
SUPPORT_COLOR = 16 # Deprecated, replaced by color modes
SUPPORT_TRANSITION = 32
SUPPORT_WHITE_VALUE = 128 # Deprecated, replaced by color modes
# Color mode of the light
ATTR_COLOR_MODE = "color_mode"
# List of color modes supported by the light
ATTR_SUPPORTED_COLOR_MODES = "supported_color_modes"
# Possible color modes
COLOR_MODE_UNKNOWN = "unknown" # Ambiguous color mode
COLOR_MODE_ONOFF = "onoff" # Must be the only supported mode
COLOR_MODE_BRIGHTNESS = "brightness" # Must be the only supported mode
COLOR_MODE_COLOR_TEMP = "color_temp"
COLOR_MODE_HS = "hs"
COLOR_MODE_XY = "xy"
COLOR_MODE_RGB = "rgb"
COLOR_MODE_RGBW = "rgbw"
COLOR_MODE_RGBWW = "rgbww"
COLOR_MODE_WHITE = "white" # Must *NOT* be the only supported mode
VALID_COLOR_MODES = {
COLOR_MODE_ONOFF,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
COLOR_MODE_XY,
COLOR_MODE_RGB,
COLOR_MODE_RGBW,
COLOR_MODE_RGBWW,
COLOR_MODE_WHITE,
}
COLOR_MODES_BRIGHTNESS = VALID_COLOR_MODES - {COLOR_MODE_ONOFF}
COLOR_MODES_COLOR = {
COLOR_MODE_HS,
COLOR_MODE_RGB,
COLOR_MODE_RGBW,
COLOR_MODE_RGBWW,
COLOR_MODE_XY,
}
def valid_supported_color_modes(color_modes: Iterable[str]) -> set[str]:
"""Validate the given color modes."""
color_modes = set(color_modes)
if (
not color_modes
or COLOR_MODE_UNKNOWN in color_modes
or (COLOR_MODE_BRIGHTNESS in color_modes and len(color_modes) > 1)
or (COLOR_MODE_ONOFF in color_modes and len(color_modes) > 1)
or (COLOR_MODE_WHITE in color_modes and not color_supported(color_modes))
):
raise vol.Error(f"Invalid supported_color_modes {sorted(color_modes)}")
return color_modes
def brightness_supported(color_modes: Iterable[str] | None) -> bool:
"""Test if brightness is supported."""
if not color_modes:
return False
return any(mode in COLOR_MODES_BRIGHTNESS for mode in color_modes)
def color_supported(color_modes: Iterable[str] | None) -> bool:
"""Test if color is supported."""
if not color_modes:
return False
return any(mode in COLOR_MODES_COLOR for mode in color_modes)
def color_temp_supported(color_modes: Iterable[str] | None) -> bool:
"""Test if color temperature is supported."""
if not color_modes:
return False
return COLOR_MODE_COLOR_TEMP in color_modes
def get_supported_color_modes(hass: HomeAssistant, entity_id: str) -> set | None:
"""Get supported color modes for a light entity.
First try the statemachine, then entity registry.
This is the equivalent of entity helper get_supported_features.
"""
state = hass.states.get(entity_id)
if state:
return state.attributes.get(ATTR_SUPPORTED_COLOR_MODES)
entity_registry = er.async_get(hass)
entry = entity_registry.async_get(entity_id)
if not entry:
raise HomeAssistantError(f"Unknown entity {entity_id}")
if not entry.capabilities:
return None
return entry.capabilities.get(ATTR_SUPPORTED_COLOR_MODES)
# Float that represents transition time in seconds to make change.
ATTR_TRANSITION = "transition"
# Lists holding color values
ATTR_RGB_COLOR = "rgb_color"
ATTR_RGBW_COLOR = "rgbw_color"
ATTR_RGBWW_COLOR = "rgbww_color"
ATTR_XY_COLOR = "xy_color"
ATTR_HS_COLOR = "hs_color"
ATTR_COLOR_TEMP = "color_temp"
ATTR_KELVIN = "kelvin"
ATTR_MIN_MIREDS = "min_mireds"
ATTR_MAX_MIREDS = "max_mireds"
ATTR_COLOR_NAME = "color_name"
ATTR_WHITE_VALUE = "white_value"
ATTR_WHITE = "white"
# Brightness of the light, 0..255 or percentage
ATTR_BRIGHTNESS = "brightness"
ATTR_BRIGHTNESS_PCT = "brightness_pct"
ATTR_BRIGHTNESS_STEP = "brightness_step"
ATTR_BRIGHTNESS_STEP_PCT = "brightness_step_pct"
# String representing a profile (built-in ones or externally defined).
ATTR_PROFILE = "profile"
# If the light should flash, can be FLASH_SHORT or FLASH_LONG.
ATTR_FLASH = "flash"
FLASH_SHORT = "short"
FLASH_LONG = "long"
# List of possible effects
ATTR_EFFECT_LIST = "effect_list"
# Apply an effect to the light, can be EFFECT_COLORLOOP.
ATTR_EFFECT = "effect"
EFFECT_COLORLOOP = "colorloop"
EFFECT_RANDOM = "random"
EFFECT_WHITE = "white"
COLOR_GROUP = "Color descriptors"
LIGHT_PROFILES_FILE = "light_profiles.csv"
# Service call validation schemas
VALID_TRANSITION = vol.All(vol.Coerce(float), vol.Clamp(min=0, max=6553))
VALID_BRIGHTNESS = vol.All(vol.Coerce(int), vol.Clamp(min=0, max=255))
VALID_BRIGHTNESS_PCT = vol.All(vol.Coerce(float), vol.Range(min=0, max=100))
VALID_BRIGHTNESS_STEP = vol.All(vol.Coerce(int), vol.Clamp(min=-255, max=255))
VALID_BRIGHTNESS_STEP_PCT = vol.All(vol.Coerce(float), vol.Clamp(min=-100, max=100))
VALID_FLASH = vol.In([FLASH_SHORT, FLASH_LONG])
LIGHT_TURN_ON_SCHEMA = {
vol.Exclusive(ATTR_PROFILE, COLOR_GROUP): cv.string,
ATTR_TRANSITION: VALID_TRANSITION,
vol.Exclusive(ATTR_BRIGHTNESS, ATTR_BRIGHTNESS): VALID_BRIGHTNESS,
vol.Exclusive(ATTR_BRIGHTNESS_PCT, ATTR_BRIGHTNESS): VALID_BRIGHTNESS_PCT,
vol.Exclusive(ATTR_BRIGHTNESS_STEP, ATTR_BRIGHTNESS): VALID_BRIGHTNESS_STEP,
vol.Exclusive(ATTR_BRIGHTNESS_STEP_PCT, ATTR_BRIGHTNESS): VALID_BRIGHTNESS_STEP_PCT,
vol.Exclusive(ATTR_COLOR_NAME, COLOR_GROUP): cv.string,
vol.Exclusive(ATTR_COLOR_TEMP, COLOR_GROUP): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Exclusive(ATTR_KELVIN, COLOR_GROUP): cv.positive_int,
vol.Exclusive(ATTR_HS_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence(
(
vol.All(vol.Coerce(float), vol.Range(min=0, max=360)),
vol.All(vol.Coerce(float), vol.Range(min=0, max=100)),
)
),
vol.Coerce(tuple),
),
vol.Exclusive(ATTR_RGB_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence((cv.byte,) * 3), vol.Coerce(tuple)
),
vol.Exclusive(ATTR_RGBW_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence((cv.byte,) * 4), vol.Coerce(tuple)
),
vol.Exclusive(ATTR_RGBWW_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence((cv.byte,) * 5), vol.Coerce(tuple)
),
vol.Exclusive(ATTR_XY_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence((cv.small_float, cv.small_float)), vol.Coerce(tuple)
),
vol.Exclusive(ATTR_WHITE, COLOR_GROUP): VALID_BRIGHTNESS,
ATTR_WHITE_VALUE: vol.All(vol.Coerce(int), vol.Range(min=0, max=255)),
ATTR_FLASH: VALID_FLASH,
ATTR_EFFECT: cv.string,
}
LIGHT_TURN_OFF_SCHEMA = {ATTR_TRANSITION: VALID_TRANSITION, ATTR_FLASH: VALID_FLASH}
_LOGGER = logging.getLogger(__name__)
@bind_hass
def is_on(hass: HomeAssistant, entity_id: str) -> bool:
"""Return if the lights are on based on the statemachine."""
return hass.states.is_state(entity_id, STATE_ON)
def preprocess_turn_on_alternatives(hass, params):
"""Process extra data for turn light on request.
Async friendly.
"""
# Bail out, we process this later.
if ATTR_BRIGHTNESS_STEP in params or ATTR_BRIGHTNESS_STEP_PCT in params:
return
if ATTR_PROFILE in params:
hass.data[DATA_PROFILES].apply_profile(params.pop(ATTR_PROFILE), params)
color_name = params.pop(ATTR_COLOR_NAME, None)
if color_name is not None:
try:
params[ATTR_RGB_COLOR] = color_util.color_name_to_rgb(color_name)
except ValueError:
_LOGGER.warning("Got unknown color %s, falling back to white", color_name)
params[ATTR_RGB_COLOR] = (255, 255, 255)
kelvin = params.pop(ATTR_KELVIN, None)
if kelvin is not None:
mired = color_util.color_temperature_kelvin_to_mired(kelvin)
params[ATTR_COLOR_TEMP] = int(mired)
brightness_pct = params.pop(ATTR_BRIGHTNESS_PCT, None)
if brightness_pct is not None:
params[ATTR_BRIGHTNESS] = round(255 * brightness_pct / 100)
def filter_turn_off_params(light, params):
"""Filter out params not used in turn off or not supported by the light."""
supported_features = light.supported_features
if not supported_features & SUPPORT_FLASH:
params.pop(ATTR_FLASH, None)
if not supported_features & SUPPORT_TRANSITION:
params.pop(ATTR_TRANSITION, None)
return {k: v for k, v in params.items() if k in (ATTR_TRANSITION, ATTR_FLASH)}
def filter_turn_on_params(light, params):
"""Filter out params not supported by the light."""
supported_features = light.supported_features
if not supported_features & SUPPORT_EFFECT:
params.pop(ATTR_EFFECT, None)
if not supported_features & SUPPORT_FLASH:
params.pop(ATTR_FLASH, None)
if not supported_features & SUPPORT_TRANSITION:
params.pop(ATTR_TRANSITION, None)
if not supported_features & SUPPORT_WHITE_VALUE:
params.pop(ATTR_WHITE_VALUE, None)
supported_color_modes = (
light._light_internal_supported_color_modes # pylint:disable=protected-access
)
if not brightness_supported(supported_color_modes):
params.pop(ATTR_BRIGHTNESS, None)
if COLOR_MODE_COLOR_TEMP not in supported_color_modes:
params.pop(ATTR_COLOR_TEMP, None)
if COLOR_MODE_HS not in supported_color_modes:
params.pop(ATTR_HS_COLOR, None)
if COLOR_MODE_RGB not in supported_color_modes:
params.pop(ATTR_RGB_COLOR, None)
if COLOR_MODE_RGBW not in supported_color_modes:
params.pop(ATTR_RGBW_COLOR, None)
if COLOR_MODE_RGBWW not in supported_color_modes:
params.pop(ATTR_RGBWW_COLOR, None)
if COLOR_MODE_WHITE not in supported_color_modes:
params.pop(ATTR_WHITE, None)
if COLOR_MODE_XY not in supported_color_modes:
params.pop(ATTR_XY_COLOR, None)
return params
async def async_setup(hass, config): # noqa: C901
"""Expose light control via state machine and services."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
profiles = hass.data[DATA_PROFILES] = Profiles(hass)
await profiles.async_initialize()
def preprocess_data(data):
"""Preprocess the service data."""
base = {
entity_field: data.pop(entity_field)
for entity_field in cv.ENTITY_SERVICE_FIELDS
if entity_field in data
}
preprocess_turn_on_alternatives(hass, data)
base["params"] = data
return base
async def async_handle_light_on_service(light, call):
"""Handle turning a light on.
If brightness is set to 0, this service will turn the light off.
"""
params = dict(call.data["params"])
# Only process params once we processed brightness step
if params and (
ATTR_BRIGHTNESS_STEP in params or ATTR_BRIGHTNESS_STEP_PCT in params
):
brightness = light.brightness if light.is_on else 0
if ATTR_BRIGHTNESS_STEP in params:
brightness += params.pop(ATTR_BRIGHTNESS_STEP)
else:
brightness += round(params.pop(ATTR_BRIGHTNESS_STEP_PCT) / 100 * 255)
params[ATTR_BRIGHTNESS] = max(0, min(255, brightness))
preprocess_turn_on_alternatives(hass, params)
if (not params or not light.is_on) or (
params and ATTR_TRANSITION not in params
):
profiles.apply_default(light.entity_id, light.is_on, params)
legacy_supported_color_modes = (
light._light_internal_supported_color_modes # pylint: disable=protected-access
)
supported_color_modes = light.supported_color_modes
# Backwards compatibility: if an RGBWW color is specified, convert to RGB + W
# for legacy lights
if ATTR_RGBW_COLOR in params:
if (
COLOR_MODE_RGBW in legacy_supported_color_modes
and not supported_color_modes
):
rgbw_color = params.pop(ATTR_RGBW_COLOR)
params[ATTR_RGB_COLOR] = rgbw_color[0:3]
params[ATTR_WHITE_VALUE] = rgbw_color[3]
# If a color temperature is specified, emulate it if not supported by the light
if (
ATTR_COLOR_TEMP in params
and COLOR_MODE_COLOR_TEMP not in legacy_supported_color_modes
):
color_temp = params.pop(ATTR_COLOR_TEMP)
if color_supported(legacy_supported_color_modes):
temp_k = color_util.color_temperature_mired_to_kelvin(color_temp)
params[ATTR_HS_COLOR] = color_util.color_temperature_to_hs(temp_k)
# If a color is specified, convert to the color space supported by the light
# Backwards compatibility: Fall back to hs color if light.supported_color_modes
# is not implemented
if not supported_color_modes:
if (rgb_color := params.pop(ATTR_RGB_COLOR, None)) is not None:
params[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
elif (xy_color := params.pop(ATTR_XY_COLOR, None)) is not None:
params[ATTR_HS_COLOR] = color_util.color_xy_to_hs(*xy_color)
elif ATTR_HS_COLOR in params and COLOR_MODE_HS not in supported_color_modes:
hs_color = params.pop(ATTR_HS_COLOR)
if COLOR_MODE_RGB in supported_color_modes:
params[ATTR_RGB_COLOR] = color_util.color_hs_to_RGB(*hs_color)
elif COLOR_MODE_RGBW in supported_color_modes:
rgb_color = color_util.color_hs_to_RGB(*hs_color)
params[ATTR_RGBW_COLOR] = color_util.color_rgb_to_rgbw(*rgb_color)
elif COLOR_MODE_RGBWW in supported_color_modes:
rgb_color = color_util.color_hs_to_RGB(*hs_color)
params[ATTR_RGBWW_COLOR] = color_util.color_rgb_to_rgbww(
*rgb_color, light.min_mireds, light.max_mireds
)
elif COLOR_MODE_XY in supported_color_modes:
params[ATTR_XY_COLOR] = color_util.color_hs_to_xy(*hs_color)
elif ATTR_RGB_COLOR in params and COLOR_MODE_RGB not in supported_color_modes:
rgb_color = params.pop(ATTR_RGB_COLOR)
if COLOR_MODE_RGBW in supported_color_modes:
params[ATTR_RGBW_COLOR] = color_util.color_rgb_to_rgbw(*rgb_color)
elif COLOR_MODE_RGBWW in supported_color_modes:
params[ATTR_RGBWW_COLOR] = color_util.color_rgb_to_rgbww(
*rgb_color, light.min_mireds, light.max_mireds
)
elif COLOR_MODE_HS in supported_color_modes:
params[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
elif COLOR_MODE_XY in supported_color_modes:
params[ATTR_XY_COLOR] = color_util.color_RGB_to_xy(*rgb_color)
elif ATTR_XY_COLOR in params and COLOR_MODE_XY not in supported_color_modes:
xy_color = params.pop(ATTR_XY_COLOR)
if COLOR_MODE_HS in supported_color_modes:
params[ATTR_HS_COLOR] = color_util.color_xy_to_hs(*xy_color)
elif COLOR_MODE_RGB in supported_color_modes:
params[ATTR_RGB_COLOR] = color_util.color_xy_to_RGB(*xy_color)
elif COLOR_MODE_RGBW in supported_color_modes:
rgb_color = color_util.color_xy_to_RGB(*xy_color)
params[ATTR_RGBW_COLOR] = color_util.color_rgb_to_rgbw(*rgb_color)
elif COLOR_MODE_RGBWW in supported_color_modes:
rgb_color = color_util.color_xy_to_RGB(*xy_color)
params[ATTR_RGBWW_COLOR] = color_util.color_rgb_to_rgbww(
*rgb_color, light.min_mireds, light.max_mireds
)
# If both white and brightness are specified, override white
if (
supported_color_modes
and ATTR_WHITE in params
and COLOR_MODE_WHITE in supported_color_modes
):
params[ATTR_WHITE] = params.pop(ATTR_BRIGHTNESS, params[ATTR_WHITE])
# Remove deprecated white value if the light supports color mode
if supported_color_modes:
params.pop(ATTR_WHITE_VALUE, None)
if params.get(ATTR_BRIGHTNESS) == 0 or params.get(ATTR_WHITE) == 0:
await async_handle_light_off_service(light, call)
else:
await light.async_turn_on(**filter_turn_on_params(light, params))
async def async_handle_light_off_service(light, call):
"""Handle turning off a light."""
params = dict(call.data["params"])
if ATTR_TRANSITION not in params:
profiles.apply_default(light.entity_id, True, params)
await light.async_turn_off(**filter_turn_off_params(light, params))
async def async_handle_toggle_service(light, call):
"""Handle toggling a light."""
if light.is_on:
await async_handle_light_off_service(light, call)
else:
await async_handle_light_on_service(light, call)
# Listen for light on and light off service calls.
component.async_register_entity_service(
SERVICE_TURN_ON,
vol.All(cv.make_entity_service_schema(LIGHT_TURN_ON_SCHEMA), preprocess_data),
async_handle_light_on_service,
)
component.async_register_entity_service(
SERVICE_TURN_OFF,
vol.All(cv.make_entity_service_schema(LIGHT_TURN_OFF_SCHEMA), preprocess_data),
async_handle_light_off_service,
)
component.async_register_entity_service(
SERVICE_TOGGLE,
vol.All(cv.make_entity_service_schema(LIGHT_TURN_ON_SCHEMA), preprocess_data),
async_handle_toggle_service,
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up a config entry."""
component = cast(EntityComponent, hass.data[DOMAIN])
return await component.async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
component = cast(EntityComponent, hass.data[DOMAIN])
return await component.async_unload_entry(entry)
def _coerce_none(value: str) -> None:
"""Coerce an empty string as None."""
if not isinstance(value, str):
raise vol.Invalid("Expected a string")
if value:
raise vol.Invalid("Not an empty string")
@dataclasses.dataclass
class Profile:
"""Representation of a profile."""
name: str
color_x: float | None = dataclasses.field(repr=False)
color_y: float | None = dataclasses.field(repr=False)
brightness: int | None
transition: int | None = None
hs_color: tuple[float, float] | None = dataclasses.field(init=False)
SCHEMA = vol.Schema(
vol.Any(
vol.ExactSequence(
(
str,
vol.Any(cv.small_float, _coerce_none),
vol.Any(cv.small_float, _coerce_none),
vol.Any(cv.byte, _coerce_none),
)
),
vol.ExactSequence(
(
str,
vol.Any(cv.small_float, _coerce_none),
vol.Any(cv.small_float, _coerce_none),
vol.Any(cv.byte, _coerce_none),
vol.Any(VALID_TRANSITION, _coerce_none),
)
),
)
)
def __post_init__(self) -> None:
"""Convert xy to hs color."""
if None in (self.color_x, self.color_y):
self.hs_color = None
return
self.hs_color = color_util.color_xy_to_hs(
cast(float, self.color_x), cast(float, self.color_y)
)
@classmethod
def from_csv_row(cls, csv_row: list[str]) -> Profile:
"""Create profile from a CSV row tuple."""
return cls(*cls.SCHEMA(csv_row))
class Profiles:
"""Representation of available color profiles."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize profiles."""
self.hass = hass
self.data: dict[str, Profile] = {}
def _load_profile_data(self) -> dict[str, Profile]:
"""Load built-in profiles and custom profiles."""
profile_paths = [
os.path.join(os.path.dirname(__file__), LIGHT_PROFILES_FILE),
self.hass.config.path(LIGHT_PROFILES_FILE),
]
profiles = {}
for profile_path in profile_paths:
if not os.path.isfile(profile_path):
continue
with open(profile_path, encoding="utf8") as inp:
reader = csv.reader(inp)
# Skip the header
next(reader, None)
try:
for rec in reader:
profile = Profile.from_csv_row(rec)
profiles[profile.name] = profile
except vol.MultipleInvalid as ex:
_LOGGER.error(
"Error parsing light profile row '%s' from %s: %s",
rec,
profile_path,
ex,
)
continue
return profiles
async def async_initialize(self) -> None:
"""Load and cache profiles."""
self.data = await self.hass.async_add_executor_job(self._load_profile_data)
@callback
def apply_default(self, entity_id: str, state_on: bool, params: dict) -> None:
"""Return the default profile for the given light."""
for _entity_id in (entity_id, "group.all_lights"):
name = f"{_entity_id}.default"
if name in self.data:
if not state_on or not params:
self.apply_profile(name, params)
elif self.data[name].transition is not None:
params.setdefault(ATTR_TRANSITION, self.data[name].transition)
@callback
def apply_profile(self, name: str, params: dict) -> None:
"""Apply a profile."""
profile = self.data.get(name)
if profile is None:
return
if profile.hs_color is not None:
params.setdefault(ATTR_HS_COLOR, profile.hs_color)
if profile.brightness is not None:
params.setdefault(ATTR_BRIGHTNESS, profile.brightness)
if profile.transition is not None:
params.setdefault(ATTR_TRANSITION, profile.transition)
@dataclasses.dataclass
class LightEntityDescription(ToggleEntityDescription):
"""A class that describes binary sensor entities."""
class LightEntity(ToggleEntity):
"""Base class for light entities."""
entity_description: LightEntityDescription
_attr_brightness: int | None = None
_attr_color_mode: str | None = None
_attr_color_temp: int | None = None
_attr_effect_list: list[str] | None = None
_attr_effect: str | None = None
_attr_hs_color: tuple[float, float] | None = None
_attr_max_mireds: int = 500
_attr_min_mireds: int = 153
_attr_rgb_color: tuple[int, int, int] | None = None
_attr_rgbw_color: tuple[int, int, int, int] | None = None
_attr_rgbww_color: tuple[int, int, int, int, int] | None = None
_attr_supported_color_modes: set[str] | None = None
_attr_supported_features: int = 0
_attr_xy_color: tuple[float, float] | None = None
@property
def brightness(self) -> int | None:
"""Return the brightness of this light between 0..255."""
return self._attr_brightness
@property
def color_mode(self) -> str | None:
"""Return the color mode of the light."""
return self._attr_color_mode
@property
def _light_internal_color_mode(self) -> str:
"""Return the color mode of the light with backwards compatibility."""
color_mode = self.color_mode
if color_mode is None:
# Backwards compatibility for color_mode added in 2021.4
# Add warning in 2021.6, remove in 2021.10
supported = self._light_internal_supported_color_modes
if (
COLOR_MODE_RGBW in supported
and self.white_value is not None
and self.hs_color is not None
):
return COLOR_MODE_RGBW
if COLOR_MODE_HS in supported and self.hs_color is not None:
return COLOR_MODE_HS
if COLOR_MODE_COLOR_TEMP in supported and self.color_temp is not None:
return COLOR_MODE_COLOR_TEMP
if COLOR_MODE_BRIGHTNESS in supported and self.brightness is not None:
return COLOR_MODE_BRIGHTNESS
if COLOR_MODE_ONOFF in supported:
return COLOR_MODE_ONOFF
return COLOR_MODE_UNKNOWN
return color_mode
@property
def hs_color(self) -> tuple[float, float] | None:
"""Return the hue and saturation color value [float, float]."""
return self._attr_hs_color
@property
def xy_color(self) -> tuple[float, float] | None:
"""Return the xy color value [float, float]."""
return self._attr_xy_color
@property
def rgb_color(self) -> tuple[int, int, int] | None:
"""Return the rgb color value [int, int, int]."""
return self._attr_rgb_color
@property
def rgbw_color(self) -> tuple[int, int, int, int] | None:
"""Return the rgbw color value [int, int, int, int]."""
return self._attr_rgbw_color
@property
def _light_internal_rgbw_color(self) -> tuple[int, int, int, int] | None:
"""Return the rgbw color value [int, int, int, int]."""
rgbw_color = self.rgbw_color
if (
rgbw_color is None
and self.hs_color is not None
and self.white_value is not None
):
# Backwards compatibility for rgbw_color added in 2021.4
# Add warning in 2021.6, remove in 2021.10
r, g, b = color_util.color_hs_to_RGB( # pylint: disable=invalid-name
*self.hs_color
)
w = self.white_value # pylint: disable=invalid-name
rgbw_color = (r, g, b, w)
return rgbw_color
@property
def rgbww_color(self) -> tuple[int, int, int, int, int] | None:
"""Return the rgbww color value [int, int, int, int, int]."""
return self._attr_rgbww_color
@property
def color_temp(self) -> int | None:
"""Return the CT color value in mireds."""
return self._attr_color_temp
@property
def min_mireds(self) -> int:
"""Return the coldest color_temp that this light supports."""
# Default to the Philips Hue value that HA has always assumed
# https://developers.meethue.com/documentation/core-concepts
return self._attr_min_mireds
@property
def max_mireds(self) -> int:
"""Return the warmest color_temp that this light supports."""
# Default to the Philips Hue value that HA has always assumed
# https://developers.meethue.com/documentation/core-concepts
return self._attr_max_mireds
@property
def white_value(self) -> int | None:
"""Return the white value of this light between 0..255."""
return None
@property
def effect_list(self) -> list[str] | None:
"""Return the list of supported effects."""
return self._attr_effect_list
@property
def effect(self) -> str | None:
"""Return the current effect."""
return self._attr_effect
@property
def capability_attributes(self):
"""Return capability attributes."""
data = {}
supported_features = self.supported_features
supported_color_modes = self._light_internal_supported_color_modes
if COLOR_MODE_COLOR_TEMP in supported_color_modes:
data[ATTR_MIN_MIREDS] = self.min_mireds
data[ATTR_MAX_MIREDS] = self.max_mireds
if supported_features & SUPPORT_EFFECT:
data[ATTR_EFFECT_LIST] = self.effect_list
data[ATTR_SUPPORTED_COLOR_MODES] = sorted(supported_color_modes)
return data
    def _light_internal_convert_color(self, color_mode: str) -> dict:
        """Return derived color attributes for the given color mode."""
data: dict[str, tuple] = {}
if color_mode == COLOR_MODE_HS and self.hs_color:
hs_color = self.hs_color
data[ATTR_HS_COLOR] = (round(hs_color[0], 3), round(hs_color[1], 3))
data[ATTR_RGB_COLOR] = color_util.color_hs_to_RGB(*hs_color)
data[ATTR_XY_COLOR] = color_util.color_hs_to_xy(*hs_color)
elif color_mode == COLOR_MODE_XY and self.xy_color:
xy_color = self.xy_color
data[ATTR_HS_COLOR] = color_util.color_xy_to_hs(*xy_color)
data[ATTR_RGB_COLOR] = color_util.color_xy_to_RGB(*xy_color)
data[ATTR_XY_COLOR] = (round(xy_color[0], 6), round(xy_color[1], 6))
elif color_mode == COLOR_MODE_RGB and self.rgb_color:
rgb_color = self.rgb_color
data[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
data[ATTR_RGB_COLOR] = tuple(int(x) for x in rgb_color[0:3])
data[ATTR_XY_COLOR] = color_util.color_RGB_to_xy(*rgb_color)
elif color_mode == COLOR_MODE_RGBW and self._light_internal_rgbw_color:
rgbw_color = self._light_internal_rgbw_color
rgb_color = color_util.color_rgbw_to_rgb(*rgbw_color)
data[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
data[ATTR_RGB_COLOR] = tuple(int(x) for x in rgb_color[0:3])
data[ATTR_RGBW_COLOR] = tuple(int(x) for x in rgbw_color[0:4])
data[ATTR_XY_COLOR] = color_util.color_RGB_to_xy(*rgb_color)
elif color_mode == COLOR_MODE_RGBWW and self.rgbww_color:
rgbww_color = self.rgbww_color
rgb_color = color_util.color_rgbww_to_rgb(
*rgbww_color, self.min_mireds, self.max_mireds
)
data[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
data[ATTR_RGB_COLOR] = tuple(int(x) for x in rgb_color[0:3])
data[ATTR_RGBWW_COLOR] = tuple(int(x) for x in rgbww_color[0:5])
data[ATTR_XY_COLOR] = color_util.color_RGB_to_xy(*rgb_color)
elif color_mode == COLOR_MODE_COLOR_TEMP and self.color_temp:
hs_color = color_util.color_temperature_to_hs(
color_util.color_temperature_mired_to_kelvin(self.color_temp)
)
data[ATTR_HS_COLOR] = (round(hs_color[0], 3), round(hs_color[1], 3))
data[ATTR_RGB_COLOR] = color_util.color_hs_to_RGB(*hs_color)
data[ATTR_XY_COLOR] = color_util.color_hs_to_xy(*hs_color)
return data
@final
@property
def state_attributes(self):
"""Return state attributes."""
if not self.is_on:
return None
data = {}
supported_features = self.supported_features
color_mode = self._light_internal_color_mode
if color_mode not in self._light_internal_supported_color_modes:
# Increase severity to warning in 2021.6, reject in 2021.10
_LOGGER.debug(
"%s: set to unsupported color_mode: %s, supported_color_modes: %s",
self.entity_id,
color_mode,
self._light_internal_supported_color_modes,
)
data[ATTR_COLOR_MODE] = color_mode
if color_mode in COLOR_MODES_BRIGHTNESS:
data[ATTR_BRIGHTNESS] = self.brightness
elif supported_features & SUPPORT_BRIGHTNESS:
# Backwards compatibility for ambiguous / incomplete states
# Add warning in 2021.6, remove in 2021.10
data[ATTR_BRIGHTNESS] = self.brightness
if color_mode == COLOR_MODE_COLOR_TEMP:
data[ATTR_COLOR_TEMP] = self.color_temp
if color_mode in COLOR_MODES_COLOR or color_mode == COLOR_MODE_COLOR_TEMP:
data.update(self._light_internal_convert_color(color_mode))
if supported_features & SUPPORT_COLOR_TEMP and not self.supported_color_modes:
# Backwards compatibility
# Add warning in 2021.6, remove in 2021.10
data[ATTR_COLOR_TEMP] = self.color_temp
if supported_features & SUPPORT_WHITE_VALUE and not self.supported_color_modes:
# Backwards compatibility
# Add warning in 2021.6, remove in 2021.10
data[ATTR_WHITE_VALUE] = self.white_value
if self.hs_color is not None:
data.update(self._light_internal_convert_color(COLOR_MODE_HS))
if supported_features & SUPPORT_EFFECT:
data[ATTR_EFFECT] = self.effect
return {key: val for key, val in data.items() if val is not None}
@property
def _light_internal_supported_color_modes(self) -> set:
"""Calculate supported color modes with backwards compatibility."""
supported_color_modes = self.supported_color_modes
if supported_color_modes is None:
# Backwards compatibility for supported_color_modes added in 2021.4
# Add warning in 2021.6, remove in 2021.10
supported_features = self.supported_features
supported_color_modes = set()
if supported_features & SUPPORT_COLOR_TEMP:
supported_color_modes.add(COLOR_MODE_COLOR_TEMP)
if supported_features & SUPPORT_COLOR:
supported_color_modes.add(COLOR_MODE_HS)
if supported_features & SUPPORT_WHITE_VALUE:
supported_color_modes.add(COLOR_MODE_RGBW)
if supported_features & SUPPORT_BRIGHTNESS and not supported_color_modes:
supported_color_modes = {COLOR_MODE_BRIGHTNESS}
if not supported_color_modes:
supported_color_modes = {COLOR_MODE_ONOFF}
return supported_color_modes
@property
def supported_color_modes(self) -> set[str] | None:
"""Flag supported color modes."""
return self._attr_supported_color_modes
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._attr_supported_features
class Light(LightEntity):
"""Representation of a light (for backwards compatibility)."""
def __init_subclass__(cls, **kwargs):
"""Print deprecation warning."""
super().__init_subclass__(**kwargs)
_LOGGER.warning(
"Light is deprecated, modify %s to extend LightEntity",
cls.__name__,
)
def legacy_supported_features(
supported_features: int, supported_color_modes: list[str] | None
) -> int:
"""Calculate supported features with backwards compatibility."""
# Backwards compatibility for supported_color_modes added in 2021.4
if supported_color_modes is None:
return supported_features
if any(mode in supported_color_modes for mode in COLOR_MODES_COLOR):
supported_features |= SUPPORT_COLOR
if any(mode in supported_color_modes for mode in COLOR_MODES_BRIGHTNESS):
supported_features |= SUPPORT_BRIGHTNESS
if COLOR_MODE_COLOR_TEMP in supported_color_modes:
supported_features |= SUPPORT_COLOR_TEMP
return supported_features
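# Quick illustration of the mapping above: a light reporting supported color modes
# ["hs", "color_temp"] maps back to SUPPORT_COLOR | SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP.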
| 37.997917
| 91
| 0.669362
|
4a0025bb471106442cc10e40a4774936990b6cfd
| 1,540
|
py
|
Python
|
setup.py
|
sscpac/statick-web
|
fc90b4205106d10f2dcc15dcf1f0f718998a1c96
|
[
"CC0-1.0"
] | 2
|
2020-02-07T00:13:53.000Z
|
2020-02-07T00:58:26.000Z
|
setup.py
|
sscpac/statick-web
|
fc90b4205106d10f2dcc15dcf1f0f718998a1c96
|
[
"CC0-1.0"
] | 19
|
2020-02-14T23:47:25.000Z
|
2022-02-28T21:38:01.000Z
|
setup.py
|
sscpac/statick-web
|
fc90b4205106d10f2dcc15dcf1f0f718998a1c96
|
[
"CC0-1.0"
] | 3
|
2020-02-12T16:16:54.000Z
|
2020-02-14T00:53:46.000Z
|
"""Setup."""
from setuptools import setup
with open("README.md", encoding="utf8") as fid:
long_description = fid.read() # pylint: disable=invalid-name
TEST_DEPS = [
"mock",
"pytest",
]
EXTRAS = {
"test": TEST_DEPS,
}
setup(
author="NIWC Pacific",
name="statick-web",
description="Statick analysis plugins for Web (css, html, js) files.",
version="0.0.7",
packages=[
"statick_tool",
"statick_tool.plugins.discovery",
"statick_tool.plugins.tool",
],
package_dir={
"statick_tool": ".",
"statick_tool.plugins.discovery": "src/statick_web/plugins/discovery",
"statick_tool.plugins.tool": "src/statick_web/plugins/tool",
},
package_data={
"statick_tool": ["rsc/.*", "rsc/*"],
"statick_tool.plugins.discovery": ["*.yapsy-plugin"],
"statick_tool.plugins.tool": ["*.yapsy-plugin"],
},
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=["statick"],
tests_require=TEST_DEPS,
extras_require=EXTRAS,
url="https://github.com/sscpac/statick-web",
classifiers=[
"License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Software Development :: Testing",
],
)
| 28.518519
| 78
| 0.614935
|
4a0027a36df9479321adbe0eb7c74b910d6f7215
| 665
|
py
|
Python
|
mayan/apps/common/tasks.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 2,743
|
2017-12-18T07:12:30.000Z
|
2022-03-27T17:21:25.000Z
|
mayan/apps/common/tasks.py
|
kyper999/mayan-edms
|
ca7b8301a1f68548e8e718d42a728a500d67286e
|
[
"Apache-2.0"
] | 15
|
2017-12-18T14:58:07.000Z
|
2021-03-01T20:05:05.000Z
|
mayan/apps/common/tasks.py
|
kyper999/mayan-edms
|
ca7b8301a1f68548e8e718d42a728a500d67286e
|
[
"Apache-2.0"
] | 257
|
2017-12-18T03:12:58.000Z
|
2022-03-25T08:59:10.000Z
|
from __future__ import unicode_literals
from datetime import timedelta
import logging
from django.apps import apps
from django.utils.timezone import now
from mayan.celery import app
from .literals import UPLOAD_EXPIRATION_INTERVAL
logger = logging.getLogger(__name__)
@app.task(ignore_result=True)
def task_delete_stale_uploads():
logger.info('Executing')
SharedUploadedFile = apps.get_model(
app_label='common', model_name='SharedUploadedFile'
)
for expired_upload in SharedUploadedFile.objects.filter(datetime__lt=now() - timedelta(seconds=UPLOAD_EXPIRATION_INTERVAL)):
expired_upload.delete()
    logger.info('Finished')
| 23.75
| 128
| 0.780451
|
4a0027a653df3f253dc3cc586dd2630606cd7272
| 714
|
py
|
Python
|
stubs/micropython-v1_12-pyboard/zlib.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
stubs/micropython-v1_12-pyboard/zlib.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
stubs/micropython-v1_12-pyboard/zlib.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
"""
Module: 'zlib' on micropython-v1.12-pyboard
"""
# MCU: {'ver': 'v1.12', 'port': 'pyboard', 'arch': 'armv7emsp', 'sysname': 'pyboard', 'release': '1.12.0', 'name': 'micropython', 'mpy': 7685, 'version': '1.12.0', 'machine': 'PYBv1.1 with STM32F405RG', 'build': '', 'nodename': 'pyboard', 'platform': 'pyboard', 'family': 'micropython'}
# Stubber: 1.5.4
from typing import Any
class DecompIO:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def read(self, *args, **kwargs) -> Any:
...
def readinto(self, *args, **kwargs) -> Any:
...
def readline(self, *args, **kwargs) -> Any:
...
def decompress(*args, **kwargs) -> Any:
...
| 25.5
| 286
| 0.536415
|
4a00283fca771a8d507e9ec4963513cfd89688a1
| 10,031
|
py
|
Python
|
salt/modules/napalm_bgp.py
|
ipmb/salt
|
699912ef9cde28040378aa53d6c7a12d8af756b1
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/napalm_bgp.py
|
ipmb/salt
|
699912ef9cde28040378aa53d6c7a12d8af756b1
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/napalm_bgp.py
|
ipmb/salt
|
699912ef9cde28040378aa53d6c7a12d8af756b1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
NAPALM BGP
==========
Manages BGP configuration on network devices and provides statistics.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com> & Jerome Fleury <jf@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`napalm proxy minion <salt.proxy.napalm>`
.. versionadded:: 2016.11.0
'''
from __future__ import absolute_import
# Import python lib
import logging
log = logging.getLogger(__file__)
# import NAPALM utils
import salt.utils.napalm
from salt.utils.napalm import proxy_napalm_wrap
# ----------------------------------------------------------------------------------------------------------------------
# module properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'bgp'
__proxyenabled__ = ['napalm']
# uses NAPALM-based proxy to interact with network devices
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
'''
NAPALM library must be installed for this module to work and run in a (proxy) minion.
'''
return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
@proxy_napalm_wrap
def config(group=None, neighbor=None, **kwargs):
'''
Provides the BGP configuration on the device.
:param group: Name of the group selected to display the configuration.
:param neighbor: IP Address of the neighbor to display the configuration.
If the group parameter is not specified, the neighbor setting will be ignored.
:return: A dictionary containing the BGP configuration from the network device.
The keys of the main dictionary are the group names.
Each group has the following properties:
* type (string)
* description (string)
* apply_groups (string list)
* multihop_ttl (int)
* multipath (True/False)
* local_address (string)
* local_as (int)
* remote_as (int)
* import_policy (string)
* export_policy (string)
* remove_private_as (True/False)
* prefix_limit (dictionary)
* neighbors (dictionary)
Each neighbor in the dictionary of neighbors provides:
* description (string)
* import_policy (string)
* export_policy (string)
* local_address (string)
* local_as (int)
* remote_as (int)
* authentication_key (string)
* prefix_limit (dictionary)
* route_reflector_client (True/False)
* nhs (True/False)
CLI Example:
.. code-block:: bash
salt '*' bgp.config # entire BGP config
salt '*' bgp.config PEERS-GROUP-NAME # provides detail only about BGP group PEERS-GROUP-NAME
salt '*' bgp.config PEERS-GROUP-NAME 172.17.17.1 # provides details only about BGP neighbor 172.17.17.1,
# configured in the group PEERS-GROUP-NAME
Output Example:
.. code-block:: python
{
'PEERS-GROUP-NAME':{
'type' : u'external',
'description' : u'Here we should have a nice description',
'apply_groups' : [u'BGP-PREFIX-LIMIT'],
'import_policy' : u'PUBLIC-PEER-IN',
'export_policy' : u'PUBLIC-PEER-OUT',
'remove_private': True,
'multipath' : True,
'multihop_ttl' : 30,
'neighbors' : {
'192.168.0.1': {
'description' : 'Facebook [CDN]',
'prefix_limit' : {
'inet': {
'unicast': {
'limit': 100,
'teardown': {
'threshold' : 95,
'timeout' : 5
}
}
}
}
'peer-as' : 32934,
'route_reflector': False,
'nhs' : True
},
'172.17.17.1': {
'description' : 'Twitter [CDN]',
'prefix_limit' : {
'inet': {
'unicast': {
'limit': 500,
'no-validate': 'IMPORT-FLOW-ROUTES'
}
}
}
'peer_as' : 13414
'route_reflector': False,
'nhs' : False
}
}
}
}
'''
return salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'get_bgp_config',
**{
'group': group,
'neighbor': neighbor
}
)
@proxy_napalm_wrap
def neighbors(neighbor=None, **kwargs):
'''
Provides details regarding the BGP sessions configured on the network device.
:param neighbor: IP Address of a specific neighbor.
:return: A dictionary with the statistics of the all/selected BGP neighbors.
Outer dictionary keys represent the VRF name.
Keys of inner dictionary represent the AS numbers, while the values are lists of dictionaries,
having the following keys:
* up (True/False)
* local_as (int)
* remote_as (int)
* local_address (string)
* routing_table (string)
* local_address_configured (True/False)
* local_port (int)
* remote_address (string)
* remote_port (int)
* multihop (True/False)
* multipath (True/False)
* remove_private_as (True/False)
* import_policy (string)
* export_policy (string)
* input_messages (int)
* output_messages (int)
* input_updates (int)
* output_updates (int)
* messages_queued_out (int)
* connection_state (string)
* previous_connection_state (string)
* last_event (string)
* suppress_4byte_as (True/False)
* local_as_prepend (True/False)
* holdtime (int)
* configured_holdtime (int)
* keepalive (int)
* configured_keepalive (int)
* active_prefix_count (int)
* received_prefix_count (int)
* accepted_prefix_count (int)
* suppressed_prefix_count (int)
* advertised_prefix_count (int)
* flap_count (int)
CLI Example:
.. code-block:: bash
salt '*' bgp.neighbors # all neighbors
salt '*' bgp.neighbors 172.17.17.1 # only session with BGP neighbor(s) 172.17.17.1
Output Example:
.. code-block:: python
{
'default': {
8121: [
{
'up' : True,
'local_as' : 13335,
'remote_as' : 8121,
'local_address' : u'172.101.76.1',
'local_address_configured' : True,
'local_port' : 179,
'remote_address' : u'192.247.78.0',
'router_id' : u'192.168.0.1',
'remote_port' : 58380,
'multihop' : False,
'import_policy' : u'4-NTT-TRANSIT-IN',
'export_policy' : u'4-NTT-TRANSIT-OUT',
'input_messages' : 123,
'output_messages' : 13,
'input_updates' : 123,
'output_updates' : 5,
'messages_queued_out' : 23,
'connection_state' : u'Established',
'previous_connection_state' : u'EstabSync',
'last_event' : u'RecvKeepAlive',
'suppress_4byte_as' : False,
'local_as_prepend' : False,
'holdtime' : 90,
'configured_holdtime' : 90,
'keepalive' : 30,
'configured_keepalive' : 30,
'active_prefix_count' : 132808,
'received_prefix_count' : 566739,
'accepted_prefix_count' : 566479,
'suppressed_prefix_count' : 0,
'advertise_prefix_count' : 0,
'flap_count' : 27
}
]
}
}
'''
return salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'get_bgp_neighbors_detail',
**{
'neighbor_address': neighbor
}
)
| 36.212996
| 120
| 0.434154
|
4a0028c34a0e33cdbd3e5a8d6d5c0ae1cbaa0c93
| 2,557
|
py
|
Python
|
craigslistings/config.py
|
fgregg/listings_scraper
|
bd90299537c9e34d5bd22d310780f269872f1789
|
[
"MIT"
] | null | null | null |
craigslistings/config.py
|
fgregg/listings_scraper
|
bd90299537c9e34d5bd22d310780f269872f1789
|
[
"MIT"
] | 4
|
2016-05-13T23:01:25.000Z
|
2016-05-13T23:01:52.000Z
|
craigslistings/config.py
|
fgregg/listings_scraper
|
bd90299537c9e34d5bd22d310780f269872f1789
|
[
"MIT"
] | null | null | null |
max_packet_size = 1048576 # Set in my.cnf (MySQL max_allowed_packet)
byte_encoding = 5 # conservative bytes-per-character estimate (UTF-8 uses at most 4 bytes per character)
string_chunk = int(max_packet_size/byte_encoding)
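# Illustrative sketch (added, not part of the original config): string_chunk is sized
# so that a chunk of text stays under the MySQL packet limit even if every character
# needs several bytes. The helper below is hypothetical; only the constants above come
# from this module.
#
#     def chunk_text(text, size=string_chunk):
#         return [text[i:i + size] for i in range(0, len(text), size)]
#
#     # e.g. a 500000-character listing body splits into 3 chunks of at most
#     # 209715 characters each (1048576 // 5).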
cities = {"newyork" : ('New York', 'NY'),
"losangeles" : ('Los Angeles', 'CA'),
"chicago" : ('Chicago', 'IL'),
"houston" : ('Houston', 'TX'),
"philadelphia" : ('Philadelphia', 'PA'),
"phoenix" : ('Phoenix', 'AZ'),
"sanantonio" : ('San Antonio', 'TX'),
"sandiego" : ('San Diego', 'CA'),
"dallas" : ('Dallas', 'TX'),
"jacksonville" : ('Jacksonville', 'FL'),
"indianapolis" : ('Indianapolis', 'IN'),
"sanfrancisco" : ('San Francisco', 'CA'),
"austin" : ('Austin', 'TX'),
"columbus" : ('Columbus', 'OH'),
"charlotte" : ('Charlotte', 'NC'),
"detroit" : ('Detroit', 'MI'),
"elpaso" : ('El Paso', 'TX'),
"memphis" : ('Memphis', 'TN'),
"baltimore" : ('Baltimore', 'MD'),
"boston" : ('Boston', 'MA'),
"seattle" : ('Seattle', 'WA'),
"dc" : ('Washington', 'DC'),
"nashville" : ('Nashville', 'TN'),
"denver" : ('Denver', 'CO'),
"louisville" : ('Louisville', 'KY'),
"milwaukee" : ('Milwaukee', 'WI'),
"portland" : ('Portland', 'OR'),
"lasvegas" : ('Las Vegas', 'NV'),
"oklahomacity" : ('Oklahoma City', 'OK'),
"albuquerque" : ('Albuquerque', 'NM'),
"tucson" : ('Tucson', 'AZ'),
"fresno": ('Fresno', 'CA'),
"sacramento" : ('Sacramento', 'CA'),
"kansascity" : ('Kansas City', 'MO'),
"atlanta" : ('Atlanta', 'GA'),
"cosprings" : ('Colorado Springs', 'CO'),
"omaha" : ('Omaha', 'NE'),
"raleigh" : ('Raleigh', 'NC'),
"miami" : ('Miami', 'FL'),
"cleveland" : ('Cleveland', 'OH'),
"tulsa" : ('Tulsa', 'OK'),
"minneapolis" : ('Minneapolis', 'MN'),
"wichita" : ('Wichita', 'KS'),
"knoxville" : ('Knoxville', 'TN'),
"asheville" : ('Asheville', 'NC')
}
std_feeds = [["sublet", "http://%s.craigslist.org/sub/index.rss"],
["room", "http://%s.craigslist.org/roo/index.rss"],
["apartment" , "http://%s.craigslist.org/apa/index.rss"]
]
ny_feeds = [["sublet", "http://%s.craigslist.org/sub/index.rss"],
["room", "http://%s.craigslist.org/roo/index.rss"],
["apartment" , "http://%s.craigslist.org/abo/index.rss"]
]
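# Illustrative sketch (added, not part of the original config): the "%s" placeholder in
# each feed URL is filled with a key from ``cities``; New York uses its own feed list.
# The loop below is hypothetical.
#
#     city_key = "chicago"                              # any key from ``cities``
#     feeds = ny_feeds if city_key == "newyork" else std_feeds
#     for kind, template in feeds:
#         print(kind, template % city_key)              # e.g. http://chicago.craigslist.org/sub/index.rss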
| 41.918033
| 69
| 0.461478
|
4a0029502221cd3b12646092a32f9595795e491c
| 2,469
|
py
|
Python
|
frappe/modules/export_file.py
|
ollyboy/frappe
|
b2f78de243d47a534b761f133cd9d27376148e25
|
[
"MIT"
] | 1
|
2021-09-04T14:24:36.000Z
|
2021-09-04T14:24:36.000Z
|
frappe/modules/export_file.py
|
vamagithub/frappe
|
b2f78de243d47a534b761f133cd9d27376148e25
|
[
"MIT"
] | null | null | null |
frappe/modules/export_file.py
|
vamagithub/frappe
|
b2f78de243d47a534b761f133cd9d27376148e25
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe, os
import frappe.model
from frappe.modules import scrub, get_module_path, scrub_dt_dn
def export_doc(doc):
export_to_files([[doc.doctype, doc.name]])
def export_to_files(record_list=None, record_module=None, verbose=0, create_init=None):
"""
    Export record_list to files. record_list is a list of lists, e.g. [[doctype, docname, folder_name], ...]; the folder name is optional.
"""
if frappe.flags.in_import:
return
if record_list:
for record in record_list:
folder_name = record[2] if len(record) == 3 else None
write_document_file(frappe.get_doc(record[0], record[1]), record_module, create_init=create_init, folder_name=folder_name)
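# Illustrative sketch (added, not part of the original module): a minimal call showing
# the expected shape of ``record_list``. The doctype/docname values and the module name
# are hypothetical, and a Frappe site context must already be initialised.
#
#     export_to_files(
#         record_list=[
#             ["DocType", "ToDo"],                      # two-element record
#             ["Workflow", "Leave Approval", "leave"],  # optional third item: folder name
#         ],
#         record_module="Custom Module",
#     )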
def write_document_file(doc, record_module=None, create_init=True, folder_name=None):
newdoc = doc.as_dict(no_nulls=True)
doc.run_method("before_export", newdoc)
# strip out default fields from children
for df in doc.meta.get_table_fields():
for d in newdoc.get(df.fieldname):
for fieldname in frappe.model.default_fields:
if fieldname in d:
del d[fieldname]
module = record_module or get_module_name(doc)
# create folder
if folder_name:
folder = create_folder(module, folder_name, doc.name, create_init)
else:
folder = create_folder(module, doc.doctype, doc.name, create_init)
# write the data file
fname = scrub(doc.name)
with open(os.path.join(folder, fname + ".json"), 'w+') as txtfile:
txtfile.write(frappe.as_json(newdoc))
def get_module_name(doc):
if doc.doctype == 'Module Def':
module = doc.name
elif doc.doctype=="Workflow":
module = frappe.db.get_value("DocType", doc.document_type, "module")
elif hasattr(doc, 'module'):
module = doc.module
else:
module = frappe.db.get_value("DocType", doc.doctype, "module")
return module
def create_folder(module, dt, dn, create_init):
module_path = get_module_path(module)
dt, dn = scrub_dt_dn(dt, dn)
# create folder
folder = os.path.join(module_path, dt, dn)
frappe.create_folder(folder)
# create init_py_files
if create_init:
create_init_py(module_path, dt, dn)
return folder
def create_init_py(module_path, dt, dn):
def create_if_not_exists(path):
initpy = os.path.join(path, '__init__.py')
if not os.path.exists(initpy):
open(initpy, 'w').close()
create_if_not_exists(os.path.join(module_path))
create_if_not_exists(os.path.join(module_path, dt))
create_if_not_exists(os.path.join(module_path, dt, dn))
| 29.392857
| 125
| 0.744026
|
4a002981ccfee4723998af045d3b2ac282026a08
| 2,018
|
py
|
Python
|
TurbulenceSuite_master.py
|
tzhangwps/Turbulence-Suite
|
ceb7d1c6a1914da5a2316603f289238a4bb6a826
|
[
"MIT"
] | 13
|
2019-10-14T23:12:33.000Z
|
2022-03-24T18:06:21.000Z
|
TurbulenceSuite_master.py
|
tzhangwps/Turbulence-Suite
|
ceb7d1c6a1914da5a2316603f289238a4bb6a826
|
[
"MIT"
] | 2
|
2021-06-01T23:32:12.000Z
|
2021-08-08T17:36:22.000Z
|
TurbulenceSuite_master.py
|
tzhangwps/Turbulence-and-Systemic-Risk
|
ceb7d1c6a1914da5a2316603f289238a4bb6a826
|
[
"MIT"
] | 8
|
2020-03-03T12:03:00.000Z
|
2021-12-06T22:11:50.000Z
|
"""
This is the main script that runs all modules.
"""
import os
import argparse
import src.drop_recent as drp
import src.main as main
import TurbulenceSuite_paths as path
os.chdir(os.path.dirname(os.path.abspath(__file__)))
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--drop_recent', type=int,
help=
"""
How many rows (reverse-chronological) do you want to
remove from the "index_data.pkl" dataframe?
""")
args = parser.parse_args()
rows_to_drop = args.drop_recent
if rows_to_drop is not None:
drp.DropRecent().drop(rows_to_drop=rows_to_drop)
else:
main_process = main.MainProcess()
main_process.append_prices_and_returns()
main_process.calculate_turbulence_and_systemic_risk()
main_process.save_chart_data()
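# Illustrative usage (added note, not part of the original script); the row count is
# hypothetical:
#
#     python TurbulenceSuite_master.py        # run the full update pipeline
#     python TurbulenceSuite_master.py -d 5   # drop the 5 most recent rows from index_data.pkl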
#MIT License
#
#Copyright (c) 2019 Terrence Zhang
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
| 38.075472
| 80
| 0.714569
|
4a0029c8251f47f98ce829032b7bda70cc3adc00
| 579
|
py
|
Python
|
pywslegislature/tests/test_biennium.py
|
JacksonMaxfield/wa_legislature
|
0444fe77f3c4e59bc17c55ea07f9f9773baeae0b
|
[
"MIT"
] | 5
|
2017-06-29T04:23:51.000Z
|
2019-01-28T19:40:14.000Z
|
pywslegislature/tests/test_biennium.py
|
JacksonMaxfield/wa-legislature
|
0444fe77f3c4e59bc17c55ea07f9f9773baeae0b
|
[
"MIT"
] | 8
|
2019-02-11T05:39:52.000Z
|
2021-02-24T02:27:54.000Z
|
pywslegislature/tests/test_biennium.py
|
JacksonMaxfield/wa-legislature
|
0444fe77f3c4e59bc17c55ea07f9f9773baeae0b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from pywslegislature.biennium import Biennium
@pytest.mark.parametrize("year, expected_year, expected_biennium", [
(None, 2019, "2019-20"),
(2018, 2018, "2017-18"),
(2017, 2017, "2017-18"),
pytest.param(1, None, None, marks=pytest.mark.raises(exception=ValueError))
])
def test_biennium(year, expected_year, expected_biennium):
"""Test the creation and basic properties of the Biennium object."""
b = Biennium(year)
assert expected_year == b.year
assert expected_biennium == str(b)
| 27.571429
| 79
| 0.692573
|
4a002a292b2d64ff306c626589a34f0e80202ec8
| 4,616
|
py
|
Python
|
source/lib/cdk_infra/network_sg.py
|
awslabs/sql-based-etl-with-apache-spark-on-amazon-eks
|
2c9cbbcf6eb705fe964e29d5d9288a6d17203c7f
|
[
"Apache-2.0"
] | 12
|
2020-12-10T22:26:52.000Z
|
2022-03-10T00:07:56.000Z
|
source/lib/cdk_infra/network_sg.py
|
awslabs/sql-based-etl-with-apache-spark-on-amazon-eks
|
2c9cbbcf6eb705fe964e29d5d9288a6d17203c7f
|
[
"Apache-2.0"
] | null | null | null |
source/lib/cdk_infra/network_sg.py
|
awslabs/sql-based-etl-with-apache-spark-on-amazon-eks
|
2c9cbbcf6eb705fe964e29d5d9288a6d17203c7f
|
[
"Apache-2.0"
] | 2
|
2021-11-24T14:27:16.000Z
|
2022-01-12T04:32:12.000Z
|
######################################################################################################################
# Copyright 2020-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
#  OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions    #
#  and limitations under the License.                                                                                #
######################################################################################################################
from aws_cdk import (
core,
aws_ec2 as ec2,
aws_s3 as s3
)
import lib.util.override_rule as scan
class NetworkSgConst(core.Construct):
@property
def vpc(self):
return self._vpc
def __init__(self,scope: core.Construct, id:str, eksname:str, codebucket: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# //*************************************************//
# //******************* NETWORK ********************//
# //************************************************//
# create VPC
self._vpc = ec2.Vpc(self, 'eksVpc',max_azs=2, nat_gateways=1)
core.Tags.of(self._vpc).add('Name', eksname + 'EksVpc')
self._log_bucket=s3.Bucket.from_bucket_name(self,'vpc_logbucket', codebucket)
self._vpc.add_flow_log("FlowLogCloudWatch",
destination=ec2.FlowLogDestination.to_s3(self._log_bucket,'vpcRejectlog/'),
traffic_type=ec2.FlowLogTrafficType.REJECT
)
# VPC endpoint security group
self._vpc_endpoint_sg = ec2.SecurityGroup(self,'EndpointSg',
vpc=self._vpc,
description='Security Group for Endpoint',
)
self._vpc_endpoint_sg.add_ingress_rule(ec2.Peer.ipv4(self._vpc.vpc_cidr_block),ec2.Port.tcp(port=443))
core.Tags.of(self._vpc_endpoint_sg).add('Name','SparkOnEKS-VPCEndpointSg')
# Add VPC endpoint
self._vpc.add_gateway_endpoint("S3GatewayEndpoint",
service=ec2.GatewayVpcEndpointAwsService.S3,
subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE)])
self._vpc.add_interface_endpoint("EcrDockerEndpoint",service=ec2.InterfaceVpcEndpointAwsService.ECR_DOCKER, security_groups=[self._vpc_endpoint_sg])
self._vpc.add_interface_endpoint("CWLogsEndpoint", service=ec2.InterfaceVpcEndpointAwsService.CLOUDWATCH_LOGS,security_groups=[self._vpc_endpoint_sg])
self._vpc.add_interface_endpoint("AthenaEndpoint", service=ec2.InterfaceVpcEndpointAwsService.ATHENA,security_groups=[self._vpc_endpoint_sg])
self._vpc.add_interface_endpoint("KMSEndpoint", service=ec2.InterfaceVpcEndpointAwsService.KMS,security_groups=[self._vpc_endpoint_sg])
# Override Cfn_Nag rule for AWS Solution CICD validation
for subnet in self._vpc.public_subnets:
scan.suppress_cfnnag_rule('W33','a public facing ALB is required and ingress from the internet should be permitted.',subnet.node.default_child)
self._vpc_endpoint_sg.node.default_child.add_metadata('cfn_nag',{
"rules_to_suppress": [
{
"id": "W40",
"reason": "Egress IP Protocol of -1 is default and generally considered OK"
},
{
"id": "W5",
"reason": "Security Groups with cidr open considered OK"
}
]
})
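# Illustrative sketch (added, not part of the original construct): how NetworkSgConst
# might be instantiated from a CDK stack. The stack class, EKS name and bucket name are
# hypothetical.
#
#     class InfraStack(core.Stack):
#         def __init__(self, scope, id, **kwargs):
#             super().__init__(scope, id, **kwargs)
#             network = NetworkSgConst(self, 'network-sg', eksname='demo', codebucket='my-log-bucket')
#             vpc = network.vpc  # exposed via the ``vpc`` property above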
| 58.43038
| 158
| 0.505416
|
4a002a98864598386020b8be2c0012533c548c1f
| 5,940
|
py
|
Python
|
src/db_to_spider.py
|
vraxzeztan/valuenet
|
d9ef1390697730ff66408d67f85073aa9b9388ed
|
[
"Apache-2.0"
] | 1
|
2020-11-28T17:44:05.000Z
|
2020-11-28T17:44:05.000Z
|
src/db_to_spider.py
|
vraxzeztan/valuenet
|
d9ef1390697730ff66408d67f85073aa9b9388ed
|
[
"Apache-2.0"
] | 4
|
2021-03-31T20:01:24.000Z
|
2021-12-13T20:43:13.000Z
|
src/db_to_spider.py
|
vraxzeztan/valuenet
|
d9ef1390697730ff66408d67f85073aa9b9388ed
|
[
"Apache-2.0"
] | null | null | null |
"""
@author: neera
"""
import sqlite3
import argparse
import json
#global parameters
normalize_table_names = False #want to normalise table names or not?
normalize_column_names = False #want to normalize column names or not
table_col_mapping = {}
def clean():
print()
print("######################")
print()
def change_column_types(col_types):
for i in range(len(col_types)):
col_type = col_types[i].lower()
if col_type == "integer":
col_type = 'number'
col_types[i] = col_type
return col_types
#input : cursor object , empty list
#returns a list of all the tables in the database
def find_all_tables(cursor,tables):
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
results = cursor.fetchall()
for result in results:
tables.append(str(result[0]))
print("Found the following tables from the database : ")
print(tables)
clean()
return tables
#input : cursor object, columns =[-1,'*'](this is used by valuenet), list of all the tables in the database
#returns : columns, their datatypes, primary and foreigm keys relationships
def find_all_columns_types_keys(cursor,columns,tables):
col_types = []
col_types.append('TEXT')
foreign_keys = []
primary_keys = []
print("Found the following foreign keys relationships: ")
for index,table in enumerate(tables):
rows = cursor.execute("PRAGMA table_info({})".format((table)))
table_info = rows.fetchall()
for info in table_info:
col_name = info[1]
col_type = info[2]
col = []
col.append(index)
col.append(col_name)
columns.append(col)
col_types.append(col_type)
table_col_mapping[(table,col_name)] = len(columns)-1
for table in tables:
rows = cursor.execute("PRAGMA foreign_key_list({})".format((table)))
fk_info = rows.fetchall()
if len(fk_info):
for fks in fk_info:
src_table = table
des_table = fks[2]
src_col = fks[3]
des_col = fks[4]
f_key = []
f_key.append(table_col_mapping[(src_table,src_col)])
f_key.append(table_col_mapping[(des_table,des_col)])
foreign_keys.append(f_key)
print(src_col, " in ", src_table, " references ", des_col, " in ", des_table)
rows = cursor.execute("PRAGMA table_info({})".format((table)))
pk_info = rows.fetchall()
for pk in pk_info:
if pk[len(pk)-1] == 1:
primary_keys.append(table_col_mapping[(table,pk[1])])
clean()
print("Found the following primary keys from the database:")
for pk in primary_keys:
table = tables[columns[pk][0]]
col_name = columns[pk][1]
print(table ," --->" , col_name)
clean()
cleaned_columns = [[-1,'*']]
for col in range(1,len(columns)):
temp_col = columns[col][1].lower()
temp_col = temp_col.split('_')
col_name = " ".join(temp_col)
temp_col = []
temp_col.append(columns[col][0])
temp_col.append(col_name)
cleaned_columns.append(temp_col)
return columns,cleaned_columns,col_types, foreign_keys, primary_keys
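# Worked example (added for clarity, not part of the script): for a hypothetical table
# ``deal`` with primary key ``id`` and a column ``user_id`` referencing ``user.id``,
# the structures built above would look roughly like:
#
#     columns      = [[-1, '*'], [0, 'id'], [0, 'user_id'], [1, 'id']]
#     foreign_keys = [[2, 3]]   # columns[2] (deal.user_id) -> columns[3] (user.id)
#     primary_keys = [1, 3]     # indices of deal.id and user.id in ``columns``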
def normalize(names):
    # Normalization of table/column names is not implemented yet; return the input unchanged.
    return names
if __name__ =="__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--sqlite_path', type=str, required=False,default = 'dealPlatform')
arg_parser.add_argument('--database_id',type = str,default ="dealPlatform")
args = arg_parser.parse_args()
try :
db = sqlite3.connect(args.sqlite_path+".db")
cursor = db.cursor()
spider_data = []
db_info = {}
db_info['db_id'] = args.database_id
tables = find_all_tables(cursor,[])
db_info['table_names_original'] = tables
if normalize_table_names:
db_info['table_names'] = normalize( db_info['table_names_original'] )
else:
db_info['table_names'] = db_info['table_names_original']
columns, cleaned_columns, column_types, foreign_keys, primary_keys = find_all_columns_types_keys(cursor,[[-1,"*"]],tables)
db_info['column_names_original'] = columns
db_info['column_names'] = cleaned_columns
# if normalize_column_names:
# db_info['column_names'] = normalize(db_info['column_names_original'])
# else:
# db_info['column_names'] = columns
db_info['column_types'] = change_column_types(column_types)
db_info['foreign_keys'] = foreign_keys
db_info['primary_keys'] = primary_keys
spider_data.append(db_info)
with open("tables.json",'w') as f:
json.dump(spider_data,f)
except Exception as e:
print("Exception: " + str(e))
# def find_all_columns(cursor,columns,tables):
# temp_cols = []
# for index,table in enumerate(tables):
# cursor.execute("select * from " +table)
# for description in cursor.description:
# col = []
# col.append(index)
# col.append(description[0])
# columns.append(col)
# temp_cols.append(description[0])
# return columns, temp_cols
# def find_col_types(cursor,col_types,cols,tables):
# table_col_mapping = {}
# for table in tables:
# print(table)
# rows = db.execute("PRAGMA table_info({})".format((table)))
# table_col_mapping[table] = rows.fetchall()
| 34.534884
| 131
| 0.575589
|
4a002aeac0dc05d327258151d9310d1a5c3d91f9
| 12,957
|
py
|
Python
|
gear_library/bids-app-template/run.py
|
joshicola/fw-gear-building-gui
|
924aa742479f6f0983509106e8d63a3fc267eaa3
|
[
"Apache-2.0"
] | null | null | null |
gear_library/bids-app-template/run.py
|
joshicola/fw-gear-building-gui
|
924aa742479f6f0983509106e8d63a3fc267eaa3
|
[
"Apache-2.0"
] | null | null | null |
gear_library/bids-app-template/run.py
|
joshicola/fw-gear-building-gui
|
924aa742479f6f0983509106e8d63a3fc267eaa3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""Run the gear: set up for and call command-line command."""
import json
import os
import shutil
import sys
from pathlib import Path
import flywheel_gear_toolkit
import psutil
from flywheel_gear_toolkit.interfaces.command_line import (
build_command_list,
exec_command,
)
from flywheel_gear_toolkit.licenses.freesurfer import install_freesurfer_license
from flywheel_gear_toolkit.utils.zip_tools import zip_output
from utils.bids.download_run_level import download_bids_for_runlevel
from utils.bids.run_level import get_run_level_and_hierarchy
from utils.dry_run import pretend_it_ran
from utils.fly.make_file_name_safe import make_file_name_safe
from utils.results.zip_htmls import zip_htmls
from utils.results.zip_intermediate import (
zip_all_intermediate_output,
zip_intermediate_selected,
)
GEAR = "bids-app-template"
REPO = "flywheel-apps"
CONTAINER = f"{REPO}/{GEAR}"
FREESURFER_FULLPATH = "/opt/freesurfer/license.txt"
def main(gtk_context):
log = gtk_context.log
# Keep a list of errors and warning to print all in one place at end of log
# Any errors will prevent the command from running and will cause exit(1)
errors = []
warnings = []
# Given the destination container, figure out if running at the project,
# subject, or session level.
hierarchy = get_run_level_and_hierarchy(
gtk_context.client, gtk_context.destination["id"]
)
# This is the label of the project, subject or session and is used
# as part of the name of the output files.
run_label = make_file_name_safe(hierarchy["run_label"])
# Output will be put into a directory named as the destination id.
# This allows the raw output to be deleted so that a zipped archive
# can be returned.
output_analysisid_dir = gtk_context.output_dir / gtk_context.destination["id"]
# {{#script.cpus}}
# ==============================os_cpu_count========================================
# get # cpu's to set -openmp
    os_cpu_count = os.cpu_count()  # keep as an int so the comparison with n_cpus below works
log.info("os.cpu_count() = %s", os_cpu_count)
n_cpus = gtk_context.config.get("n_cpus")
if n_cpus:
if n_cpus > os_cpu_count:
log.warning("n_cpus > number available, using %d", os_cpu_count)
gtk_context.config["n_cpus"] = os_cpu_count
elif n_cpus == 0:
log.info("n_cpus == 0, using %d (maximum available)", os_cpu_count)
gtk_context.config["n_cpus"] = os_cpu_count
else: # Default is to use all cpus available
gtk_context.config["n_cpus"] = os_cpu_count # zoom zoom
# ==================================================================================
# {{/script.cpus}}
# {{#script.memory_available}}
# ========================memory_available==========================================
mem_gb = psutil.virtual_memory().available / (1024 ** 3)
log.info("psutil.virtual_memory().available= {:4.1f} GiB".format(mem_gb))
# ==================================================================================
# {{/script.memory_available}}
# grab environment for gear (saved in Dockerfile)
with open("/tmp/gear_environ.json", "r") as f:
environ = json.load(f)
# Add environment to log if debugging
kv = ""
for k, v in environ.items():
kv += k + "=" + v + " "
log.debug("Environment: " + kv)
# get config for command by skipping gear config parameters
command_config = {}
for key, val in gtk_context.config.items():
if not key.startswith("gear-"):
command_config[key] = val
# print("command_config:", json.dumps(command_config, indent=4))
# Validate the command parameter dictionary - make sure everything is
# ready to run so errors will appear before launching the actual gear
# code. Add descriptions of problems to errors & warnings lists.
# print("gtk_context.config:", json.dumps(gtk_context.config, indent=4))
# The main command line command to be run:
# editme: Set the actual gear command:
command = ["{{script.bids_command}}"]
# This block of code is active when not rendered by pystache
# {{#if_not_mustache_rendered}}
command = ["echo"]
# {{/if_not_mustache_rendered}}
# This is also used as part of the name of output files
command_name = make_file_name_safe(command[0])
# editme: add positional arguments that the above command needs
# 3 positional args: bids path, output dir, 'participant'
# This should be done here in case there are nargs='*' arguments
# These follow the BIDS Apps definition (https://github.com/BIDS-Apps)
command.append(str(gtk_context.work_dir / "bids"))
command.append(str(output_analysisid_dir))
command.append("{{script.participant}}")
# This block of code is active when not rendered by pystache
# {{#if_not_mustache_rendered}}
command.append("Subject_1")
# {{/if_not_mustache_rendered}}
command = build_command_list(command, command_config)
# print(command)
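    # Illustrative note (added): with the template defaults, the assembled command is
    # roughly the hypothetical list below; option values such as n_cpus come from
    # command_config and the paths depend on the gear context:
    #     ['echo', '<work_dir>/bids', '<output_dir>/<destination-id>', 'Subject_1', '--n_cpus=4']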
# {{#script.verbose}}
# =============================verbose==============================================
for ii, cmd in enumerate(command):
if cmd.startswith("--verbose"):
# handle a 'count' argparse argument where manifest gives
# enumerated possibilities like v, vv, or vvv
# e.g. replace "--verbose=vvv' with '-vvv'
command[ii] = cmd.split("=")[1]
# ==================================================================================
# {{/script.verbose}}
# {{#script.needs_freesurfer_license}}
# ========================needs_freesurfer_license==================================
# if the command needs a freesurfer license keep this
if Path(FREESURFER_FULLPATH).exists():
log.debug("%s exists.", FREESURFER_FULLPATH)
install_freesurfer_license(gtk_context, FREESURFER_FULLPATH)
# ==================================================================================
# {{/script.needs_freesurfer_license}}
if len(errors) == 0:
# {{#script.bids_tree}}
# ============================bids_tree=========================================
# Create HTML file that shows BIDS "Tree" like output?
tree = True
# ==============================================================================
# {{/script.bids_tree}}
# {{^script.bids_tree}}
# ============================bids_tree=========================================
# Create HTML file that shows BIDS "Tree" like output?
tree = False
# ==============================================================================
# {{/script.bids_tree}}
tree_title = f"{command_name} BIDS Tree"
# Whether or not to include src data (e.g. dicoms) when downloading BIDS
src_data = False
# Limit download to specific folders? e.g. ['anat','func','fmap']
# when downloading BIDS
folders = [] # empty list is no limit
error_code = download_bids_for_runlevel(
gtk_context,
hierarchy,
tree=tree,
tree_title=tree_title,
src_data=src_data,
folders=folders,
dry_run=gtk_context.config.get("gear-dry-run"),
do_validate_bids=gtk_context.config.get("gear-run-bids-validation"),
)
if error_code > 0 and not gtk_context.config.get("gear-ignore-bids-errors"):
errors.append(f"BIDS Error(s) detected. Did not run {CONTAINER}")
# now that work/bids/ exists, copy in the ignore file
bidsignore_path = gtk_context.get_input_path("bidsignore")
if bidsignore_path:
shutil.copy(bidsignore_path, "work/bids/.bidsignore")
log.info("Installed .bidsignore in work/bids/")
# see https://github.com/bids-standard/pybids/tree/master/examples
# for any necessary work on the bids files inside the gear, perhaps
# to query results or count stuff to estimate how long things will take.
# Add that stuff to utils/bids.py
# Don't run if there were errors or if this is a dry run
ok_to_run = True
if len(errors) > 0:
ok_to_run = False
returncode = 1
log.info("Command was NOT run because of previous errors.")
if gtk_context.config.get("gear-dry-run"):
ok_to_run = False
returncode = 0
e = "gear-dry-run is set: Command was NOT run."
log.warning(e)
warnings.append(e)
pretend_it_ran(gtk_context)
try:
if ok_to_run:
returncode = 0
# Create output directory
log.info("Creating output directory %s", output_analysisid_dir)
Path(output_analysisid_dir).mkdir()
# This is what it is all about
exec_command(command, environ=environ)
except RuntimeError as exc:
returncode = 1
errors.append(exc)
log.critical(exc)
log.exception("Unable to execute command.")
finally:
# Cleanup, move all results to the output directory
# TODO
# see https://github.com/bids-standard/pybids/tree/master/examples
# for any necessary work on the bids files inside the gear, perhaps
# to query results or count stuff to estimate how long things will take.
# Add that to utils/results.py
# zip entire output/<analysis_id> folder into
# <gear_name>_<project|subject|session label>_<analysis.id>.zip
zip_file_name = (
gtk_context.manifest["name"]
+ f"_{run_label}_{gtk_context.destination['id']}.zip"
)
zip_output(
str(gtk_context.output_dir),
gtk_context.destination["id"],
zip_file_name,
dry_run=False,
exclude_files=None,
)
# {{#script.zip_htmls}}
# ===============================zip_htmls======================================
# zip any .html files in output/<analysis_id>/
zip_htmls(gtk_context, output_analysisid_dir)
# ==============================================================================
# {{/script.zip_htmls}}
# {{#script.save_intermediate_output}}
# ========================save_intermediate_output==============================
# possibly save ALL intermediate output
if gtk_context.config.get("gear-save-intermediate-output"):
zip_all_intermediate_output(gtk_context, run_label)
# possibly save intermediate files and folders
zip_intermediate_selected(gtk_context, run_label)
# ==============================================================================
# {{/script.save_intermediate_output}}
# clean up: remove output that was zipped
if Path(output_analysisid_dir).exists():
if not gtk_context.config.get("gear-keep-output"):
log.debug('removing output directory "%s"', str(output_analysisid_dir))
shutil.rmtree(output_analysisid_dir)
else:
log.info(
'NOT removing output directory "%s"', str(output_analysisid_dir)
)
else:
log.info("Output directory does not exist so it cannot be removed")
# Report errors and warnings at the end of the log so they can be easily seen.
if len(warnings) > 0:
msg = "Previous warnings:\n"
for err in warnings:
if str(type(err)).split("'")[1] == "str":
# show string
msg += " Warning: " + str(err) + "\n"
else: # show type (of warning) and warning message
err_type = str(type(err)).split("'")[1]
msg += f" {err_type}: {str(err)}\n"
log.info(msg)
if len(errors) > 0:
msg = "Previous errors:\n"
for err in errors:
if str(type(err)).split("'")[1] == "str":
# show string
msg += " Error msg: " + str(err) + "\n"
else: # show type (of error) and error message
err_type = str(type(err)).split("'")[1]
msg += f" {err_type}: {str(err)}\n"
log.info(msg)
returncode = 1
return returncode
if __name__ == "__main__":
gtk_context = flywheel_gear_toolkit.GearToolkitContext()
# Setup basic logging and log the configuration for this job
    if gtk_context.config.get("gear-log-level") == "INFO":  # log level lives in the job config
gtk_context.init_logging("info")
else:
gtk_context.init_logging("debug")
gtk_context.log_config()
exit_status = main(gtk_context)
gtk_context.log.info("%s Gear is done. Returning %s", CONTAINER, exit_status)
sys.exit(exit_status)
| 38.677612
| 88
| 0.572432
|
4a002b475a18cc7b946769505803c606d7e7b207
| 4,277
|
py
|
Python
|
BGWpy/BGW/epsilontask.py
|
BerkeleyGW/BGWpy
|
f3c4a4c60a7ed7530aee9bae5a43c73e1f9ca7c0
|
[
"BSD-3-Clause-LBNL"
] | 27
|
2015-11-19T06:14:56.000Z
|
2022-02-11T18:03:26.000Z
|
BGWpy/BGW/epsilontask.py
|
wu2meng3/BGWpy
|
f3c4a4c60a7ed7530aee9bae5a43c73e1f9ca7c0
|
[
"BSD-3-Clause-LBNL"
] | 5
|
2020-08-31T14:27:08.000Z
|
2021-04-08T15:28:44.000Z
|
BGWpy/BGW/epsilontask.py
|
wu2meng3/BGWpy
|
f3c4a4c60a7ed7530aee9bae5a43c73e1f9ca7c0
|
[
"BSD-3-Clause-LBNL"
] | 13
|
2016-02-10T05:36:25.000Z
|
2021-11-02T01:52:09.000Z
|
from __future__ import print_function
import os
from .bgwtask import BGWTask
from .kgrid import KgridTask, get_kpt_grid
from .inputs import EpsilonInput
# Public
__all__ = ['EpsilonTask']
class EpsilonTask(BGWTask):
"""Inverse dielectric function calculation."""
_TASK_NAME = 'Epsilon'
_input_fname = 'epsilon.inp'
_output_fname = 'epsilon.out'
def __init__(self, dirname, **kwargs):
"""
Arguments
---------
dirname : str
Directory in which the files are written and the code is executed.
Will be created if needed.
Keyword arguments
-----------------
(All mandatory unless specified otherwise)
structure : pymatgen.Structure
Structure object containing information on the unit cell.
ngkpt : list(3), float
K-points grid. Number of k-points along each primitive vector
of the reciprocal lattice. This is actually a Q-points grid
in a GW calculation.
qshift : list(3), float
Q-point used to treat the Gamma point.
ecuteps : float
Energy cutoff for the dielectric function.
wfn_fname : str
Path to the wavefunction file produced by pw2bgw.
wfnq_fname : str
Path to the q-shifted wavefunction file produced by pw2bgw.
extra_lines : list, optional
Any other lines that should appear in the input file.
extra_variables : dict, optional
Any other variables that should be declared in the input file.
Properties
----------
eps0mat_fname : str
Path to the eps0mat file produced.
eps0mat_h5_fname : str
Path to the eps0mat.h5 file produced.
epsmat_fname : str
Path to the epsmat file produced.
epsmat_h5_fname : str
Path to the epsmat.h5 file produced.
"""
super(EpsilonTask, self).__init__(dirname, **kwargs)
# Compute k-points grids
# TODO maybe make these properties
#structure = kwargs['structure']
#ngkpt = kwargs['ngkpt']
#kpts_ush, wtks_ush = get_kpt_grid(structure, ngkpt)
kgrid_kwargs = dict()
for key in ('structure', 'ngkpt', 'fft', 'use_tr', 'clean_after'):
if key in kwargs:
kgrid_kwargs[key] = kwargs[key]
self.kgridtask = KgridTask(dirname=dirname, **kgrid_kwargs)
symkpt = kwargs.get('symkpt', True)
if symkpt:
kpts_ush, wtks_ush = self.kgridtask.get_kpoints()
else:
kpts_ush, wtks_ush = self.kgridtask.get_kpt_grid_nosym()
extra_lines = kwargs.get('extra_lines',[])
extra_variables = kwargs.get('extra_variables',{})
# Input file
self.input = EpsilonInput(
kwargs['ecuteps'],
kwargs['qshift'],
kpts_ush[1:],
*extra_lines,
**extra_variables)
self.input.fname = self._input_fname
# Set up the run script
self.wfn_fname = kwargs['wfn_fname']
self.wfnq_fname = kwargs['wfnq_fname']
ex = 'epsilon.cplx.x' if self._flavor_complex else 'epsilon.real.x'
self.runscript['EPSILON'] = ex
self.runscript.append('$MPIRUN $EPSILON &> {}'.format(self._output_fname))
@property
def wfn_fname(self):
return self._wfn_fname
@wfn_fname.setter
def wfn_fname(self, value):
self._wfn_fname = value
self.update_link(value, 'WFN')
@property
def wfnq_fname(self):
return self._wfnq_fname
@wfnq_fname.setter
def wfnq_fname(self, value):
self._wfnq_fname = value
self.update_link(value, 'WFNq')
def write(self):
super(EpsilonTask, self).write()
with self.exec_from_dirname():
self.input.write()
@property
def eps0mat_fname(self):
# Eventually, hdf5 will be mandatory.
basename = 'eps0mat.h5' if self._use_hdf5 else 'eps0mat'
return os.path.join(self.dirname, basename)
@property
def epsmat_fname(self):
basename = 'epsmat.h5' if self._use_hdf5 else 'epsmat'
return os.path.join(self.dirname, basename)
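# Illustrative sketch (added, not part of the original module): constructing an
# EpsilonTask with hypothetical values; ``structure`` would be a pymatgen.Structure
# and the WFN paths point at files produced by a previous pw2bgw step.
#
#     task = EpsilonTask(
#         dirname='11-epsilon',
#         structure=structure,
#         ngkpt=[2, 2, 2],
#         qshift=[0.0, 0.0, 0.001],
#         ecuteps=10.0,
#         wfn_fname='wfn.cplx',
#         wfnq_fname='wfnq.cplx',
#     )
#     task.write()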
| 30.333333
| 82
| 0.606032
|
4a002be6655988567d40851153d6001b3f6dda1b
| 1,862
|
py
|
Python
|
src/RestrictedPython/__init__.py
|
rahulbahal7/restricted-python
|
c39cffe71dfc30630e946977735303d3a65b0383
|
[
"ZPL-2.1"
] | 236
|
2015-01-03T17:14:53.000Z
|
2022-03-01T15:52:46.000Z
|
src/RestrictedPython/__init__.py
|
rahulbahal7/restricted-python
|
c39cffe71dfc30630e946977735303d3a65b0383
|
[
"ZPL-2.1"
] | 149
|
2016-10-24T06:56:44.000Z
|
2022-02-24T08:09:10.000Z
|
src/RestrictedPython/__init__.py
|
rahulbahal7/restricted-python
|
c39cffe71dfc30630e946977735303d3a65b0383
|
[
"ZPL-2.1"
] | 30
|
2015-04-03T05:38:13.000Z
|
2021-11-10T05:13:38.000Z
|
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""RestrictedPython package."""
# flake8: NOQA: E401
# This is a file to define public API in the base namespace of the package.
# use: isort:skip to suppress all isort related warnings / errors,
# as the imports in this file are intentionally grouped logically.
# compile_restricted methods:
from RestrictedPython.compile import compile_restricted # isort:skip
from RestrictedPython.compile import compile_restricted_eval # isort:skip
from RestrictedPython.compile import compile_restricted_exec # isort:skip
from RestrictedPython.compile import compile_restricted_function # isort:skip
from RestrictedPython.compile import compile_restricted_single # isort:skip
# predefined builtins
from RestrictedPython.Guards import safe_builtins # isort:skip
from RestrictedPython.Guards import safe_globals # isort:skip
from RestrictedPython.Limits import limited_builtins # isort:skip
from RestrictedPython.Utilities import utility_builtins # isort:skip
# Helper Methods
from RestrictedPython.PrintCollector import PrintCollector # isort:skip
from RestrictedPython.compile import CompileResult # isort:skip
# Policy
from RestrictedPython.transformer import RestrictingNodeTransformer # isort:skip
#
from RestrictedPython.Eval import RestrictionCapableEval
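# Illustrative sketch (added, not part of the package): the typical pattern for using the
# names exported above; the source string is hypothetical.
#
#     source = "result = 6 * 7"
#     byte_code = compile_restricted(source, filename='<inline>', mode='exec')
#     namespace = {'__builtins__': safe_builtins}
#     exec(byte_code, namespace)
#     # namespace['result'] == 42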
| 43.302326
| 81
| 0.746509
|
4a002c8ecd6c1227a5c76ce68626dbcd76065120
| 3,258
|
py
|
Python
|
clean-libgo-dirs.py
|
thanm/devel-scripts
|
f8777a7c282b4b15f004e22ebf5c6b6751b82f87
|
[
"Apache-2.0"
] | null | null | null |
clean-libgo-dirs.py
|
thanm/devel-scripts
|
f8777a7c282b4b15f004e22ebf5c6b6751b82f87
|
[
"Apache-2.0"
] | null | null | null |
clean-libgo-dirs.py
|
thanm/devel-scripts
|
f8777a7c282b4b15f004e22ebf5c6b6751b82f87
|
[
"Apache-2.0"
] | 3
|
2019-05-18T23:01:33.000Z
|
2021-09-18T14:17:10.000Z
|
#!/usr/bin/python3
"""Script to clean object files in GCC/Gollvm build dir libgo subdirs.
"""
import getopt
import os
import sys
import script_utils as u
# Dry run mode
flag_dryrun = False
# Echo commands mode
flag_echo = False
def docmd(cmd):
"""Execute a command."""
if flag_echo or flag_dryrun:
sys.stderr.write("executing: " + cmd + "\n")
if flag_dryrun:
return
u.docmd(cmd)
def dochdir(thedir):
"""Switch to dir."""
if flag_echo or flag_dryrun:
sys.stderr.write("cd " + thedir + "\n")
try:
os.chdir(thedir)
except OSError as err:
u.error("chdir failed: %s" % err)
def do_clean(subdir):
"""Clean this libgo dir."""
flavs = (".o", "gox", "gox.tmp", ".a", ".so", ".lo", ".la", ".go")
here = os.getcwd()
dochdir(subdir)
cmd = "find . -depth "
first = True
for item in flavs:
if not first:
cmd += " -o "
first = False
cmd += "-name '*%s' -print" % item
lines = u.docmdlines(cmd)
#lines.reverse()
debris = lines
for d in debris:
if not d:
continue
u.verbose(1, "toclean '%s'" % d)
if flag_dryrun:
u.verbose(0, "... remove %s" % d)
else:
os.unlink(d)
dochdir(here)
def do_gollvm_clean():
"""Clean a gollv build dir."""
lgd = "tools/gollvm/libgo"
u.verbose(1, "visiting %s" % lgd)
do_clean(lgd)
files = ["vet", "test2json", "buildid", "go", "gofmt", "cgo"]
for f in files:
p = "tools/gollvm/gotools/" + f
if os.path.exists(p):
u.verbose(1, "cleaning %s" % p)
if not flag_dryrun:
os.unlink(p)
def do_gccgo_clean():
"""Clean a gccgo build dir."""
if not os.path.exists("config.log"):
u.error("no 'config.log' here -- needs to be run in GCC build dir")
lines = u.docmdlines("find . -depth -name libgo -print")
lines.reverse()
libgodirs = lines
for lgd in libgodirs:
if os.path.exists(lgd):
u.verbose(1, "visiting %s" % lgd)
do_clean(lgd)
files = ["vet", "test2json", "buildid", "go", "gofmt", "cgo"]
for f in files:
p = "gotools/" + f
if not flag_dryrun and os.path.exists(p):
u.verbose(1, "cleaning %s" % p)
os.unlink(p)
def perform():
"""Top level driver routine."""
if os.path.exists("config.log"):
do_gccgo_clean()
elif os.path.exists("CMakeCache.txt"):
do_gollvm_clean()
else:
u.error("no 'config.log' or 'CMakeCache.txt' here -- "
"needs to be run in gccgo or gollvm build dir")
def usage(msgarg):
"""Print usage and exit."""
me = os.path.basename(sys.argv[0])
if msgarg:
sys.stderr.write("error: %s\n" % msgarg)
print("""\
usage: %s [options]
options:
-d increase debug msg verbosity level
    -e    echo commands as they are executed
    -D    dryrun mode (echo commands but do not execute)
""" % me)
sys.exit(1)
def parse_args():
"""Command line argument parsing."""
global flag_dryrun, flag_echo
try:
optlist, args = getopt.getopt(sys.argv[1:], "deD")
except getopt.GetoptError as err:
# unrecognized option
usage(str(err))
if args:
usage("unknown extra args")
for opt, _ in optlist:
if opt == "-d":
u.increment_verbosity()
elif opt == "-e":
flag_echo = True
elif opt == "-D":
flag_dryrun = True
parse_args()
u.setdeflanglocale()
perform()
| 22.013514
| 71
| 0.601289
|
4a002ca9a322dd5533ef9a5104d0cc83ad1c817a
| 3,905
|
py
|
Python
|
tools/pinlist.py
|
jallib/jallib
|
5e6117bf9d518870194a6eeb26c2993b62eb4e8a
|
[
"Zlib",
"BSD-3-Clause"
] | 28
|
2015-04-23T07:30:11.000Z
|
2022-03-02T12:56:34.000Z
|
tools/pinlist.py
|
jallib/Jallib
|
50eba5921cb24e21f3fa5d1f946e9ee9b6a25a71
|
[
"Zlib",
"BSD-3-Clause"
] | 43
|
2015-07-26T09:03:09.000Z
|
2021-12-19T19:16:49.000Z
|
tools/pinlist.py
|
jallib/Jallib
|
50eba5921cb24e21f3fa5d1f946e9ee9b6a25a71
|
[
"Zlib",
"BSD-3-Clause"
] | 15
|
2015-05-20T06:44:04.000Z
|
2021-10-01T16:58:45.000Z
|
#!/usr/bin/env python3
"""
Title: List all pins and their aliases of all (Jallib) PICs
Author: Rob Hamerling, Copyright (c) 2017..2017. All rights reserved.
Adapted-by:
Revision: $Revision$
Compiler: N/A
This file is part of jallib https://github.com/jallib/jallib
Released under the BSD license https://www.opensource.org/licenses/bsd-license.php
Description:
Simple list of all pins and their aliases.
Sources: N/A
Notes:
"""
from pic2jal_environment import check_and_set_environment
base, mplabxversion = check_and_set_environment() # obtain environment variables
if (base == ""):
exit(1)
import os
import sys
import re
from xml.dom.minidom import parse, Node
pinlist = os.path.join(base, "pinlist.txt") # destination
picdir = os.path.join(base, "mplabx." + mplabxversion, "content", "edc") # base of .pic files
portpin = re.compile(r"^R[A-L]{1}[0-7]{1}\Z") # Rx0..7 (x in range A..L)
gpiopin = re.compile(r"^GP[0-5]{1}\Z") # GP0..5
# ------------------------------
def list_pic_pins(fp, pindict):
""" list all pins and their aliases of a pic
"""
pinlist = list(pindict.keys())
pinlist.sort() # pin number sequence
for pin in pinlist:
fp.write(" %3d" % (pin) + " : " + ", ".join(pindict[pin]) + "\n")
fp.write("\n")
# -------------------------------------
def build_pin_dict(fp, filepath):
""" build a dictionary with pins and their aliases for one pic
and print the dictionary
"""
dom = parse(filepath) # load .pic file
pinlist = {} # new dictionary
i = 1 # pin number
for pin in dom.getElementsByTagName("edc:Pin"): # select pin nodes
aliaslist = [] # new aliaslist this pin
for vpin in pin.getElementsByTagName("edc:VirtualPin"):
alias = vpin.getAttribute("edc:name") # raw alias
alias = alias.upper().strip("_").split()[0] # first word
aliaslist.append(alias) # add alias!
pinlist[i] = aliaslist # add aliaslist this pin
i += 1
for alias in aliaslist:
if (re.match(portpin, alias) or re.match(gpiopin, alias)): # select Rxy or GPx
portbit = alias
if portbit != aliaslist[0]: # not first in list
aliaslist.remove(portbit) # remove it
aliaslist.insert(0, portbit) # add it to front
break
picname = os.path.splitext(os.path.split(filepath)[1])[0][3:].upper() # pic type
print(picname) # progress signal
fp.write(picname + "\n")
if len(pinlist) > 0: # any pins in list
list_pic_pins(fp, pinlist) # list pinmap this pic
else:
print(" No pinlist!")
fp.write(" No pinlist\n")
# ---------------------
def build_pinlist():
""" For all (previously selected) PICs in MPLABX build pin dictionary
"""
with open(pinlist, "w") as fp:
fp.write("List of pins and their aliases in MPLABX" + mplabxversion + "\n\n") # opening line
for (root, dirs, files) in os.walk(picdir): # whole tree (incl subdirs!)
dirs.sort()
files.sort() # for unsorted filesystems!
for file in files:
build_pin_dict(fp, os.path.join(root,file)) # create pin dictionary
fp.write("\n")
# ================ mainline =======================
if (__name__ == "__main__"):
print("Building pinlist", picdir)
build_pinlist()
| 33.663793
| 124
| 0.519846
|
4a002d9015d18f840f418ac6f448480706e2d115
| 2,178
|
py
|
Python
|
examples/asyncio/wamp/rpc/complex/backend.py
|
rapyuta-robotics/autobahn-python
|
c08e9e352d526a7fd0885bb94706366a432ada1a
|
[
"MIT"
] | 1,670
|
2015-10-12T15:46:22.000Z
|
2022-03-30T22:12:53.000Z
|
examples/asyncio/wamp/rpc/complex/backend.py
|
rapyuta-robotics/autobahn-python
|
c08e9e352d526a7fd0885bb94706366a432ada1a
|
[
"MIT"
] | 852
|
2015-10-16T22:11:03.000Z
|
2022-03-27T07:57:01.000Z
|
examples/asyncio/wamp/rpc/complex/backend.py
|
rapyuta-robotics/autobahn-python
|
c08e9e352d526a7fd0885bb94706366a432ada1a
|
[
"MIT"
] | 790
|
2015-10-15T08:46:12.000Z
|
2022-03-30T12:22:13.000Z
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import asyncio
from os import environ
from autobahn.wamp.types import CallResult
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
Application component that provides procedures which
return complex results.
"""
async def onJoin(self, details):
def add_complex(a, ai, b, bi):
return CallResult(c=a + b, ci=ai + bi)
await self.register(add_complex, 'com.myapp.add_complex')
def split_name(fullname):
forename, surname = fullname.split()
return CallResult(forename, surname)
await self.register(split_name, 'com.myapp.split_name')
if __name__ == '__main__':
url = environ.get("AUTOBAHN_DEMO_ROUTER", "ws://127.0.0.1:8080/ws")
realm = "crossbardemo"
runner = ApplicationRunner(url, realm)
runner.run(Component)
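# Illustrative sketch (added, not part of the original example): what the calling side of
# these procedures could look like from another ApplicationSession; the session variable
# is hypothetical.
#
#     res = await self.call('com.myapp.add_complex', 2, 3, 4, 5)
#     print(res.kwresults['c'], res.kwresults['ci'])    # 6, 8
#     res = await self.call('com.myapp.split_name', 'Homer Simpson')
#     forename, surname = res.results                   # 'Homer', 'Simpson'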
| 37.551724
| 79
| 0.6809
|
4a002dbbdabb748eb00fbfbee6a35af8d4d2d47a
| 7,580
|
py
|
Python
|
website_showroom/models.py
|
future21star/ShowRoom
|
52ed0ba7544e780a72ee494f4b67a4de5a653246
|
[
"BSD-3-Clause"
] | 27
|
2015-01-25T15:43:47.000Z
|
2021-11-08T11:20:29.000Z
|
website_showroom/models.py
|
future21star/ShowRoom
|
52ed0ba7544e780a72ee494f4b67a4de5a653246
|
[
"BSD-3-Clause"
] | 2
|
2015-08-20T16:53:56.000Z
|
2018-07-05T15:43:09.000Z
|
website_showroom/models.py
|
future21star/ShowRoom
|
52ed0ba7544e780a72ee494f4b67a4de5a653246
|
[
"BSD-3-Clause"
] | 21
|
2015-01-06T03:36:52.000Z
|
2020-12-08T12:03:49.000Z
|
import os, uuid
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
class Edition(models.Model):
help_text = "Main title shown on page"
site_title = models.CharField(max_length=40, help_text=help_text)
help_text = "2-letter-country-code for showing a corresponding flag (e.g. 'de', 'en'). Careful, not existing code will break site."
country = models.CharField(max_length=2, help_text=help_text)
help_text = "Numeric value for edition order. Tip: Use 100-200-300-... steps for easy reordering. "
help_text += "Edition first in order will be used as edition default."
order = models.IntegerField(help_text=help_text)
help_text = "Something like 'English version', used for mouseover on flag"
short_description = models.CharField(max_length=40, help_text=help_text)
help_text = "Used for html title tag"
html_title = models.CharField(max_length=100, help_text=help_text)
help_text = "Subtitle (HTML tags possible)"
site_subtitle = models.CharField(max_length=125, help_text=help_text)
help_text = "Title for rss feed"
rss_title = models.CharField(max_length=100, help_text=help_text)
help_text = "Description for rss feed"
rss_description = models.CharField(max_length=200, help_text=help_text)
help_text = "Optional, link to Facebook page"
facebook_url = models.CharField(max_length=90, blank=True, null=True, help_text=help_text)
help_text = "Optional, link to Twitter page"
twitter_url = models.CharField(max_length=90, blank=True, null=True, help_text=help_text)
help_text = "Optional, link to Google+ page"
google_plus_url = models.CharField(max_length=90, blank=True, null=True, help_text=help_text)
help_text = "Something like - e.g. - 'Home'"
home_menu_title = models.CharField(max_length=40, help_text=help_text)
help_text = "HTML color code, e.g. '#999999"
home_menu_color = models.CharField(max_length=7, help_text=help_text)
help_text = "HTML color code, e.g. '#000000"
home_menu_active_color = models.CharField(max_length=7, help_text=help_text)
help_text = "Number of websites for home category"
home_num_websites = models.IntegerField(help_text=help_text)
help_text = "Left footer (HTML tags possible)"
footer_left = models.CharField(max_length=200, help_text=help_text)
help_text = "Right footer (HTML tags possible)"
footer_right = models.CharField(max_length=200, help_text=help_text)
help_text = "Title of contact navi"
contact_title = models.CharField(max_length=40, help_text=help_text)
help_text = "Complete HTML content of contact page, with <p>, <br> and all that stuff"
contact_html = models.TextField()
comments = models.TextField(blank=True)
ordering = ['order']
def __str__(self):
return self.site_title + " (" + self.country + ")"
class Category(models.Model):
help_text = "Only used in admin context, not displayed on site (edition specific category names)"
name = models.CharField(max_length=40, help_text=help_text)
color = models.CharField(max_length=7, help_text="Format: #ffffff")
active_color = models.CharField(max_length=7, help_text="Format: #ffffff")
def __str__(self):
return self.name
def get_ed_categories(self):
return EditionCategory.objects.filter(category_id=self.id)
def editions(self):
ed_categories = self.get_ed_categories()
ret = ''
for ed_c in ed_categories:
if len(ret) > 0:
ret += ' | '
ret += ed_c.edition.country
return ret
class EditionCategory(models.Model):
edition = models.ForeignKey(Edition)
category = models.ForeignKey(Category)
help_text = "Edition specific category name"
name = models.CharField(max_length=40, help_text=help_text)
help_text = "Every url-conform string except 'contact' (e.g. 'my-category-1')"
url_name = models.SlugField(max_length=40, help_text=help_text)
help_text = "Numeric value for category order. Tip: Use 100-200-300-... steps for easy reordering."
order = models.IntegerField(help_text=help_text)
ordering = ['order']
def get_path(instance, filename):
pos = filename.rfind('.')
path = 'screenshots/' + 's_' + str(uuid.uuid1()) + filename[pos:]
return path
class Website(models.Model):
help_text = "Generic title, used if no extra edition specific title is provided"
title = models.CharField(max_length=50, help_text=help_text)
category = models.ForeignKey(Category)
help_text = "Optional, 2-letter-country-code for showing a corresponding flag (e.g. 'de', 'en'). Careful, not existing code will break site."
country = models.CharField(max_length=2, null=True, blank=True, help_text=help_text)
help_text = "Image file, size: 300x200, name will be unified. "
help_text += "Larger file image will be resized. "
help_text += "Greater height will be cropped (making screen capture with website width "
help_text += "and height generously higher than aspect ratio is easiest)"
screenshot = models.ImageField(upload_to=get_path, help_text=help_text)
url = models.CharField(max_length=90)
pub_date = models.DateTimeField('date published', auto_now_add=True)
ordering = ['category']
def __str__(self):
return self.title
def get_ed_websites(self):
return EditionWebsite.objects.filter(website_id=self.id)
def editions(self):
ed_websites = self.get_ed_websites()
ret = ''
for ed_ws in ed_websites:
if len(ret) > 0:
ret += ' | '
ret += ed_ws.edition.country + ' (' + str(ed_ws.order) + ')'
return ret
@receiver(post_save, sender=Website)
def post_save_handler(sender, instance, using, **kwargs):
from PIL import Image
image = Image.open(instance.screenshot)
thumb_ratio = float(1.5)
img_ratio = float(image.size[0]) / float(image.size[1])
print("Ratios: T " + str(thumb_ratio) + ", I " + str(img_ratio))
# img is relatively heigher than thumb
if thumb_ratio > img_ratio:
crop_width = image.size[0]
crop_height = int(image.size[0] / thumb_ratio)
image = image.crop((0, 0, crop_width, crop_height,))
image.thumbnail([300, 200], Image.ANTIALIAS)
image.save(settings.MEDIA_ROOT + '/' + instance.screenshot.name)
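# Worked example (added for clarity): for a hypothetical 600x500 upload, img_ratio is
# 600/500 = 1.2 < 1.5, so the image is cropped to 600x400 (600 / 1.5) and then
# thumbnailed to 300x200, matching the size mentioned in the model help text.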
@receiver(pre_delete, sender=Website)
def pre_delete_handler(sender, instance, using, **kwargs):
try:
path = settings.MEDIA_ROOT + '/' + instance.screenshot.name
os.remove(path)
except OSError:
pass
pre_delete.connect(pre_delete_handler, sender=Website)
post_save.connect(post_save_handler, sender=Website)
class EditionWebsite(models.Model):
edition = models.ForeignKey(Edition)
website = models.ForeignKey(Website)
help_text = "Edition specific title, if left empty, generic title is used"
title = models.CharField(max_length=50, null=True, blank=True, help_text=help_text)
help_text = "Edition specific description"
desc = models.TextField(help_text=help_text)
help_text = "Numeric value for website order. Tip: Use 100-200-300-... steps for easy reordering."
order = models.IntegerField(help_text=help_text)
pub_date = models.DateTimeField('date published', auto_now_add=True)
def get_title(self):
if self.title:
return self.title
else:
return self.website.title
| 44.069767
| 145
| 0.702639
|
4a002dc963c1a92bd9242b4fa5a56fbfe6c330a2
| 1,097
|
py
|
Python
|
utils/run.py
|
nilsalex/yktotp-jsonapi
|
df61a5c5e7c06c2db4d8bfda1a5a95e1c555afcb
|
[
"MIT"
] | 1
|
2022-03-27T08:40:31.000Z
|
2022-03-27T08:40:31.000Z
|
utils/run.py
|
nilsalex/yktotp-jsonapi
|
df61a5c5e7c06c2db4d8bfda1a5a95e1c555afcb
|
[
"MIT"
] | null | null | null |
utils/run.py
|
nilsalex/yktotp-jsonapi
|
df61a5c5e7c06c2db4d8bfda1a5a95e1c555afcb
|
[
"MIT"
] | 1
|
2022-03-27T08:40:40.000Z
|
2022-03-27T08:40:40.000Z
|
#!/usr/bin/env python3
import os
import platform
import subprocess
import sys
if len(sys.argv) < 2:
print("no message provided")
print("usage: run.py [message]")
exit(1)
message = sys.argv[1]
message_raw = bytes(message, "UTF-8")
length = len(message_raw)
length_raw = length.to_bytes(4, byteorder=sys.byteorder, signed=False)
encoded_message = length_raw + message_raw
print(f"message: {message}")
print(f"encoded message: {encoded_message}")
exe_name = "yktotp-jsonapi.exe" if platform.system() == "Windows" else "yktotp-jsonapi"
exe_path = os.path.join(os.path.dirname(sys.argv[0]), "../target/release/", exe_name)
process = subprocess.run(os.path.abspath(exe_path),
text=True,
input=encoded_message.decode(),
capture_output=True)
response_raw = bytes(process.stdout, "UTF-8")
print(f"encoded response: {response_raw}")
response_length = int.from_bytes(response_raw[0:4], byteorder=sys.byteorder, signed=False)
response = response_raw[4:4 + response_length].decode()
print(f"response: {response}")
| 29.648649
| 90
| 0.690975
|
4a002ffbde8aa6989156d61a125800e3637656d4
| 28,353
|
py
|
Python
|
src/ansys/mapdl/core/mapdl_geometry.py
|
beppo-dd/pymapdl
|
19da39c785bacfb5c785328a0dd139a173924bd7
|
[
"MIT"
] | null | null | null |
src/ansys/mapdl/core/mapdl_geometry.py
|
beppo-dd/pymapdl
|
19da39c785bacfb5c785328a0dd139a173924bd7
|
[
"MIT"
] | null | null | null |
src/ansys/mapdl/core/mapdl_geometry.py
|
beppo-dd/pymapdl
|
19da39c785bacfb5c785328a0dd139a173924bd7
|
[
"MIT"
] | null | null | null |
"""Module to support MAPDL CAD geometry"""
import re
import numpy as np
import pyvista as pv
from ansys.mapdl.core.misc import run_as_prep7, supress_logging
VALID_TYPE_MSG = """- 'S' : Select a new set (default)
- 'R' : Reselect a set from the current set.
- 'A' : Additionally select a set and extend the current set.
- 'U' : Unselect a set from the current set.
"""
FLST_LOOKUP = {
"NODE": 1, # node numbers
"ELEM": 2, # element numbers
"KP": 3, # keypoint numbers
"LINE": 4, # line numbers
"AREA": 5, # area numbers
"VOLU": 6, # volume numbers
"TRACE": 7, # trace points
"COORD": 8, # coordinate locations
}
def merge_polydata(items):
"""Merge list of polydata or unstructured grids"""
# lazy import here for faster module loading
try:
from pyvista._vtk import vtkAppendPolyData
    except ImportError:
from vtk import vtkAppendPolyData
afilter = vtkAppendPolyData()
for item in items:
afilter.AddInputData(item)
afilter.Update()
return pv.wrap(afilter.GetOutput())
def get_elements_per_area(resp):
"""Get the number of elements meshed for each area given the response
from ``AMESH``.
GENERATE NODES AND ELEMENTS IN ALL SELECTED AREAS
** AREA 1 MESHED WITH 64 QUADRILATERALS, 0 TRIANGLES **
** AREA 2 MESHED WITH 64 QUADRILATERALS, 0 TRIANGLES **
** AREA 3 MESHED WITH 64 QUADRILATERALS, 0 TRIANGLES **
** AREA 4 MESHED WITH 64 QUADRILATERALS, 0 TRIANGLES **
** AREA 5 MESHED WITH 64 QUADRILATERALS, 0 TRIANGLES **
** AREA 6 MESHED WITH 64 QUADRILATERALS, 0 TRIANGLES **
** AREA 7 MESHED WITH 64 QUADRILATERALS, 0 TRIANGLES **
** AREA 8 MESHED WITH 64 QUADRILATERALS, 0 TRIANGLES **
** AREA 9 MESHED WITH 64 QUADRILATERALS, 0 TRIANGLES **
** AREA 10 MESHED WITH 64 QUADRILATERALS, 0 TRIANGLES **
** AREA 11 MESHED WITH 64 QUADRILATERALS, 0 TRIANGLES **
** AREA 12 MESHED WITH 64 QUADRILATERALS, 0 TRIANGLES **
NUMBER OF AREAS MESHED = 12
MAXIMUM NODE NUMBER = 772
MAXIMUM ELEMENT NUMBER = 768
Returns
-------
list
List of tuples, each containing the area number and number of
elements per area.
"""
# MAPDL changed their output at some point. Check for both output types.
reg = re.compile(r"Meshing of area (\d*) completed \*\* (\d*) elements")
groups = reg.findall(resp)
if groups:
groups = [[int(anum), int(nelem)] for anum, nelem in groups]
else:
reg = re.compile(r"AREA\s*(\d*).*?(\d*)\s*QUADRILATERALS,\s*(\d*) TRIANGLES")
groups = reg.findall(resp)
groups = [(int(anum), int(nquad) + int(ntri)) for anum, nquad, ntri in groups]
return groups
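# Illustrative example (not part of the original module): on the legacy output
# format shown in the docstring above, the fallback regex sums the
# quadrilateral and triangle counts per area, e.g.
#
#     >>> get_elements_per_area("** AREA 1 MESHED WITH 64 QUADRILATERALS, 0 TRIANGLES **")
#     [(1, 64)]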
class Geometry:
"""Pythonic representation of MAPDL CAD geometry
Contains advanced methods to extend geometry building and
selection within MAPDL.
"""
def __init__(self, mapdl):
from ansys.mapdl.core.mapdl import _MapdlCore
if not isinstance(mapdl, _MapdlCore):
raise TypeError("Must be initialized using a MAPDL class")
self._mapdl = mapdl
self._keypoints_cache = None
self._lines_cache = None
self._log = self._mapdl._log
def _set_log_level(self, level):
return self._mapdl.set_log_level(level)
def _load_iges(self):
"""Loads the iges file from MAPDL as a pyiges class"""
# Lazy import here for speed and stability
# possible to exclude this import in the future
try:
from pyiges import Iges
except ImportError:
raise ImportError(
"Please install pyiges to use this feature with:\n" "pip install pyiges"
)
return Iges(self._mapdl._generate_iges())
def _reset_cache(self):
self._keypoints_cache = None
self._lines_cache = None
@property
def _keypoints(self):
"""Returns keypoints cache"""
if self._keypoints_cache is None:
self._keypoints_cache = self._load_keypoints()
return self._keypoints_cache
@property
def keypoints(self):
"""Keypoint coordinates"""
return np.asarray(self._keypoints.points)
@property
def _lines(self):
"""Returns lines cache"""
if self._lines_cache is None:
self._lines_cache = self._load_lines()
return self._lines_cache
@property
def lines(self):
"""Active lines as a pyvista.PolyData"""
return self._lines
def areas(self, quality=4, merge=False):
"""List of areas from MAPDL represented as ``pyvista.PolyData``.
Parameters
----------
quality : int, optional
quality of the mesh to display. Varies between 1 (worst)
to 10 (best).
merge : bool, optional
Option to merge areas into a single mesh. Default
``False`` to return a list of areas. When ``True``,
output will be a single mesh.
Returns
-------
list of pyvista.UnstructuredGrid
List of ``pyvista.UnstructuredGrid`` meshes representing
the active surface areas selected by ``ASEL``. If
``merge=True``, areas are returned as a single merged
UnstructuredGrid.
Examples
--------
        Return a list of areas as individual grids
        >>> areas = mapdl.areas(quality=3)
        >>> areas
[UnstructuredGrid (0x7f14add95040)
N Cells: 12
N Points: 20
X Bounds: -2.000e+00, 2.000e+00
Y Bounds: 0.000e+00, 1.974e+00
Z Bounds: 0.000e+00, 0.000e+00
N Arrays: 4,
UnstructuredGrid (0x7f14add95ca0)
N Cells: 12
N Points: 20
X Bounds: -2.000e+00, 2.000e+00
Y Bounds: 0.000e+00, 1.974e+00
Z Bounds: 5.500e-01, 5.500e-01
N Arrays: 4,
...
Return a single merged mesh.
        >>> area_mesh = mapdl.areas(quality=3, merge=True)
>>> area_mesh
UnstructuredGrid (0x7f14add95ca0)
N Cells: 24
N Points: 30
X Bounds: -2.000e+00, 2.000e+00
Y Bounds: 0.000e+00, 1.974e+00
Z Bounds: 5.500e-01, 5.500e-01
N Arrays: 4
"""
quality = int(quality)
        if quality > 10 or quality < 1:
            raise ValueError("``quality`` parameter must be a value between 1 and 10")
surf = self.generate_surface(11 - quality)
if merge:
return surf
entity_num = surf["entity_num"]
areas = []
anums = np.unique(entity_num)
for anum in anums:
areas.append(surf.extract_cells(entity_num == anum))
return areas
@supress_logging
@run_as_prep7
def generate_surface(self, density=4, amin=None, amax=None, ninc=None):
"""Generate an all-triangular surface of the active surfaces.
Parameters
----------
density : int, optional
APDL smart sizing option. Ranges from 1 (worst) to 10
(best).
amin : int, optional
Minimum APDL numbered area to select. See
``mapdl.anum`` for available areas.
amax : int, optional
Maximum APDL numbered area to select. See
``mapdl.anum`` for available areas.
ninc : int, optional
            Step increment between ``amin`` and ``amax``.
"""
# store initially selected areas and elements
with self._mapdl.non_interactive:
self._mapdl.cm("__tmp_elem__", "ELEM")
self._mapdl.cm("__tmp_area__", "AREA")
orig_anum = self.anum
# reselect from existing selection to mimic APDL behavior
if amin or amax:
if amax is None:
amax = amin
if amin is None: # amax is non-zero
amin = 1
if ninc is None:
ninc = ""
self._mapdl.asel("R", "AREA", vmin=amin, vmax=amax, vinc=ninc)
# duplicate areas to avoid affecting existing areas
a_num = int(self._mapdl.get(entity="AREA", item1="NUM", it1num="MAXD"))
self._mapdl.numstr("AREA", a_num, mute=True)
self._mapdl.agen(2, "ALL", noelem=1, mute=True)
a_max = int(self._mapdl.get(entity="AREA", item1="NUM", it1num="MAXD"))
self._mapdl.asel("S", "AREA", vmin=a_num + 1, vmax=a_max, mute=True)
# necessary to reset element/area meshing association
self._mapdl.aatt(mute=True)
# create a temporary etype
etype_max = int(self._mapdl.get(entity="ETYP", item1="NUM", it1num="MAX"))
etype_old = self._mapdl.parameters.type
etype_tmp = etype_max + 1
old_routine = self._mapdl.parameters.routine
self._mapdl.et(etype_tmp, "MESH200", 6, mute=True)
self._mapdl.shpp("off", mute=True)
self._mapdl.smrtsize(density, mute=True)
self._mapdl.type(etype_tmp, mute=True)
if old_routine != "PREP7":
self._mapdl.prep7(mute=True)
# Mesh and get the number of elements per area
resp = self._mapdl.amesh("all")
groups = get_elements_per_area(resp)
self._mapdl.esla("S")
grid = self._mapdl.mesh._grid.linear_copy()
pd = pv.PolyData(grid.points, grid.cells, n_faces=grid.n_cells)
# pd['ansys_node_num'] = grid['ansys_node_num']
# pd['vtkOriginalPointIds'] = grid['vtkOriginalPointIds']
# pd.clean(inplace=True) # OPTIONAL
# delete all temporary meshes and clean up settings
self._mapdl.aclear("ALL", mute=True)
self._mapdl.adele("ALL", kswp=1, mute=True)
self._mapdl.numstr("AREA", 1, mute=True)
self._mapdl.type(etype_old, mute=True)
self._mapdl.etdele(etype_tmp, mute=True)
self._mapdl.shpp("ON", mute=True)
self._mapdl.smrtsize("OFF", mute=True)
self._mapdl.cmsel("S", "__tmp_area__", "AREA", mute=True)
self._mapdl.cmsel("S", "__tmp_elem__", "ELEM", mute=True)
# store the area number used for each element
entity_num = np.empty(grid.n_cells, dtype=np.int32)
if grid and groups:
# add anum info
i = 0
for index, (anum, nelem) in enumerate(groups):
# have to use original area numbering here as the
# duplicated areas numbers are inaccurate
entity_num[i : i + nelem] = orig_anum[index]
i += nelem
else:
entity_num[:] = 0
pd["entity_num"] = entity_num
return pd
@property
def n_volu(self):
"""Number of volumes currently selected
Examples
--------
        >>> mapdl.n_volu
1
"""
return self._item_count("VOLU")
@property
def n_area(self):
"""Number of areas currently selected
Examples
--------
>>> mapdl.n_area
1
"""
return self._item_count("AREA")
@property
def n_line(self):
"""Number of lines currently selected
Examples
--------
>>> mapdl.n_line
1
"""
return self._item_count("LINE")
@property
def n_keypoint(self):
"""Number of keypoints currently selected
Examples
--------
>>> mapdl.n_keypoint
1
"""
return self._item_count("KP")
@supress_logging
def _item_count(self, entity):
"""Return item count for a given entity"""
return int(self._mapdl.get(entity=entity, item1="COUNT"))
@property
def knum(self):
"""Array of keypoint numbers.
Examples
--------
>>> mapdl.block(0, 1, 0, 1, 0, 1)
>>> mapdl.knum
array([1, 2, 3, 4, 5, 6, 7, 8], dtype=int32)
"""
return self._mapdl.get_array("KP", item1="KLIST").astype(np.int32)
@property
def lnum(self):
"""Array of line numbers.
Examples
--------
>>> mapdl.block(0, 1, 0, 1, 0, 1)
>>> mapdl.lnum
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)
"""
# this (weirdly) sometimes fails
for _ in range(5):
lnum = self._mapdl.get_array("LINES", item1="LLIST")
if lnum.size == self.n_line:
break
return lnum.astype(np.int32)
@property
def anum(self):
"""Array of area numbers.
Examples
--------
>>> mapdl.block(0, 1, 0, 1, 0, 1)
>>> mapdl.anum
array([1, 2, 3, 4, 5, 6], dtype=int32)
"""
return self._mapdl.get_array("AREA", item1="ALIST").astype(np.int32)
@property
def vnum(self):
"""Array of volume numbers.
Examples
--------
>>> mapdl.block(0, 1, 0, 1, 0, 1)
>>> mapdl.vnum
array([1], dtype=int32)
"""
return self._mapdl.get_array("VOLU", item1="VLIST").astype(np.int32)
@supress_logging
def _load_lines(self):
"""Load lines from MAPDL using IGES"""
# ignore volumes
self._mapdl.cm("__tmp_volu__", "VOLU", mute=True)
self._mapdl.cm("__tmp_line__", "LINE", mute=True)
self._mapdl.cm("__tmp_area__", "AREA", mute=True)
self._mapdl.cm("__tmp_keyp__", "KP", mute=True)
self._mapdl.ksel("ALL", mute=True)
self._mapdl.lsel("ALL", mute=True)
self._mapdl.asel("ALL", mute=True)
self._mapdl.vsel("NONE", mute=True)
iges = self._load_iges()
self._mapdl.cmsel("S", "__tmp_volu__", "VOLU", mute=True)
self._mapdl.cmsel("S", "__tmp_area__", "AREA", mute=True)
self._mapdl.cmsel("S", "__tmp_line__", "LINE", mute=True)
self._mapdl.cmsel("S", "__tmp_keyp__", "KP", mute=True)
selected_lnum = self.lnum
lines = []
entity_nums = []
for bspline in iges.bsplines():
            # keep only status 1 and 10001; others appear to be construction entities
if bspline.d["status_number"] in [1, 10001]:
entity_num = int(bspline.d["entity_subs_num"])
if entity_num not in entity_nums and entity_num in selected_lnum:
entity_nums.append(entity_num)
line = bspline.to_vtk()
line.cell_data["entity_num"] = entity_num
lines.append(line)
entities = iges.lines() + iges.circular_arcs()
for line in entities:
if line.d["status_number"] == 1:
entity_num = int(line.d["entity_subs_num"])
if entity_num not in entity_nums and entity_num in selected_lnum:
entity_nums.append(entity_num)
line = line.to_vtk(resolution=100)
line.cell_data["entity_num"] = entity_num
lines.append(line)
if lines:
lines = merge_polydata(lines)
lines["entity_num"] = lines["entity_num"].astype(np.int32)
else:
lines = pv.PolyData()
return lines
def _load_keypoints(self):
"""Load keypoints from MAPDL using IGES"""
# write only keypoints
self._mapdl.cm("__tmp_volu__", "VOLU", mute=True)
self._mapdl.cm("__tmp_area__", "AREA", mute=True)
self._mapdl.cm("__tmp_line__", "LINE", mute=True)
self._mapdl.vsel("NONE", mute=True)
self._mapdl.asel("NONE", mute=True)
self._mapdl.lsel("NONE", mute=True)
iges = self._load_iges()
self._mapdl.cmsel("S", "__tmp_volu__", "VOLU", mute=True)
self._mapdl.cmsel("S", "__tmp_area__", "AREA", mute=True)
self._mapdl.cmsel("S", "__tmp_line__", "LINE", mute=True)
keypoints = []
kp_num = []
for kp in iges.points():
keypoints.append([kp.x, kp.y, kp.z])
kp_num.append(int(kp.d["entity_subs_num"]))
# self._kp_num = np.array(self._kp_num)
keypoints_pd = pv.PolyData(keypoints)
keypoints_pd["entity_num"] = kp_num
return keypoints_pd
def __str__(self):
"""Current geometry info"""
info = "MAPDL Selected Geometry\n"
info += "Keypoints: %d\n" % self.n_keypoint
info += "Lines: %d\n" % self.n_line
info += "Areas: %d\n" % self.n_area
info += "Volumes: %d\n" % self.n_volu
return info
def keypoint_select(self, items, sel_type="S", return_selected=False):
"""Select keypoints using a sequence of items.
Parameters
----------
items : sequence or None
List, range, or sequence of integers of the keypoints you want
to select. If ``None`` or ``'NONE'``, no keypoints will be
selected. If 'ALL', selects all keypoints.
sel_type : str, optional
Selection type. May be one of the following:
* ``'S'``: Select a new set (default)
* ``'R'``: Reselect a set from the current set.
* ``'A'``: Additionally select a set and extend the current set.
* ``'U'``: Unselect a set from the current set.
return_selected : bool, optional
Return the keypoint numbers selected. Optional, and can be
disabled for performance. Default ``False``.
Returns
-------
list
List of keypoint numbers if ``return_selected=True``.
Examples
--------
Create a new selection of keypoints [1, 5, 10]
>>> mapdl.geometry.keypoint_select([1, 5, 10])
Create a new selection of keypoints from 1 through 20
>>> mapdl.geometry.keypoint_select(range(1, 21))
Unselect keypoints 1 through 20
>>> mapdl.geometry.keypoint_select(range(1, 21), sel_type='U')
Append to an existing selection of keypoints
>>> mapdl.geometry.keypoint_select([1, 2, 3], sel_type='A')
Reselect from the existing selection of keypoints
>>> mapdl.geometry.keypoint_select([3, 4, 5], sel_type='R')
Select no keypoints
>>> mapdl.geometry.keypoint_select(None)
Select all keypoints
>>> mapdl.geometry.keypoint_select('ALL')
"""
if isinstance(items, str):
items = items.upper()
# special cases
if items is None or items == "NONE":
self._mapdl.ksel("NONE")
return
if items == "ALL":
self._mapdl.ksel("ALL")
if return_selected:
return self.knum
return
self._select_items(items, "KP", sel_type)
if return_selected:
return self.knum
def line_select(self, items, sel_type="S", return_selected=False):
"""Select lines using a sequence of items.
Parameters
----------
items : sequence or None
List, range, or sequence of integers of the lines you want
to select. If ``None`` or ``'NONE'``, no lines will be
selected. If 'ALL', selects all lines.
sel_type : str, optional
Selection type. May be one of the following:
* ``'S'``: Select a new set (default)
* ``'R'``: Reselect a set from the current set.
* ``'A'``: Additionally select a set and extend the current set.
* ``'U'``: Unselect a set from the current set.
return_selected : bool, optional
Return the line numbers selected. Optional, and can be
disabled for performance. Default ``False``.
Returns
-------
list
List of the selected lines if ``return_selected=True``.
Examples
--------
Create a new selection of lines [1, 5, 10]
>>> mapdl.geometry.line_select([1, 5, 10])
Create a new selection of lines from 1 through 20
>>> mapdl.geometry.line_select(range(1, 21))
Unselect lines 1 through 20
>>> mapdl.geometry.line_select(range(1, 21), sel_type='U')
Append to an existing selection of lines
>>> mapdl.geometry.line_select([1, 2, 3], sel_type='A')
Reselect from the existing selection of lines
>>> mapdl.geometry.line_select([3, 4, 5], sel_type='R')
Select no lines
>>> mapdl.geometry.line_select(None)
Select all lines
>>> mapdl.geometry.line_select('ALL')
"""
if isinstance(items, str):
items = items.upper()
# special cases
if items is None or items == "NONE":
self._mapdl.lsel("NONE")
return
if items == "ALL":
self._mapdl.lsel("ALL")
if return_selected:
return self.lnum
return
self._select_items(items, "LINE", sel_type)
if return_selected:
return self.lnum
def area_select(self, items, sel_type="S", return_selected=False):
"""Select areas using a sequence of items.
Parameters
----------
items : sequence, str, None
List, range, or sequence of integers of the areas you want
to select. If ``None`` or ``'NONE'``, no areas will be
selected. If 'ALL', selects all areas.
sel_type : str, optional
Selection type. May be one of the following:
* ``'S'``: Select a new set (default)
* ``'R'``: Reselect a set from the current set.
* ``'A'``: Additionally select a set and extend the current set.
* ``'U'``: Unselect a set from the current set.
return_selected : bool, optional
Return the area numbers selected. Optional, and can be
disabled for performance. Default ``False``.
Returns
-------
list
List of the selected areas if ``return_selected=True``.
Examples
--------
Create a new selection of areas [1, 5, 10]
>>> mapdl.geometry.area_select([1, 5, 10])
Create a new selection of areas from 1 through 20
>>> mapdl.geometry.area_select(range(1, 21))
Unselect areas 1 through 20
>>> mapdl.geometry.area_select(range(1, 21), sel_type='U')
Append to an existing selection of areas
>>> mapdl.geometry.area_select([1, 2, 3], sel_type='A')
Reselect from the existing selection of areas
>>> mapdl.geometry.area_select([3, 4, 5], sel_type='R')
Select no areas
>>> mapdl.geometry.area_select(None)
Select all areas
>>> mapdl.geometry.area_select('ALL')
"""
if isinstance(items, str):
items = items.upper()
# special cases
if items is None or items == "NONE":
self._mapdl.asel("NONE")
return
if items == "ALL":
self._mapdl.asel("ALL")
if return_selected:
return self.anum
return
self._select_items(items, "AREA", sel_type)
if return_selected:
return self.anum
def volume_select(self, items, sel_type="S", return_selected=False):
"""Select volumes using a sequence of items.
Parameters
----------
items : sequence, str, or None
List, range, or sequence of integers of the volumes you want
to select. If ``None`` or ``'NONE'``, no volumes will be
selected. If 'ALL', selects all volumes.
sel_type : str, optional
Selection type. May be one of the following:
* ``'S'``: Select a new set (default)
* ``'R'``: Reselect a set from the current set.
* ``'A'``: Additionally select a set and extend the current set.
* ``'U'``: Unselect a set from the current set.
return_selected : bool, optional
Return the volume numbers selected. Optional, and can be
disabled for performance. Default ``False``.
Returns
-------
list
List of the selected volumes if ``return_selected=True``.
Examples
--------
Create a new selection of volumes [1, 5, 10]
>>> mapdl.geometry.volume_select([1, 5, 10])
Create a new selection of volumes from 1 through 20
>>> mapdl.geometry.volume_select(range(1, 21))
Unselect volumes 1 through 20
>>> mapdl.geometry.volume_select(range(1, 21), sel_type='U')
Append to an existing selection of volumes
>>> mapdl.geometry.volume_select([1, 2, 3], sel_type='A')
Reselect from the existing selection of volumes
>>> mapdl.geometry.volume_select([3, 4, 5], sel_type='R')
Select no volumes
>>> mapdl.geometry.volume_select(None)
Select all volumes
>>> mapdl.geometry.volume_select('ALL')
"""
if isinstance(items, str):
items = items.upper()
# special cases
if items is None or items == "NONE":
self._mapdl.vsel("NONE")
return
if items == "ALL":
self._mapdl.vsel("ALL")
if return_selected:
return self.vnum
return
self._select_items(items, "VOLU", sel_type)
if return_selected:
return self.vnum
def _select_items(self, items, item_type, sel_type):
"""Select items using FLST
Parameters
----------
        items : sequence
Sequence of items.
item_type : str
Item lookup type. One of:
* 'NODE' : node numbers
* 'ELEM' : element numbers
* 'KP' : keypoint numbers
* 'LINE' : line numbers
* 'AREA' : area numbers
* 'VOLU' : volume numbers
* 'TRACE' : trace points
* 'COORD' : coordinate locations
sel_type : str, optional
Selection type. Must be one of the following:
* ``'S'``: Select a new set (default)
* ``'R'``: Reselect a set from the current set.
* ``'A'``: Additionally select a set and extend the current set.
* ``'U'``: Unselect a set from the current set.
"""
if item_type not in FLST_LOOKUP:
raise KeyError(f'Invalid ``item_type`` "{item_type}"')
sel_type = sel_type.upper()
valid_sel_type = ["S", "R", "A", "U"]
if sel_type not in valid_sel_type:
raise ValueError(
f'Invalid ``sel_type`` "{sel_type}"\n\n'
f"Use one of the following:\n{VALID_TYPE_MSG}"
)
# convert to a flat array as it's easier for type checking
items = np.asarray(items)
if not np.issubdtype(items.dtype, np.number):
raise TypeError("Item numbers must be a numeric type")
        # use ``np.int_``; the bare ``np.int`` alias was removed from recent NumPy
        items = items.ravel().astype(np.int_, copy=False)
# consider logic for negative values to support ranges. This
# is the 'ORDER' option
# ordered items
# FLST,5,76,4,ORDE,74
# FITEM,5,2
# LSEL, , , ,P51X
# unordered option
with self._mapdl.non_interactive:
self._mapdl.flst(5, items.size, FLST_LOOKUP[item_type])
for item in items:
self._mapdl.fitem(5, item)
if item_type == "NODE":
self._mapdl.nsel(sel_type, vmin="P51X")
elif item_type == "ELEM":
self._mapdl.esel(sel_type, vmin="P51X")
elif item_type == "KP":
self._mapdl.ksel(sel_type, vmin="P51X")
elif item_type == "LINE":
self._mapdl.lsel(sel_type, vmin="P51X")
elif item_type == "AREA":
self._mapdl.asel(sel_type, vmin="P51X")
elif item_type == "VOLU":
self._mapdl.vsel(sel_type, vmin="P51X")
else:
raise ValueError(f'Unable to select "{item_type}"')
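# Illustrative sketch (not part of the original module): a call such as
# ``_select_items([1, 2, 3], "LINE", "S")`` would roughly translate into the
# following APDL commands inside the non-interactive block above:
#
#     FLST, 5, 3, 4        ! 3 picked items of type 4 (lines)
#     FITEM, 5, 1
#     FITEM, 5, 2
#     FITEM, 5, 3
#     LSEL, S, , , P51X    ! select a new set from the picked list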
| 31.608696
| 88
| 0.557683
|
4a00304d4ff99d519cb3157639f8e257eb88f6d0
| 368
|
py
|
Python
|
language/python/modules/Selenium/chromedriver_module.py
|
bigfoolliu/liu_aistuff
|
aa661d37c05c257ee293285dd0868fb7e8227628
|
[
"MIT"
] | 1
|
2019-11-25T07:23:42.000Z
|
2019-11-25T07:23:42.000Z
|
language/python/modules/Selenium/chromedriver_module.py
|
bigfoolliu/liu_aistuff
|
aa661d37c05c257ee293285dd0868fb7e8227628
|
[
"MIT"
] | 13
|
2020-01-07T16:09:47.000Z
|
2022-03-02T12:51:44.000Z
|
language/python/modules/Selenium/chromedriver_module.py
|
bigfoolliu/liu_aistuff
|
aa661d37c05c257ee293285dd0868fb7e8227628
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
Control the browser via chromedriver.
"""
from selenium import webdriver
import time
wd = webdriver.Chrome()
wd.get("https://www.baidu.com")  # open the Baidu homepage
wd.find_element_by_id("kw").send_keys("天气")  # locate the search box and enter the search term
wd.find_element_by_id("su").click()  # click the [Baidu Search] button to run the search
time.sleep(3)  # wait 3 seconds
wd.quit()  # close the browser
| 17.52381
| 59
| 0.679348
|
4a0030d35bd400da99c1b54f6877c35ed15a84d3
| 5,013
|
py
|
Python
|
zaqar/storage/sqlalchemy/flavors.py
|
mail2nsrajesh/zaqar
|
a68a03a228732050b33c2a7f35d1caa9f3467718
|
[
"Apache-2.0"
] | null | null | null |
zaqar/storage/sqlalchemy/flavors.py
|
mail2nsrajesh/zaqar
|
a68a03a228732050b33c2a7f35d1caa9f3467718
|
[
"Apache-2.0"
] | null | null | null |
zaqar/storage/sqlalchemy/flavors.py
|
mail2nsrajesh/zaqar
|
a68a03a228732050b33c2a7f35d1caa9f3467718
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""flavors: an implementation of the flavor management storage
controller for sqlalchemy.
"""
import oslo_db.exception
import sqlalchemy as sa
from zaqar.storage import base
from zaqar.storage import errors
from zaqar.storage.sqlalchemy import tables
from zaqar.storage.sqlalchemy import utils
class FlavorsController(base.FlavorsBase):
def __init__(self, *args, **kwargs):
super(FlavorsController, self).__init__(*args, **kwargs)
self._pools_ctrl = self.driver.pools_controller
@utils.raises_conn_error
def list(self, project=None, marker=None, limit=10, detailed=False):
marker = marker or ''
# TODO(cpp-cabrera): optimization - limit the columns returned
# when detailed=False by specifying them in the select()
# clause
stmt = sa.sql.select([tables.Flavors]).where(
sa.and_(tables.Flavors.c.name > marker,
tables.Flavors.c.project == project)
)
if limit > 0:
stmt = stmt.limit(limit)
cursor = self.driver.run(stmt)
marker_name = {}
def it():
for cur in cursor:
marker_name['next'] = cur[0]
yield _normalize(cur, detailed=detailed)
yield it()
yield marker_name and marker_name['next']
@utils.raises_conn_error
def get(self, name, project=None, detailed=False):
stmt = sa.sql.select([tables.Flavors]).where(
sa.and_(tables.Flavors.c.name == name,
tables.Flavors.c.project == project)
)
flavor = self.driver.run(stmt).fetchone()
if flavor is None:
raise errors.FlavorDoesNotExist(name)
return _normalize(flavor, detailed)
@utils.raises_conn_error
def create(self, name, pool_group, project=None, capabilities=None):
cap = None if capabilities is None else utils.json_encode(capabilities)
try:
stmt = sa.sql.expression.insert(tables.Flavors).values(
name=name, pool_group=pool_group, project=project,
capabilities=cap
)
self.driver.run(stmt)
except oslo_db.exception.DBDuplicateEntry:
if not self._pools_ctrl.get_pools_by_group(pool_group):
raise errors.PoolGroupDoesNotExist(pool_group)
# TODO(flaper87): merge update/create into a single
# method with introduction of upsert
self.update(name, pool_group=pool_group,
project=project,
capabilities=cap)
@utils.raises_conn_error
def exists(self, name, project=None):
stmt = sa.sql.select([tables.Flavors.c.name]).where(
sa.and_(tables.Flavors.c.name == name,
tables.Flavors.c.project == project)
).limit(1)
return self.driver.run(stmt).fetchone() is not None
@utils.raises_conn_error
def update(self, name, project=None, pool_group=None, capabilities=None):
fields = {}
if capabilities is not None:
fields['capabilities'] = capabilities
if pool_group is not None:
fields['pool_group'] = pool_group
assert fields, '`pool_group` or `capabilities` not found in kwargs'
if 'capabilities' in fields:
fields['capabilities'] = utils.json_encode(fields['capabilities'])
stmt = sa.sql.update(tables.Flavors).where(
sa.and_(tables.Flavors.c.name == name,
tables.Flavors.c.project == project)).values(**fields)
res = self.driver.run(stmt)
if res.rowcount == 0:
raise errors.FlavorDoesNotExist(name)
@utils.raises_conn_error
def delete(self, name, project=None):
stmt = sa.sql.expression.delete(tables.Flavors).where(
sa.and_(tables.Flavors.c.name == name,
tables.Flavors.c.project == project)
)
self.driver.run(stmt)
@utils.raises_conn_error
def drop_all(self):
stmt = sa.sql.expression.delete(tables.Flavors)
self.driver.run(stmt)
def _normalize(flavor, detailed=False):
ret = {
'name': flavor[0],
'pool_group': flavor[2],
}
if detailed:
capabilities = flavor[3]
ret['capabilities'] = (utils.json_decode(capabilities)
if capabilities else {})
return ret
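# Illustrative usage sketch (not part of the original module; the controller
# name below is hypothetical): ``list`` is a generator that yields the flavor
# iterator first and the pagination marker second, and the marker is only
# populated once the iterator has been consumed:
#
#     cursor = flavors_controller.list(project='example', limit=10)
#     flavors = list(next(cursor))   # normalized flavor dicts
#     marker = next(cursor)          # name of the last flavor yielded (falsy if none)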
| 33.42
| 79
| 0.629563
|
4a00324a3d08a9818b19fba2d918337b30c143ea
| 14,096
|
py
|
Python
|
django/contrib/messages/tests/base.py
|
mitar/django
|
aa757ac22de3e657df49086cf01a26f6c73b8dfb
|
[
"BSD-3-Clause"
] | 2
|
2020-11-28T20:04:33.000Z
|
2021-07-12T19:42:45.000Z
|
django/contrib/messages/tests/base.py
|
akaariai/django-old
|
45b80c420d6655ec5b86bea3b3c17b4adaa61291
|
[
"BSD-3-Clause"
] | 1
|
2019-02-03T08:41:30.000Z
|
2019-02-03T08:41:30.000Z
|
django/contrib/messages/tests/base.py
|
akaariai/django-old
|
45b80c420d6655ec5b86bea3b3c17b4adaa61291
|
[
"BSD-3-Clause"
] | null | null | null |
from django import http
from django.conf import settings
from django.contrib.messages import constants, utils, get_level, set_level
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.storage import default_storage, base
from django.contrib.messages.storage.base import Message
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.translation import ugettext_lazy
from django.utils.unittest import skipIf
def skipUnlessAuthIsInstalled(func):
return skipIf(
'django.contrib.auth' not in settings.INSTALLED_APPS,
"django.contrib.auth isn't installed")(func)
def add_level_messages(storage):
"""
Adds 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class override_settings_tags(override_settings):
def enable(self):
super(override_settings_tags, self).enable()
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, we need to update that constant too.
self.old_level_tags = base.LEVEL_TAGS
base.LEVEL_TAGS = utils.get_level_tags()
def disable(self):
super(override_settings_tags, self).disable()
base.LEVEL_TAGS = self.old_level_tags
class BaseTest(TestCase):
storage_class = default_storage
urls = 'django.contrib.messages.tests.urls'
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self.settings_override = override_settings_tags(
TEMPLATE_DIRS = (),
MESSAGE_TAGS = '',
MESSAGE_STORAGE = '%s.%s' % (self.storage_class.__module__,
self.storage_class.__name__),
)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Returns the storage backend, setting its loaded data to the ``data``
argument.
This method avoids the storage ``_get`` method from getting called so
that other parts of the storage backend can be tested independent of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, ugettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, tests that messages are properly
stored and then retrieved across the full request/redirect/response
cycle.
"""
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
messages = [Message(self.levels[level], msg) for msg in
data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_with_template_response(self):
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show_template_response')
for level in self.levels.keys():
add_url = reverse('django.contrib.messages.tests.urls.add_template_response',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_multiple_posts(self):
"""
Tests that messages persist properly when multiple POSTs are made
before a GET.
"""
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend([Message(self.levels[level], msg) for msg in
data['messages']])
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertTrue('messages' in response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(
INSTALLED_APPS=filter(
lambda app:app!='django.contrib.messages', settings.INSTALLED_APPS),
MIDDLEWARE_CLASSES=filter(
lambda m:'MessageMiddleware' not in m, settings.MIDDLEWARE_CLASSES),
TEMPLATE_CONTEXT_PROCESSORS=filter(
lambda p:'context_processors.messages' not in p,
settings.TEMPLATE_CONTEXT_PROCESSORS),
MESSAGE_LEVEL=constants.DEBUG
)
def test_middleware_disabled(self):
"""
Tests that, when the middleware is disabled, an exception is raised
when one attempts to store a message.
"""
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.assertRaises(MessageFailure, self.client.post, add_url,
data, follow=True)
@override_settings(
INSTALLED_APPS=filter(
lambda app:app!='django.contrib.messages', settings.INSTALLED_APPS),
MIDDLEWARE_CLASSES=filter(
lambda m:'MessageMiddleware' not in m, settings.MIDDLEWARE_CLASSES),
TEMPLATE_CONTEXT_PROCESSORS=filter(
lambda p:'context_processors.messages' not in p,
settings.TEMPLATE_CONTEXT_PROCESSORS),
MESSAGE_LEVEL=constants.DEBUG
)
def test_middleware_disabled_fail_silently(self):
"""
Tests that, when the middleware is disabled, an exception is not
raised if 'fail_silently' = True
"""
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
'fail_silently': True,
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertFalse('messages' in response.context)
def stored_messages_count(self, storage, response):
"""
Returns the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2',
extra_tags='tag')])
def test_existing_read(self):
"""
Tests that reading the existing storage doesn't cause the data to be
lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assertTrue(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assertTrue(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
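        # With the default INFO (20) level, only the DEBUG (10) message is
        # filtered out, so 5 of the 6 messages added above are recorded.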
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
@override_settings(MESSAGE_LEVEL=29)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', '', 'extra-tag debug', 'warning', 'error',
'success'])
@override_settings_tags(MESSAGE_TAGS={
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
}
)
def test_custom_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', 'custom', 'extra-tag', '', 'bad', 'success'])
| 38.304348
| 89
| 0.625426
|
4a00328cf31ead7031feed51ddca3d8c42f2a869
| 2,165
|
py
|
Python
|
tests/__init__.py
|
onyb/peewee
|
323983c2ecf2ec70a14ed78ddd00cf5cd17d56e2
|
[
"MIT"
] | 1
|
2019-11-17T04:55:26.000Z
|
2019-11-17T04:55:26.000Z
|
tests/__init__.py
|
onyb/peewee
|
323983c2ecf2ec70a14ed78ddd00cf5cd17d56e2
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
onyb/peewee
|
323983c2ecf2ec70a14ed78ddd00cf5cd17d56e2
|
[
"MIT"
] | 1
|
2019-07-07T20:57:22.000Z
|
2019-07-07T20:57:22.000Z
|
import sys
import unittest
from peewee import OperationalError
# Core modules.
from .database import *
from .expressions import *
from .fields import *
from .keys import *
from .manytomany import *
from .models import *
from .model_save import *
from .model_sql import *
from .prefetch_tests import *
from .queries import *
from .regressions import *
from .results import *
from .schema import *
from .sql import *
from .transactions import *
# Extensions.
try:
from .apsw_ext import *
except ImportError:
print('Unable to import APSW extension tests, skipping.')
try:
from .cysqlite import *
except ImportError:
print('Unable to import sqlite C extension tests, skipping.')
from .dataset import *
from .db_url import *
from .extra_fields import *
from .hybrid import *
from .kv import *
from .migrations import *
try:
import mysql.connector
from .mysql_ext import *
except ImportError:
print('Unable to import mysql-connector, skipping mysql_ext tests.')
from .pool import *
try:
from .postgres import *
except ImportError:
print('Unable to import postgres extension tests, skipping.')
except OperationalError:
print('Postgresql test database "peewee_test" not found, skipping '
'the postgres_ext tests.')
from .pwiz_integration import *
from .reflection import *
from .shortcuts import *
from .signals import *
try:
from .sqlcipher_ext import *
except ImportError:
print('Unable to import SQLCipher extension tests, skipping.')
try:
from .sqlite import *
except ImportError:
print('Unable to import sqlite extension tests, skipping.')
from .sqliteq import *
from .sqlite_udf import *
from .test_utils import *
if __name__ == '__main__':
from peewee import print_
print_("""\033[1;31m
______ ______ ______ __ __ ______ ______
/\ == \ /\ ___\ /\ ___\ /\ \ _ \ \ /\ ___\ /\ ___\\
\ \ _-/ \ \ __\ \ \ __\ \ \ \/ ".\ \ \ \ __\ \ \ __\\
\ \_\ \ \_____\ \ \_____\ \ \__/".~\_\ \ \_____\ \ \_____\\
\/_/ \/_____/ \/_____/ \/_/ \/_/ \/_____/ \/_____/
\033[0m""")
unittest.main(argv=sys.argv)
| 27.75641
| 72
| 0.661432
|
4a0032d95a6081a6cebe58eebc1cdd26eb445298
| 10,999
|
py
|
Python
|
conans/test/unittests/tools/cmake/test_cmaketoolchain.py
|
dvirtz/conan
|
21617e5fec1c0b053e5ccf3749cf641d31c0e3a6
|
[
"MIT"
] | 1
|
2022-01-21T05:31:13.000Z
|
2022-01-21T05:31:13.000Z
|
conans/test/unittests/tools/cmake/test_cmaketoolchain.py
|
dvirtz/conan
|
21617e5fec1c0b053e5ccf3749cf641d31c0e3a6
|
[
"MIT"
] | null | null | null |
conans/test/unittests/tools/cmake/test_cmaketoolchain.py
|
dvirtz/conan
|
21617e5fec1c0b053e5ccf3749cf641d31c0e3a6
|
[
"MIT"
] | null | null | null |
import types
import pytest
from mock import Mock
from conan.tools.cmake import CMakeToolchain
from conan.tools.cmake.toolchain import Block, GenericSystemBlock
from conans import ConanFile, Settings
from conans.model.conf import Conf
from conans.model.env_info import EnvValues
@pytest.fixture
def conanfile():
c = ConanFile(Mock(), None)
c.settings = "os", "compiler", "build_type", "arch"
c.initialize(Settings({"os": ["Windows"],
"compiler": {"gcc": {"libcxx": ["libstdc++"]}},
"build_type": ["Release"],
"arch": ["x86"]}), EnvValues())
c.settings.build_type = "Release"
c.settings.arch = "x86"
c.settings.compiler = "gcc"
c.settings.compiler.libcxx = "libstdc++"
c.settings.os = "Windows"
c.conf = Conf()
c.folders.set_base_generators(".")
c._conan_node = Mock()
c._conan_node.dependencies = []
return c
def test_cmake_toolchain(conanfile):
toolchain = CMakeToolchain(conanfile)
content = toolchain.content
assert 'set(CMAKE_BUILD_TYPE "Release"' in content
def test_remove(conanfile):
toolchain = CMakeToolchain(conanfile)
toolchain.blocks.remove("generic_system")
content = toolchain.content
assert 'CMAKE_BUILD_TYPE' not in content
def test_template_remove(conanfile):
toolchain = CMakeToolchain(conanfile)
toolchain.blocks["generic_system"].template = ""
content = toolchain.content
assert 'CMAKE_BUILD_TYPE' not in content
def test_template_change(conanfile):
toolchain = CMakeToolchain(conanfile)
tmp = toolchain.blocks["generic_system"].template
toolchain.blocks["generic_system"].template = tmp.replace("CMAKE_BUILD_TYPE", "OTHER_THING")
content = toolchain.content
assert 'set(OTHER_THING "Release"' in content
def test_context_change(conanfile):
toolchain = CMakeToolchain(conanfile)
tmp = toolchain.blocks["generic_system"]
def context(self):
assert self
return {"build_type": "SuperRelease"}
tmp.context = types.MethodType(context, tmp)
content = toolchain.content
assert 'set(CMAKE_BUILD_TYPE "SuperRelease"' in content
def test_context_update(conanfile):
toolchain = CMakeToolchain(conanfile)
build_type = toolchain.blocks["generic_system"].values["build_type"]
toolchain.blocks["generic_system"].values["build_type"] = "Super" + build_type
content = toolchain.content
assert 'set(CMAKE_BUILD_TYPE "SuperRelease"' in content
def test_context_replace(conanfile):
toolchain = CMakeToolchain(conanfile)
toolchain.blocks["generic_system"].values = {"build_type": "SuperRelease"}
content = toolchain.content
assert 'set(CMAKE_BUILD_TYPE "SuperRelease"' in content
def test_replace_block(conanfile):
toolchain = CMakeToolchain(conanfile)
class MyBlock(Block):
template = "HelloWorld"
def context(self):
return {}
toolchain.blocks["generic_system"] = MyBlock
content = toolchain.content
assert 'HelloWorld' in content
assert 'CMAKE_BUILD_TYPE' not in content
def test_add_new_block(conanfile):
toolchain = CMakeToolchain(conanfile)
class MyBlock(Block):
template = "Hello {{myvar}}!!!"
def context(self):
return {"myvar": "World"}
toolchain.blocks["mynewblock"] = MyBlock
content = toolchain.content
assert 'Hello World!!!' in content
assert 'CMAKE_BUILD_TYPE' in content
def test_extend_block(conanfile):
toolchain = CMakeToolchain(conanfile)
class MyBlock(GenericSystemBlock):
template = "Hello {{build_type}}!!"
def context(self):
c = super(MyBlock, self).context()
c["build_type"] = c["build_type"] + "Super"
return c
toolchain.blocks["generic_system"] = MyBlock
content = toolchain.content
assert 'Hello ReleaseSuper!!' in content
assert 'CMAKE_BUILD_TYPE' not in content
def test_user_toolchain(conanfile):
toolchain = CMakeToolchain(conanfile)
toolchain.blocks["user_toolchain"].user_toolchain = "myowntoolchain.cmake"
content = toolchain.content
assert 'include("myowntoolchain.cmake")' in content
toolchain = CMakeToolchain(conanfile)
content = toolchain.content
assert 'include(' not in content
@pytest.fixture
def conanfile_apple():
c = ConanFile(Mock(), None)
c.settings = "os", "compiler", "build_type", "arch"
c.initialize(Settings({"os": {"Macos": {"version": ["10.15"]}},
"compiler": {"apple-clang": {"libcxx": ["libc++"]}},
"build_type": ["Release"],
"arch": ["x86"]}), EnvValues())
c.settings.build_type = "Release"
c.settings.arch = "x86"
c.settings.compiler = "apple-clang"
c.settings.compiler.libcxx = "libc++"
c.settings.os = "Macos"
c.settings.os.version = "10.15"
c.conf = Conf()
c.folders.set_base_generators(".")
c._conan_node = Mock()
c._conan_node.dependencies = []
return c
def test_osx_deployment_target(conanfile_apple):
toolchain = CMakeToolchain(conanfile_apple)
content = toolchain.content
assert 'set(CMAKE_OSX_DEPLOYMENT_TARGET "10.15" CACHE STRING "")' in content
@pytest.fixture
def conanfile_msvc():
c = ConanFile(Mock(), None)
c.settings = "os", "compiler", "build_type", "arch"
c.initialize(Settings({"os": ["Windows"],
"compiler": {"msvc": {"version": ["193"], "update": [None],
"cppstd": ["20"]}},
"build_type": ["Release"],
"arch": ["x86"]}), EnvValues())
c.settings.build_type = "Release"
c.settings.arch = "x86"
c.settings.compiler = "msvc"
c.settings.compiler.version = "193"
c.settings.compiler.cppstd = "20"
c.settings.os = "Windows"
c.conf = Conf()
c.folders.set_base_generators(".")
c._conan_node = Mock()
c._conan_node.dependencies = []
return c
def test_toolset(conanfile_msvc):
toolchain = CMakeToolchain(conanfile_msvc)
assert 'CMAKE_GENERATOR_TOOLSET "v143"' in toolchain.content
assert 'Visual Studio 17 2022' in toolchain.generator
assert 'CMAKE_CXX_STANDARD 20' in toolchain.content
@pytest.fixture
def conanfile_linux():
c = ConanFile(Mock(), None)
c.settings = "os", "compiler", "build_type", "arch"
c.initialize(Settings({"os": ["Linux"],
"compiler": {"gcc": {"version": ["11"], "cppstd": ["20"]}},
"build_type": ["Release"],
"arch": ["x86_64"]}), EnvValues())
c.settings.build_type = "Release"
c.settings.arch = "x86_64"
c.settings.compiler = "gcc"
c.settings.compiler.version = "11"
c.settings.compiler.cppstd = "20"
c.settings.os = "Linux"
c.conf = Conf()
c.folders.set_base_generators(".")
c._conan_node = Mock()
c._conan_node.dependencies = []
return c
def test_no_fpic_when_not_an_option(conanfile_linux):
toolchain = CMakeToolchain(conanfile_linux)
content = toolchain.content
assert 'set(CMAKE_POSITION_INDEPENDENT_CODE' not in content
@pytest.fixture
def conanfile_linux_shared():
c = ConanFile(Mock(), None)
c.settings = "os", "compiler", "build_type", "arch"
c.options = {
"fPIC": [True, False],
"shared": [True, False],
}
c.default_options = {"fPIC": False, "shared": True, }
c.initialize(Settings({"os": ["Linux"],
"compiler": {"gcc": {"version": ["11"], "cppstd": ["20"]}},
"build_type": ["Release"],
"arch": ["x86_64"]}), EnvValues())
c.settings.build_type = "Release"
c.settings.arch = "x86_64"
c.settings.compiler = "gcc"
c.settings.compiler.version = "11"
c.settings.compiler.cppstd = "20"
c.settings.os = "Linux"
c.conf = Conf()
c.folders.set_base_generators(".")
c._conan_node = Mock()
c._conan_node.dependencies = []
return c
@pytest.mark.parametrize("fPIC", [True, False])
def test_fpic_when_shared_true(conanfile_linux_shared, fPIC):
conanfile_linux_shared.options.fPIC = fPIC
toolchain = CMakeToolchain(conanfile_linux_shared)
cmake_value = 'ON' if fPIC else 'OFF'
content = toolchain.content
assert 'set(CMAKE_POSITION_INDEPENDENT_CODE {})'.format(cmake_value) in content
def test_fpic_when_not_shared(conanfile_linux_shared):
conanfile_linux_shared.options.shared = False
toolchain = CMakeToolchain(conanfile_linux_shared)
content = toolchain.content
assert 'set(CMAKE_POSITION_INDEPENDENT_CODE' in content
@pytest.fixture
def conanfile_windows_fpic():
c = ConanFile(Mock(), None)
c.settings = "os", "compiler", "build_type", "arch"
c.options = {"fPIC": [True, False], }
c.default_options = {"fPIC": True, }
c.initialize(Settings({"os": ["Windows"],
"compiler": {"gcc": {"libcxx": ["libstdc++"]}},
"build_type": ["Release"],
"arch": ["x86"]}), EnvValues())
c.settings.build_type = "Release"
c.settings.arch = "x86"
c.settings.compiler = "gcc"
c.settings.compiler.libcxx = "libstdc++"
c.settings.os = "Windows"
c.conf = Conf()
c.folders.set_base_generators(".")
c._conan_node = Mock()
c._conan_node.dependencies = []
return c
def test_no_fpic_on_windows(conanfile_windows_fpic):
toolchain = CMakeToolchain(conanfile_windows_fpic)
content = toolchain.content
assert 'set(CMAKE_POSITION_INDEPENDENT_CODE' not in content
@pytest.fixture
def conanfile_linux_fpic():
c = ConanFile(Mock(), None)
c.settings = "os", "compiler", "build_type", "arch"
c.options = {"fPIC": [True, False], }
c.default_options = {"fPIC": False, }
c.initialize(Settings({"os": ["Linux"],
"compiler": {"gcc": {"version": ["11"], "cppstd": ["20"]}},
"build_type": ["Release"],
"arch": ["x86_64"]}), EnvValues())
c.settings.build_type = "Release"
c.settings.arch = "x86_64"
c.settings.compiler = "gcc"
c.settings.compiler.version = "11"
c.settings.compiler.cppstd = "20"
c.settings.os = "Linux"
c.conf = Conf()
c.folders.set_base_generators(".")
c._conan_node = Mock()
c._conan_node.dependencies = []
return c
def test_fpic_disabled(conanfile_linux_fpic):
conanfile_linux_fpic.options.fPIC = False
toolchain = CMakeToolchain(conanfile_linux_fpic)
content = toolchain.content
assert 'set(CMAKE_POSITION_INDEPENDENT_CODE OFF' in content
def test_fpic_enabled(conanfile_linux_fpic):
conanfile_linux_fpic.options.fPIC = True
toolchain = CMakeToolchain(conanfile_linux_fpic)
content = toolchain.content
assert 'set(CMAKE_POSITION_INDEPENDENT_CODE ON' in content
| 32.931138
| 96
| 0.643604
|
4a00340976f0480f9d96b50f781f86e4b9beec6c
| 30,338
|
py
|
Python
|
tensorflow_tts/models/tacotron2.py
|
ashishpatel26/TensorflowTTS
|
bd29c3eefa51041b76fd355d94025b4c13084296
|
[
"Apache-2.0"
] | 2
|
2020-06-01T07:39:25.000Z
|
2021-11-08T09:31:33.000Z
|
tensorflow_tts/models/tacotron2.py
|
ashishpatel26/TensorflowTTS
|
bd29c3eefa51041b76fd355d94025b4c13084296
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_tts/models/tacotron2.py
|
ashishpatel26/TensorflowTTS
|
bd29c3eefa51041b76fd355d94025b4c13084296
|
[
"Apache-2.0"
] | 1
|
2020-10-05T06:06:20.000Z
|
2020-10-05T06:06:20.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 The Tacotron-2 Authors, Minh Nguyen (@dathudeptrai) and Eren Gölge (@erogol)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tacotron-2 Modules."""
import collections
import numpy as np
import tensorflow as tf
from tensorflow_addons.seq2seq import Sampler
from tensorflow_addons.seq2seq import BahdanauAttention
from tensorflow_addons.seq2seq import dynamic_decode
from tensorflow_addons.seq2seq import Decoder
def get_initializer(initializer_range=0.02):
"""Creates a `tf.initializers.truncated_normal` with the given range.
Args:
initializer_range: float, initializer range for stddev.
Returns:
TruncatedNormal initializer with stddev = `initializer_range`.
"""
return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
def gelu(x):
"""Gaussian Error Linear unit."""
cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
return x * cdf
def gelu_new(x):
"""Smoother gaussian Error Linear Unit."""
cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
def swish(x):
"""Swish activation function."""
return x * tf.sigmoid(x)
def mish(x):
return x * tf.math.tanh(tf.math.softplus(x))
ACT2FN = {
"identity": tf.keras.layers.Activation('linear'),
"tanh": tf.keras.layers.Activation('tanh'),
"gelu": tf.keras.layers.Activation(gelu),
"relu": tf.keras.activations.relu,
"swish": tf.keras.layers.Activation(swish),
"gelu_new": tf.keras.layers.Activation(gelu_new),
"mish": tf.keras.layers.Activation(mish)
}
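# Illustrative usage (not part of the original module): activation functions
# are looked up by name from ACT2FN and applied like any Keras activation, e.g.
#
#     act = ACT2FN["mish"]
#     y = act(tf.constant([-1.0, 0.0, 1.0]))   # mish(x) = x * tanh(softplus(x))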
class TFTacotronConvBatchNorm(tf.keras.layers.Layer):
"""Tacotron-2 Convolutional Batchnorm module."""
def __init__(self, filters, kernel_size, dropout_rate, activation=None, name_idx=None):
super().__init__()
self.conv1d = tf.keras.layers.Conv1D(filters,
kernel_size,
kernel_initializer=get_initializer(0.02),
padding='same',
name='conv_._{}'.format(name_idx))
self.norm = tf.keras.layers.BatchNormalization(axis=-1, name='batch_norm_._{}'.format(name_idx))
self.dropout = tf.keras.layers.Dropout(rate=dropout_rate, name='dropout_._{}'.format(name_idx))
self.act = ACT2FN[activation]
def call(self, inputs, training=False):
outputs = self.conv1d(inputs)
outputs = self.norm(outputs, training=training)
outputs = self.act(outputs)
outputs = self.dropout(outputs, training=training)
return outputs
class TFTacotronEmbeddings(tf.keras.layers.Layer):
"""Construct character/phoneme/positional/speaker embeddings."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.embedding_hidden_size = config.embedding_hidden_size
self.initializer_range = config.initializer_range
self.config = config
if config.n_speakers > 1:
self.speaker_embeddings = tf.keras.layers.Embedding(
config.n_speakers,
config.embedding_hidden_size,
embeddings_initializer=get_initializer(self.initializer_range),
name="speaker_embeddings"
)
self.speaker_fc = tf.keras.layers.Dense(units=config.embedding_hidden_size, name='speaker_fc')
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.embedding_dropout_prob)
def build(self, input_shape):
"""Build shared character/phoneme embedding layers."""
with tf.name_scope("character_embeddings"):
self.character_embeddings = self.add_weight(
"weight",
shape=[self.vocab_size, self.embedding_hidden_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
def call(self, inputs, training=False):
"""Get character embeddings of inputs.
Args:
1. character, Tensor (int32) shape [batch_size, length].
2. speaker_id, Tensor (int32) shape [batch_size]
Returns:
Tensor (float32) shape [batch_size, length, embedding_size].
"""
return self._embedding(inputs, training=training)
def _embedding(self, inputs, training=False):
"""Applies embedding based on inputs tensor."""
input_ids, speaker_ids = inputs
# create embeddings
inputs_embeds = tf.gather(self.character_embeddings, input_ids)
embeddings = inputs_embeds
if self.config.n_speakers > 1:
speaker_embeddings = self.speaker_embeddings(speaker_ids)
speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))
# extended speaker embeddings
extended_speaker_features = speaker_features[:, tf.newaxis, :]
# sum all embedding
embeddings += extended_speaker_features
# apply layer-norm and dropout for embeddings.
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings, training=training)
return embeddings
class TFTacotronEncoderConvs(tf.keras.layers.Layer):
"""Tacotron-2 Encoder Convolutional Batchnorm module."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.conv_batch_norm = []
for i in range(config.n_conv_encoder):
conv = TFTacotronConvBatchNorm(
filters=config.encoder_conv_filters,
kernel_size=config.encoder_conv_kernel_sizes,
activation=config.encoder_conv_activation,
dropout_rate=config.encoder_conv_dropout_rate,
name_idx=i)
self.conv_batch_norm.append(conv)
def call(self, inputs, training=False):
"""Call logic."""
outputs = inputs
for conv in self.conv_batch_norm:
outputs = conv(outputs, training=training)
return outputs
class TFTacotronEncoder(tf.keras.layers.Layer):
"""Tacotron-2 Encoder."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.embeddings = TFTacotronEmbeddings(config, name='embeddings')
self.convbn = TFTacotronEncoderConvs(config, name='conv_batch_norm')
self.bilstm = tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(units=config.encoder_lstm_units, return_sequences=True),
name='bilstm'
)
def call(self, inputs, training=False):
"""Call logic."""
input_ids, speaker_ids, input_mask = inputs
# create embedding and mask them since we sum
# speaker embedding to all character embedding.
input_embeddings = self.embeddings([input_ids, speaker_ids], training=training)
# pass embeddings to convolution batch norm
conv_outputs = self.convbn(input_embeddings, training=training)
# bi-lstm.
outputs = self.bilstm(conv_outputs, mask=input_mask)
return outputs
class TrainingSampler(Sampler):
"""Training sampler for Seq2Seq training."""
def __init__(self,
config,
):
super().__init__()
self.config = config
# create schedule factor.
        # the input of the next decoder cell is calculated by the formula:
        # next_inputs = ratio * prev_groundtruth_outputs + (1.0 - ratio) * prev_predicted_outputs
        # (an illustrative note on this blend follows the class definition).
self._ratio = tf.constant(1.0, dtype=tf.float32)
self._reduction_factor = self.config.reduction_factor
def setup_target(self, targets, mel_lengths):
"""Setup ground-truth mel outputs for decoder."""
self.mel_lengths = mel_lengths
self.set_batch_size(tf.shape(targets)[0])
self.targets = targets[:, self._reduction_factor - 1::self._reduction_factor, :]
self.max_lengths = tf.tile([tf.shape(self.targets)[1]], [self._batch_size])
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return tf.TensorShape([])
@property
def sample_ids_dtype(self):
return tf.int32
@property
def reduction_factor(self):
return self._reduction_factor
def initialize(self):
"""Return (Finished, next_inputs)."""
return (tf.tile([False], [self._batch_size]),
tf.tile([[0.0]], [self._batch_size, self.config.n_mels]))
def sample(self, time, outputs, state):
return tf.tile([0], [self._batch_size])
def next_inputs(self, time, outputs, state, sample_ids, **kwargs):
finished = (time + 1 >= self.max_lengths)
next_inputs = self._ratio * self.targets[:, time, :] + \
(1.0 - self._ratio) * outputs[:, -self.config.n_mels:]
next_state = state
return (finished, next_inputs, next_state)
def set_batch_size(self, batch_size):
self._batch_size = batch_size
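# Illustrative note on the scheduled-sampling blend above (assumed values, not part
# of the original file). With _ratio = 1.0, as initialised in __init__, next_inputs
# reduces to pure teacher forcing:
#
#   next_inputs = 1.0 * targets[:, time, :] + (1.0 - 1.0) * outputs[:, -n_mels:]
#
# whereas a ratio of 0.0 would feed back only the model's own predictions.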
class TestingSampler(TrainingSampler):
"""Testing sampler for Seq2Seq training."""
def __init__(self,
config,
):
super().__init__(config)
def next_inputs(self, time, outputs, state, sample_ids, **kwargs):
stop_token_prediction = kwargs.get("stop_token_prediction")
stop_token_prediction = tf.nn.sigmoid(stop_token_prediction)
finished = tf.cast(tf.round(stop_token_prediction), tf.bool)
finished = tf.reduce_all(finished)
next_inputs = outputs[:, -self.config.n_mels:]
next_state = state
return (finished, next_inputs, next_state)
class TFTacotronLocationSensitiveAttention(BahdanauAttention):
"""Tacotron-2 Location Sensitive Attention module."""
def __init__(self,
config,
memory,
mask_encoder=True,
memory_sequence_length=None,
is_cumulate=True):
"""Init variables."""
memory_length = memory_sequence_length if (mask_encoder is True) else None
super().__init__(
units=config.attention_dim,
memory=memory,
memory_sequence_length=memory_length,
probability_fn="softmax",
name="LocationSensitiveAttention"
)
self.location_convolution = tf.keras.layers.Conv1D(
filters=config.attention_filters,
kernel_size=config.attention_kernel,
padding='same',
use_bias=False,
name='location_conv'
)
self.location_layer = tf.keras.layers.Dense(units=config.attention_dim,
use_bias=False,
name='location_layer')
self.v = tf.keras.layers.Dense(1, use_bias=True, name='scores_attention')
self.config = config
self.is_cumulate = is_cumulate
self.use_window = False
def setup_window(self, win_front=2, win_back=4):
self.win_front = tf.constant(win_front, tf.int32)
self.win_back = tf.constant(win_back, tf.int32)
self._indices = tf.expand_dims(tf.range(tf.shape(self.keys)[1]), 0)
self._indices = tf.tile(self._indices, [tf.shape(self.keys)[0], 1]) # [batch_size, max_time]
self.use_window = True
def _compute_window_mask(self, max_alignments):
"""Compute window mask for inference.
Args:
max_alignments (int): [batch_size]
"""
expanded_max_alignments = tf.expand_dims(max_alignments, 1) # [batch_size, 1]
low = expanded_max_alignments - self.win_front
high = expanded_max_alignments + self.win_back
mlow = tf.cast((self._indices < low), tf.float32)
mhigh = tf.cast((self._indices > high), tf.float32)
mask = mlow + mhigh
return mask # [batch_size, max_length]
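    # Worked example for the mask above (assumed values): with win_front=2,
    # win_back=4, a memory of length 10 and prev_max_alignments == 5, indices
    # 0..2 receive mask 1.0 and indices 3..9 receive mask 0.0; __call__ then adds
    # mask * -1e20 to the energies, excluding the masked positions from the softmax.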
def __call__(self, inputs, training=False):
query, state, prev_max_alignments = inputs
processed_query = self.query_layer(query) if self.query_layer else query
processed_query = tf.expand_dims(processed_query, 1)
expanded_alignments = tf.expand_dims(state, axis=2)
f = self.location_convolution(expanded_alignments)
processed_location_features = self.location_layer(f)
energy = self._location_sensitive_score(processed_query,
processed_location_features,
self.keys)
# mask energy on inference steps.
if self.use_window is True:
window_mask = self._compute_window_mask(prev_max_alignments)
energy = energy + window_mask * -1e20
alignments = self.probability_fn(energy, state)
if self.is_cumulate:
state = alignments + state
else:
state = alignments
expanded_alignments = tf.expand_dims(alignments, 2)
context = tf.reduce_sum(expanded_alignments * self.values, 1)
return context, alignments, state
def _location_sensitive_score(self, W_query, W_fil, W_keys):
"""Calculate location sensitive energy."""
return tf.squeeze(self.v(tf.nn.tanh(W_keys + W_query + W_fil)), -1)
def get_initial_state(self, batch_size, size):
"""Get initial alignments."""
return tf.zeros(shape=[batch_size, size], dtype=tf.float32)
def get_initial_context(self, batch_size):
"""Get initial attention."""
return tf.zeros(shape=[batch_size, self.config.encoder_lstm_units * 2], dtype=tf.float32)
class TFTacotronPrenet(tf.keras.layers.Layer):
"""Tacotron-2 prenet."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.prenet_dense = [
tf.keras.layers.Dense(units=config.prenet_units,
activation=ACT2FN[config.prenet_activation],
name='dense_._{}'.format(i))
for i in range(config.n_prenet_layers)
]
self.dropout = tf.keras.layers.Dropout(rate=config.prenet_dropout_rate, name='dropout')
def call(self, inputs, training=False):
"""Call logic."""
outputs = inputs
for layer in self.prenet_dense:
outputs = layer(outputs)
outputs = self.dropout(outputs, training=True)
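            # Note: training=True here is consistent with Tacotron-2, which keeps
            # prenet dropout active even at inference to add variation to the
            # autoregressive decoder inputs.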
return outputs
class TFTacotronPostnet(tf.keras.layers.Layer):
"""Tacotron-2 postnet."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.conv_batch_norm = []
for i in range(config.n_conv_postnet):
conv = TFTacotronConvBatchNorm(
filters=config.postnet_conv_filters,
kernel_size=config.postnet_conv_kernel_sizes,
dropout_rate=config.postnet_dropout_rate,
activation='identity' if i + 1 == config.n_conv_postnet else 'tanh',
name_idx=i
)
self.conv_batch_norm.append(conv)
def call(self, inputs, training=False):
"""Call logic."""
outputs = inputs
        for conv in self.conv_batch_norm:
outputs = conv(outputs, training=training)
return outputs
TFTacotronDecoderCellState = collections.namedtuple(
'TFTacotronDecoderCellState',
['attention_lstm_state',
'decoder_lstms_state',
'context',
'time',
'state',
'alignment_history',
'max_alignments'])
TFDecoderOutput = collections.namedtuple(
"TFDecoderOutput", ("mel_output", "token_output", "sample_id"))
class TFTacotronDecoderCell(tf.keras.layers.AbstractRNNCell):
"""Tacotron-2 custom decoder cell."""
def __init__(self, config, training, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.training = training
self.prenet = TFTacotronPrenet(config, name='prenet')
# define lstm cell on decoder.
# TODO(@dathudeptrai) switch to zone-out lstm.
self.attention_lstm = tf.keras.layers.LSTMCell(units=config.decoder_lstm_units,
name='attention_lstm_cell')
lstm_cells = []
for i in range(config.n_lstm_decoder):
lstm_cell = tf.keras.layers.LSTMCell(units=config.decoder_lstm_units,
name='lstm_cell_._{}'.format(i))
lstm_cells.append(lstm_cell)
self.decoder_lstms = tf.keras.layers.StackedRNNCells(lstm_cells,
name='decoder_lstms')
# define attention layer.
if config.attention_type == 'lsa':
# create location-sensitive attention.
self.attention_layer = TFTacotronLocationSensitiveAttention(
config,
memory=None,
mask_encoder=True,
memory_sequence_length=None,
is_cumulate=True
)
else:
raise ValueError("Only lsa (location-sensitive attention) is supported")
# frame, stop projection layer.
self.frame_projection = tf.keras.layers.Dense(
units=config.n_mels * config.reduction_factor, name='frame_projection')
self.stop_projection = tf.keras.layers.Dense(units=config.reduction_factor, name='stop_projection')
self.config = config
def set_alignment_size(self, alignment_size):
self.alignment_size = alignment_size
@property
def output_size(self):
"""Return output (mel) size."""
return self.frame_projection.units
@property
def state_size(self):
"""Return hidden state size."""
return TFTacotronDecoderCellState(
attention_lstm_state=self.attention_lstm.state_size,
decoder_lstms_state=self.decoder_lstms.state_size,
time=tf.TensorShape([]),
            context=self.config.attention_dim,
state=self.alignment_size,
alignment_history=(),
max_alignments=tf.TensorShape([1]),
)
def get_initial_state(self, batch_size):
"""Get initial states."""
initial_attention_lstm_cell_states = self.attention_lstm.get_initial_state(None, batch_size, dtype=tf.float32)
initial_decoder_lstms_cell_states = self.decoder_lstms.get_initial_state(None, batch_size, dtype=tf.float32)
initial_context = tf.zeros(shape=[batch_size, self.config.encoder_lstm_units * 2], dtype=tf.float32)
initial_state = self.attention_layer.get_initial_state(batch_size, size=self.alignment_size)
initial_alignment_history = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
return TFTacotronDecoderCellState(
attention_lstm_state=initial_attention_lstm_cell_states,
decoder_lstms_state=initial_decoder_lstms_cell_states,
time=tf.zeros([], dtype=tf.int32),
context=initial_context,
state=initial_state,
alignment_history=initial_alignment_history,
max_alignments=tf.zeros([batch_size], dtype=tf.int32),
)
def call(self, inputs, states):
"""Call logic."""
decoder_input = inputs
# 1. apply prenet for decoder_input.
prenet_out = self.prenet(decoder_input, training=self.training) # [batch_size, dim]
# 2. concat prenet_out and prev context vector
# then use it as input of attention lstm layer.
attention_lstm_input = tf.concat([prenet_out, states.context], axis=-1)
attention_lstm_output, next_attention_lstm_state = self.attention_lstm(
attention_lstm_input, states.attention_lstm_state)
# 3. compute context, alignment and cumulative alignment.
prev_state = states.state
prev_alignment_history = states.alignment_history
prev_max_alignments = states.max_alignments
context, alignments, state = self.attention_layer(
[attention_lstm_output,
prev_state,
prev_max_alignments],
training=self.training
)
# 4. run decoder lstm(s)
decoder_lstms_input = tf.concat([attention_lstm_output, context], axis=-1)
decoder_lstms_output, next_decoder_lstms_state = self.decoder_lstms(
decoder_lstms_input,
states.decoder_lstms_state
)
# 5. compute frame feature and stop token.
projection_inputs = tf.concat([decoder_lstms_output, context], axis=-1)
decoder_outputs = self.frame_projection(projection_inputs)
stop_inputs = tf.concat([decoder_lstms_output, decoder_outputs], axis=-1)
stop_tokens = self.stop_projection(stop_inputs)
# 6. save alignment history to visualize.
alignment_history = prev_alignment_history.write(states.time, alignments)
# 7. return new states.
new_states = TFTacotronDecoderCellState(
attention_lstm_state=next_attention_lstm_state,
decoder_lstms_state=next_decoder_lstms_state,
time=states.time + 1,
context=context,
state=state,
alignment_history=alignment_history,
max_alignments=tf.argmax(alignments, -1, output_type=tf.int32)
)
return (decoder_outputs, stop_tokens), new_states
class TFTacotronDecoder(Decoder):
"""Tacotron-2 Decoder."""
def __init__(self,
decoder_cell,
decoder_sampler,
output_layer=None):
"""Initial variables."""
self.cell = decoder_cell
self.sampler = decoder_sampler
self.output_layer = output_layer
def setup_decoder_init_state(self, decoder_init_state):
self.initial_state = decoder_init_state
def initialize(self, **kwargs):
return self.sampler.initialize() + (self.initial_state,)
@property
def output_size(self):
return TFDecoderOutput(
mel_output=tf.nest.map_structure(
lambda shape: tf.TensorShape(shape), self.cell.output_size),
token_output=tf.TensorShape(self.sampler.reduction_factor),
sample_id=self.sampler.sample_ids_shape
)
@property
def output_dtype(self):
return TFDecoderOutput(
tf.float32,
tf.float32,
self.sampler.sample_ids_dtype
)
@property
def batch_size(self):
return self.sampler._batch_size
def step(self, time, inputs, state, training=False):
(mel_outputs, stop_tokens), cell_state = self.cell(inputs, state, training=training)
if self.output_layer is not None:
mel_outputs = self.output_layer(mel_outputs)
sample_ids = self.sampler.sample(
time=time, outputs=mel_outputs, state=cell_state
)
(finished, next_inputs, next_state) = self.sampler.next_inputs(
time=time,
outputs=mel_outputs,
state=cell_state,
sample_ids=sample_ids,
stop_token_prediction=stop_tokens
)
outputs = TFDecoderOutput(mel_outputs, stop_tokens, sample_ids)
return (outputs, next_state, next_inputs, finished)
class TFTacotron2(tf.keras.Model):
"""Tensorflow tacotron-2 model."""
def __init__(self, config, training, **kwargs):
"""Initalize tacotron-2 layers."""
        super().__init__(**kwargs)
self.encoder = TFTacotronEncoder(config, name='encoder')
self.decoder_cell = TFTacotronDecoderCell(config, training=training, name='decoder_cell')
self.decoder = TFTacotronDecoder(
self.decoder_cell,
TrainingSampler(config) if training is True else TestingSampler(config)
)
self.postnet = TFTacotronPostnet(config, name='post_net')
self.post_projection = tf.keras.layers.Dense(units=config.n_mels,
name='residual_projection')
self.config = config
def _build(self):
input_ids = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9]])
input_lengths = np.array([9])
speaker_ids = np.array([0])
mel_outputs = np.random.normal(size=(1, 50, 80)).astype(np.float32)
mel_lengths = np.array([50])
self(input_ids, input_lengths, speaker_ids, mel_outputs, mel_lengths, 10, training=True)
@tf.function(experimental_relax_shapes=True)
def call(self,
input_ids,
input_lengths,
speaker_ids,
mel_outputs,
mel_lengths,
maximum_iterations=tf.constant(2000, tf.int32),
use_window_mask=False,
win_front=2,
win_back=3,
training=False):
"""Call logic."""
# create input-mask based on input_lengths
input_mask = tf.sequence_mask(input_lengths,
maxlen=tf.reduce_max(input_lengths),
name='input_sequence_masks')
# Encoder Step.
encoder_hidden_states = self.encoder([input_ids, speaker_ids, input_mask], training=training)
batch_size = tf.shape(encoder_hidden_states)[0]
alignment_size = tf.shape(encoder_hidden_states)[1]
# Setup some initial placeholders for decoder step. Include:
# 1. mel_outputs, mel_lengths for teacher forcing mode.
# 2. alignment_size for attention size.
# 3. initial state for decoder cell.
# 4. memory (encoder hidden state) for attention mechanism.
self.decoder.sampler.setup_target(targets=mel_outputs, mel_lengths=mel_lengths)
self.decoder.cell.set_alignment_size(alignment_size)
self.decoder.setup_decoder_init_state(
self.decoder.cell.get_initial_state(batch_size)
)
self.decoder.cell.attention_layer.setup_memory(
memory=encoder_hidden_states,
memory_sequence_length=input_lengths # use for mask attention.
)
if use_window_mask:
self.decoder.cell.attention_layer.setup_window(win_front=win_front, win_back=win_back)
# run decode step.
(frames_prediction, stop_token_prediction, _), final_decoder_state, _ = dynamic_decode(
self.decoder,
maximum_iterations=maximum_iterations
)
decoder_output = tf.reshape(frames_prediction, [batch_size, -1, self.config.n_mels])
stop_token_prediction = tf.reshape(stop_token_prediction, [batch_size, -1])
residual = self.postnet(decoder_output, training=training)
residual_projection = self.post_projection(residual)
mel_outputs = decoder_output + residual_projection
alignment_history = tf.transpose(final_decoder_state.alignment_history.stack(), [1, 2, 0])
return decoder_output, mel_outputs, stop_token_prediction, alignment_history
@tf.function(experimental_relax_shapes=True)
def inference(self,
input_ids,
input_lengths,
speaker_ids,
use_window_mask=False,
win_front=2,
win_back=4,
maximum_iterations=tf.constant(2000, dtype=tf.int32)):
"""Call logic."""
# create input-mask based on input_lengths
input_mask = tf.sequence_mask(input_lengths,
maxlen=tf.reduce_max(input_lengths),
name='input_sequence_masks')
# Encoder Step.
encoder_hidden_states = self.encoder([input_ids, speaker_ids, input_mask], training=False)
batch_size = tf.shape(encoder_hidden_states)[0]
alignment_size = tf.shape(encoder_hidden_states)[1]
# Setup some initial placeholders for decoder step. Include:
# 1. batch_size for inference.
# 2. alignment_size for attention size.
# 3. initial state for decoder cell.
# 4. memory (encoder hidden state) for attention mechanism.
# 5. window front/back to solve long sentence synthesize problems. (call after setup memory.)
self.decoder.sampler.set_batch_size(batch_size)
self.decoder.cell.set_alignment_size(alignment_size)
self.decoder.setup_decoder_init_state(
self.decoder.cell.get_initial_state(batch_size)
)
self.decoder.cell.attention_layer.setup_memory(
memory=encoder_hidden_states,
memory_sequence_length=input_lengths # use for mask attention.
)
if use_window_mask:
self.decoder.cell.attention_layer.setup_window(win_front=win_front, win_back=win_back)
# run decode step.
(frames_prediction, stop_token_prediction, _), final_decoder_state, _ = dynamic_decode(
self.decoder,
maximum_iterations=maximum_iterations
)
decoder_output = tf.reshape(frames_prediction, [batch_size, -1, self.config.n_mels])
stop_token_prediction = tf.reshape(stop_token_prediction, [batch_size, -1])
residual = self.postnet(decoder_output, training=False)
residual_projection = self.post_projection(residual)
mel_outputs = decoder_output + residual_projection
alignment_history = tf.transpose(final_decoder_state.alignment_history.stack(), [1, 2, 0])
return decoder_output, mel_outputs, stop_token_prediction, alignment_history
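# Minimal end-to-end sketch (assumptions: `config` exposes vocab_size, n_mels,
# reduction_factor and the other fields referenced above; not part of the original file):
#
#   model = TFTacotron2(config, training=False, name='tacotron2')
#   model._build()  # trace once with dummy inputs
#   decoder_out, mel_out, stop_probs, alignments = model.inference(
#       input_ids, input_lengths, speaker_ids)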
| 38.795396
| 118
| 0.639495
|
4a0034d9ce7307305e4162fb6679bb5cef7663b2
| 215
|
py
|
Python
|
Python/Mundo 2/ex050.py
|
henrique-tavares/Coisas
|
f740518b1bedec5b0ea8c12ae07a2cac21eb51ae
|
[
"MIT"
] | 1
|
2020-02-07T20:39:26.000Z
|
2020-02-07T20:39:26.000Z
|
Python/Mundo 2/ex050.py
|
neptune076/Coisas
|
85c064cc0e134465aaf6ef41acf747d47f108fc9
|
[
"MIT"
] | null | null | null |
Python/Mundo 2/ex050.py
|
neptune076/Coisas
|
85c064cc0e134465aaf6ef41acf747d47f108fc9
|
[
"MIT"
] | null | null | null |
print("")
soma = 0
for i in range(6):
n = float(input("Digite o {}° número: ".format(i+1)))
if (n % 2 == 0):
soma += n
print("\nA soma dos valores pares é igual a {}".format(soma), end="\n\n")
| 21.5
| 73
| 0.52093
|
4a003596fa6d669903c2e48aed4710fb168a0f55
| 1,424
|
py
|
Python
|
gallery/migrations/0001_initial.py
|
macymuhia/Photogenic
|
b96a16b6630a33da829c300e58c20717a38aee83
|
[
"MIT"
] | null | null | null |
gallery/migrations/0001_initial.py
|
macymuhia/Photogenic
|
b96a16b6630a33da829c300e58c20717a38aee83
|
[
"MIT"
] | 8
|
2020-06-05T22:50:42.000Z
|
2021-09-08T01:16:16.000Z
|
gallery/migrations/0001_initial.py
|
macymuhia/Photogenic
|
b96a16b6630a33da829c300e58c20717a38aee83
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.4 on 2019-08-23 15:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cat', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('place', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='gallery/')),
('image_name', models.CharField(max_length=10)),
('image_description', models.CharField(max_length=30)),
('category', models.ManyToManyField(to='gallery.Category')),
('location', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='gallery.Location')),
],
),
]
| 34.731707
| 119
| 0.570225
|
4a0035f7bf82f3e394bafdab8bd7fe18465adcee
| 544
|
py
|
Python
|
index.py
|
Irony00/FlapPyBird-Reinforcement-Learning
|
2369378520a539df1955b48b2ed968e30ff1e903
|
[
"MIT"
] | 1
|
2022-02-13T01:30:39.000Z
|
2022-02-13T01:30:39.000Z
|
index.py
|
Irony00/FlapPyBird
|
2369378520a539df1955b48b2ed968e30ff1e903
|
[
"MIT"
] | null | null | null |
index.py
|
Irony00/FlapPyBird
|
2369378520a539df1955b48b2ed968e30ff1e903
|
[
"MIT"
] | null | null | null |
import time
import flappy_bird_gym
env = flappy_bird_gym.make("FlappyBird-v0")
obs = env.reset()
while True:
# Next action:
# (feed the observation to your agent here)
action = env.action_space.sample() # env.action_space.sample() for a random action
# Processing:
obs, reward, done, info = env.step(action)
# Rendering the game:
# (remove this two lines during training)
env.render()
time.sleep(1 / 30) # FPS
# Checking if the player is still alive
if done:
break
env.close()
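# Sketch (assumption, not part of the original script): a trained policy would replace
# the random action above, e.g.
#
#   action = agent.act(obs)   # `agent` is whatever policy you have trained
#
# where obs is the small observation vector returned by flappy_bird_gym.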
| 22.666667
| 87
| 0.650735
|
4a003641ada43efc280c95016f9fc649d8062cac
| 47,783
|
py
|
Python
|
nipy/neurospin/group/spatial_relaxation_onesample.py
|
fperez/nipy
|
559f17150bd9fa8ead4fd088b330d7cf7db7aa79
|
[
"BSD-3-Clause"
] | 1
|
2015-05-07T16:53:33.000Z
|
2015-05-07T16:53:33.000Z
|
nipy/neurospin/group/spatial_relaxation_onesample.py
|
fperez/nipy
|
559f17150bd9fa8ead4fd088b330d7cf7db7aa79
|
[
"BSD-3-Clause"
] | null | null | null |
nipy/neurospin/group/spatial_relaxation_onesample.py
|
fperez/nipy
|
559f17150bd9fa8ead4fd088b330d7cf7db7aa79
|
[
"BSD-3-Clause"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#####################################################################################
# BAYESIAN MODEL SELECTION FOR ACTIVATION DETECTION ON FMRI GROUP DATA
# Merlin Keller, 2009
import numpy as np
import scipy.special as sp
from routines import add_lines
from displacement_field import displacement_field
#####################################################################################
# some useful functions
def log_gammainv_pdf(x, a, b):
"""
    log density of the inverse gamma distribution with shape a and scale b,
    evaluated at point x (the log-Gamma term is computed with sp.gammaln)
"""
return a * np.log(b) - sp.gammaln(a) - (a + 1) * np.log(x) - b / x
def log_gaussian_pdf(x, m, v):
"""
log density of the gaussian distribution with mean m and variance v at point x
"""
return -0.5 * (np.log(2 * np.pi * v) + (x - m)**2 / v)
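# For reference (standard formulas, matching the two helpers above):
#   inverse-gamma: log p(x; a, b) = a*log(b) - lgamma(a) - (a + 1)*log(x) - b/x
#   gaussian:      log p(x; m, v) = -0.5 * (log(2*pi*v) + (x - m)**2 / v)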
#####################################################################################
# spatial relaxation multivariate statistic class
class multivariate_stat:
def __init__(self, data, vardata=None, XYZ=None, std=None, sigma=None,
labels=None, network=None, v_shape=3, v_scale=20,
std_shape=3, std_scale=20, m_mean_rate=1e-3,
m_var_shape=3, m_var_scale=20, disp_mask=None,
labels_prior=None, label_values=None, labels_prior_mask=None):
"""
Multivariate modeling of fMRI group data accounting for spatial uncertainty
In: data (n,p) estimated effects
vardata (n,p) variances of estimated effects
XYZ (3,p) voxel coordinates
std <float> Initial guess for standard deviate of spatial displacements
sigma <float> regularity of displacement field
labels (p,) labels defining regions of interest
network (N,) binary region labels (1 for active, 0 for inactive)
v_shape <float> intensity variance prior shape
v_scale <float> intensity variance prior scale
std_shape <float> spatial standard error prior shape
std_scale <float> spatial standard error prior scale
m_mean_rate <float> mean effect prior rate
m_var_shape <float> effect variance prior shape
m_var_scale <float> effect variance prior scale
disp_mask (q,) mask of the brain, to limit displacements
labels_prior (M,r) prior on voxelwise region membership
            label_values (M,r) voxelwise label values for which the prior is defined
labels_prior_mask (r,) Mask of voxels where a label prior is defined
"""
self.data = data
if vardata != None and vardata.max() == 0:
self.vardata = None
else:
self.vardata = vardata
self.std = std
self.sigma = sigma
self.labels = labels
self.network = network
self.v_shape = v_shape
self.v_scale = v_scale
self.std_shape = std_shape
self.std_scale = std_scale
n, p = data.shape
if labels == None:
self.labels = np.zeros(p, int)
M = self.labels.max() + 1
if network == None:
self.network = np.ones(M, int)
if np.isscalar(m_mean_rate):
self.m_mean_rate = np.zeros(M, float) + m_mean_rate
else:
self.m_mean_rate = m_mean_rate
if np.isscalar(m_var_shape):
self.m_var_shape = np.zeros(M, float) + m_var_shape
else:
self.m_var_shape = m_var_shape
if np.isscalar(m_var_scale):
self.m_var_scale = np.zeros(M, float) + m_var_scale
else:
self.m_var_scale = m_var_scale
if std != None:
self.D = displacement_field(XYZ, sigma, data.shape[0], disp_mask)
self.labels_prior = labels_prior
self.label_values = label_values
self.labels_prior_mask = labels_prior_mask
def init_hidden_variables(self, mode='saem', init_spatial=True):
n, p = self.data.shape
self.X = self.data.copy()
self.m = self.X.mean(axis=0)
#self.v = np.square(self.X - self.m).mean()
N = len(self.network)
self.m_mean = np.zeros(N, float)
self.m_var = np.zeros(N, float)
self.v = np.zeros(N, float)
#self.s0 = np.zeros(N, float)
#self.S0 = np.zeros(N, float)
self.s1 = np.zeros(N, float)
self.S1 = np.zeros(N, float)
self.s2 = np.zeros(N, float)
self.S2 = np.zeros(N, float)
self.s3 = np.zeros(N, float)
self.S3 = np.zeros(N, float)
self.s6 = np.zeros(N, float)
for j in xrange(N):
self.s6[j] = (self.labels == j).sum()
self.S6 = self.s6.copy()
self.m_var_post_scale = np.zeros(N, float)
if init_spatial and self.std != None:
B = len(self.D.block)
if B == 0:
self.std = None
else:
self.R = np.zeros((n, B), int)
self.N = np.ones(p, float) * n
self.s4 = 0.0
self.S4 = 0.0
self.s5 = np.zeros(N, float)
self.S5 = np.zeros(N, float)
std = self.std
self.update_summary_statistics(init_spatial)
if mode == 'saem':
self.update_parameters_saem(init_spatial)
else:
self.update_parameters_mcmc(init_spatial)
self.std = std
def update_summary_statistics(self, w=1.0, update_spatial=True, mode='saem'):
n, p = self.data.shape
if self.std == None:
m = self.m
else:
m = self.m[self.D.I]
if update_spatial:
self.s4 = np.square(self.D.U).sum()
if mode == 'saem':
self.S4 += w * (self.s4 - self.S4)
if self.vardata == None:
SS = np.square(self.data - m) #/ self.v + np.log(2 * np.pi * self.v)
else:
SS = np.square(self.X - m) #/ self.vardata + np.log(2 * np.pi * self.vardata)
if self.std == None:
SS_sum = SS.sum(axis=0)
else:
SS_sum = np.zeros(p, float)
for i in xrange(n):
Ii = self.D.I[i]
SSi = SS[i].reshape(p, 1)
add_lines(SSi, SS_sum.reshape(p, 1), Ii)
for j in xrange(len(self.network)):
L = np.where(self.labels == j)[0]
self.s1[j] = SS_sum[L].sum()
if self.labels_prior != None:
self.s6[j] = len(L)
self.s2[j] = np.square(self.m[L]).sum()
if self.network[j] == 1:
self.s3[j] = self.m[L].sum()
if update_spatial and self.std != None:
self.s5[j] = self.N[L].sum()
if mode == 'saem':
self.S5 += w * (self.s5 - self.S5)
if mode == 'saem':
self.S1 += w * (self.s1 - self.S1)
self.S2 += w * (self.s2 - self.S2)
self.S3 += w * (self.s3 - self.S3)
if self.labels_prior != None:
self.S6 += w * (self.s6 - self.S6)
size = self.S6
sum_sq = self.S2
sum = self.S3
else:
size = self.S6
sum_sq = self.s2
sum = self.s3
# Update m_var post scale
# used to update parameters,
# and compute conditional posterior
rate = self.m_mean_rate
shape = self.m_var_shape
scale = self.m_var_scale
J = self.network == 1
N1 = J.sum()
if N1 > 0:
post_rate = rate[J] + size[J]
self.m_var_post_scale[J] = scale[J] + 0.5 * (sum_sq[J] - np.square(sum[J]) / post_rate)
if N1 < len(self.network):
self.m_var_post_scale[J==0] = scale[J==0] + 0.5 * sum_sq[J==0]
def update_parameters_saem(self, update_spatial=True):
n, p = self.data.shape
#self.v = (self.S1 + 2 * self.v_scale) / (n * p + 2 * (1 + self.v_shape))
size = self.S6
rate = self.m_mean_rate
shape = self.m_var_shape
scale = self.m_var_scale
if self.std == None:
N = n * size
else:
N = self.S5
if update_spatial:
#B = len(self.D.block)
self.std = np.sqrt(
(self.S4 + 2 * self.std_scale) / (self.D.U.size + 2 * self.std_shape + 2))
self.v = (self.S1 + 2 * self.v_scale) / (N + 2 * self.v_shape + 2)
J = self.network == 1
N1 = J.sum()
if N1 > 0:
self.m_mean[J] = self.S3[J] / (rate[J] + size[J])
self.m_var[J] = 2 * self.m_var_post_scale[J] / (size[J] + 2 * shape[J] + 3)
if N1 < len(self.network):
self.m_var[J==0] = 2 * self.m_var_post_scale[J==0] / (size[J==0] + 2 * shape[J==0] + 2)
def update_parameters_mcmc(self, update_spatial=True):
n, p = self.data.shape
#self.v = (self.s1 + 2 * self.v_scale) / np.random.chisquare(df = n * p + 2 * self.v_shape)
size = self.s6
rate = self.m_mean_rate
shape = self.m_var_shape
scale = self.m_var_scale
if self.std == None:
N = n * size
else:
N = self.s5
if update_spatial:
#B = len(self.D.block)
self.std = np.sqrt(
(self.s4 + 2*self.std_scale) / np.random.chisquare(df=self.D.U.size + 2*self.std_shape))
J = self.network == 1
if J.sum() > 0:
post_rate = rate[J] + size[J]
            self.m_mean[J] = self.s3[J] / post_rate \
                + np.random.randn(J.sum()) * np.sqrt(self.m_var[J] / post_rate)
for j in xrange(len(self.network)):
self.v[j] = (self.s1[j] + 2 * self.v_scale) / np.random.chisquare(df = N[j] + 2 * self.v_shape)
self.m_var[j] = 2 * self.m_var_post_scale[j] / np.random.chisquare(df = size[j] + 2 * shape[j])
def update_displacements(self):
n, p = self.data.shape
B = len(self.D.block)
if self.proposal == 'prior':
for i in xrange(n):
for b in np.random.permutation(range(B)):
block = self.D.block[b]
A = self.update_block(i, b, 'prior', self.std)
elif self.proposal == 'rand_walk':
if np.isscalar(self.proposal_std):
for i in xrange(n):
for b in np.random.permutation(range(B)):
block = self.D.block[b]
A = self.update_block(i, b, 'rand_walk', self.proposal_std)
else:
for i in xrange(n):
for b in np.random.permutation(range(B)):
block = self.D.block[b]
A = self.update_block(i, b, 'rand_walk', self.proposal_std[:, i, b])
else:
for i in xrange(n):
for b in np.random.permutation(range(B)):
block = self.D.block[b]
A = self.update_block(i, b, 'fixed', self.proposal_std[:, i, b], self.proposal_mean[:, i, b])
self.N *= 0
ones = np.ones((p, 1), float)
for i in xrange(n):
Ii = self.D.I[i]
add_lines(ones, self.N.reshape(p, 1), Ii)
if self.verbose:
print "mean rejected displacements :", self.R.mean(axis=0)
def update_block(self, i, b, proposal='prior', proposal_std=None,
proposal_mean=None, verbose=False, reject_override=False):
block = self.D.block[b]
if verbose:
print 'sampling field', i, 'block', b
# Propose new displacement
U, V, L, W, I = self.D.sample(i, b, proposal, proposal_std,
proposal_mean)
Uc = self.D.U[:, i, b]
Ic = self.D.I[i, L]
# log acceptance rate
mc = self.m[Ic]
m = self.m[I]
vc = self.v[self.labels[Ic]]
v = self.v[self.labels[I]]
#A = ((mc - m) * (mc + m - 2 * self.X[i, L])).sum() / self.v
A = (np.log(v) - np.log(vc)
+ (self.X[i, L] - mc)**2 / vc
- (self.X[i, L] - m)**2 / v).sum()
if not proposal == 'prior':
A += (Uc**2 - U**2).sum() / self.std**2
if proposal == 'fixed':
if proposal_std.max() == 0:
A = np.inf
else:
A += ((U - Uc) * (U + Uc - 2 * proposal_mean) / proposal_std**2).sum()
self.R[i, b] = np.random.uniform() > np.exp(0.5 * A)
if self.R[i, b] == 0 and not reject_override:
self.D.U[:, i, b] = U
self.D.V[:, i, block] = V
if len(L)> 0:
self.D.W[:, i, L] = W
self.D.I[i, L] = I
return A
def update_effects(self, T=1.0):
"""
T is a temperature used to compute log posterior density
by simulated annealing
"""
n, p = self.data.shape
if self.std == None:
m = self.m
v = self.v[self.labels]
else:
m = self.m[self.D.I]
v = self.v[self.labels[self.D.I]]
#tot_var = self.v + self.vardata
#cond_mean = (self.v * self.data + self.vardata * m) / tot_var
#cond_var = self.v * self.vardata / tot_var
tot_var = v + self.vardata
cond_mean = (v * self.data + self.vardata * m) / tot_var
cond_var = T * v * self.vardata / tot_var
self.X = cond_mean + np.random.randn(n, p) * np.sqrt(cond_var)
def update_mean_effect(self, T=1.0):
"""
T is a temperature used to compute log posterior density
by simulated annealing
"""
n, p = self.data.shape
X_sum = np.zeros(p, float)
if self.std == None:
X_sum = self.X.sum(axis=0)
else:
#self.N *= 0
#ones = np.ones((p, 1), float)
for i in xrange(n):
Ii = self.D.I[i]
XI = self.X[i].reshape(p, 1)
add_lines(XI, X_sum.reshape(p, 1), Ii)
#add_lines(ones, self.N.reshape(p, 1), Ii)
for j in xrange(len(self.network)):
L = np.where(self.labels == j)[0]
m_var = self.m_var[j] * T
v = self.v[j] * T
if self.std == None:
#tot_var = self.v + m_var * n
tot_var = v + m_var * n
else:
#tot_var = self.v + m_var * self.N[L]
tot_var = v + m_var * self.N[L]
#cond_mean = (X_sum[L] * m_var + self.v * self.m_mean[j]) / tot_var
#cond_std = np.sqrt(self.v * m_var / tot_var)
cond_mean = (X_sum[L] * m_var + v * self.m_mean[j]) / tot_var
cond_std = np.sqrt(v * m_var / tot_var)
self.m[L] = cond_mean + np.random.randn(len(L)) * cond_std
def update_labels(self):
N, r = self.labels_prior.shape
I = self.labels_prior_mask
m_mean = self.m_mean[self.label_values]
m_var = self.m_var[self.label_values]
L = (self.m[I].reshape(1, r) - m_mean)**2 / m_var
P = self.labels_prior * np.exp(-0.5 * L) / np.sqrt(m_var)
P_cumsum = P.cumsum(axis=0)
X = np.random.rand(r) * P_cumsum[-1]
labels = (X > P_cumsum).sum(axis=0)
self.labels[I] = self.label_values[labels, xrange(r)]
def evaluate(self, nsimu=1e3, burnin=100, J=None, verbose=False,
proposal='prior', proposal_std=None, proposal_mean=None,
compute_post_mean=False, mode='saem', update_spatial=True):
"""
Sample posterior distribution of model parameters, or compute their MAP estimator
In: nsimu <int> Number of samples drawn from posterior mean distribution
burnin <int> Number of discarded burn-in samples
J (N,) voxel indices where successive mean values are stored
verbose <bool> Print some infos during the sampling process
proposal <str> 'prior', 'rand_walk' or 'fixed'
proposal_mean <float> Used for fixed proposal only
proposal_std <float> Used for random walk or fixed proposal
mode <str> if mode='saem', compute MAP estimates of model parameters.
if mode='mcmc', sample their posterior distribution
update_spatial <bool> when False, enables sampling conditional on spatial parameters
Out: self.m_values (N, nsimu+burnin) successive mean values (if J is not empty)
if self.labels_prior is not empty:
self.labels_post (M,r) posterior distribution of region labels
if self.std is not empty:
            self.std_values (nsimu+burnin,) successive spatial standard deviation values
if compute_post_mean is True:
self.mean_m (p,) posterior average of mean effect
self.var_m (p,) posterior variance of mean effect
if self.std is not empty and compute_post_mean is True:
self.r (n, nblocks) mean rejection rate for each displacement field
self.mean_U (3, n, nblocks) posterior average of displacement weights
self.var_U (3, n, nblocks) posterior marginal variances of displacement weights
"""
#self.init_hidden_variables()
n, p = self.data.shape
self.nsimu = nsimu
self.burnin = burnin
self.J = J
self.verbose = verbose
self.proposal = proposal
self.proposal_mean = proposal_mean
self.proposal_std = proposal_std
self.compute_post_mean = compute_post_mean
#self.v_values = np.zeros(nsimu + burnin, float)
if J != None:
self.m_values = np.zeros((len(J), nsimu + burnin), float)
if self.std != None:
B = len(self.D.block)
if update_spatial:
self.std_values = np.zeros(nsimu + burnin, float)
if proposal == 'rand_walk':
self.proposal_std_values = np.zeros(nsimu + burnin, float)
if self.labels_prior != None:
self.labels_post = np.zeros(self.labels_prior.shape, float)
#Il = np.array(np.where(self.labels_prior > 0))
#r = len(self.labels_prior_mask)
if compute_post_mean:
sum_m = np.zeros(p, float)
sum_m_sq = np.zeros(p, float)
if mode == 'mcmc':
N = len(self.network)
self.P = np.zeros(N, float)
self.mean_m_mean = np.zeros(N, float)
self.mean_m_var = np.zeros(N, float)
self.mean_v = np.zeros(N, float)
if update_spatial and self.std != None:
self.r = np.zeros((n, B), float)
sum_U = np.zeros((3, n, B), float)
sum_U_sq = np.zeros((3, n, B), float)
niter = np.array([int(burnin), int(nsimu)])
for j in np.arange(2)[niter>0]:
if j == 0:
w = 1
if self.verbose:
print "Burn-in"
else:
if mode == 'saem':
if self.verbose:
print "Maximizing likelihood"
else:
if self.verbose:
print "Sampling posterior distribution"
for i in xrange(niter[j]):
if self.verbose:
if mode == 'saem':
print "SAEM",
else:
print "Gibbs",
print "iteration", i+1, "out of", niter[j]
# Gibbs iteration
#i += 1
if update_spatial and self.std != None:
self.update_displacements()
if j == 0 and self.proposal == 'rand_walk':
self.proposal_std = np.clip(self.proposal_std * (1 + 0.9) / (1 + self.R.mean()), 0.01, 10.0)
if self.vardata != None:
self.update_effects()
self.update_mean_effect()
if self.labels_prior != None:
self.update_labels()
if j == 1:
w = 1.0 / (i + 1)
self.update_summary_statistics(w, update_spatial, mode)
if mode == 'saem':
self.update_parameters_saem(update_spatial)
else:
self.update_parameters_mcmc(update_spatial)
if self.verbose:
print "population effect min variance value :", self.m_var.min()
# Update results
#self.v_values[i + self.burnin * j] = self.v
if update_spatial and self.std != None:
self.std_values[i + self.burnin * j] = self.std
if proposal == 'rand_walk':
self.proposal_std_values[i + self.burnin * j] = self.proposal_std
if self.J != None:
self.m_values[:, i + self.burnin * j] = self.m[self.J]
if j == 1 and self.labels_prior != None:
self.labels_post += \
self.label_values == self.labels[self.labels_prior_mask]
#self.labels_post[Il[0], Il[1]] += \
#self.label_values[Il[0], Il[1]] == self.labels[Il[0]]
if j == 1 and compute_post_mean:
sum_m += self.m
sum_m_sq += self.m**2
if mode == 'mcmc':
self.P += (self.m_mean > 0)
self.mean_m_mean += self.m_mean
self.mean_m_var += self.m_var
self.mean_v += self.v
if update_spatial and self.std != None:
self.r += self.R
sum_U += self.D.U
sum_U_sq += self.D.U**2
if j== 1 and self.labels_prior != None:
self.labels_post /= nsimu
if j == 1 and compute_post_mean:
self.mean_m = sum_m / float(self.nsimu)
self.var_m = sum_m_sq / float(self.nsimu) - self.mean_m**2
if mode == 'mcmc':
self.P /= float(self.nsimu)
self.mean_m_mean /= float(self.nsimu)
self.mean_m_var /= float(self.nsimu)
self.mean_v /= float(self.nsimu)
if update_spatial and self.std != None:
self.r /= float(self.nsimu)
self.mean_U = sum_U / float(self.nsimu)
self.var_U = sum_U_sq / float(self.nsimu) - self.mean_U**2
#####################################################################################
# MAP estimation of displacement fields
def estimate_displacements_SA(self, nsimu=100, c=0.99, proposal_std=None, verbose=False):
"""
MAP estimate of elementary displacements conditional on model parameters
"""
if proposal_std==None:
proposal_std = self.proposal_std
LL, self.Z, self.tot_var, self.SS1, self.SS2, self.SS3, self.SS4 =\
self.compute_log_voxel_likelihood(return_SS=True)
self.log_voxel_likelihood = LL
for i in xrange(nsimu):
if verbose:
print "SA iteration", i+1, "out of", nsimu
self.update_displacements_SA(c**i, proposal_std, verbose)
self.update_summary_statistics(w=1.0, update_spatial=True)
def update_displacements_SA(self, T=1.0, proposal_std=None, verbose=False):
n = self.data.shape[0]
B = len(self.D.block)
for i in xrange(n):
for b in np.random.permutation(range(B)):
#block = self.D.block[b]
A = self.update_block_SA(i, b, T, proposal_std, verbose)
if self.verbose:
print "mean rejected displacements :", self.R.mean(axis=0)
def compute_log_conditional_displacements_posterior(self, U=None, nsimu=100, burnin=100, proposal_std=None, verbose=False, change_U=False):
"""
Compute posterior log density of elementary displacements at point U, conditional on model parameters
"""
n = self.data.shape[0]
B = len(self.D.block)
if U == None:
U = self.D.U.copy()
if proposal_std == None:
proposal_std = self.proposal_std
LL, self.Z, self.tot_var, self.SS1, self.SS2, self.SS3, self.SS4 =\
self.compute_log_voxel_likelihood(return_SS=True)
self.log_voxel_likelihood = LL
if not change_U:
Uc = self.D.U.copy()
proposal_c = self.proposal
proposal_mean_c = self.proposal_mean
proposal_std_c = self.proposal_std.copy()
self.proposal = 'fixed'
self.proposal_mean = U
self.proposal_std = U * 0
self.update_displacements()
#Restore displacement parameters
self.proposal = proposal_c
self.proposal_mean = proposal_mean_c
self.proposal_std = proposal_std_c
self.update_summary_statistics(update_spatial=True, mode='mcmc')
L = 0.0
i,b = n-1, B-1
n_ib = n * B - i * B - b
nsimu_ib = nsimu / n_ib
burnin_ib = burnin / n_ib
A_values = np.zeros(nsimu_ib, float)
A2_values = np.zeros(nsimu_ib, float)
SS_values = np.zeros(nsimu_ib, float)
if verbose:
print 'Compute mean acceptance rate for block', i, b
print 'Burn-in'
if verbose:
print 'Sample acceptance rate values'
for s in xrange(nsimu / (n * B - i * B - b)):
if verbose:
print "SA iteration", s, "out of", nsimu / (n * B - i * B - b)
A_values[s] = self.update_block_SA(\
i, b, 1.0, proposal_std,
verbose=False, reject_override=True)
mean_acceptance = np.exp(A_values).clip(0,1).mean()
L -= np.log(mean_acceptance)
for i in range(n)[::-1]:
for b in range(B)[::-1]:
n_ib = n * B - i * B - b
nsimu_ib = nsimu / n_ib
burnin_ib = burnin / n_ib
A_values = np.zeros(nsimu_ib, float)
A2_values = np.zeros(nsimu_ib, float)
SS_values = np.zeros(nsimu_ib, float)
if verbose:
print 'Compute log conditional posterior for block', i, b
print 'Burn-in'
for s in xrange(burnin / n_ib):
if verbose:
print "SA iteration", s, "out of", burnin_ib
for bb in xrange(b, B):
A = self.update_block_SA(\
i, bb, 1.0, proposal_std, verbose=False)
for ii in xrange(i+1, n):
for bb in xrange(B):
A = self.update_block_SA(\
ii, bb, 1.0, proposal_std, verbose=False)
if verbose:
print 'Sample kernel and acceptance rate values'
for s in xrange(nsimu_ib):
if verbose:
print "SA iteration", s, "out of", nsimu_ib
for bb in xrange(b, B):
A = self.update_block_SA(\
i, bb, 1.0, proposal_std, verbose=False)
for ii in xrange(i+1, n):
for bb in xrange(B):
A = self.update_block_SA(\
ii, bb, 1.0, proposal_std, verbose=False)
A_values[s] = self.update_block_SA(\
i, b, 1.0, proposal_std*0, verbose=False, reject_override=True,
proposal='fixed', proposal_mean=U[:, i, b])
SS_values[s] = np.square(U[:, i, b] - self.D.U[:, i, b]).sum()
if b > 0:
A2_values[s] = self.update_block_SA(\
i, b-1, 1.0, proposal_std, verbose=False,
reject_override=True)
elif i > 0:
A2_values[s] = self.update_block_SA(\
i-1, B-1, 1.0, proposal_std, verbose=False,
reject_override=True)
mean_acceptance = np.exp(A2_values).clip(0,1).mean()
mean_kernel = \
(np.exp(A_values).clip(0,1) * \
np.exp( -0.5 * SS_values / proposal_std**2) \
/ (np.sqrt(2 * np.pi) * proposal_std)**3).mean()
L += np.log(mean_kernel) - np.log(mean_acceptance)*(i>0 or b>0)
if not change_U:
# Restore initial displacement value
self.proposal = 'fixed'
self.proposal_mean = Uc
self.proposal_std = Uc * 0
self.update_displacements()
self.proposal = proposal_c
self.proposal_mean = proposal_mean_c
self.proposal_std = proposal_std_c
self.update_summary_statistics(update_spatial=True, mode='mcmc')
return L
def update_block_SA(self, i, b, T=1.0, proposal_std=None, verbose=False, reject_override=False, proposal='rand_walk', proposal_mean=None):
"""
Update displacement block using simulated annealing scheme
with random-walk kernel
"""
if proposal_std==None:
proposal_std=self.std
block = self.D.block[b]
if verbose:
print 'sampling field', i, 'block', b
# Propose new displacement
U, V, L, W, I = self.D.sample(i, b, proposal, proposal_std * T, proposal_mean=proposal_mean)
Uc = self.D.U[:, i, b].copy()
#Vc = self.D.V[:, i, block].copy()
p = self.data.shape[1]
pL = len(L)
if pL > 0:
#Wc = self.D.W[:, i, L].copy()
Ic = self.D.I[i, L].copy()
J = np.unique(np.concatenate((I, Ic)))
q = len(J)
IJ = np.searchsorted(J, I)
IJc = np.searchsorted(J, Ic)
N = self.N[J].copy()
Zc = self.Z[i,L].copy()
tot_varc = self.tot_var[i,L].copy()
SS1 = self.SS1[J].copy()
SS2 = self.SS2[J].copy()
SS3 = self.SS3[J].copy()
SS4 = self.SS4[J].copy()
# log acceptance rate
#self.D.U[:, i, b] = U
#self.D.V[:, i, block] = V
#if pL > 0:
#self.D.W[:, i, L] = W
#self.D.I[i, L] = I
ones = np.ones((len(L), 1), float)
add_lines(-ones, N.reshape(q, 1), IJc)
add_lines(ones, N.reshape(q, 1), IJ)
Z = self.data[i,L] - self.m_mean[self.labels[I]]
if self.vardata == None:
tot_var = self.v[self.labels[I]] + np.zeros(len(L), float)
else:
tot_var = self.v[self.labels[I]] + self.vardata[i,L]
add_lines(\
-(1.0 / tot_varc).reshape(pL, 1),
SS1.reshape(q, 1),
IJc)
add_lines(\
(1.0 / tot_var).reshape(pL, 1),
SS1.reshape(q, 1),
IJ)
add_lines(\
-np.log(tot_varc).reshape(pL, 1),
SS2.reshape(q, 1),
IJc)
add_lines(\
np.log(tot_var).reshape(pL, 1),
SS2.reshape(q, 1),
IJ)
add_lines(\
-(Zc**2 / tot_varc).reshape(pL, 1),
SS3.reshape(q, 1),
IJc)
add_lines(\
(Z**2 / tot_var).reshape(pL, 1),
SS3.reshape(q, 1),
IJ)
add_lines(\
-(Zc / tot_varc).reshape(pL, 1),
SS4.reshape(q, 1),
IJc)
add_lines(\
(Z / tot_var).reshape(pL, 1),
SS4.reshape(q, 1),
IJ)
fc = self.log_voxel_likelihood[J]
f = - 0.5 * (\
N * np.log(2 * np.pi) + \
np.log(1 + self.m_var[self.labels[J]] * SS1) \
+ SS2 + SS3 - SS4**2 / \
(1 / self.m_var[self.labels[J]] + SS1))
else:
f = np.zeros(1)
fc = np.zeros(1)
A = (f - fc).sum() + 0.5 * (Uc**2 - U**2).sum() / self.std**2
self.R[i, b] = np.random.uniform() > np.exp(A / T)
if self.R[i, b] == 0 and not reject_override:
self.D.U[:, i, b] = U
self.D.V[:, i, block] = V
if len(L) > 0:
self.D.W[:, i, L] = W
self.D.I[i, L] = I
self.N[J] = N
self.Z[i,L] = Z
self.tot_var[i,L] = tot_var
self.SS1[J] = SS1
self.SS2[J] = SS2
self.SS3[J] = SS3
self.SS4[J] = SS4
self.log_voxel_likelihood[J] = f
return A
#####################################################################################
# Marginal likelihood computation for model selection
def compute_log_region_likelihood_slow(self, v=None, m_mean=None, m_var=None, verbose=False, J=None):
"""
Essentially maintained for debug purposes
"""
if v == None:
v = self.v
if m_mean == None:
m_mean = self.m_mean
if m_var == None:
m_var = self.m_var
n, p = self.data.shape
nregions = len(self.network)
log_region_likelihood = np.zeros(nregions, float)
if J == None:
J = xrange(nregions)
if self.std == None:
nk = n
else:
I = self.D.I
argsort_I = np.argsort(I.ravel())
data_I = self.data.ravel()[argsort_I]
if self.vardata != None:
var_I = (self.vardata + v[self.labels[I]]).ravel()[argsort_I]
cumsum = np.zeros(p + 1, int)
cumsum[1:] = self.N.cumsum().astype(int)
for i in xrange(len(J)):
j = J[i]
if verbose:
print "computing log likelihood for region", i + 1, "out of", len(J)
m_var_j = self.m_var[j]
m_mean_j = self.m_mean[j]
v_j = self.v[j]
L = np.where(self.labels == j)[0]
for k in L:
if self.std == None:
datak = np.matrix(self.data[:, k].reshape(n, 1) - m_mean_j)
if self.vardata != None:
vark = self.vardata[:, k] + v_j
else:
nk = int(self.N[k])
datak = np.matrix(data_I[cumsum[k] : cumsum[k + 1]].reshape(nk, 1) - m_mean_j)
if self.vardata != None:
vark = var_I[cumsum[k] : cumsum[k + 1]]
Vk = np.matrix(np.zeros((nk, nk), float) + m_var_j)
if self.vardata == None:
Vk[xrange(nk), xrange(nk)] = v_j + m_var_j
else:
Vk[xrange(nk), xrange(nk)] = vark + m_var_j
log_region_likelihood[j] += np.log(np.linalg.det(Vk)) + datak.transpose() * np.linalg.inv(Vk) * datak
if self.std == None:
nj = n * len(L)
else:
nj = self.N[L].sum()
log_region_likelihood[j] += nj * np.log(2 * np.pi)
return log_region_likelihood
def compute_log_region_likelihood(self, v=None, m_mean=None, m_var=None):
log_voxel_likelihood = self.compute_log_voxel_likelihood(v, m_mean, m_var)
N = len(self.network)
log_region_likelihood = np.zeros(N, float)
for j in xrange(N):
log_region_likelihood[j] = log_voxel_likelihood[self.labels==j].sum()
return log_region_likelihood
def compute_log_voxel_likelihood(self, v=None, m_mean=None, m_var=None, return_SS=False):
if v == None:
v = self.v
if m_mean == None:
m_mean = self.m_mean
if m_var == None:
m_var = self.m_var
n, p = self.data.shape
if self.std == None:
N = n
v_labels = v[self.labels]
Z = self.data - m_mean[self.labels]
else:
N = self.N
I = self.D.I
v_labels = v[self.labels[I]]
Z = self.data - m_mean[self.labels[I]]
if self.vardata == None:
tot_var = v_labels + np.zeros(self.data.shape, float)
else:
tot_var = v_labels + self.vardata
if self.std == None:
SS1 = (1 / tot_var).sum(axis=0)
SS2 = np.log(tot_var).sum(axis=0)
SS3 = (Z**2 / tot_var).sum(axis=0)
SS4 = (Z / tot_var).sum(axis=0)
else:
SS1 = np.zeros(p, float)
SS2 = np.zeros(p, float)
SS3 = np.zeros(p, float)
SS4 = np.zeros(p, float)
for i in xrange(n):
Ii = self.D.I[i]
add_lines((1 / tot_var[i]).reshape(p, 1), SS1.reshape(p, 1), Ii)
add_lines(np.log(tot_var[i]).reshape(p, 1), SS2.reshape(p, 1), Ii)
add_lines((Z[i]**2 / tot_var[i]).reshape(p, 1), SS3.reshape(p, 1), Ii)
add_lines((Z[i] / tot_var[i]).reshape(p, 1), SS4.reshape(p, 1), Ii)
LL = - 0.5 * (N * np.log(2 * np.pi) + np.log(1 + m_var[self.labels] * SS1) \
+ SS2 + SS3 - SS4**2 / (1 / m_var[self.labels] + SS1))
if return_SS:
return LL, Z, tot_var, SS1, SS2, SS3, SS4
else:
return LL
def compute_log_prior(self, v=None, m_mean=None, m_var=None, std=None):
"""
compute log prior density of model parameters, spatial uncertainty excepted,
assuming hidden variables have been initialized
"""
if v == None:
v = self.v
if m_mean == None:
m_mean = self.m_mean
if m_var == None:
m_var = self.m_var
if std == None:
std = self.std
N = len(self.network)
log_prior_values = np.zeros(N + 1, float)
log_prior_values[:-1] = log_gammainv_pdf(v, self.v_shape, self.v_scale)
log_prior_values[:-1] += log_gammainv_pdf(m_var, self.m_var_shape, self.m_var_scale)
J = self.network == 1
if J.sum() > 0:
log_prior_values[J] += log_gaussian_pdf(m_mean[J], 0, m_var[J] / self.m_mean_rate[J])
if self.std != None:
log_prior_values[-1] = log_gammainv_pdf(std**2, self.std_shape, self.std_scale)
return log_prior_values
def compute_log_conditional_posterior(self, v=None, m_mean=None, m_var=None, std=None):
"""
compute log posterior density of model parameters, conditional on hidden parameters.
This function is used in compute_log_region_posterior. It should only be used within
the Gibbs sampler, and not the SAEM algorithm.
"""
n,p = self.data.shape
if v == None:
v = self.v
if m_mean == None:
m_mean = self.m_mean
if m_var == None:
m_var = self.m_var
if std == None:
std = self.std
log_conditional_posterior = np.zeros(len(self.network) + 1, float)
size = self.s6
if self.std == None:
N = n * size
else:
N = self.s5
log_conditional_posterior[:-1] = log_gammainv_pdf(v, self.v_shape + 0.5 * N, self.v_scale + 0.5 * self.s1)
log_conditional_posterior[:-1] += log_gammainv_pdf(m_var, self.m_var_shape + 0.5 * size, self.m_var_post_scale)
J = self.network == 1
if J.sum() > 0:
post_rate = self.m_mean_rate[J] + size[J]
log_conditional_posterior[J] += log_gaussian_pdf(m_mean[J], self.s3[J] / post_rate, m_var[J] / post_rate)
if std != None:
#B = len(self.D.block)
log_conditional_posterior[-1] = \
log_gammainv_pdf(std**2, self.std_shape + 0.5 * self.D.U.size, self.std_scale + 0.5 * self.s4)
return log_conditional_posterior
def sample_log_conditional_posterior(self, v=None, m_mean=None, m_var=None, std=None, nsimu=100, burnin=100, stabilize=False, verbose=False, update_spatial=False):
"""
sample log conditional posterior density of region parameters
using a Gibbs sampler (assuming all hidden variables have been initialized).
Computes posterior mean.
if stabilize is True, sampling is conditioned on the parameters, reducing
the variance of the estimate, but introducing a positive bias.
"""
if v == None:
v = self.v.copy()
if m_mean == None:
m_mean = self.m_mean.copy()
if m_var == None:
m_var = self.m_var.copy()
if std == None and self.std != None:
if np.isscalar(self.std):
std = self.std
else:
std = self.std.copy()
if update_spatial:
U = self.D.U.copy()
proposal = self.proposal
proposal_mean = self.proposal_mean
proposal_std = self.proposal_std
N = len(self.network)
log_conditional_posterior_values = np.zeros((nsimu, N+1), float)
#self.init_hidden_variables()
n, p = self.data.shape
posterior_mean = np.zeros(p, float)
self.nsimu = nsimu
self.burnin = burnin
#self.J = J
self.verbose = verbose
niter = np.array([int(burnin), int(nsimu)])
for k in np.arange(2)[niter>0]:
if self.verbose:
if k == 0:
print "Burn-in"
else:
print "Sampling posterior distribution"
for i in xrange(niter[k]):
if self.verbose:
print "Iteration", i+1, "out of", niter[k]
# Gibbs iteration
#i += 1
if update_spatial and self.std != None:
self.update_displacements()
if self.vardata != None:
self.update_effects()
self.update_mean_effect()
posterior_mean += self.m
if not stabilize:
self.update_summary_statistics(update_spatial, mode='mcmc')
self.update_parameters_mcmc(update_spatial)
if self.verbose:
print "population effect min variance value :", self.m_var.min()
if k == 1:
if stabilize:
self.update_summary_statistics(update_spatial, mode='mcmc')
log_conditional_posterior_values[i] = \
self.compute_log_conditional_posterior(v, m_mean, m_var, std)#[:-1]
posterior_mean /= nsimu
if not stabilize:
# Restore initial parameter values
self.v[:], self.m_mean[:], self.m_var[:], self.std = v, m_mean, m_var, std
if update_spatial:
# Restore initial displacement values
self.proposal = 'fixed'
self.proposal_mean = U
self.proposal_std = U * 0
self.update_displacements()
self.proposal = proposal
self.proposal_mean = proposal_mean
self.proposal_std = proposal_std
self.update_summary_statistics(update_spatial, mode='mcmc')
return log_conditional_posterior_values, posterior_mean
def compute_log_posterior(self, v=None, m_mean=None, m_var=None, std=None, nsimu=100, burnin=100, stabilize=False, verbose=False, update_spatial=False):
"""
compute log posterior density of region parameters by Rao-Blackwell method,
or a stabilized upper bound if stabilize is True.
"""
log_conditional_posterior_values \
= self.sample_log_conditional_posterior(v, m_mean, m_var, std, nsimu, burnin, stabilize, verbose, update_spatial)[0]
max_log_conditional = log_conditional_posterior_values.max(axis=0)
ll_ratio = log_conditional_posterior_values - max_log_conditional
if stabilize:
return max_log_conditional + ll_ratio.mean(axis=0)
elif not update_spatial:
return max_log_conditional \
+ np.log(np.exp(ll_ratio).sum(axis=0)) \
- np.log(nsimu)
else:
return max_log_conditional.sum() \
+ np.log(np.exp(ll_ratio.sum(axis=1)).sum()) \
- np.log(nsimu)
def compute_marginal_likelihood(self, v=None, m_mean=None, m_var=None, std=None, nsimu=100, burnin=100, stabilize=False, verbose=False, update_spatial=False, U=None, proposal_std=None):
log_likelihood = self.compute_log_region_likelihood(v, m_mean, m_var)
log_prior = self.compute_log_prior(v, m_mean, m_var, std)
log_posterior = self.compute_log_posterior(v, m_mean, m_var, std, nsimu, burnin, stabilize, verbose, update_spatial)
if update_spatial and self.std != None:
n, B = self.data.shape[0], len(self.D.block)
if std == None:
std = self.std
if U == None:
U = self.D.U
log_displacements_prior = \
- 0.5 * np.square(U).sum() / std**2 \
- self.D.U.size * np.log(std)
log_displacements_posterior = \
self.compute_log_conditional_displacements_posterior(\
U,
nsimu*n*B,
burnin*n*B,
proposal_std,
verbose)
return log_likelihood.sum() + \
log_prior.sum() + \
log_displacements_prior - \
log_posterior - \
log_displacements_posterior
else:
return log_likelihood + log_prior[:-1] - log_posterior[:-1]
def compute_conditional_posterior_mean(self, v=None, m_mean=None, m_var=None):
"""
Compute posterior mean of mean effect map,
conditional on parameters and displacements
"""
        if v is None:
            v = self.v.copy()
        if m_mean is None:
            m_mean = self.m_mean.copy()
        if m_var is None:
            m_var = self.m_var.copy()
LL, Z, tot_var, SS1, SS2, SS3, SS4 = \
self.compute_log_voxel_likelihood(v, m_mean, m_var, return_SS=True)
#if self.std == None:
#I = range(self.m.size)*np.ones(self.data.shape,int)
#else:
#I = self.D.I
m_labels = m_mean[self.labels]
v_labels = m_var[self.labels]
return (SS4 + m_labels * SS1 + m_labels / v_labels)\
/ (SS1 + 1.0 / v_labels)
| 44.284523
| 189
| 0.504552
|
4a00365e1ade3e633dc0eac07cc87e9006507d59
| 27,720
|
py
|
Python
|
qa/rpc-tests/test_framework/util.py
|
patrick-huyphan/elements
|
a3b159744d5bc452f66adc453009df0fcca45b43
|
[
"MIT"
] | 1
|
2019-07-02T17:07:34.000Z
|
2019-07-02T17:07:34.000Z
|
qa/rpc-tests/test_framework/util.py
|
patrick-huyphan/elements
|
a3b159744d5bc452f66adc453009df0fcca45b43
|
[
"MIT"
] | 3
|
2017-05-26T09:17:33.000Z
|
2019-07-05T12:04:55.000Z
|
qa/rpc-tests/test_framework/util.py
|
patrick-huyphan/elements
|
a3b159744d5bc452f66adc453009df0fcca45b43
|
[
"MIT"
] | 1
|
2017-09-04T11:53:56.000Z
|
2017-09-04T11:53:56.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import http.client
import random
import shutil
import subprocess
import time
import re
import errno
from pathlib import Path
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
BITCOIN_ASSET = bytearray.fromhex("b2e15d0d7a0c94e4e2ce0fe6e8691b9e451377f6e46e8045a86f7c4b5d4f0f23")
BITCOIN_ASSET.reverse()
BITCOIN_ASSET_OUT = b"\x01"+BITCOIN_ASSET
# The maximum number of nodes a single test can spawn
MAX_NODES = 9
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
BITCOIND_PROC_WAIT_TIMEOUT = 60
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
    #For backward compatibility of the python scripts
#with previous versions of the cache, set MOCKTIME
#to Jan 1, 2014 + (201 * 10 * 60)
global MOCKTIME
MOCKTIME = 1388534400 + (201 * 10 * 60)
def disable_mocktime():
global MOCKTIME
MOCKTIME = 0
def get_mocktime():
return MOCKTIME
def enable_coverage(dirname):
"""Maintain a log of which RPC calls are made during testing."""
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
        AuthServiceProxy: a convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
    sync_blocks needs to be called with an rpc_connections set that has at least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
# Use getblockcount() instead of waitforblockheight() to determine the
# initial max height because the two RPCs look at different internal global
# variables (chainActive vs latestBlock) and the former gets updated
# earlier.
maxheight = max(x.getblockcount() for x in rpc_connections)
start_time = cur_time = time.time()
while cur_time <= start_time + timeout:
tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
if all(t["height"] == maxheight for t in tips):
if all(t["hash"] == tips[0]["hash"] for t in tips):
return
raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
"".join("\n {!r}".format(tip) for tip in tips)))
cur_time = time.time()
raise AssertionError("Block sync to height {} timed out:{}".format(
maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same best block
"""
while timeout > 0:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash == [best_hash[0]]*len(best_hash):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
rpc_u, rpc_p = rpc_auth_pair(n)
with open(os.path.join(datadir, "elements.conf"), 'w', encoding='utf8') as f:
f.write("rpcuser=" + rpc_u + "\n")
f.write("rpcpassword=" + rpc_p + "\n")
f.write("port="+str(p2p_port(n))+"\n")
f.write("rpcport="+str(rpc_port(n))+"\n")
f.write("listenonion=0\n")
f.write("initialfreecoins=2100000000000000\n")
return datadir
def rpc_auth_pair(n):
return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n)
def rpc_url(i, rpchost=None, cookie_file=None):
if cookie_file:
with open(cookie_file, 'r') as f:
rpc_auth = f.readline()
else:
rpc_u, rpc_p = rpc_auth_pair(i)
rpc_auth = rpc_u+":"+rpc_p
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s@%s:%d" % (rpc_auth, host, int(port))
def wait_for_bitcoind_start(process, url, i):
'''
Wait for bitcoind to start. This means that RPC is accessible and fully initialized.
Raise an exception if bitcoind exits during initialization.
'''
while True:
if process.poll() is not None:
raise Exception('bitcoind exited with status %i during initialization' % process.returncode)
try:
rpc = get_rpc_proxy(url, i)
blocks = rpc.getblockcount()
break # break out of loop on success
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
time.sleep(0.25)
def initialize_chain(test_dir, num_nodes, cachedir):
"""
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache
"""
assert num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join(cachedir, 'node'+str(i))):
create_cache = True
break
if create_cache:
#find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join(cachedir,"node"+str(i))):
shutil.rmtree(os.path.join(cachedir,"node"+str(i)))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir=initialize_datadir(cachedir, i)
args = [ os.getenv("ELEMENTSD", "elementsd"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print("initialize_chain: bitcoind started, waiting for RPC to come up")
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
print("initialize_chain: RPC successfully started")
rpcs = []
for i in range(MAX_NODES):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
        except Exception:
            sys.stderr.write("Error connecting to " + rpc_url(i) + "\n")
            sys.exit(1)
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
enable_mocktime()
block_time = get_mocktime() - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename(cachedir, i, "debug.log"))
os.remove(log_filename(cachedir, i, "db.log"))
os.remove(log_filename(cachedir, i, "peers.dat"))
os.remove(log_filename(cachedir, i, "fee_estimates.dat"))
for i in range(num_nodes):
from_dir = os.path.join(cachedir, "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
    match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, chain='elementsregtest', cookie_auth=False):
"""
Start a bitcoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
cookie_file = datadir+"/"+chain+"/.cookie"
if binary is None:
binary = os.getenv("ELEMENTSD", "elementsd")
args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-mocktime="+str(get_mocktime()) ]
args.append('-regtest' if chain == 'regtest' else '-chain=' + chain)
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print("start_node: bitcoind started, waiting for RPC to come up")
# We need to make sure the cookie auth file is created before reading it
wait_for_cookie_time = 10
cookie_file_handle = Path(cookie_file)
while cookie_auth and wait_for_cookie_time > 0 and not cookie_file_handle.is_file():
wait_for_cookie_time -= 1
time.sleep(1)
url = rpc_url(i, rpchost, cookie_file if cookie_auth else None)
wait_for_bitcoind_start(bitcoind_processes[i], url, i)
if os.getenv("PYTHON_DEBUG", ""):
print("start_node: RPC successfully started")
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, chain='elementsregtest'):
"""
Start multiple bitcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for _ in range(num_nodes) ]
if binary is None: binary = [ None for _ in range(num_nodes) ]
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(start_node(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i], chain=chain))
except: # If one node failed to start, stop the others
stop_nodes(rpcs)
raise
return rpcs
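# Illustrative sketch (not part of the original framework): a typical lifecycle for a
# small two-node test built from the helpers above; the datadirs under `dirname` are
# assumed to have been prepared by initialize_chain()/initialize_chain_clean().
def _example_two_node_lifecycle(dirname):
    nodes = start_nodes(2, dirname)
    connect_nodes_bi(nodes, 0, 1)
    nodes[0].generate(1)
    sync_blocks(nodes)
    stop_nodes(nodes)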
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "elementsregtest", logname)
def stop_node(node, i):
try:
node.stop()
except http.client.CannotSendRequest as e:
print("WARN: Unable to stop node: " + repr(e))
return_code = bitcoind_processes[i].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
assert_equal(return_code, 0)
del bitcoind_processes[i]
def stop_nodes(nodes):
for i, node in enumerate(nodes):
stop_node(node, i)
assert not bitcoind_processes.values() # All connections must be gone now
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = tx_size * fee_per_kB / 1000
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:"+e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_jsonrpc(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was returned or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:"+e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
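# Illustrative sketch (not part of the original file): how a test might use
# assert_raises_jsonrpc to check that an RPC fails with a given error code; the
# RPC call, argument and error code used here are hypothetical.
def _example_assert_raises_jsonrpc(node):
    assert_raises_jsonrpc(-8, None, node.getblockhash, -1)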
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find == True:
assert_equal(expected, { })
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find == True:
num_matched = num_matched+1
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
node.generate(int(0.5*count)+101)
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "nValue" : t["amount"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value/2)
outputs[addr2] = satoshi_round(send_value/2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
txid = node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
    for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{ "txid" : coinbase, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs=[{ "txid" : t["txid"], "vout" : t["vout"], "nValue" : t["amount"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
    # generate transactions of roughly 66kB each;
    # 14 of them come close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
| 37.714286
| 130
| 0.657684
|
4a00370048547121f411da537dc0e96acb6337f1
| 14,141
|
py
|
Python
|
models/resnet50.py
|
IIT-PAVIS/Acoustic-Image-Generation
|
a31c32ed6c3fe96d82b715833b7d32c87575e62b
|
[
"MIT"
] | null | null | null |
models/resnet50.py
|
IIT-PAVIS/Acoustic-Image-Generation
|
a31c32ed6c3fe96d82b715833b7d32c87575e62b
|
[
"MIT"
] | null | null | null |
models/resnet50.py
|
IIT-PAVIS/Acoustic-Image-Generation
|
a31c32ed6c3fe96d82b715833b7d32c87575e62b
|
[
"MIT"
] | null | null | null |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the original form of Residual Networks.
The 'v1' residual networks (ResNets) implemented in this module were proposed
by:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Other variants were introduced in:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The networks defined in this module utilize the bottleneck building block of
[1] with projection shortcuts only for increasing depths. They employ batch
normalization *after* every weight layer. This is the architecture used by
MSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and
ResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1'
architecture and the alternative 'v2' architecture of [2] which uses batch
normalization *before* every weight layer in the so-called full pre-activation
units.
Typical use:
from tensorflow.contrib.slim.python.slim.nets import
resnet_v1
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
resnet_arg_scope = resnet_utils.resnet_arg_scope
@add_arg_scope
def bottleneck(inputs,
depth,
depth_bottleneck,
stride,
rate=1,
outputs_collections=None,
scope=None):
"""Bottleneck residual unit variant with BN after convolutions.
This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
its definition. Note that we use here the bottleneck variant which has an
extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with variable_scope.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
depth_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = layers.conv2d(
inputs,
depth, [1, 1],
stride=stride,
activation_fn=None,
scope='shortcut')
residual = layers.conv2d(
inputs, depth_bottleneck, [1, 1], stride=1, scope='conv1')
residual = resnet_utils.conv2d_same(
residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2')
residual = layers.conv2d(
residual, depth, [1, 1], stride=1, activation_fn=None, scope='conv3')
output = nn_ops.relu(shortcut + residual)
return utils.collect_named_outputs(outputs_collections, sc.name, output)
def resnet_v1(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
reuse=None,
scope=None):
"""Generator for v1 ResNet models.
This function generates a family of ResNet v1 models. See the resnet_v1_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: whether batch_norm layers are in training mode.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling, if False excludes it.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with variable_scope.variable_scope(
scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with arg_scope(
[layers.conv2d, bottleneck, resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with arg_scope([layers.batch_norm], is_training=is_training):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride /= 4
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
net = layers.conv2d(
net, 12, [3, 4], stride=1, padding='valid', scope='conv_map')
if global_pool:
# Global average pooling.
net = math_ops.reduce_mean(net, [1, 2], name='pool5', keepdims=True)
if num_classes is not None:
net = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='logits')
# Convert end_points_collection into a dictionary of end_points.
end_points = utils.convert_collection_to_dict(end_points_collection)
if num_classes is not None:
end_points['predictions'] = layers_lib.softmax(
net, scope='predictions')
return net, end_points
resnet_v1.default_image_size = 224
def resnet_v1_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_v1 bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
Returns:
A resnet_v1 bottleneck block.
"""
return resnet_utils.Block(scope, bottleneck, [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': 1
}] * (num_units - 1) + [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': stride
}])
def resnet_v1_50(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v1_50'):
"""ResNet-50 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=1),
resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=6, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
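# Illustrative sketch (not part of the original file): instantiating the modified
# ResNet-50 above on a TF1-style placeholder; the input shape is an assumption.
def _example_resnet_v1_50_forward():
    import tensorflow as tf
    inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
    with arg_scope(resnet_arg_scope()):
        net, end_points = resnet_v1_50(inputs, is_training=False, global_pool=False)
    return net, end_points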
def resnet_v1_101(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v1_101'):
"""ResNet-101 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=23, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
def resnet_v1_152(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v1_152'):
"""ResNet-152 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=8, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
def resnet_v1_200(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v1_200'):
"""ResNet-200 model of [2]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=24, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
| 39.833803
| 80
| 0.673503
|
4a00382ddcfbc0e8dd398ecbcee991399f33f406
| 1,415
|
py
|
Python
|
src/models/classifiers/support_vector_machine_classification.py
|
christiankevintraore/Regression-and-Classification-Model-Selection-Beginners-Playbook
|
d784a6d1246a1617c3f84ab38dc6f252105f7946
|
[
"MIT"
] | null | null | null |
src/models/classifiers/support_vector_machine_classification.py
|
christiankevintraore/Regression-and-Classification-Model-Selection-Beginners-Playbook
|
d784a6d1246a1617c3f84ab38dc6f252105f7946
|
[
"MIT"
] | null | null | null |
src/models/classifiers/support_vector_machine_classification.py
|
christiankevintraore/Regression-and-Classification-Model-Selection-Beginners-Playbook
|
d784a6d1246a1617c3f84ab38dc6f252105f7946
|
[
"MIT"
] | null | null | null |
"""support_vector_machine_classification.py
~~~~~~~~~~~~~~
An implementation of Support Vector Machine Classification.
Desirable features :
- Tune the classifier input parameters for better performance.
"""
#### Libraries
from sklearn.svm import SVC
import models.classifiers.generic_classifier as gc
#### Main SupportVectorMachineClassifier class
class SupportVectorMachineClassifier(gc.GenericClassifier):
def evaluate(self):
"""Applies the Support Vector Machine Classification model on the dataset.
"""
self.classifier = SVC(kernel = 'linear', random_state = 0)
return self.evaluate_from_classifier('Support Vector Machine Classification', self.classifier)
def predict(self):
"""Makes some predictions with Support Vector Machine Classification model.
"""
predictLambda = lambda valuesToPredict : self.classifier.predict(self.X_scaler.transform(valuesToPredict))
return ["Support Vector Machine Classification predictions", super().predict_user_input_variables(predictLambda)]
def predictions_relevance(self):
"""Returns a comparison table for Support Vector Machine Classification model.
"""
return ["Support Vector Machine Classification predictions comparison", super().truncate_predictions_relevance(self.X_scaler.inverse_transform(self.X_test), self.datasetManager.y_test, self.y_pred)]
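# Illustrative variant (assumption, not part of the original class): a non-linear
# classifier could be obtained by swapping the kernel when building the SVC, e.g.
#
#     self.classifier = SVC(kernel='rbf', gamma='scale', random_state=0)
#
# before passing it to evaluate_from_classifier() as in evaluate() above.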
| 34.512195
| 206
| 0.746996
|
4a003938cbcf77de5b22d20de723fa9e033f4f6b
| 4,716
|
py
|
Python
|
pysgrs/tests/test_cipher_pipeline.py
|
jlandercy/pysgrs
|
2f7765d5911e2928495d8f1003d4fb3168abdbe1
|
[
"BSD-3-Clause"
] | 1
|
2021-11-08T08:23:43.000Z
|
2021-11-08T08:23:43.000Z
|
pysgrs/tests/test_cipher_pipeline.py
|
jlandercy/pysgrs
|
2f7765d5911e2928495d8f1003d4fb3168abdbe1
|
[
"BSD-3-Clause"
] | null | null | null |
pysgrs/tests/test_cipher_pipeline.py
|
jlandercy/pysgrs
|
2f7765d5911e2928495d8f1003d4fb3168abdbe1
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import unittest
from pysgrs.tests.test_cipher import TestStreamCipher
from pysgrs import ciphers
class TestPipelineCipherSimpleCase(TestStreamCipher, unittest.TestCase):
cipher = ciphers.PipelineCipher([
ciphers.CaesarCipher(),
ciphers.TranspositionCipher()
])
ciphertexts = [
"DJPVBEKQWCFLRX GMSY HNTZ IOUA",
"CWQKEBVPJDAUOI ZTNH YSMG XRLF",
"WFQPUCKNISWBHERVKGTUARHRXRMYOJLZXHD",
"ZDKLVDGIFYOQRNHWBUMACPTL ESXJ",
"MRKTDLAJXOYQULWHBDFCGPEN ISVZ",
"JFCWZONQRDLVBYUETPHIMXSA RLKG",
"VRNCPSITMBKEXXYLODGRQDUJZAFWH",
"KLXIDRQLWVZJFCMYONHXHBGEPATDUS",
"WHJGTBKEZVX HRLML IACXF LLDPN YQUSO",
"MZPSICDVBKT FOELX NRLQD GYJAU DHVRW",
"SEKRTJDRICXVFALHR NZYQU PLHOM BWGLX",
"Oihw hyul rOluh.ybwphih hrrrd i x uubwr d guqrru vzlr x h hhzd oy lu .vzlh",
"Ed hh pwhuzh ppwd hkowldkwpz d,kqwrwlkdw rgwvhqrq ev hhug gbhhgu .b rf r zg rvxdzqdkr xd xk’qrq bivrwg ’",
"Ld krwqwiqjl k quqvldzbrhjpq drwdvdjjbx w,ovu. g o h frwg ld",
"Zsvyq dkbjlhhhjixd .vd rvyv hnwvwrhhr krro p pehp vwwh hhbhv krw qf wdeh k dkkbhbvl vxdl;f dq",
"JJxjddr rgjxJxddjr rxG"
]
class TestPipelineCipherComplexCase(TestStreamCipher, unittest.TestCase):
cipher = P = ciphers.PipelineCipher([
ciphers.CaesarCipher(),
ciphers.VigenereCipher(key="NAPOLEON"),
ciphers.TranspositionCipher()
])
ciphertexts = [
"QXAKOEXUKCUYFI UMFC SCGN MCUN",
"PKBZRBITXDPHCT NTAL JHZU BFLS",
"JTBEHQKAMGWOWRFGZTHUNVVRIGZMZYPNXUH",
"MRVAIDTMTYDDFYWKBHQONEGZ IGXW",
"ZFVIQLNNLONDIWLVBQJQRERB MGVM",
"WTNLMOAUFDAIPJJSTCLWXMFO VZKT",
"IFYRCSVXABZRLINZOQKFBSHXKETWU",
"XZIXQRDPKVOWTNBMOALLSQTSAEHDHW",
"JVUVGPKRDJX WEZXA WAPBT WAQDY CEUFS",
"ZNAHVQDIFYT UBSWM BRYUR RNWOF HVVEA",
"FSVGGXDEMQXIUNZSG BZLUI AAUCX FKGYB",
"Bvvh hlhl vZzjh.nojdltv vreer x m hhfhf o gudfvf znag k l svnd cm zy .kmyu",
"Rq hw aahuzw ddhh wzckwookdo r,vuafjyyrh vukiudcb pj uuug kfluth .f ft e zv fjkqmqdzf kq kk’ffb ovvglu ’",
"Yr yrjehidnw x diuklqdqruxdf rflqioxwoi w,sgu. u c w jclt pr",
"Mfvyf dkbylhwwxims .ks gjmk vbkjhfssf vcvz a aplt zaal llpvj yfk et jqsu x qxxouoiy ikql;s qq",
"WUxnddv fvylYkrrwf eiT"
]
class TestPipelineCipherVigenereSquared(TestStreamCipher, unittest.TestCase):
cipher = P = ciphers.PipelineCipher([
ciphers.VigenereCipher(key="FIRST"),
ciphers.VigenereCipher(key="SECOND"),
])
ciphertexts = [
"XNVJKNGCCQCWVJJILLCQBRASJI",
"WKQCBCTNLXHZWIGDECRDMAHXMJ",
"QTXWAQCFVYGHWBJQEOWMZKZZCCRJQWWKWUM",
"TMEZFJAYHFEAQBJKLOSZRFMBDEOC",
"GUOKJNOSHFEAQCMTWMARPYORLUDE",
"DXBHPWCFMXMTIJTFKBDLCABYHJBK",
"PBAOTFOAVSSNTMPTMNJGBZKZXHFTB",
"EAPBKFIIASQBDEXDYUPQGAFMLBTZRL",
"QTXLODEWIEAYPSDSVLNPQQQKBDSHPHV",
"GMVQJIWNFVNPVUWBBMZEPJBJQZEFWPW",
"MMVQSGBJRDAEQBDOZXYWLJPDBDYWOQDE",
"Iuok ga ia svm hnnz mj xsb akqjcayb. Qaxdg gy qf tib opaa oh gcfb mkvzgnb.",
"Yq pnu gop uyw lwz ntt qrxa usp qnoq, gazmnyk bhjml osx idgy xyk’a ieoenb fsz qthyk ehj ghlenn yhi’n wfuz.",
"Fr rua kaihvl ox cmxvn depjkn, ox crfhi faotos dh h ycnwo pvs.",
"Tulk smn njlsv kaxtpmo qoac cleo xtibfaoto tj mhq; qxkgl wymxboi osni mfrb fh ygg sjgllsrjb.",
"Dmzg Mwubib Yzdcjn Yunx",
"Ngtzxm jjoyfldt chnnsilo wjyc zqzo x okgovdmy xmp vegez vkfvjrzecox."
]
class TestPipelineCipherVigenereSquaredIsVigenere(TestStreamCipher, unittest.TestCase):
cipher = P = ciphers.PipelineCipher([
ciphers.VigenereCipher(key="BOB"),
ciphers.VigenereCipher(key="BOBETTE"),
])
ciphertexts = [
"CDEILZLWXLCFGFQEFWMAZXYZDG",
]
class TestPipelineCipherVigenereSquaredIsVigenereCrossCheck(TestStreamCipher, unittest.TestCase):
cipher = ciphers.VigenereCipher(key="CCCFHUFPPCSUUSCPPFUHF")
ciphertexts = TestPipelineCipherVigenereSquaredIsVigenere.ciphertexts
class TestPipelineCipherEquivalenceCaeserRotation(TestStreamCipher, unittest.TestCase):
cipher = ciphers.PipelineCipher([ciphers.CaesarCipher(), ciphers.RotationCipher(offset=-3)])
ciphertexts = TestStreamCipher.plaintexts
class TestPipelineCipherEquivalenceCaeserVigenere(TestStreamCipher, unittest.TestCase):
cipher = ciphers.PipelineCipher([ciphers.RotationCipher(offset=-3), ciphers.VigenereCipher(key="D")])
ciphertexts = TestStreamCipher.plaintexts
def main():
unittest.main()
sys.exit(0)
if __name__ == "__main__":
main()
| 38.032258
| 120
| 0.702714
|
4a0039e85ce4ca223fe6eeff417a564e96999bf9
| 2,239
|
py
|
Python
|
nqviz/board.py
|
jhan15/nqviz
|
08c8dc0f550dd6dd8f50b2cd343d91002d1faede
|
[
"MIT"
] | null | null | null |
nqviz/board.py
|
jhan15/nqviz
|
08c8dc0f550dd6dd8f50b2cd343d91002d1faede
|
[
"MIT"
] | null | null | null |
nqviz/board.py
|
jhan15/nqviz
|
08c8dc0f550dd6dd8f50b2cd343d91002d1faede
|
[
"MIT"
] | null | null | null |
import pygame
from io import BytesIO
import requests
class board:
def __init__(self, N):
self.N = N
self.colors = [(255,255,255), (255,153,204)]
self.surface_sz = 640
self.grid_sz = self.surface_sz // self.N
self.surface_sz = self.N * self.grid_sz
self.surface = None
self.queen = self.read_queen_image()
self.queen = pygame.transform.scale(self.queen, (self.grid_sz, self.grid_sz))
self.offset = (self.grid_sz-self.queen.get_width()) // 2
self.create_board()
def read_queen_image(self):
url = 'https://github.com/jhan15/nqviz/blob/master/nqviz/queen.png?raw=true'
r = requests.get(url)
dataBytesIO = BytesIO(r.content)
queen_img = pygame.image.load(dataBytesIO)
return queen_img
def create_board(self):
pygame.init()
title = str(self.N) + '-queen chessboard'
pygame.display.set_caption(title)
self.surface = pygame.display.set_mode((self.surface_sz, self.surface_sz))
for row in range(self.N):
color_ind = row % 2
for col in range(self.N):
grid = (col*self.grid_sz, row*self.grid_sz, self.grid_sz, self.grid_sz)
self.surface.fill(self.colors[color_ind], grid)
color_ind = (color_ind + 1) % 2
pygame.display.flip()
def place_queen(self, pos):
x1 = pos[0] * self.grid_sz
y1 = pos[1] * self.grid_sz
x2 = (pos[0] + 1) * self.grid_sz
y2 = (pos[1] + 1) * self.grid_sz
self.surface.blit(self.queen, (x1+self.offset,y1+self.offset))
pygame.display.update(x1,y1,x2,y2)
pygame.event.pump()
pygame.time.delay(500)
def remove_queen(self, pos):
x1 = pos[0] * self.grid_sz
y1 = pos[1] * self.grid_sz
x2 = (pos[0] + 1) * self.grid_sz
y2 = (pos[1] + 1) * self.grid_sz
color_ind = (pos[0] + pos[1]) % 2
grid = (x1, y1, self.grid_sz, self.grid_sz)
self.surface.fill(self.colors[color_ind], grid)
pygame.display.update(x1,y1,x2,y2)
pygame.event.pump()
pygame.time.delay(500)
def quit(self):
pygame.quit()
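# Illustrative usage sketch (not part of the original module): open a small board,
# place a queen at column 1 / row 0, remove it again and close the window.
def _example_usage():
    b = board(4)
    b.place_queen((1, 0))
    b.remove_queen((1, 0))
    b.quit()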
| 32.926471
| 87
| 0.57883
|
4a003ac4b808c99b9d8999e166d6a807cbcc27ed
| 638
|
py
|
Python
|
examples/benchmarks/tabular_luby.py
|
goktug97/DACBench
|
953bc8efacdb993889b223110e25f7e453c86b2d
|
[
"Apache-2.0"
] | 1
|
2021-02-05T16:18:56.000Z
|
2021-02-05T16:18:56.000Z
|
examples/benchmarks/tabular_luby.py
|
goktug97/DACBench
|
953bc8efacdb993889b223110e25f7e453c86b2d
|
[
"Apache-2.0"
] | null | null | null |
examples/benchmarks/tabular_luby.py
|
goktug97/DACBench
|
953bc8efacdb993889b223110e25f7e453c86b2d
|
[
"Apache-2.0"
] | null | null | null |
"""
Code adapted from
"Dynamic Algorithm Configuration:Foundation of a New Meta-Algorithmic Framework"
by A. Biedenkapp and H. F. Bozkurt and T. Eimer and F. Hutter and M. Lindauer.
Original environment authors: André Biedenkapp, H. Furkan Bozkurt
"""
from examples.example_utils import q_learning
from dacbench.benchmarks import LubyBenchmark
# Make Luby environment
bench = LubyBenchmark()
env = bench.get_environment()
# Execute 10 episodes of tabular Q-Learning
q_func, test_train_stats = q_learning(env, 10)
print(f"Rewards: {test_train_stats[1].episode_rewards}")
print(f"Episode Lengths: {test_train_stats[1].episode_lengths}")
| 33.578947
| 80
| 0.793103
|
4a003af12e1da2d93a11c4fc470e0d1e83a48fb8
| 1,575
|
py
|
Python
|
src/tweet.py
|
PyLadiesBerlin/advent_twitter_bot
|
4459da9af80ad92a359e8f77b78c2f075e3ad944
|
[
"MIT"
] | 1
|
2021-11-30T15:39:17.000Z
|
2021-11-30T15:39:17.000Z
|
src/tweet.py
|
PyLadiesBerlin/advent_twitter_bot
|
4459da9af80ad92a359e8f77b78c2f075e3ad944
|
[
"MIT"
] | null | null | null |
src/tweet.py
|
PyLadiesBerlin/advent_twitter_bot
|
4459da9af80ad92a359e8f77b78c2f075e3ad944
|
[
"MIT"
] | null | null | null |
import tweepy
from jinja2 import Environment, FileSystemLoader
def twitter_connect(
TWITTER_API_KEY,
TWITTER_API_SECRET_KEY,
TWITTER_ACCESS_TOKEN,
TWITTER_ACCESS_TOKEN_SECRET,
):
"""
    Connects to Twitter and returns a Tweepy API object
"""
auth = tweepy.OAuthHandler(TWITTER_API_KEY, TWITTER_API_SECRET_KEY)
auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
return api
def twitter_message(post_day, name, description, url, hashtags):
"""
Creates twitter message
Inputs:
post_day: int, day since the project started
name: str
description: str
url: str, url for project
hashtags: str, resource specific tags
returns:
        a tweet-ready string, truncated to 278 characters if necessary
"""
    # Keep two characters of headroom below Twitter's 280-character limit (278)
lim = 280 - 2
file_loader = FileSystemLoader("src/templates")
env = Environment(loader=file_loader)
if post_day == 0:
template = env.get_template("day_0_template.txt")
else:
template = env.get_template("tweet_template.txt")
text = template.render(
post_day=str(post_day),
name=name,
description=description,
url=url,
hashtags=hashtags,
)
if len(text) > lim:
content_text = text[:lim]
else:
content_text = text
return content_text
def twitter_post(api, message_twitter):
"""
Posts to twitter
"""
api.update_status(status=message_twitter)
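# Illustrative sketch (not part of the original module): composing and posting a
# single tweet; the credential arguments and resource values are placeholders.
def _example_post(api_key, api_secret_key, access_token, access_token_secret):
    api = twitter_connect(api_key, api_secret_key, access_token, access_token_secret)
    message = twitter_message(
        post_day=1,
        name="Example Project",
        description="A placeholder description of the resource",
        url="https://example.org",
        hashtags="#python #opensource",
    )
    twitter_post(api, message)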
| 24.230769
| 83
| 0.671746
|
4a003bea4622b4230ca2531779726b14e64980ab
| 1,458
|
py
|
Python
|
src/anyio/_core/_streams.py
|
richardsheridan/anyio
|
5ed3270c8d12cffc4cd3349d9ff32bc32451ae65
|
[
"MIT"
] | 749
|
2018-10-08T13:12:22.000Z
|
2022-03-28T17:43:16.000Z
|
src/anyio/_core/_streams.py
|
richardsheridan/anyio
|
5ed3270c8d12cffc4cd3349d9ff32bc32451ae65
|
[
"MIT"
] | 357
|
2018-10-07T18:12:40.000Z
|
2022-03-30T20:38:29.000Z
|
src/anyio/_core/_streams.py
|
richardsheridan/anyio
|
5ed3270c8d12cffc4cd3349d9ff32bc32451ae65
|
[
"MIT"
] | 66
|
2018-10-09T05:18:54.000Z
|
2022-03-21T08:37:14.000Z
|
import math
from typing import Optional, Tuple, Type, TypeVar, overload
from ..streams.memory import (
MemoryObjectReceiveStream, MemoryObjectSendStream, MemoryObjectStreamState)
T_Item = TypeVar('T_Item')
@overload
def create_memory_object_stream(
max_buffer_size: float, item_type: Type[T_Item]
) -> Tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]:
...
@overload
def create_memory_object_stream(
max_buffer_size: float = 0
) -> Tuple[MemoryObjectSendStream, MemoryObjectReceiveStream]:
...
def create_memory_object_stream(
max_buffer_size: float = 0, item_type: Optional[Type[T_Item]] = None
) -> Tuple[MemoryObjectSendStream, MemoryObjectReceiveStream]:
"""
Create a memory object stream.
:param max_buffer_size: number of items held in the buffer until ``send()`` starts blocking
:param item_type: type of item, for marking the streams with the right generic type for
static typing (not used at run time)
:return: a tuple of (send stream, receive stream)
"""
if max_buffer_size != math.inf and not isinstance(max_buffer_size, int):
raise ValueError('max_buffer_size must be either an integer or math.inf')
if max_buffer_size < 0:
raise ValueError('max_buffer_size cannot be negative')
state: MemoryObjectStreamState = MemoryObjectStreamState(max_buffer_size)
return MemoryObjectSendStream(state), MemoryObjectReceiveStream(state)
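# Illustrative usage sketch (not part of the original module): a minimal in-process
# round trip over a memory object stream, to be awaited from an AnyIO event loop.
async def _example_roundtrip() -> str:
    send, receive = create_memory_object_stream(1, item_type=str)
    async with send, receive:
        await send.send("hello")
        return await receive.receive()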
| 33.906977
| 95
| 0.753086
|
4a003cb7d22dba38423f89da8b15522a690dc2fb
| 3,219
|
py
|
Python
|
tests/contrib/auth/test_models.py
|
codingjoe/django-mail-auth
|
21b102e511bd801f04fbb2328d846e625dac7f60
|
[
"MIT"
] | 39
|
2019-04-12T22:44:55.000Z
|
2021-12-12T06:03:47.000Z
|
tests/contrib/auth/test_models.py
|
codingjoe/django-mail-auth
|
21b102e511bd801f04fbb2328d846e625dac7f60
|
[
"MIT"
] | 49
|
2019-05-28T10:58:51.000Z
|
2022-03-29T00:33:18.000Z
|
tests/contrib/auth/test_models.py
|
codingjoe/django-mail-auth
|
21b102e511bd801f04fbb2328d846e625dac7f60
|
[
"MIT"
] | 6
|
2019-08-07T06:16:38.000Z
|
2022-02-25T12:14:13.000Z
|
import django
import pytest
from django.core.exceptions import FieldDoesNotExist
from mailauth.contrib.user.models import EmailUser
class TestAbstractEmailUser:
def test_has_usable_password(self):
assert not EmailUser().has_usable_password()
def test_get_session_auth_hash__default(self, db):
user = EmailUser(email="spiderman@avengers.com")
assert user.session_salt
assert user.get_session_auth_hash()
def test_get_session_auth_hash__value_error(self, db):
user = EmailUser(email="spiderman@avengers.com", session_salt=None)
with pytest.raises(ValueError) as e:
user.get_session_auth_hash()
assert "'session_salt' must be set" in str(e.value)
def test_get_session_auth_hash__unique(self, db):
spiderman = EmailUser(email="spiderman@avengers.com")
ironman = EmailUser(email="ironman@avengers.com")
assert spiderman.get_session_auth_hash() != ironman.get_session_auth_hash()
@pytest.mark.skipif(django.VERSION < (3, 1), reason="requires Django 3.1 or higher")
def test_legacy_get_session_auth_hash__default(self, db):
user = EmailUser(email="spiderman@avengers.com")
assert user.session_salt
assert user._legacy_get_session_auth_hash()
@pytest.mark.skipif(django.VERSION < (3, 1), reason="requires Django 3.1 or higher")
def test_legacy_get_session_auth_hash__value_error(self, db):
user = EmailUser(email="spiderman@avengers.com", session_salt=None)
with pytest.raises(ValueError) as e:
user._legacy_get_session_auth_hash()
assert "'session_salt' must be set" in str(e.value)
@pytest.mark.skipif(django.VERSION < (3, 1), reason="requires Django 3.1 or higher")
def test_legacy_get_session_auth_hash__unique(self, db):
spiderman = EmailUser(email="spiderman@avengers.com")
ironman = EmailUser(email="ironman@avengers.com")
assert (
spiderman._legacy_get_session_auth_hash()
!= ironman._legacy_get_session_auth_hash()
)
def test_password_field(self):
user = EmailUser(email="spiderman@avengers.com")
with pytest.raises(FieldDoesNotExist):
user.password
class TestEmailUserManager:
def test_create_user(self, db):
user = EmailUser.objects.create_user("spiderman@avengers.com")
assert user.pk is not None
assert user.email == "spiderman@avengers.com"
assert not user.is_superuser
def test_create_superuser(self, db):
user = EmailUser.objects.create_superuser("spiderman@avengers.com")
assert user.is_superuser
def test_create_superuser__no_staff(self, db):
with pytest.raises(ValueError, match="Superuser must have is_staff=True."):
EmailUser.objects.create_superuser(
"spiderman@avengers.com",
is_staff=False,
)
def test_create_superuser__no_superuser(self, db):
with pytest.raises(ValueError, match="Superuser must have is_superuser=True."):
EmailUser.objects.create_superuser(
"spiderman@avengers.com",
is_superuser=False,
)
| 36.579545
| 88
| 0.690276
|
4a003d9e36b3c4da185d0650643805d2375d097b
| 2,271
|
py
|
Python
|
database_setup.py
|
vietdang7/catalog
|
13f5939ac591d2f54b50ed8461add24e4e2c4b60
|
[
"MIT"
] | null | null | null |
database_setup.py
|
vietdang7/catalog
|
13f5939ac591d2f54b50ed8461add24e4e2c4b60
|
[
"MIT"
] | null | null | null |
database_setup.py
|
vietdang7/catalog
|
13f5939ac591d2f54b50ed8461add24e4e2c4b60
|
[
"MIT"
] | null | null | null |
#Import needed lib from SQLAlchemy
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
#Setup 'User' table
class User(Base):
__tablename__ = 'user'
name = Column(String(80), nullable = False)
id = Column(Integer, primary_key = True)
email = Column(String(80), nullable = False)
picture = Column(String(250))
#property decoration for serialize (JSON)
@property
    def serialize(self):
        """Return object data in easily serializable format"""
return {
'name' : self.name,
'id' : self.id,
'email' : self.email,
'picture' : self.picture,
}
#Setup 'Category' table
class Category(Base):
__tablename__ = 'category'
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
catalogitem = relationship('CatalogItem', cascade='all, delete-orphan')
user_id = Column(Integer, ForeignKey('user.id'))
user = relationship(User)
#property decoration for serialize (JSON)
@property
    def serialize(self):
        """Return object data in easily serializable format"""
return {
'name' : self.name,
'id' : self.id,
}
#Setup 'CatalogItem' table
class CatalogItem(Base):
__tablename__ = 'catalogitem'
name = Column(String(80), nullable = False)
id = Column(Integer, primary_key = True)
description = Column(String(250))
category_id = Column(Integer,ForeignKey('category.id'))
category = relationship(Category)
user_id = Column(Integer, ForeignKey('user.id'))
user = relationship(User)
#property decoration for serialize (JSON)
@property
def serialize(self):
"""Return object data in easily serializeable format"""
return {
'name' : self.name,
'description' : self.description,
'id' : self.id,
'category' : self.category.name,
}
engine = create_engine('sqlite:///catalog.db')
Base.metadata.create_all(engine)
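

# --- Hedged usage sketch (not part of the original module) ---
# Shows how the tables above could be populated; the sample values are
# illustrative assumptions, and this writes to the same sqlite:///catalog.db.
if __name__ == '__main__':
    from sqlalchemy.orm import sessionmaker

    Session = sessionmaker(bind=engine)
    session = Session()

    user = User(name='Demo User', email='demo@example.com')
    category = Category(name='Snowboarding', user=user)
    item = CatalogItem(name='Goggles', description='Anti-fog goggles',
                       category=category, user=user)
    session.add_all([user, category, item])
    session.commit()

    # Each model exposes a 'serialize' property for JSON-style output.
    print([c.serialize for c in session.query(Category).all()])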
| 25.806818
| 75
| 0.619991
|
4a003db691361ba269aa9c929a9cda8f564ae1c5
| 3,913
|
py
|
Python
|
freezing/web/utils/genericboard.py
|
freezingsaddles/freezingsaddles
|
a65281c0183972aa647d9ffb45ff6bd7f8da47d5
|
[
"Apache-2.0"
] | 4
|
2019-01-02T15:14:46.000Z
|
2020-01-02T01:22:34.000Z
|
freezing/web/utils/genericboard.py
|
freezingsaddles/freezingsaddles
|
a65281c0183972aa647d9ffb45ff6bd7f8da47d5
|
[
"Apache-2.0"
] | 119
|
2018-01-19T13:34:39.000Z
|
2022-03-16T11:48:00.000Z
|
freezing/web/utils/genericboard.py
|
freezingsaddles/freezingsaddles
|
a65281c0183972aa647d9ffb45ff6bd7f8da47d5
|
[
"Apache-2.0"
] | 3
|
2016-07-29T02:26:24.000Z
|
2017-11-20T20:38:49.000Z
|
import decimal
import os
import enum
from datetime import datetime
from typing import List, Dict, Any, Tuple
import yaml
from marshmallow import fields
from marshmallow_enum import EnumField
from freezing.model import meta
from freezing.model.msg import BaseSchema, BaseMessage
from freezing.web.config import config
from freezing.web.exc import ObjectNotFound
class GenericBoardField(BaseMessage):
name = None
label = None
type = None # Do we need this ...?
format = None
visible: bool = True
rank_by: bool = False
def format_value(self, v, row):
if isinstance(v, str):
if self.format:
return self.format.format(**dict(row))
else:
return v
elif isinstance(v, (float, decimal.Decimal)):
# '{number:.{digits}f}'.format(number=p, digits=n)
# {:,}
if self.format:
return self.format.format(v)
else:
return "{0:,.2f}".format(v)
elif isinstance(v, int):
# '{number:.{digits}f}'.format(number=p, digits=n)
# {:,}
if self.format:
return self.format.format(v)
else:
return "{0:,}".format(v)
elif isinstance(v, datetime):
if self.format:
return v.strftime(self.format)
else:
return v.isoformat()
else:
return v
class GenericBoardFieldSchema(BaseSchema):
_model_class = GenericBoardField
name = fields.Str()
label = fields.Str()
type = fields.Str()
format = fields.Str()
visible = fields.Bool()
rank_by = fields.Bool()
class GenericBoard(BaseMessage):
title = None
description = None
url = None
query = None
fields: List[GenericBoardField] = None
class GenericBoardSchema(BaseSchema):
_model_class = GenericBoard
title = fields.Str()
description = fields.Str()
url = fields.Str()
query = fields.Str(required=True, allow_none=False)
fields = fields.Nested(GenericBoardFieldSchema, many=True, required=False)
def load_board_and_data(leaderboard) -> Tuple[GenericBoard, List[Dict[str, Any]]]:
board = load_board(leaderboard)
with meta.transaction_context(read_only=True) as session:
rs = session.execute(board.query)
if not board.fields:
board.fields = [GenericBoardField(name=k, label=k) for k in rs.keys()]
rows = rs.fetchall()
return board, format_rows(rows, board)
def load_board(leaderboard) -> GenericBoard:
path = os.path.join(
config.LEADERBOARDS_DIR, "{}.yml".format(os.path.basename(leaderboard))
)
if not os.path.exists(path):
raise ObjectNotFound("Could not find yaml board definition {}".format(path))
with open(path, "rt", encoding="utf-8") as fp:
doc = yaml.load(fp, Loader=yaml.FullLoader)
schema = GenericBoardSchema()
board: GenericBoard = schema.load(doc)
return board
def format_rows(rows, board) -> List[Dict[str, Any]]:
try:
formatted = [
{f.name: f.format_value(row[f.name], row) for f in board.fields}
for row in rows
]
rank_by = next(iter([f.name for f in board.fields if f.rank_by]), None)
return formatted if rank_by is None else rank_rows(formatted, rank_by)
except KeyError as ke:
raise RuntimeError("Field not found in result row: {}".format(ke))
def rank_rows(rows, rank_by, index=1, rank=0, rank_value=None) -> List[Dict[str, Any]]:
if len(rows) == 0:
return rows
else:
head, *tail = rows
head_value = head[rank_by]
head_rank = rank if index > 1 and head_value == rank_value else index
return [{**head, "rank": head_rank}] + rank_rows(
tail, rank_by, 1 + index, head_rank, head_value
)
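

# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the tie-aware ranking of rank_rows; the "points" field name and
# the sample rows are illustrative assumptions.
if __name__ == "__main__":
    sample = [
        {"name": "a", "points": 10},
        {"name": "b", "points": 10},
        {"name": "c", "points": 7},
    ]
    # Tied values share a rank and the next distinct value resumes at its
    # positional index: ranks come out as 1, 1, 3.
    print(rank_rows(sample, "points"))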
| 26.80137
| 87
| 0.613851
|
4a003f5ab0c612cd8313b20f4699d23eb8dc0b39
| 3,371
|
py
|
Python
|
samples/particles/steam_example.py
|
cmarshall108/panda3d-python3
|
8bea2c0c120b03ec1c9fd179701fdeb7510bb97b
|
[
"PHP-3.0",
"PHP-3.01"
] | 3
|
2018-03-09T12:07:29.000Z
|
2021-02-25T06:50:25.000Z
|
samples/particles/steam_example.py
|
cmarshall108/panda3d-python3
|
8bea2c0c120b03ec1c9fd179701fdeb7510bb97b
|
[
"PHP-3.0",
"PHP-3.01"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
samples/particles/steam_example.py
|
cmarshall108/panda3d-python3
|
8bea2c0c120b03ec1c9fd179701fdeb7510bb97b
|
[
"PHP-3.0",
"PHP-3.01"
] | 2
|
2019-12-02T01:39:10.000Z
|
2021-02-13T22:41:00.000Z
|
#!/usr/bin/env python
# Author: Shao Zhang and Phil Saltzman
# Last Updated: 2015-03-13
#
# This tutorial shows how to take an existing particle effect taken from a
# .ptf file and run it in a general Panda project.
from direct.showbase.ShowBase import ShowBase
from panda3d.core import TextNode
from panda3d.core import AmbientLight, DirectionalLight
from panda3d.core import LPoint3, LVector3
from panda3d.core import Filename
from panda3d.physics import BaseParticleEmitter, BaseParticleRenderer
from panda3d.physics import PointParticleFactory, SpriteParticleRenderer
from panda3d.physics import LinearNoiseForce, DiscEmitter
from direct.particles.Particles import Particles
from direct.particles.ParticleEffect import ParticleEffect
from direct.particles.ForceGroup import ForceGroup
from direct.gui.OnscreenText import OnscreenText
import sys
HELP_TEXT = """
1: Load Steam
2: Load Dust
3: Load Fountain
4: Load Smoke
5: Load Smokering
6: Load Fireish
ESC: Quit
"""
class ParticleDemo(ShowBase):
def __init__(self):
ShowBase.__init__(self)
# Standard title and instruction text
self.title = OnscreenText(
text="Panda3D: Tutorial - Particles",
parent=base.a2dBottomCenter,
style=1, fg=(1, 1, 1, 1), pos=(0, 0.1), scale=.08)
self.escapeEvent = OnscreenText(
text=HELP_TEXT, parent=base.a2dTopLeft,
style=1, fg=(1, 1, 1, 1), pos=(0.06, -0.06),
align=TextNode.ALeft, scale=.05)
# More standard initialization
self.accept('escape', sys.exit)
self.accept('1', self.loadParticleConfig, ['steam.ptf'])
self.accept('2', self.loadParticleConfig, ['dust.ptf'])
self.accept('3', self.loadParticleConfig, ['fountain.ptf'])
self.accept('4', self.loadParticleConfig, ['smoke.ptf'])
self.accept('5', self.loadParticleConfig, ['smokering.ptf'])
self.accept('6', self.loadParticleConfig, ['fireish.ptf'])
self.accept('escape', sys.exit)
base.disableMouse()
base.camera.setPos(0, -20, 2)
base.camLens.setFov(25)
base.setBackgroundColor(0, 0, 0)
# This command is required for Panda to render particles
base.enableParticles()
self.t = loader.loadModel("teapot")
self.t.setPos(0, 10, 0)
self.t.reparentTo(render)
self.setupLights()
self.p = ParticleEffect()
self.loadParticleConfig('steam.ptf')
def loadParticleConfig(self, filename):
# Start of the code from steam.ptf
self.p.cleanup()
self.p = ParticleEffect()
self.p.loadConfig(Filename(filename))
# Sets particles to birth relative to the teapot, but to render at
# toplevel
self.p.start(self.t)
self.p.setPos(3.000, 0.000, 2.250)
# Setup lighting
def setupLights(self):
ambientLight = AmbientLight("ambientLight")
ambientLight.setColor((.4, .4, .35, 1))
directionalLight = DirectionalLight("directionalLight")
directionalLight.setDirection(LVector3(0, 8, -2.5))
directionalLight.setColor((0.9, 0.8, 0.9, 1))
# Set lighting on teapot so steam doesn't get affected
self.t.setLight(self.t.attachNewNode(directionalLight))
self.t.setLight(self.t.attachNewNode(ambientLight))
demo = ParticleDemo()
demo.run()
| 35.484211
| 74
| 0.676357
|
4a003f9d3e46f0a1e5333d6f59e84fdb21b2138c
| 6,258
|
py
|
Python
|
dashboard/lib/flanker/addresslib/_parser/parser.py
|
robertsimmons514/isthislegit
|
aa8f2b6cb2ac3de2b0fe03bb93dbceccc4c1f495
|
[
"BSD-3-Clause"
] | 282
|
2017-07-01T03:47:54.000Z
|
2022-02-25T00:58:40.000Z
|
dashboard/lib/flanker/addresslib/_parser/parser.py
|
robertsimmons514/isthislegit
|
aa8f2b6cb2ac3de2b0fe03bb93dbceccc4c1f495
|
[
"BSD-3-Clause"
] | 46
|
2017-07-26T22:54:13.000Z
|
2022-02-14T21:39:52.000Z
|
dashboard/lib/flanker/addresslib/_parser/parser.py
|
robertsimmons514/isthislegit
|
aa8f2b6cb2ac3de2b0fe03bb93dbceccc4c1f495
|
[
"BSD-3-Clause"
] | 53
|
2017-07-22T15:04:16.000Z
|
2022-03-16T03:36:28.000Z
|
import logging
from collections import namedtuple
import ply.yacc as yacc
from flanker.addresslib._parser.lexer import lexer, tokens
log = logging.getLogger(__name__)
Mailbox = namedtuple('Mailbox', ['display_name', 'local_part', 'domain'])
Url = namedtuple('Url', ['address'])
# Parsing rules
start = 'mailbox_or_url_list'
def p_expression_mailbox_or_url_list(p):
'''mailbox_or_url_list : mailbox_or_url_list delim mailbox_or_url
| mailbox_or_url_list delim
| mailbox_or_url'''
if len(p) == 4:
p[0] = p[1] + [p[3]]
elif len(p) == 3:
p[0] = p[1]
elif len(p) == 2:
p[0] = [p[1]]
def p_delim(p):
'''delim : delim fwsp COMMA
| delim fwsp SEMICOLON
| COMMA
| SEMICOLON'''
def p_expression_mailbox_or_url(p):
'''mailbox_or_url : mailbox
| url'''
p[0] = p[1]
def p_expression_url(p):
'url : ofwsp URL ofwsp'
p[0] = Url(p[2])
def p_expression_mailbox(p):
'''mailbox : addr_spec
| angle_addr
| name_addr'''
p[0] = p[1]
def p_expression_name_addr(p):
'name_addr : ofwsp phrase angle_addr'
p[0] = Mailbox(p[2], p[3].local_part, p[3].domain)
def p_expression_angle_addr(p):
'angle_addr : ofwsp LANGLE addr_spec RANGLE ofwsp'
p[0] = Mailbox('', p[3].local_part, p[3].domain)
def p_expression_addr_spec(p):
'addr_spec : ofwsp local_part AT domain ofwsp'
p[0] = Mailbox('', p[2], p[4])
def p_expression_local_part(p):
'''local_part : DOT_ATOM
| ATOM
| quoted_string'''
p[0] = p[1]
def p_expression_domain(p):
'''domain : DOT_ATOM
| ATOM
| domain_literal'''
p[0] = p[1]
def p_expression_quoted_string(p):
'''quoted_string : DQUOTE quoted_string_text DQUOTE
| DQUOTE DQUOTE'''
if len(p) == 4:
p[0] = '"{}"'.format(p[2])
elif len(p) == 3:
p[0] = '""'
def p_expression_quoted_string_text(p):
'''quoted_string_text : quoted_string_text QTEXT
| quoted_string_text QPAIR
| quoted_string_text fwsp
| QTEXT
| QPAIR
| fwsp'''
p[0] = ''.join(p[1:])
def p_expression_domain_literal(p):
'''domain_literal : LBRACKET domain_literal_text RBRACKET
| LBRACKET RBRACKET'''
if len(p) == 4:
p[0] = '[{}]'.format(p[2])
elif len(p) == 3:
p[0] = '[]'
def p_expression_domain_literal_text(p):
'''domain_literal_text : domain_literal_text DTEXT
| domain_literal_text fwsp
| DTEXT
| fwsp'''
p[0] = ''.join(p[1:])
def p_expression_comment(p):
'''comment : LPAREN comment_text RPAREN
| LPAREN RPAREN'''
p[0] = ''
def p_expression_comment_text(p):
'''comment_text : comment_text CTEXT
| comment_text fwsp
| CTEXT
| fwsp'''
p[0] = ''.join(p[1:])
def p_expression_phrase(p):
'''phrase : phrase fwsp ATOM
| phrase fwsp DOT_ATOM
| phrase fwsp DOT
| phrase fwsp quoted_string
| phrase ATOM
| phrase DOT_ATOM
| phrase DOT
| phrase quoted_string
| ATOM
| DOT_ATOM
| DOT
| quoted_string'''
if len(p) == 4:
p[0] = '{} {}'.format(p[1], p[3])
    elif len(p) == 3:
p[0] = '{}{}'.format(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
def p_expression_ofwsp(p):
'''ofwsp : fwsp comment fwsp
| fwsp comment
| comment fwsp
| comment
| fwsp
|'''
p[0] = ''.join(p[1:])
def p_expression_fwsp(p):
'fwsp : FWSP'
p[0] = p[1].replace('\r\n', '')
def p_error(p):
if p:
raise SyntaxError('syntax error: token=%s, lexpos=%s' % (p.value, p.lexpos))
raise SyntaxError('syntax error: eof')
# Build the parsers
log.debug('building mailbox parser')
mailbox_parser = yacc.yacc(start='mailbox',
errorlog=log,
tabmodule='mailbox_parsetab',
debug=False,
write_tables=False)
log.debug('building addr_spec parser')
addr_spec_parser = yacc.yacc(start='addr_spec',
errorlog=log,
tabmodule='addr_spec_parsetab',
debug=False,
write_tables=False)
log.debug('building url parser')
url_parser = yacc.yacc(start='url',
errorlog=log,
tabmodule='url_parsetab',
debug=False,
write_tables=False)
log.debug('building mailbox_or_url parser')
mailbox_or_url_parser = yacc.yacc(start='mailbox_or_url',
errorlog=log,
tabmodule='mailbox_or_url_parsetab',
debug=False,
write_tables=False)
log.debug('building mailbox_or_url_list parser')
mailbox_or_url_list_parser = yacc.yacc(start='mailbox_or_url_list',
errorlog=log,
tabmodule='mailbox_or_url_list_parsetab',
debug=False,
write_tables=False)
# Interactive prompt for easy debugging
if __name__ == '__main__':
while True:
try:
s = raw_input('\nflanker> ')
except KeyboardInterrupt:
break
except EOFError:
break
if s == '': continue
print('\nTokens list:\n')
lexer.input(s)
while True:
tok = lexer.token()
if not tok:
break
print(tok)
print('\nParsing behavior:\n')
result = mailbox_or_url_list_parser.parse(s, debug=log)
print('\nResult:\n')
print(result)
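
# --- Hedged usage note (not part of the original module) ---
# Typical programmatic use of the parsers built above (illustrative only; the
# exact field values depend on the lexer's token definitions):
#
#   mailboxes = mailbox_or_url_list_parser.parse(
#       '"John Smith" <john@example.com>, http://example.com')
#   # expected shape: [Mailbox(display_name=..., local_part='john',
#   #                          domain='example.com'), Url(address=...)]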
| 28.706422
| 84
| 0.504474
|
4a00403780267db33ae94dd98bcca36391015ba8
| 6,103
|
py
|
Python
|
linkedin-scrapers/codes/WebConnectHelper.py
|
saurabh0424/data-scrapers
|
f043dfeba1a3758fccb6e97d5f3efa286fe79d57
|
[
"Apache-2.0"
] | 1
|
2021-01-10T15:23:29.000Z
|
2021-01-10T15:23:29.000Z
|
linkedin-scrapers/codes/WebConnectHelper.py
|
Copa6/data-scrapers
|
f043dfeba1a3758fccb6e97d5f3efa286fe79d57
|
[
"Apache-2.0"
] | 1
|
2020-04-08T03:42:01.000Z
|
2020-04-08T03:42:01.000Z
|
linkedin-scrapers/codes/WebConnectHelper.py
|
Copa6/data-scrapers
|
f043dfeba1a3758fccb6e97d5f3efa286fe79d57
|
[
"Apache-2.0"
] | null | null | null |
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
import time
from bs4 import BeautifulSoup
import re
import pandas as pd
class WebConnect():
def __init__(self, url):
self.driver = self.connect_driver()
self.driver.get(url)
print("Connected")
def connect_driver(self):
# driver = webdriver.Firefox() # Uses geckodriver win64
# working_dir= os.path.dirname(os.path.dirname(os.path.realpath(__file__)).replace('\\','/'))
# working_dir = os.path.dirname(os.path.realpath(__file__)).replace('\\','/')
# gecko = os.path.normpath(working_dir + '/drivers/geckodriver')
# binary = FirefoxBinary(r'C:\Program Files (x86)\Mozilla Firefox\firefox.exe')
# driver = webdriver.Firefox(firefox_binary=binary, executable_path=gecko+'.exe') # uses geckodriver win32
#To connect using custom profile
# profile = webdriver.FirefoxProfile(os.path.expanduser("C:/Users/ss_0002/AppData/Roaming/Mozilla/Firefox/Profiles/52wbvzqb.default"))
profile = webdriver.FirefoxProfile()
profile.set_preference('dom.webnotifications.enabled', False)
profile.set_preference('browser.link.open_newwindow', 1)
profile.set_preference('browser.link.open_newwindow.restriction', 0)
profile.set_preference('browser.link.open_newwindow.override.external', -1)
driver = webdriver.Firefox(firefox_profile=profile)
# driver = webdriver.Firefox()
return driver
def extract_target_info(self, target, by):
if isinstance(target, dict):
path = target["path"]
by = target["by"]
else:
path = target
by = by
return path, by
def load_element(self, target, by=None, kind="new"):
elem = None
i=0
path, by = self.extract_target_info(target, by)
delay = 5 if kind=="new" else 2
while((elem is None) and (i<delay)):
try:
if by=="xpath":
elem = self.driver.find_element_by_xpath(path)
elif by=="id":
elem = self.driver.find_element_by_id(path)
elif by=="link_text":
elem = self.driver.find_element_by_link_text(path)
elif by=="name":
elem = self.driver.find_element_by_name(path)
except:
time.sleep(1)
i +=1
return(elem)
def check_if_element_exists(self, target, by=None):
elem = None
path, by = self.extract_target_info(target, by)
try:
if by=="xpath":
elem = self.driver.find_element_by_xpath(path)
elif by=="id":
elem = self.driver.find_element_by_id(path)
elif by=="link_text":
elem = self.driver.find_element_by_link_text(path)
elif by=="name":
elem = self.driver.find_element_by_name(path)
except:
time.sleep(1)
return elem
def load_elements(self, target, by=None, kind="new"):
path, by = self.extract_target_info(target, by)
elem = None
i=0
delay = 5 if kind=="new" else 2
while((elem is None) and (i<delay)):
try:
if by=="xpath":
                    elem = self.driver.find_elements_by_xpath(path)
                elif by=="id":
                    elem = self.driver.find_elements_by_id(path)
                elif by=="link_text":
                    elem = self.driver.find_elements_by_link_text(path)
                elif by=="name":
                    elem = self.driver.find_elements_by_name(path)
except:
time.sleep(1)
i +=1
return(elem)
def login(self, username, password, id_u, id_p, submit_button=None):
elem_login = self.driver.find_element_by_id(id_u)
elem_pw = self.driver.find_element_by_id(id_p)
elem_login.clear()
elem_pw.clear()
elem_login.send_keys(username)
elem_pw.send_keys(password)
if submit_button:
self.click_target(submit_button)
else:
elem_pw.send_keys(Keys.ENTER)
print("logged in")
def search(self, term, id_search):
        elem_search = self.load_element(id_search, by="id")
elem_search.clear()
elem_search.send_keys(term)
elem_search.send_keys(Keys.ENTER)
def click_target(self, target, by=None, kind="new"):
elem = self.load_element(target, by, kind)
if elem is not None:
try:
elem.click()
return True
except:
return False
else:
return False
def get_target_html(self, target=None, by=None, kind='new', loaded_element=None):
if loaded_element is None:
elem = self.load_element(target, by, kind)
else:
elem = loaded_element
if elem is not None:
return elem.get_attribute("innerHTML")
else:
return 0
def get_target_text(self, target, by=None, kind="new"):
elem = self.load_element(target, by, kind)
if elem is not None:
return elem.get_attribute("innerHTML").strip().lstrip().replace("\n", " ")
else:
return 0
def write_to_div(self, message, target, by=None, kind="new"):
elem = self.load_element(target, by, kind)
if elem is not None:
action = webdriver.ActionChains(self.driver)
action.move_to_element_with_offset(elem, 10, 3)
action.click()
action.send_keys(message)
action.send_keys(Keys.ENTER)
action.perform()
return True
else:
return False
def click_back(self):
self.driver.execute_script("window.history.go(-1)")
def scroll_page_down(self):
webdriver.ActionChains(self.driver).send_keys(Keys.PAGE_DOWN).perform()
def scroll_page_up(self):
webdriver.ActionChains(self.driver).send_keys(Keys.PAGE_UP).perform()
def scroll_to_bottom(self):
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
def scroll_up(self, Y):
script = "window.scrollTo(0,document.body.scrollHeight-" + str(Y) + " )"
self.driver.execute_script(script)
def write_to_csv(self, dataframe, f_name):
dataframe.to_csv(f_name, sep=',', index=False)
def close_connection(self):
try:
self.driver.close()
return True
except:
return False
def goto_url(self, url):
self.driver.get(url)
def switch_tab(self, tab_index):
self.driver.switch_to.window(self.driver.window_handles[tab_index])
def cancel_notification_popup(self):
webdriver.ActionChains(self.driver).send_keys(Keys.ESCAPE).perform()
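

# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the intended call pattern; the URL, element ids, credentials and
# XPath below are placeholder assumptions, and running this needs Firefox plus
# a matching geckodriver on PATH.
if __name__ == "__main__":
    connection = WebConnect("https://www.example.com/login")
    connection.login("user@example.com", "secret", id_u="username", id_p="password")
    heading = connection.get_target_text({"path": "//h1", "by": "xpath"})
    print(heading)
    connection.close_connection()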
| 28.25463
| 137
| 0.688514
|
4a00409597e932c4a056c5f285a2caf201e03175
| 2,338
|
py
|
Python
|
next.py
|
sadsunshower/next
|
33c46bd9641dba5c64802241db57512e287b1126
|
[
"MIT"
] | null | null | null |
next.py
|
sadsunshower/next
|
33c46bd9641dba5c64802241db57512e287b1126
|
[
"MIT"
] | null | null | null |
next.py
|
sadsunshower/next
|
33c46bd9641dba5c64802241db57512e287b1126
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import sys, json
import util.formatting
import util.timing
flags = ''
ttpath = '~/timetable.json'
selected_week = None
# Process command line arguments
for i in range(1, len(sys.argv)):
if sys.argv[i] == '-p':
ttpath = sys.argv[i+1]
elif sys.argv[i] == '-t':
selected_week = int(sys.argv[i+1])
elif sys.argv[i][0] == '-':
flags = sys.argv[i][1:]
# Display help information, if requested
if '?' in flags:
print('''Nick\'s timetabler v2.0
Usage: next.py [-flhw?] [-p (file)] [-t (week)]
Command line flags:
? : Show this help information and exit
f : Get the directory for the current class
l : Print timetable as LaTeX
h : Print timetable as HTML
w : Get the current week only
p : Use the timetable file at the given path (defaults to ~/timetable.json)
t : Give the timetable of a specific week (defaults to current week / week 1 if outside term)
No arguments will show the timetable in the terminal''')
sys.exit(0)
# Load timetable object from JSON
timetable = None
with open(ttpath) as f:
timetable = json.loads(f.read())
if 'f' in flags:
week = util.timing.get_week(timetable["term_start"], timetable["term_end"])
day, time = util.timing.get_day_time()
cls = util.timing.extract_class(timetable, week, day, time)
if not cls:
# Default to home directory
print('~')
else:
try:
print(timetable["course_folders"][cls.course])
        except Exception:
print('~')
sys.exit(0)
if 'l' in flags:
if selected_week:
print(util.formatting.format_latex(timetable, selected_week))
else:
print(util.formatting.format_latex(timetable))
sys.exit(0)
if 'h' in flags:
    if selected_week:
print(util.formatting.format_html(timetable, selected_week))
else:
print(util.formatting.format_html(timetable))
sys.exit(0)
if 'w' in flags:
try:
week = util.timing.get_week(timetable["term_start"], timetable["term_end"])
if week == -1:
print('Not in term')
else:
            print('Week ' + str(week))
    except Exception:
print('Error getting week')
sys.exit(0)
if selected_week:
print(util.formatting.format_escape(timetable, selected_week))
else:
print(util.formatting.format_escape(timetable))
| 27.186047
| 95
| 0.639435
|
4a0040e93b89c070be5aed21ba4411e943a9ab62
| 1,564
|
py
|
Python
|
10 - Lists/example03.py
|
gyratory/NPTfP3
|
7e070e2e7a01cd2b750914719f71476fe1efcf4d
|
[
"Unlicense"
] | null | null | null |
10 - Lists/example03.py
|
gyratory/NPTfP3
|
7e070e2e7a01cd2b750914719f71476fe1efcf4d
|
[
"Unlicense"
] | null | null | null |
10 - Lists/example03.py
|
gyratory/NPTfP3
|
7e070e2e7a01cd2b750914719f71476fe1efcf4d
|
[
"Unlicense"
] | null | null | null |
menu_item = 0
namelist = []
while menu_item != 9:
print("--------------------")
print("1. Print the list")
print("2. Add a name to the list")
print("3. Remove a name from the list")
print("4. Change an item in the list")
print("9. Quit")
menu_item = int(input("Pick an item from the menu:"))
if menu_item == 1:
current = 0
if len(namelist) > 0:
while current < len(namelist):
print(current, ".", namelist[current])
current = current + 1
else:
print("List is empty")
elif menu_item == 2:
name = input("Type in a name to add: ")
namelist.append(name)
elif menu_item == 3:
del_name = input("What name would you like to remove: ")
        if del_name in namelist:
            # namelist.remove(del_name) would work just as well; like the
            # index/del pair below, it removes only the first occurrence.
            item_number = namelist.index(del_name)
            del namelist[item_number]
else:
print(del_name, "was not found")
elif menu_item == 4:
old_name = input("What name would you like to change: ")
if old_name in namelist:
item_number = namelist.index(old_name)
new_name = input("What is the new name: ")
namelist[item_number] = new_name
else:
print(old_name, "was not found")
print("Goodbye")
| 36.372093
| 79
| 0.558184
|
4a00419199dfc73248c17a9b3d23633406e688a9
| 1,731
|
py
|
Python
|
translate.py
|
amraboelela/ertugrul
|
00491d002b7a8989b1ec957c94b187d0490e27fa
|
[
"MIT"
] | null | null | null |
translate.py
|
amraboelela/ertugrul
|
00491d002b7a8989b1ec957c94b187d0490e27fa
|
[
"MIT"
] | null | null | null |
translate.py
|
amraboelela/ertugrul
|
00491d002b7a8989b1ec957c94b187d0490e27fa
|
[
"MIT"
] | null | null | null |
# importing the requests library
import sys, subprocess, requests
if len(sys.argv) > 2:
filename = sys.argv[1]
targetLanguage = sys.argv[2]
else:
print("please provide the file name and the target language")
exit(-1)
# api-endpoint
URL = "https://translation.googleapis.com/language/translate/v2"
from translation_key import *
file = open(filename)
sourceLanguage = filename[len(filename)-6:len(filename)-4]
lines = file.read().splitlines()
paragraph = ""
for line in lines:
if "-->" in line:
if len(paragraph) > 0:
paragraph = paragraph[:len(paragraph)-2]
PARAMS = {'key':key, 'q':paragraph, 'source':sourceLanguage, 'target':targetLanguage}
r = requests.get(url = URL, params = PARAMS)
data = r.json()
            translatedText = data['data']['translations'][0]['translatedText'].replace('-','').replace("&#39;","'").replace("&quot;","'").replace("'il","'ll")
            try:
                print(translatedText.encode('utf8'))
                print()
            except Exception as error:
                print("error: " + str(error) + " line: " + line)
                print(line)
paragraph = ""
else:
paragraph = paragraph + line.replace('-','').replace("!","").replace(".","") + ". "
PARAMS = {'key':key, 'q':paragraph, 'source':sourceLanguage, 'target':targetLanguage}
r = requests.get(url = URL, params = PARAMS)
data = r.json()
translatedText = data['data']['translations'][0]['translatedText'].replace('-','').replace("&#39;","'").replace("&quot;","'").replace("'il","'ll")
try:
    print(translatedText.encode('utf8'))
    print()
except Exception as error:
print("error: " + str(error) + " line: " + line)
file.close()
| 33.288462
| 158
| 0.596187
|
4a0042df5fce734338e9daf73048e930cf482db9
| 2,517
|
py
|
Python
|
grr/core/grr_response_core/lib/util/retry.py
|
khanhgithead/grr
|
8ad8a4d2c5a93c92729206b7771af19d92d4f915
|
[
"Apache-2.0"
] | 4,238
|
2015-01-01T15:34:50.000Z
|
2022-03-31T08:18:05.000Z
|
grr/core/grr_response_core/lib/util/retry.py
|
khanhgithead/grr
|
8ad8a4d2c5a93c92729206b7771af19d92d4f915
|
[
"Apache-2.0"
] | 787
|
2015-01-02T21:34:24.000Z
|
2022-03-02T13:26:38.000Z
|
grr/core/grr_response_core/lib/util/retry.py
|
khanhgithead/grr
|
8ad8a4d2c5a93c92729206b7771af19d92d4f915
|
[
"Apache-2.0"
] | 856
|
2015-01-02T02:50:11.000Z
|
2022-03-31T11:11:53.000Z
|
#!/usr/bin/env python
"""A module with utilities for retrying function execution."""
import functools
import logging
import math
import time
from typing import Callable
from typing import Optional
from typing import Tuple
from typing import Type
from typing import TypeVar
class Opts:
"""Options that customize the retry mechanism.
Attributes:
attempts: The number of attempts to retry the call.
init_delay_secs: An initial value for delay between retries.
max_delay_secs: A maximum value for delay between retries.
    backoff: A backoff multiplier for the delay between retries.
sleep: A sleep function used for delaying retries.
"""
attempts: int = 1
init_delay_secs: float = 0.0
max_delay_secs: float = math.inf
backoff: float = 1.0
sleep: Callable[[float], None] = time.sleep
class On:
"""A decorator that retries the wrapped function on exception."""
def __init__(
self,
exceptions: Tuple[Type[Exception], ...],
opts: Optional[Opts] = None,
) -> None:
"""Initializes the decorator.
Args:
exceptions: A sequence of exceptions to retry on.
opts: Options that customize the retry behaviour.
"""
if opts is None:
opts = Opts()
if opts.attempts < 1:
raise ValueError("Non-positive number of retries")
self._exceptions = exceptions
self._opts = opts
_R = TypeVar("_R")
# TODO(hanuszczak): Looks like there is a bug in the linter: it recognizes
# `_R` in the argument but doesn't recognize it in the result type position.
def __call__(self, func: Callable[..., _R]) -> Callable[..., _R]: # pylint: disable=undefined-variable
"""Wraps the specified function into a retryable function.
The wrapped function will be attempted to be called specified number of
times after which the error will be propagated.
Args:
func: A function to wrap.
Returns:
A wrapped function that retries on failures.
"""
opts = self._opts
@functools.wraps(func)
def Wrapped(*args, **kwargs) -> On._R:
attempts = 0
delay_secs = opts.init_delay_secs
while True:
try:
return func(*args, **kwargs)
except self._exceptions as error:
attempts += 1
if attempts == opts.attempts:
raise
logging.warning("'%s', to be retried in %s s.", error, delay_secs)
opts.sleep(delay_secs)
delay_secs = min(delay_secs * opts.backoff, opts.max_delay_secs)
return Wrapped
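

# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates the decorator on a deliberately flaky function; the exception
# type, option values and function below are illustrative assumptions.
if __name__ == "__main__":
  _opts = Opts()
  _opts.attempts = 3
  _opts.init_delay_secs = 0.0

  _calls = {"count": 0}

  @On((ConnectionError,), _opts)
  def FlakyFetch() -> str:
    _calls["count"] += 1
    if _calls["count"] < 3:
      raise ConnectionError("transient failure")
    return "ok after %d attempts" % _calls["count"]

  # The first two calls raise and are retried; the third succeeds.
  print(FlakyFetch())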
| 27.064516
| 105
| 0.671037
|
4a0044af266cdd63c6169e023bf5c41c440685a0
| 8,091
|
py
|
Python
|
ICLR_2022/Cubic_10D/PI3NN/PI3NN_OOD.py
|
streeve/PI3NN
|
f7f08a195096e0388bb9230bc67c6acd6f41581a
|
[
"Apache-2.0"
] | 11
|
2021-11-08T20:38:50.000Z
|
2022-01-30T02:46:39.000Z
|
ICLR_2022/Cubic_10D/PI3NN/PI3NN_OOD.py
|
streeve/PI3NN
|
f7f08a195096e0388bb9230bc67c6acd6f41581a
|
[
"Apache-2.0"
] | 1
|
2022-01-13T19:46:32.000Z
|
2022-02-09T16:23:56.000Z
|
ICLR_2022/Cubic_10D/PI3NN/PI3NN_OOD.py
|
streeve/PI3NN
|
f7f08a195096e0388bb9230bc67c6acd6f41581a
|
[
"Apache-2.0"
] | 1
|
2021-12-17T18:38:26.000Z
|
2021-12-17T18:38:26.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torch.optim as optim
# from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from scipy import stats
# torch.set_default_tensor_type(torch.DoubleTensor)
torch.set_default_tensor_type(torch.FloatTensor)
# device = 'cuda' if torch.cuda.is_available() else 'cpu'
device = 'cpu'
print('device', device)
# ==== fix random seed for reproducibility =====
np.random.seed(12345)
torch.manual_seed(12345)
#------------------------------------------------------
# ------- Define the three NNs ------------
#------------------------------------------------------
# ==== define the function =====
def target_fun(x):
y = x**3
y = (torch.sum(y, dim=1, keepdim=True)/10.0) + 1.0*torch.randn(x.size(0), device=device).unsqueeze(1)
return y
# ==== define the mean network =====
class UQ_Net_mean(nn.Module):
def __init__(self, nd):
super(UQ_Net_mean, self).__init__()
self.fc1 = nn.Linear(nd, 200)
self.fc2 = nn.Linear(200, 1)
def forward(self, x):
x = torch.tanh(self.fc1(x))
x = self.fc2(x)
return x
def UQ_loss(self, x, output, ydata):
loss = torch.mean((output[:, 0]-ydata[:, 0])**2)
return loss
# ==== define the upper and lower bound network =====
class UQ_Net_std(nn.Module):
def __init__(self, nd):
super(UQ_Net_std, self).__init__()
self.fc1 = nn.Linear(nd, 200)
self.fc2 = nn.Linear(200, 1)
# self.fc2.bias = torch.nn.Parameter(torch.tensor([25.0])) ## assign a large bias value of the output layer for OOD identification
def forward(self, x):
x = torch.tanh(self.fc1(x))
x = torch.sqrt(torch.square(self.fc2(x)) + 0.1)
return x
def UQ_loss(self, x, output, ydata):
loss = torch.mean((output[:, 0] - ydata[:, 0])**2)
return loss
#------------------------------------------------------
# ------- Generate the data ------------
#------------------------------------------------------
Npar = 10
Ntrain = 5000
Nout = 1
xtrain = (torch.randn(Ntrain, Npar)*1.0).to(device)
ytrain = target_fun(xtrain).to(device)
# normalize data
x_mean = torch.mean(xtrain, axis=0)
x_std = torch.std(xtrain, axis=0)
xtrain_normal = (xtrain - x_mean)/x_std
y_mean = torch.mean(ytrain, axis=0)
y_std = torch.std(ytrain, axis=0)
ytrain_normal = (ytrain - y_mean)/y_std
Nvalid = 1000
xvalid = (torch.randn(Nvalid, Npar)+2.0).to(device)
# xvalid[:,1] = xvalid[:,1] + 4.0
yvalid = target_fun(xvalid).to(device)
xvalid_normal = (xvalid - x_mean) / x_std
yvalid_normal = (yvalid - y_mean) / y_std
#------------------------------------------------------
# ------- Train the three NNs ------------
#------------------------------------------------------
criterion = nn.MSELoss()
# ====== Train the mean network for estimating mean
net = UQ_Net_mean(Npar).to(device)
net.zero_grad()
optimizer = optim.SGD(net.parameters(), lr=0.01)
Max_iter = 3000
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes()
for i in range(Max_iter):
optimizer.zero_grad()
output = net(xtrain_normal)
loss = criterion(output, ytrain_normal)
if i % 1000 == 0:
print(i, loss)
loss.backward()
optimizer.step()
## ==== Calculate the difference to get training data of U and V network
diff = (ytrain_normal - net(xtrain_normal)).detach()
mask = diff > 0
# print(mask.size())
y_up_data = diff[diff > 0].unsqueeze(1)
# x_up_data = xtrain_normal[diff>0].unsqueeze(1)
x_up_data = xtrain_normal[mask[:, 0], :]#.unsqueeze(1)
mask = diff < 0
y_down_data = -1.0 * diff[diff < 0].unsqueeze(1)
x_down_data = xtrain_normal[mask[:, 0], :]#.unsqueeze(1)
# ====== Train the U and V network for estimating upper and lower bound ====
net_up = UQ_Net_std(Npar).to(device)
net_up.zero_grad()
optimizer = optim.SGD(net_up.parameters(), lr=0.01)
for i in range(Max_iter):
optimizer.zero_grad()
output = net_up(x_up_data)
loss = criterion(output, y_up_data)
if torch.isnan(loss):
print(output, y_up_data)
exit()
if i % 1000 == 0:
print(i, loss)
loss.backward()
optimizer.step()
net_down = UQ_Net_std(Npar).to(device)
net_down.zero_grad()
optimizer = optim.SGD(net_down.parameters(), lr=0.01)
for i in range(Max_iter):
optimizer.zero_grad()
output = net_down(x_down_data)
# loss = net_up.UQ_loss(x_down_data, output, y_down_data)
loss = criterion(output, y_down_data)
if torch.isnan(loss):
print(output, y_down_data)
exit()
if i % 1000 == 0:
print(i, loss)
loss.backward()
optimizer.step()
#--------------------------------------------------------------------
# ------- Root-finding to determine alpha and beta ------------
#--------------------------------------------------------------------
quantile = 0.9
num_outlier = int(Ntrain * (1-quantile)/2)
output = net(xtrain_normal)
output_up = net_up(xtrain_normal)
output_down = net_down(xtrain_normal)
##===== find alpha =======
c_up0 = 0.0
c_up1 = 200.0
f0 = (ytrain_normal >= output + c_up0 * output_up).sum() - num_outlier
f1 = (ytrain_normal >= output + c_up1 * output_up).sum() - num_outlier
n_iter = 1000
iter = 0
while iter <= n_iter and f0*f1<0: ##f0 != 0 and f1 != 0:
c_up2 = (c_up0 + c_up1)/2.0
f2 = (ytrain_normal >= output + c_up2 * output_up).sum() - num_outlier
if f2 == 0:
break
elif f2 > 0:
c_up0 = c_up2
f0 = f2
else:
c_up1 = c_up2
f1 = f2
    # print('{}, f0: {}, f1: {}, f2: {}'.format(iter, f0, f1, f2))
    iter += 1
c_up = c_up2
##===== find beta =======
c_down0 = 0.0
c_down1 = 200.0
f0 = (ytrain_normal <= output - c_down0 * output_down).sum() - num_outlier
f1 = (ytrain_normal <= output - c_down1 * output_down).sum() - num_outlier
n_iter = 1000
iter = 0
while iter <= n_iter and f0*f1<0: ##f0 != 0 and f1 != 0:
c_down2 = (c_down0 + c_down1)/2.0
f2 = (ytrain_normal <= output - c_down2 * output_down).sum() - num_outlier
if f2 == 0:
break
elif f2 > 0:
c_down0 = c_down2
f0 = f2
else:
c_down1 = c_down2
f1 = f2
    # print('{}, f0: {}, f1: {}, f2: {}'.format(iter, f0, f1, f2))
    iter += 1
c_down = c_down2
print('optimal alpha and beta: ', c_up, c_down)
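
##===== hedged sanity check (added for illustration; not in the original script) =====
## Empirical training coverage of the interval [mean - beta*L, mean + alpha*U]
## should come out close to the requested quantile (0.9 above).
inside = ((ytrain_normal <= output + c_up * output_up) &
          (ytrain_normal >= output - c_down * output_down)).float().mean()
print('empirical training coverage: ', inside.item())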
#--------------------------------------------------------------------
# ------- Save and analysis results ------------
#--------------------------------------------------------------------
##--- 1. run PIVEN, QD, SQR, DER for this 10-Cubic function;
##--- 2. for each method, save two files 'PIW_train.dat', 'PIW_test.dat'
##--- 3. Calculate confidence score for the flight delay data.
output = net(xvalid_normal)
output_up = net_up(xvalid_normal)
output_down = net_down(xvalid_normal)
PI1 = net_up(xtrain_normal) * c_up + net_down(xtrain_normal) * c_down
MPIW_array_train = (PI1 * y_std).detach().numpy()
MPIW_array_test = (output_up * c_up * y_std + c_down * output_down * y_std).detach().numpy()
MPIW_array_train = MPIW_array_train[~np.isnan(MPIW_array_train)]
MPIW_array_test = MPIW_array_test[~np.isnan(MPIW_array_test)]
print(np.shape(MPIW_array_train), np.shape(MPIW_array_test))
np.savetxt('PI3NN_MPIW_train.dat', MPIW_array_train)
np.savetxt('PI3NN_MPIW_test.dat', MPIW_array_test)
kde_train = stats.gaussian_kde(MPIW_array_train)
kde_test = stats.gaussian_kde(MPIW_array_test)
x1 = np.linspace(MPIW_array_train.min(), MPIW_array_train.max(), 100)
p1 = kde_train(x1)
x2 = np.linspace(MPIW_array_test.min(), MPIW_array_test.max(), 100)
p2 = kde_test(x2)
plt.plot(x1,p1, label='train')
plt.plot(x2,p2, label='test')
plt.legend()
plt.savefig('PI3NN_cubic10D_bias.png')
plt.show()
# print('P1 (train) mean: {}, STD: {}'.format(np.mean(p1), np.std(p1)))
# print('P2 (test) mean: {}, STD: {}'.format(np.mean(p2), np.std(p2)))
###------- Option I: calculate confidence interval
conf_score = kde_train(MPIW_array_test)/p1.max()
print(np.mean(conf_score), np.std(conf_score))
| 26.880399
| 138
| 0.597701
|
4a0044d893a1eee8c09ca7df90f1707490a61865
| 7,526
|
py
|
Python
|
torchplus/tensorfunc.py
|
Bertie97/pyctlib
|
c0bb6afc49dacc6c06c5936c1fae132c62c190f7
|
[
"MIT"
] | 3
|
2021-07-03T17:27:44.000Z
|
2021-09-26T20:48:19.000Z
|
torchplus/tensorfunc.py
|
Bertie97/pyctlib
|
c0bb6afc49dacc6c06c5936c1fae132c62c190f7
|
[
"MIT"
] | null | null | null |
torchplus/tensorfunc.py
|
Bertie97/pyctlib
|
c0bb6afc49dacc6c06c5936c1fae132c62c190f7
|
[
"MIT"
] | 1
|
2022-03-08T08:54:03.000Z
|
2022-03-08T08:54:03.000Z
|
#! python3.8 -u
# -*- coding: utf-8 -*-
##############################
## Project PyCTLib
## Package torchplus
##############################
__all__ = """
crop_as
decimal
divide
dot
down_scale
gaussian_kernel
grad_image
image_grid
up_scale
""".split()
from pyoverload import *
from pyctlib import restore_type_wrapper
from pyctlib import vector
import torch
import numpy as np
import torchplus as tp
def decimal(tensor):
return tensor - tp.floor(tensor)
def divide(a, b, limit=1, tol=1e-6):
a = tp.tensor(a)
b = tp.tensor(b)
a_s, b_s = a.shape ^ b.shape
a = a.view(a_s)
b = b.view(b_s)
shape = tp.Size(max(x, y) for x, y in zip(a_s, b_s))
return tp.where(b.abs() < tol, limit * tp.ones(shape), a / tp.where(b.abs() < tol, tol * tp.ones(shape), b))
def add_special(size, special, fill=1):
s = special
if len(s) == 0: pass
elif len(s) == 1: size = size[:s[0]] + (fill,) + size[s[0]:]
else: size = size[:s[0]] + (fill,) + size[s[0]:s[1]] + (fill,) + size[s[1]:]
return size
def gaussian_kernel(n_dims = 2, kernel_size = 3, sigma = 0, normalize = True):
radius = (kernel_size - 1) / 2
if sigma == 0: sigma = radius * 0.6
grid = tp.image_grid(*(kernel_size,) * n_dims).float()
kernel = tp.exp(- ((grid - radius) ** 2).sum(0) / (2 * sigma ** 2))
return (kernel / kernel.sum()) if normalize else kernel
def dot(g1, g2):
assert g1.shape == g2.shape
return (g1 * g2).sum(g1.channel_dimension)
@restore_type_wrapper
def grad_image(array):
'''
Gradient image of array
array: (n_batch, n_feature, n_1, ..., n_{n_dim})
output: (n_batch, n_dim, n_feature, n_1, ..., n_{n_dim})
'''
array = tp.tensor(array)
output = tp.zeros_like(array)
grad_dim = int(array.has_batch)
output = []
for d in range(array.ndim):
if d in array.special: continue
b = (slice(None, None),) * d + (slice(2, None),) + (slice(None, None),) * (array.ndim - d - 1)
a = (slice(None, None),) * d + (slice(None, -2),) + (slice(None, None),) * (array.ndim - d - 1)
output.append(tp.crop_as((array[b] - array[a]) / 2, array))
return tp.stack(output, {grad_dim})
@overload
@restore_type_wrapper("roi")
def crop_as(x: Array, y: tuple, center: tuple, fill: Scalar=0) -> Array:
x = tp.Tensor(x)
size_x = x.shape
size_y = y
if isinstance(size_y, tp.Size) and size_x.nspace == size_y.nspace:
size_y = tuple(size_y.space)
size_y = tuple(size_y)
if len(size_y) == len(size_x): pass
elif len(size_y) == size_x.nspace: size_y = add_special(size_y, size_x.special, -1)
else: raise TypeError("Mismatch dimensions in 'crop_as', please use -1 if the dimension doesn't need to be cropped. ")
assert len(size_y) == len(size_x)
size_y = tuple(a if b == -1 else b for a, b in zip(size_x, size_y))
if len(center) == len(size_x): pass
elif len(center) == size_x.nspace: center = add_special(center, size_x.special, -1)
    elif len([x for x in center if x >= 0]) == len([x for x in size_y if x >= 0]):
center = tuple(a if b >= 0 else -1 for a, b in zip(center, size_y))
else: raise TypeError("Mismatch dimensions for the center in 'crop_as', please use -1 if the dimension that is centered or doesn't need cropping. ")
assert len(center) == len(size_x)
center = tuple(a / 2 if b == -1 else b for a, b in zip(size_x, center))
z = fill * tp.ones(*size_y).type_as(x)
def intersect(u, v):
return max(u[0], v[0]), min(u[1], v[1])
z_box = [intersect((0, ly), (- round(float(m - float(ly) / 2)), - round(float(m - float(ly) / 2)) + lx)) for m, lx, ly in zip(center, size_x, size_y)]
x_box = [intersect((0, lx), (+ round(float(m - float(ly) / 2)), + round(float(m - float(ly) / 2)) + ly)) for m, lx, ly in zip(center, size_x, size_y)]
# if the two boxes are seperated
if any([r[0] >= r[1] for r in z_box]) or any([r[0] >= r[1] for r in x_box]): z.roi = None; return z
region_z = tuple(slice(u, v) for u, v in z_box)
region_x = tuple(slice(u, v) for u, v in x_box)
z[region_z] = x[region_x]
z.roi = region_x
z.special_from_(x)
return z
@overload
def crop_as(x: Array, y: Array, center: tuple, fill: Scalar=0) -> Array:
return crop_as(x, y.shape, center, fill)
@overload
def crop_as(x: Array, y: [tuple, Array], fill: Scalar=0) -> Array:
center = tuple(m/2 for m in x.shape)
return crop_as(x, y, center, fill)
@overload
def crop_as(x: Array, *y: int) -> Array:
center = tuple(m/2 for m in x.shape)
return crop_as(x, y, center)
@restore_type_wrapper
def up_scale(image, *scaling:int):
image = tp.tensor(image)
if len(scaling) == 0:
scaling = (1,)
elif len(scaling) == 1 and iterable(scaling[0]):
scaling = scaling[0]
if len(scaling) == 1:
if isinstance(scaling[0], int):
scaling *= image.nspace
scaling = add_special(scaling, image.special, 1)
else: raise TypeError("Unknown scaling type for 'up_scale'. ")
elif len(scaling) < image.ndim and len(scaling) == image.nspace:
scaling = add_special(scaling, image.special, 1)
for i, s in enumerate(scaling):
image = (
image
.transpose(i, -1)
.unsqueeze(-1)
.repeat((1,) * image.ndim + (int(s),))
.flatten(-2)
.transpose(i, -1)
)
return image
@restore_type_wrapper
def down_scale(image, *scaling:int):
image = tp.tensor(image)
if len(scaling) == 0:
scaling = (1,)
elif len(scaling) == 1 and iterable(scaling[0]):
scaling = scaling[0]
if len(scaling) == 1:
if isinstance(scaling[0], int):
scaling *= image.nspace
scaling = add_special(scaling, image.special, 1)
else: raise TypeError("Unknown scaling type for 'down_scale'. ")
elif len(scaling) < image.ndim and len(scaling) == image.nspace:
scaling = add_special(scaling, image.special, 1)
return image[tuple(slice(None, None, s) for s in scaling)]
@overload
@restore_type_wrapper
def image_grid(x: Array):
return image_grid(x.space)
@overload
def image_grid__default__(*shape):
if len(shape) == 1 and isinstance(shape, (list, tuple)):
shape = shape[0]
ret = tp.stack(tp.meshgrid(*[tp.arange(x) for x in shape]))
return ret.channel_dimension_(0)
def linear(input, weight, bias):
result = input @ weight.T
if bias is not None:
if bias.dim() == 2:
return result + bias
return result + bias.unsqueeze(0)
return result
def get_shape(input):
if isinstance(input, list):
input = vector(input)
l_shape = input.map(get_shape)
if l_shape.all(lambda x: x == l_shape[0]):
return "L{}".format(len(l_shape)) + ("[{}]".format(l_shape[0]) if not l_shape[0].startswith("[") else l_shape[0])
else:
return "[{}]".format(", ".join(l_shape))
if isinstance(input, tuple):
input = vector(input)
l_shape = input.map(get_shape)
if l_shape.all(lambda x: x == l_shape[0]):
return "T{}".format(len(l_shape)) + ("[{}]".format(l_shape[0]) if not l_shape[0].startswith("[") else l_shape[0])
else:
return "[{}]".format(", ".join(l_shape))
if isinstance(input, torch.Tensor):
return str(input.shape)
if isinstance(input, np.ndarray):
return str(input.shape)
return str(type(input))[8:-2]
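

# --- Hedged usage sketch (not part of the original module) ---
# Exercises the two helpers that need only plain torch tensors; the shapes are
# arbitrary illustrative values.
if __name__ == "__main__":
    x = torch.randn(4, 3)
    w = torch.randn(5, 3)
    b = torch.zeros(5)
    print(get_shape(x))            # e.g. "torch.Size([4, 3])"
    print(linear(x, w, b).shape)   # torch.Size([4, 5])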
| 36.009569
| 154
| 0.597263
|
4a0044f6c7fd5be950fdbbdd813398977ca06ec2
| 496
|
py
|
Python
|
TIL/Threshold_binary.py
|
FLY-CODE77/opencv
|
5644e6c1ef43d81efb54ccde6c06f1adf000fb96
|
[
"MIT"
] | 1
|
2020-10-23T14:29:24.000Z
|
2020-10-23T14:29:24.000Z
|
TIL/Threshold_binary.py
|
FLY-CODE77/opencv
|
5644e6c1ef43d81efb54ccde6c06f1adf000fb96
|
[
"MIT"
] | null | null | null |
TIL/Threshold_binary.py
|
FLY-CODE77/opencv
|
5644e6c1ef43d81efb54ccde6c06f1adf000fb96
|
[
"MIT"
] | null | null | null |
# Binarization: adjust the threshold with a trackbar to find the optimal value
import cv2
import sys
import numpy as numpy
src = cv2.imread('HappyFish.jpg', cv2.IMREAD_GRAYSCALE)
if src is None:
print('no img')
sys.exit()
def on_threshold(pos):
_, dst = cv2.threshold(src, pos, 255, cv2.THRESH_BINARY)
cv2.imshow('dst', dst)
cv2.imshow('src', src)
cv2.namedWindow('dst')
# The window size may need some further thought
cv2.createTrackbar('THR', 'dst', 0, 255, on_threshold)
cv2.setTrackbarPos('THR', 'dst', 128)
cv2.waitKey()
cv2.destroyAllWindows()
| 19.076923
| 60
| 0.691532
|
4a004662ecbb22befa047d4d8c69c51af64b12cb
| 2,991
|
py
|
Python
|
windows/win_iis_redirect.py
|
briggsy87/ansible-modules
|
499b3dcd7b8ea03e9b87d9b7cc1080f6d711e47d
|
[
"MIT"
] | null | null | null |
windows/win_iis_redirect.py
|
briggsy87/ansible-modules
|
499b3dcd7b8ea03e9b87d9b7cc1080f6d711e47d
|
[
"MIT"
] | null | null | null |
windows/win_iis_redirect.py
|
briggsy87/ansible-modules
|
499b3dcd7b8ea03e9b87d9b7cc1080f6d711e47d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Kyle Briggs <briggsy87@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_iis_redirect
version_added: ""
short_description: Create an HTTP redirect in IIS
description:
- Uses Powershell commandlets. Checks the current values of the HTTP redirect for the specified site, if any of the values supplied to not match the current values, the command is run to set the redirect to the specified values.
options:
site_name:
description:
- The name of the IIS site
required: false
default: IIS:\sites\Default Web Site
aliases: []
enabled:
description:
- Whether the http redirect is enable or not.
required: false
choices:
- true
- false
default: true
aliases: []
child_only:
description:
- Whether this redirect should effect child only. More info found here: https://docs.microsoft.com/en-us/iis/configuration/system.webserver/httpredirect/#attributes
required: false
choices:
- true
- false
default: false
aliases: []
exact_destination:
description:
- Whether this redirect should apply to the exact location. More info found here: https://docs.microsoft.com/en-us/iis/configuration/system.webserver/httpredirect/#attributes
required: false
choices:
- true
- false
default: false
aliases: []
destination:
description:
- The URL this should direct to
required: yes
default: null
aliases: []
http_response_status:
description:
- Specifies the type of redirection. More info found here: https://docs.microsoft.com/en-us/iis/configuration/system.webserver/httpredirect/#attributes
required: false
choices:
- Found
- Permanent
- Temporary
- Permanent Redirect
default: Found
aliases: []
author: Kyle Briggs
'''
EXAMPLES = '''
# Playbook example
---
- name : IIS redirect
win_iis_redirect:
site_name: IIS:\sites\My Website
enabled: true
child_only: true
exact_destination: false
destination: https://mywebsite.com/
http_response_status: Permanent
'''
| 30.835052
| 235
| 0.68004
|
4a0047176b1cf4bcfe0226abc765822397ff0aa6
| 11,733
|
py
|
Python
|
intersight/model/iam_ldap_policy_response.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/model/iam_ldap_policy_response.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/model/iam_ldap_policy_response.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.iam_ldap_policy_list import IamLdapPolicyList
from intersight.model.mo_aggregate_transform import MoAggregateTransform
from intersight.model.mo_document_count import MoDocumentCount
from intersight.model.mo_tag_key_summary import MoTagKeySummary
from intersight.model.mo_tag_summary import MoTagSummary
globals()['IamLdapPolicyList'] = IamLdapPolicyList
globals()['MoAggregateTransform'] = MoAggregateTransform
globals()['MoDocumentCount'] = MoDocumentCount
globals()['MoTagKeySummary'] = MoTagKeySummary
globals()['MoTagSummary'] = MoTagSummary
class IamLdapPolicyResponse(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'object_type': (str,), # noqa: E501
'count': (int,), # noqa: E501
'results': ([MoTagKeySummary], none_type,), # noqa: E501
}
@cached_property
def discriminator():
lazy_import()
val = {
'iam.LdapPolicy.List': IamLdapPolicyList,
'mo.AggregateTransform': MoAggregateTransform,
'mo.DocumentCount': MoDocumentCount,
'mo.TagSummary': MoTagSummary,
}
if not val:
return None
return {'object_type': val}
attribute_map = {
'object_type': 'ObjectType', # noqa: E501
'count': 'Count', # noqa: E501
'results': 'Results', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, object_type, *args, **kwargs): # noqa: E501
"""IamLdapPolicyResponse - a model defined in OpenAPI
Args:
object_type (str): A discriminator value to disambiguate the schema of a HTTP GET response body.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            count (int): The total number of 'iam.LdapPolicy' resources matching the request, across all pages. The 'Count' attribute is included when the HTTP GET request includes the '$inlinecount' parameter.. [optional] # noqa: E501
results ([MoTagKeySummary], none_type): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'object_type': object_type,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
],
'oneOf': [
IamLdapPolicyList,
MoAggregateTransform,
MoDocumentCount,
MoTagSummary,
],
}
| 46.932
| 1,678
| 0.636581
|
4a00477c6de421fb00f342b429259fa86ded3556
| 638
|
py
|
Python
|
ios/scrape.py
|
TransitApp/svgren
|
aabfc27c168d65d672788b048a6b73da7afeaf44
|
[
"MIT"
] | null | null | null |
ios/scrape.py
|
TransitApp/svgren
|
aabfc27c168d65d672788b048a6b73da7afeaf44
|
[
"MIT"
] | null | null | null |
ios/scrape.py
|
TransitApp/svgren
|
aabfc27c168d65d672788b048a6b73da7afeaf44
|
[
"MIT"
] | null | null | null |
import urllib
import os
try:
os.stat('./download')
except:
os.mkdir('./download')
with open('images.xml') as f:
for line in f:
filename = line.rstrip('\n')
        # strip the stray trailing character ('?') present at the end of some filenames.
if filename[-4:] != '.svg':
filename = filename[:-1]
f = urllib.URLopener()
file_url = 'http://images-staging.transitapp.com/svg/' + filename
print '\"' + filename + '\"' + ','
f.retrieve(file_url, 'download/' + filename)
print 'Use previous printed snippet in main.cpp of svgren ios test app.\n Helps validate if your image will work with the library.'
| 26.583333
| 131
| 0.600313
|
4a0047f02644bfda8858fb7bf2f6bdacc167294a
| 2,257
|
py
|
Python
|
preprocess/count_tags.py
|
vohoaiviet/tag-image-retrieval
|
0a257560581f702cd394f3f28c9e0f6202827ce8
|
[
"MIT"
] | 50
|
2015-11-04T15:53:09.000Z
|
2022-01-03T14:46:17.000Z
|
preprocess/count_tags.py
|
vohoaiviet/tag-image-retrieval
|
0a257560581f702cd394f3f28c9e0f6202827ce8
|
[
"MIT"
] | 2
|
2018-03-07T09:51:50.000Z
|
2018-10-13T11:05:13.000Z
|
preprocess/count_tags.py
|
vohoaiviet/tag-image-retrieval
|
0a257560581f702cd394f3f28c9e0f6202827ce8
|
[
"MIT"
] | 17
|
2015-10-26T03:41:49.000Z
|
2021-08-23T08:11:05.000Z
|
import sys
import os
from basic.constant import ROOT_PATH
from basic.common import checkToSkip, printStatus
INFO = __file__
def process(options, collection):
rootpath = options.rootpath
tpp = options.tpp
tagfile = os.path.join(rootpath, collection, "TextData", "id.userid.%stags.txt" % tpp)
resultfile = os.path.join(rootpath, collection, "TextData", "%stag.userfreq.imagefreq.txt" % tpp)
if checkToSkip(resultfile, options.overwrite):
return 0
printStatus(INFO, "parsing " + tagfile)
tag2imfreq = {}
tag2users = {}
for line in open(tagfile):
elems = str.split(line.strip())
photoid = elems[0]
userid = elems[1]
tagset = set(elems[2:])
for tag in tagset:
tag2imfreq[tag] = tag2imfreq.get(tag, 0) + 1
tag2users.setdefault(tag,[]).append(userid)
printStatus(INFO, "collecting user-freq and image-freq")
results = []
for tag,users in tag2users.iteritems():
userfreq = len(set(users))
imfreq = tag2imfreq[tag]
results.append((tag, userfreq, imfreq))
printStatus(INFO, "sorting in descending order (user-freq as primary key)")
results.sort(key=lambda v:(v[1],v[2]), reverse=True)
printStatus(INFO, "-> %s" % resultfile)
with open(resultfile, 'w') as fw:
fw.write(''.join(['%s %d %d\n' % (tag, userfreq, imfreq) for (tag, userfreq, imfreq) in results]))
fw.close()
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
from optparse import OptionParser
parser = OptionParser(usage="""usage: %prog [options] collection""")
parser.add_option("--overwrite", default=0, type="int", help="overwrite existing file (default: 0)")
parser.add_option("--tpp", default='lemm', type="string", help="tag preprocess (default: lemm)")
parser.add_option("--rootpath", default=ROOT_PATH, type="string", help="rootpath where the train and test collections are stored (default: %s)" % ROOT_PATH)
(options, args) = parser.parse_args(argv)
if len(args) < 1:
parser.print_help()
return 1
return process(options, args[0])
if __name__ == "__main__":
sys.exit(main())
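# A minimal illustration of the formats handled above (all values are invented):
#   input line in id.userid.lemmtags.txt      :  "9067339567 44124479@N06 sunset beach"
#   output line in lemmtag.userfreq.imagefreq.txt :  "sunset 1523 2310"
# Each output line is "<tag> <distinct-user frequency> <image frequency>", sorted in
# descending order with the user frequency as the primary key.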
| 31.347222
| 160
| 0.627381
|
4a00492535e7ea905d6a5ac7a578e3489cbd58ee
| 5,192
|
py
|
Python
|
spikefuel/tools.py
|
duguyue100/spikefuel
|
e06713b62c0bc7f881dd75a5a4842723cce4aaab
|
[
"MIT"
] | 12
|
2016-05-12T09:58:19.000Z
|
2021-04-10T02:46:21.000Z
|
spikefuel/tools.py
|
colinshane/spikefuel
|
e06713b62c0bc7f881dd75a5a4842723cce4aaab
|
[
"MIT"
] | 1
|
2019-07-08T03:50:02.000Z
|
2019-07-09T07:22:18.000Z
|
spikefuel/tools.py
|
colinshane/spikefuel
|
e06713b62c0bc7f881dd75a5a4842723cce4aaab
|
[
"MIT"
] | 10
|
2016-04-09T01:58:22.000Z
|
2020-06-07T05:13:46.000Z
|
"""Tools to simplify workflow.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
import os
import cv2
import time
import socket
from spikefuel import dvsproc
# PUBLIC PARAMETERS
UDP_PORT = 8997
REMOTE_IP = "localhost"
BUFSIZE = 1024
def create_vot_image_list(save_path, num_frames):
"""Create a image list for given VOT video sequence.
Parameters
----------
save_path : String
the directory of the given VOT video sequence
num_frames : int
number of frames in the given video sequence
Returns
-------
image_list : list
One ordered list that contains path of frames
"""
image_list = []
for i in xrange(1, num_frames+1):
images_path = os.path.join(save_path, "%08d" % (i,) + ".jpg")
image_list.append(images_path)
return image_list
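# Illustrative call (hypothetical path): create_vot_image_list("/data/vot/ball", 3)
# returns ['/data/vot/ball/00000001.jpg', '/data/vot/ball/00000002.jpg',
#          '/data/vot/ball/00000003.jpg'].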
def create_image_sequence(frames, save_path, title, form=".png"):
"""Create a image sequence by given frames.
Parameters
----------
frames : list
An ordered list that contains all frames. Frames are either in
OpenCV Mat or numpy array
save_path : string
directory of where you want to write the image sequence
title : string
title of the image
form : string
format of output images, in default as PNG image
Returns
-------
A sequence of images written in the given directory and title.
"""
image_base = os.path.join(save_path, title+"-")
num_frames = len(frames)
for i in xrange(num_frames):
image_add = os.path.join(image_base, "%08d" % (i+1,)+form)
print "[MESSAGE] Writing "+image_add
cv2.imwrite(image_add, frames[i])
print "[MESSAGE] Images are written"
def init_dvs():
"""Initialize a socket that can send commands to jAER.
Returns
-------
s : socket
An empty socket that can connect jAER
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('', 0))
print "[MESSAGE] The socket is established."
return s
def destroy_dvs(s):
"""Destroy dvs socket.
Returns
-------
    A flag that indicates whether the socket was closed successfully.
"""
s.close()
print "[MESSAGE] The socket is closed."
def reset_dvs_time(conn, viewer_id=2, wait=0.3):
"""Send a command to reset time scale across all viewers.
    FIXME: the reset prints a message that is not supposed to be there.
Parameters
----------
conn : socket
        A socket that can connect to jAER
viewer_id : int
the n-th viewer you've created, for Mac, default is 2
wait : float
positive float for wait jAER viewers to reset time stamp
"""
addr = REMOTE_IP, UDP_PORT+viewer_id-1
line = 'zerotimestamps'
conn.sendto(line, addr)
time.sleep(wait)
def start_log_dvs(conn, save_path, title, viewer_id=2):
"""Send start logging command to jAER.
Parameters
----------
conn : socket
        A socket that can connect to jAER
save_path : string
        the directory where you want to put the recording
use absolute path since the file saving is with jAER
title : string
the title of your recording, no extension
viewer_id : int
the n-th viewer you've created, for Mac, default is 2
Returns
-------
rec_path : string
absolute path to saved recording
flag that indicates if the function is sent successfully
"""
addr = REMOTE_IP, UDP_PORT+viewer_id-1
rec_path = os.path.join(save_path, title)
# send start logging command
reset_dvs_time(conn, viewer_id)
line = 'startlogging '+rec_path
conn.sendto(line, addr)
data, fromaddr = conn.recvfrom(BUFSIZE)
print ('[MESSAGE] client received %r from %r' % (data, fromaddr))
return rec_path
def stop_log_dvs(conn, viewer_id=2):
"""Send stop logging command to jAER.
Parameters
----------
conn : socket
        A socket that can connect to jAER
viewer_id : int
the n-th viewer you've created, for Mac, default is 2
Returns
-------
flag that indicates if the function is sent successfully
"""
addr = REMOTE_IP, UDP_PORT+viewer_id-1
# Send stop logging command
line = 'stoplogging'
conn.sendto(line, addr)
data, fromaddr = conn.recvfrom(BUFSIZE)
print ('[MESSAGE] client received %r from %r' % (data, fromaddr))
def log_dvs(conn, save_path, title, duration, viewer_id=2):
"""Log a DVS recording for certain duration.
Parameters
----------
conn : socket
        A socket that can connect to jAER
save_path : string
        the directory where you want to put the recording
use absolute path since the file saving is with jAER
title : string
the title of your recording, no extension
duration : float
duration of the total recording in seconds
viewer_id : int
the n-th viewer you've created, for Mac, default is 2
Returns
-------
A flag that tells if you log the data successfully.
"""
rec_path = start_log_dvs(conn, save_path, title, viewer_id)
time.sleep(duration)
stop_log_dvs(conn)
return dvsproc.check_aedat(rec_path+".aedat")
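# Minimal usage sketch (assumes a jAER viewer is already listening on the default UDP
# port; the recording path and title below are made up):
#   conn = init_dvs()
#   ok = log_dvs(conn, "/absolute/path/to/recordings", "test_seq", 10.0)
#   destroy_dvs(conn)
# log_dvs() blocks for the requested duration between the start/stop logging commands
# and returns the result of dvsproc.check_aedat() on the produced .aedat file.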
| 25.45098
| 70
| 0.644453
|
4a0049281dd0db6a5f44887a14a6240852adf40f
| 24,974
|
py
|
Python
|
script/self_driving/model_server.py
|
b41sh/terrier
|
ea319f84b3abf5fe9ab2590baaa399de07f331cb
|
[
"MIT"
] | null | null | null |
script/self_driving/model_server.py
|
b41sh/terrier
|
ea319f84b3abf5fe9ab2590baaa399de07f331cb
|
[
"MIT"
] | null | null | null |
script/self_driving/model_server.py
|
b41sh/terrier
|
ea319f84b3abf5fe9ab2590baaa399de07f331cb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
This file contains the python ModelServer implementation.
Invoke with:
`model_server.py <ZMQ_ENDPOINT>`
The server should be stateless but with caching of models.
The message format that the ModelServer expects should be kept consistent with Messenger class in
the noisepage source code.
The command format should be kept consistent with the ModelServerManager in the noisepage source
code.
TODO(Ricky):
- Design an Error code scheme for ModelServerManager and ModelServer.
Like some string errors? This should be transparent to the user of the ModelServerManager so I
am delaying this to next PR.
"""
from __future__ import annotations
import enum
import sys
import atexit
from abc import ABC, abstractmethod
from enum import Enum, auto, IntEnum
from typing import Dict, Optional, Tuple, List, Any
import json
import logging
import os
import pprint
import pickle
from pathlib import Path
import numpy as np
import zmq
from modeling.ou_model_trainer import OUModelTrainer
from modeling.interference_model_trainer import InterferenceModelTrainer
from modeling.util import logging_util
from modeling.type import OpUnit
from modeling.info import data_info
from forecasting.forecaster import Forecaster, parse_model_config
logging_util.init_logging('info')
class ModelType(enum.IntEnum):
"""ModelType
"""
    FORECAST = 0
OPERATING_UNIT = 1
INTERFERENCE = 2
class Callback(IntEnum):
"""
ModelServerManager <==> ModelServer callback Id.
Needs to be kept consistent with ModelServerManager.h's Callback Enum
"""
NOOP = 0
CONNECTED = 1
class Command(Enum):
"""
Command enum for actions to take from the manager.
This has to be kept consistent with the C++ ModelServerManager.
"""
TRAIN = auto() # Train a specific model
QUIT = auto() # Quit the server
PRINT = auto() # Print the message
INFER = auto() # Do inference on a trained model
def __str__(self) -> str:
return self.name
@staticmethod
def from_str(cmd_str: str) -> Command:
if cmd_str == "PRINT":
return Command.PRINT
elif cmd_str == "QUIT":
return Command.QUIT
elif cmd_str == "TRAIN":
return Command.TRAIN
elif cmd_str == "INFER":
return Command.INFER
else:
raise ValueError("Invalid command")
class Message:
"""
Message struct for communication with the ModelServer.
    The message format has to be kept consistent with the C++ Messenger.
A valid message is :
"send_id-recv_id-payload"
    Refer to Messenger's documentation for the message format
"""
def __init__(self, cmd: Optional[Command] = None,
data: Optional[Dict] = None) -> None:
self.cmd = cmd
self.data = data
@staticmethod
def from_json(json_str: str) -> Optional[Message]:
d = json.loads(json_str)
msg = Message()
try:
msg.cmd = Command.from_str(d["cmd"])
msg.data = d["data"]
except (KeyError, ValueError) as e:
logging.error(f"Invalid Message : {json_str}")
return None
return msg
def to_json(self) -> str:
return json.dumps(self.__dict__)
def __str__(self) -> str:
return pprint.pformat(self.__dict__)
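# Illustrative round trip for the wire format described above (values are made up):
#   payload = '4-0-{"cmd": "PRINT", "data": {"message": "hello"}}'
#   send_id, recv_id, json_part = payload.split('-', 2)
#   msg = Message.from_json(json_part)  # msg.cmd == Command.PRINT, msg.data == {'message': 'hello'}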
class AbstractModel(ABC):
"""
Interface for all the models
"""
def __init__(self) -> None:
# Model cache that maps from the model path on disk to the model
self.model_cache = dict()
@abstractmethod
def train(self, data: Dict) -> Tuple[bool, str]:
"""
Perform fitting.
Should be overloaded by a specific model implementation.
:param data: data used for training
:return: if training succeeds, {True and empty string}, else {False, error message}
"""
raise NotImplementedError("Should be implemented by child classes")
@abstractmethod
def infer(self, data: Dict) -> Tuple[Any, bool, str]:
"""
        Do inference on the model, given the data file and the model_map_path
:param data: data used for inference
:return: {List of predictions, if inference succeeds, error message}
"""
raise NotImplementedError("Should be implemented by child classes")
def _load_model(self, save_path: str):
"""
Check if a trained model exists at the path.
        Load the model into the cache if it is not already there.
:param save_path: path to model to load
:return: None if no model exists at path, or Model map saved at path
"""
save_path = Path(save_path)
# Check model exists
if not save_path.exists():
return None
# use the path string as the key of the cache
save_path_str = str(save_path)
# Load from cache
        if self.model_cache.get(save_path_str, None) is not None:
return self.model_cache[save_path_str]
# Load into cache
model = self._load_model_from_disk(save_path)
self.model_cache[save_path_str] = model
return model
@abstractmethod
def _load_model_from_disk(self, save_path: Path):
"""
Load model from the path on disk (invoked when missing model cache)
:param save_path: model path on disk
:return: model for the child class' specific model type
"""
raise NotImplementedError("Should be implemented by child classes")
class OUModel(AbstractModel):
"""
OUModel that handles training and inference for OU models
"""
# Training parameters
TEST_RATIO = 0.2
TRIM_RATIO = 0.2
EXPOSE_ALL = True
TXN_SAMPLE_RATE = 2
def __init__(self) -> None:
AbstractModel.__init__(self)
def train(self, data: Dict) -> Tuple[bool, str]:
"""
Train a model with the given model name and seq_files directory
:param data: {
methods: [lr, XXX, ...],
input_path: PATH_TO_SEQ_FILES_FOLDER, or None
save_path: PATH_TO_SAVE_MODEL_MAP
}
:return: if training succeeds, {True and empty string}, else {False, error message}
"""
ml_models = data["methods"]
seq_files_dir = data["input_path"]
save_path = data["save_path"]
# Do path checking up-front
save_path = Path(save_path)
save_dir = save_path.parent
try:
# Exist ok, and Creates parent if ok
save_dir.mkdir(parents=True, exist_ok=True)
except PermissionError as e:
return False, "FAIL_PERMISSION_ERROR"
# Create result model metrics in the same directory
save_file_name = save_path.stem
result_path = save_path.with_name(
str(save_file_name) + "_metric_results")
result_path.mkdir(parents=True, exist_ok=True)
test_ratio = OUModel.TEST_RATIO
trim = OUModel.TRIM_RATIO
expose_all = OUModel.EXPOSE_ALL
txn_sample_rate = OUModel.TXN_SAMPLE_RATE
trainer = OUModelTrainer(seq_files_dir, result_path, ml_models,
test_ratio, trim, expose_all, txn_sample_rate)
# Perform training from OUModelTrainer and input files directory
model_map = trainer.train()
# Pickle dump the model
with save_path.open(mode='wb') as f:
pickle.dump((model_map, data_info.instance), f)
return True, ""
def infer(self, data: Dict) -> Tuple[Any, bool, str]:
"""
        Do inference on the model, given the data file and the model_map_path
:param data: {
features: 2D float arrays [[float]],
            opunit: OpUnit member name (string) selecting the model to use
model_path: model path
}
:return: {List of predictions, if inference succeeds, error message}
"""
features = data["features"]
opunit = data["opunit"]
model_path = data["model_path"]
# Load the model map
model_map = self._load_model(model_path)
if model_map is None:
logging.error(
f"Model map at {str(model_path)} has not been trained")
return [], False, "MODEL_MAP_NOT_TRAINED"
# Parameter validation
if not isinstance(opunit, str):
return [], False, "INVALID_OPUNIT"
try:
opunit = OpUnit[opunit]
except KeyError as e:
logging.error(f"{opunit} is not a valid Opunit name")
return [], False, "INVALID_OPUNIT"
features = np.array(features)
logging.debug(f"Using model on {opunit}")
model = model_map[opunit]
if model is None:
logging.error(f"Model for {opunit} doesn't exist")
return [], False, "MODEL_NOT_FOUND"
y_pred = model.predict(features)
return y_pred.tolist(), True, ""
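    # Illustrative inference payload for this model (paths and values are hypothetical;
    # "SEQ_SCAN" is assumed to be a valid OpUnit member name):
    #   data = {"type": "OPERATING_UNIT",
    #           "opunit": "SEQ_SCAN",
    #           "features": [[1024.0, 4.0, 1.0]],
    #           "model_path": "/tmp/ou_model_map.pickle"}
    #   predictions, ok, err = OUModel().infer(data)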
def _load_model_from_disk(self, save_path: Path) -> Dict:
"""
Load model from the path on disk (invoked when missing model cache)
:param save_path: model path on disk
:return: OU model map
"""
with save_path.open(mode='rb') as f:
model, data_info.instance = pickle.load(f)
return model
class InterferenceModel(AbstractModel):
"""
InterferenceModel that handles training and inference for the interference model
"""
# Training parameters
TEST_RATIO = 0.2
IMPACT_MODEL_RATIO = 0.1
WARMUP_PERIOD = 3
USE_QUERY_PREDICT_CACHE = False
ADD_NOISE = False
PREDICT_OU_ONLY = False
TXN_SAMPLE_RATE = 2
NETWORK_SAMPLE_RATE = 2
def __init__(self) -> None:
AbstractModel.__init__(self)
def train(self, data: Dict) -> Tuple[bool, str]:
"""
Train a model with the given model name and seq_files directory
:param data: {
methods: [lr, XXX, ...],
input_path: PATH_TO_SEQ_FILES_FOLDER, or None
save_path: PATH_TO_SAVE_MODEL_MAP
}
:return: if training succeeds, {True and empty string}, else {False, error message}
"""
ml_models = data["methods"]
input_path = data["input_path"]
save_path = data["save_path"]
ou_model_path = data["ou_model_path"]
ee_sample_rate = data["pipeline_metrics_sample_rate"]
# Do path checking up-front
save_path = Path(save_path)
save_dir = save_path.parent
try:
# Exist ok, and Creates parent if ok
save_dir.mkdir(parents=True, exist_ok=True)
except PermissionError as e:
return False, "FAIL_PERMISSION_ERROR"
# Create result model metrics in the same directory
save_file_name = save_path.stem
result_path = save_path.with_name(
str(save_file_name) + "_metric_results")
result_path.mkdir(parents=True, exist_ok=True)
test_ratio = InterferenceModel.TEST_RATIO
impact_model_ratio = InterferenceModel.IMPACT_MODEL_RATIO
warmup_period = InterferenceModel.WARMUP_PERIOD
use_query_predict_cache = InterferenceModel.USE_QUERY_PREDICT_CACHE
add_noise = InterferenceModel.ADD_NOISE
predict_ou_only = InterferenceModel.PREDICT_OU_ONLY
txn_sample_rate = InterferenceModel.TXN_SAMPLE_RATE
network_sample_rate = InterferenceModel.NETWORK_SAMPLE_RATE
with open(ou_model_path, 'rb') as pickle_file:
model_map, data_info.instance = pickle.load(pickle_file)
trainer = InterferenceModelTrainer(input_path, result_path, ml_models, test_ratio, impact_model_ratio,
model_map, warmup_period, use_query_predict_cache, add_noise,
predict_ou_only, ee_sample_rate, txn_sample_rate, network_sample_rate)
# Perform training
trainer.predict_ou_data()
        # We only need the direct model for the model server. The other models are for experimental purposes
_, _, direct_model = trainer.train()
# Pickle dump the model
with open(save_path, 'wb') as file:
pickle.dump(direct_model, file)
return True, ""
def infer(self, data: Dict) -> Tuple[Any, bool, str]:
"""
        Do inference on the model, given the data file and the model_path
:param data: {
features: 2D float arrays [[float]],
model_path: model path
}
:return: {List of predictions, if inference succeeds, error message}
"""
features = data["features"]
model_path = data["model_path"]
# Load the model
model = self._load_model(model_path)
if model is None:
logging.error(
f"Model map at {str(model_path)} has not been trained")
return [], False, "MODEL_MAP_NOT_TRAINED"
features = np.array(features)
y_pred = model.predict(features)
return y_pred.tolist(), True, ""
def _load_model_from_disk(self, save_path: Path):
"""
Load model from the path on disk (invoked when missing model cache)
:param save_path: model path on disk
:return: interference model
"""
with save_path.open(mode='rb') as f:
model = pickle.load(f)
return model
class ForecastModel(AbstractModel):
"""
ForecastModel that handles training and inference for Forecast models
"""
# Number of Microseconds per second
MICRO_SEC_PER_SEC = 1000000
def __init__(self) -> None:
AbstractModel.__init__(self)
def _update_parameters(self, interval):
# TODO(wz2): Possibly expose parameters
# Number of data points in a sequence
self.SEQ_LEN = 10 * ForecastModel.MICRO_SEC_PER_SEC // interval
# Number of data points for the horizon
self.HORIZON_LEN = 30 * ForecastModel.MICRO_SEC_PER_SEC // interval
# Number of data points for testing set
self.EVAL_DATA_SIZE = self.SEQ_LEN + 2 * self.HORIZON_LEN
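        # Worked example (illustrative): with interval = 500000 microseconds (0.5 s),
        #   SEQ_LEN        = 10 * 1000000 // 500000 = 20 data points
        #   HORIZON_LEN    = 30 * 1000000 // 500000 = 60 data points
        #   EVAL_DATA_SIZE = 20 + 2 * 60            = 140 data points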
def train(self, data: Dict) -> Tuple[bool, str]:
"""
Train a model with the given model name and seq_files directory
:param data: {
model_names: [LSTM...]
models_config: PATH_TO_JSON model config file
input_path: PATH_TO_TRACE, or None
save_path: PATH_TO_SAVE_MODEL_MAP
interval_micro_sec: Interval duration for aggregation in microseconds
}
:return: if training succeeds, {True and empty string}, else {False, error message}
"""
input_path = data["input_path"]
save_path = data["save_path"]
model_names = data["methods"]
models_config = data.get("models_config")
interval = data["interval_micro_sec"]
self._update_parameters(interval)
# Parse models arguments
models_kwargs = parse_model_config(model_names, models_config)
# Do path checking up-front
save_path = Path(save_path)
save_dir = save_path.parent
try:
# Exist ok, and Creates parent if ok
save_dir.mkdir(parents=True, exist_ok=True)
except PermissionError as e:
return False, "FAIL_PERMISSION_ERROR"
forecaster = Forecaster(
trace_file=input_path,
interval_us=interval,
test_mode=False,
seq_len=self.SEQ_LEN,
eval_size=self.EVAL_DATA_SIZE,
horizon_len=self.HORIZON_LEN)
models = forecaster.train(models_kwargs)
# Pickle dump the model
with save_path.open(mode='wb') as f:
pickle.dump(models, f)
return True, ""
def infer(self, data: Dict) -> Tuple[Any, bool, str]:
"""
        Do inference on the model, given the data file and the model_map_path
:param data: {
input_path: PATH_TO_TRACE, or None
model_path: model path
model_names: [LSTM...]
models_config: PATH_TO_JSON model config file
interval_micro_sec: Interval duration for aggregation in microseconds
}
        :return: {Dict<cluster, Dict<query, List<preds>>>, if inference succeeds, error message}
"""
input_path = data["input_path"]
model_names = data["model_names"]
models_config = data.get("models_config")
interval = data["interval_micro_sec"]
model_path = data["model_path"]
self._update_parameters(interval)
# Load the trained models
models = self._load_model(model_path)
if models is None:
logging.error(
f"Models at {str(model_path)} has not been trained")
return [], False, "MODELS_NOT_TRAINED"
forecaster = Forecaster(
trace_file=input_path,
test_mode=True,
interval_us=interval,
seq_len=self.SEQ_LEN,
eval_size=self.EVAL_DATA_SIZE,
horizon_len=self.HORIZON_LEN)
# FIXME:
# Assuming all the queries in the current trace file are from
# the same cluster for now
# Only forecast with first element of model_names
result = {}
query_pred = forecaster.predict(0, models[0][model_names[0]])
for qid, ts in query_pred.items():
result[int(qid)] = ts
return {0: result}, True, ""
def _load_model_from_disk(self, save_path: Path):
"""
Load model from the path on disk (invoked when missing model cache)
:param save_path: model path on disk
:return: workload forecasting model
"""
with save_path.open(mode='rb') as f:
model = pickle.load(f)
return model
class ModelServer:
"""
ModelServer(MS) class that runs in a loop to handle commands from the ModelServerManager from C++
"""
def __init__(self, end_point: str):
"""
Initialize the ModelServer by connecting to the ZMQ IPC endpoint
:param end_point: IPC endpoint
"""
# Establish ZMQ connection
self.context = zmq.Context()
self.socket = self.context.socket(zmq.DEALER)
self.socket.set_string(zmq.IDENTITY, 'model')
logging.debug(
f"Python model trying to connect to manager at {end_point}")
self.socket.connect(f"ipc://{end_point}")
logging.info(f"Python model connected at {end_point}")
# If the ModelServer is closing
self._closing = False
# Register the exit callback
atexit.register(self.cleanup_zmq)
        # Global model map cache
self.cache = dict()
# Notify the ModelServerManager that I am connected
self._send_msg(0, 0, ModelServer._make_response(
Callback.CONNECTED, "", True, ""))
# Model trainers/inferers
self.model_managers = {ModelType.FORECAST: ForecastModel(),
ModelType.OPERATING_UNIT: OUModel(),
ModelType.INTERFERENCE: InterferenceModel()}
def cleanup_zmq(self):
"""
Close the socket when the script exits
:return:
"""
self.socket.close()
self.context.destroy()
def _send_msg(self, send_id: int, recv_id: int, data: Dict) -> None:
"""
Send a message to the socket.
:param send_id: id on this end, 0 for now
:param recv_id: callback id to invoke on the other end
:param data: payload of the message in JSON
:return:
"""
json_result = json.dumps(data)
msg = f"{send_id}-{recv_id}-{json_result}"
self.socket.send_multipart([''.encode('utf-8'), msg.encode('utf-8')])
@staticmethod
def _make_response(action: Callback, result: Any, success: bool, err: str = "") -> Dict:
"""
Construct a response to the ModelServerManager
:param action: Action callback on the ModelServerManager
:param result: Any result
        :param success: True if the action succeeds
:param err: Error message
:return:
"""
return {
"action": action,
"result": result,
"success": success,
"err": err
}
@staticmethod
def _parse_msg(payload: str) -> Tuple[int, int, Optional[Message]]:
logging.debug("PY RECV: " + payload)
tokens = payload.split('-', 2)
# Invalid message format
try:
msg_id = int(tokens[0])
recv_id = int(tokens[1])
except ValueError as e:
logging.error(
f"Invalid message payload format: {payload}, ids not int.")
return -1, -1, None
msg = Message.from_json(tokens[2])
return msg_id, recv_id, msg
def _infer(self, data: Dict) -> Tuple[List, bool, str]:
"""
Do inference on the model
:param data: {
type: model type
model_path: model path
...
}
:return: {List of predictions, if inference succeeds, error message}
"""
model_type = data["type"]
return self.model_managers[ModelType[model_type]].infer(data)
def _recv(self) -> str:
"""
Receive from the ZMQ socket. This is a blocking call.
        :return: Message payload
"""
identity = self.socket.recv()
_delim = self.socket.recv()
payload = self.socket.recv()
logging.debug(f"Python recv: {str(identity)}, {str(payload)}")
return payload.decode("ascii")
def _execute_cmd(self, cmd: Command, data: Dict) -> Tuple[Dict, bool]:
"""
Execute a command from the ModelServerManager
:param cmd:
:param data:
:return: Tuple {
message string to sent back,
if continue the server
}
"""
if cmd == Command.PRINT:
msg = data["message"]
logging.info(f"MESSAGE PRINT: {str(msg)}")
response = self._make_response(Callback.NOOP, f"MODEL_REPLY_{msg}", True)
return response, True
elif cmd == Command.QUIT:
# Will not send any message so empty {} is ok
return self._make_response(Callback.NOOP, "", True), False
elif cmd == Command.TRAIN:
try:
model_type = data["type"]
ok, res = self.model_managers[ModelType[model_type]].train(data)
if ok:
response = self._make_response(Callback.NOOP, res, True)
else:
response = self._make_response(Callback.NOOP, "", False, res)
except ValueError as e:
logging.error(f"Model Not found : {e}")
response = self._make_response(
Callback.NOOP, "", False, "FAIL_MODEL_NOT_FOUND")
except KeyError as e:
logging.error(f"Data format wrong for TRAIN: {e}")
response = self._make_response(
Callback.NOOP, "", False, "FAIL_DATA_FORMAT_ERROR")
except Exception as e:
logging.error(f"Training failed. {e}")
response = self._make_response(
Callback.NOOP, "", False, "FAIL_TRAINING_FAILED")
return response, True
elif cmd == Command.INFER:
result, ok, err = self._infer(data)
response = self._make_response(Callback.NOOP, result, ok, err)
return response, True
def run_loop(self):
"""
Run in a loop to recv/send message to the ModelServer manager
:return:
"""
while (1):
try:
payload = self._recv()
except UnicodeError as e:
logging.warning(f"Failed to decode : {e.reason}")
continue
except KeyboardInterrupt:
if self._closing:
logging.warning("Forced shutting down now.")
os._exit(-1)
else:
logging.info("Received KeyboardInterrupt. Ctrl+C again to force shutting down.")
self._closing = True
continue
send_id, recv_id, msg = self._parse_msg(payload)
if msg is None:
continue
else:
result, cont = self._execute_cmd(msg.cmd, msg.data)
if not cont:
logging.info("Shutting down.")
break
# Currently not expecting to invoke any callback on ModelServer
# side, so second parameter 0
self._send_msg(0, send_id, result)
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: ./model_server.py <ZMQ_ENDPOINT>")
exit(-1)
ms = ModelServer(sys.argv[1])
ms.run_loop()
| 33.703104
| 113
| 0.60615
|
4a0049495f720fe4f1df515ff4eae050ed650aa4
| 79
|
py
|
Python
|
nba/utils/coordinates.py
|
jngaravitoc/nba
|
d2a64a69fd743e066fe3e0bad9c9bc109763ff97
|
[
"MIT"
] | null | null | null |
nba/utils/coordinates.py
|
jngaravitoc/nba
|
d2a64a69fd743e066fe3e0bad9c9bc109763ff97
|
[
"MIT"
] | null | null | null |
nba/utils/coordinates.py
|
jngaravitoc/nba
|
d2a64a69fd743e066fe3e0bad9c9bc109763ff97
|
[
"MIT"
] | null | null | null |
import numpy as np
import astropy  # NOTE: the intended alias after "as" is truncated in the source
# galactocentric astropy definition
| 11.285714
| 35
| 0.772152
|
4a004961be33df12fa814cf12c8dc17008c9a6a2
| 44,350
|
py
|
Python
|
setup.py
|
xoiopure/https-github.com-xoiopure-git-github.com-kivy-kivy
|
0b7d32702714c4792e5373171423357ffd22d054
|
[
"MIT"
] | null | null | null |
setup.py
|
xoiopure/https-github.com-xoiopure-git-github.com-kivy-kivy
|
0b7d32702714c4792e5373171423357ffd22d054
|
[
"MIT"
] | null | null | null |
setup.py
|
xoiopure/https-github.com-xoiopure-git-github.com-kivy-kivy
|
0b7d32702714c4792e5373171423357ffd22d054
|
[
"MIT"
] | null | null | null |
#
# Kivy - Cross-platform UI framework
# https://kivy.org/
#
import sys
build_examples = False
if "--build_examples" in sys.argv:
build_examples = True
sys.argv.remove("--build_examples")
from kivy.utils import pi_version
from copy import deepcopy
import os
from os.path import join, dirname, sep, exists, basename, isdir
from os import walk, environ, makedirs
from distutils.command.build_ext import build_ext
from distutils.version import LooseVersion
from distutils.sysconfig import get_python_inc
from collections import OrderedDict
from time import sleep
from sysconfig import get_paths
from pathlib import Path
import logging
from setuptools import setup, Extension, find_packages
if sys.version_info[0] == 2:
logging.critical(
'Unsupported Python version detected!: Kivy 2.0.0 and higher does not '
'support Python 2. Please upgrade to Python 3, or downgrade Kivy to '
'1.11.1 - the last Kivy release that still supports Python 2.')
def ver_equal(self, other):
return self.version == other
# fix error with py3's LooseVersion comparisons
LooseVersion.__eq__ = ver_equal
def get_description():
with open(join(dirname(__file__), 'README.md'), 'rb') as fileh:
return fileh.read().decode("utf8").replace('\r\n', '\n')
def getoutput(cmd, env=None):
import subprocess
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
p.wait()
if p.returncode: # if not returncode == 0
print('WARNING: A problem occurred while running {0} (code {1})\n'
.format(cmd, p.returncode))
stderr_content = p.stderr.read()
if stderr_content:
print('{0}\n'.format(stderr_content))
return ""
return p.stdout.read()
def pkgconfig(*packages, **kw):
flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
lenviron = None
pconfig = join(sys.prefix, 'libs', 'pkgconfig')
if isdir(pconfig):
lenviron = environ.copy()
lenviron['PKG_CONFIG_PATH'] = '{};{}'.format(
environ.get('PKG_CONFIG_PATH', ''), pconfig)
cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages))
results = getoutput(cmd, lenviron).split()
for token in results:
ext = token[:2].decode('utf-8')
flag = flag_map.get(ext)
if not flag:
continue
kw.setdefault(flag, []).append(token[2:].decode('utf-8'))
return kw
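# Illustrative call (output is hypothetical and depends on the local pkg-config setup):
#   pkgconfig('sdl2', 'SDL2_ttf')
#   -> {'include_dirs': ['/usr/include/SDL2'], 'libraries': ['SDL2', 'SDL2_ttf']}
# Only '-I', '-L' and '-l' tokens are kept, mapped to include_dirs, library_dirs and
# libraries respectively; every other token in the pkg-config output is ignored.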
def get_isolated_env_paths():
try:
# sdl2_dev is installed before setup.py is run, when installing from
# source due to pyproject.toml. However, it is installed to a
# pip isolated env, which we need to add to compiler
import kivy_deps.sdl2_dev as sdl2_dev
except ImportError:
return [], []
root = os.path.abspath(join(sdl2_dev.__path__[0], '../../../..'))
includes = [join(root, 'Include')] if isdir(join(root, 'Include')) else []
libs = [join(root, 'libs')] if isdir(join(root, 'libs')) else []
return includes, libs
# -----------------------------------------------------------------------------
# Determine on which platform we are
build_examples = build_examples or \
os.environ.get('KIVY_BUILD_EXAMPLES', '0') == '1'
platform = sys.platform
# Detect 32/64bit for OSX (http://stackoverflow.com/a/1405971/798575)
if sys.platform == 'darwin':
if sys.maxsize > 2 ** 32:
osx_arch = 'x86_64'
else:
osx_arch = 'i386'
# Detect Python for android project (http://github.com/kivy/python-for-android)
ndkplatform = environ.get('NDKPLATFORM')
if ndkplatform is not None and environ.get('LIBLINK'):
platform = 'android'
kivy_ios_root = environ.get('KIVYIOSROOT', None)
if kivy_ios_root is not None:
platform = 'ios'
# proprietary broadcom video core drivers
if exists('/opt/vc/include/bcm_host.h'):
# The proprietary broadcom video core drivers are not available on the
# Raspberry Pi 4
if (pi_version or 4) < 4:
platform = 'rpi'
# use mesa video core drivers
if environ.get('VIDEOCOREMESA', None) == '1':
platform = 'vc'
mali_paths = (
'/usr/lib/arm-linux-gnueabihf/libMali.so',
'/usr/lib/arm-linux-gnueabihf/mali-egl/libmali.so',
'/usr/local/mali-egl/libmali.so')
if any((exists(path) for path in mali_paths)):
platform = 'mali'
# Needed when cross-compiling
if environ.get('KIVY_CROSS_PLATFORM'):
platform = environ.get('KIVY_CROSS_PLATFORM')
# -----------------------------------------------------------------------------
# Detect options
#
c_options = OrderedDict()
c_options['use_rpi'] = platform == 'rpi'
c_options['use_egl'] = False
c_options['use_opengl_es2'] = None
c_options['use_opengl_mock'] = environ.get('READTHEDOCS', None) == 'True'
c_options['use_sdl2'] = None
c_options['use_pangoft2'] = None
c_options['use_ios'] = False
c_options['use_android'] = False
c_options['use_mesagl'] = False
c_options['use_x11'] = False
c_options['use_wayland'] = False
c_options['use_gstreamer'] = None
c_options['use_avfoundation'] = platform in ['darwin', 'ios']
c_options['use_osx_frameworks'] = platform == 'darwin'
c_options['debug_gl'] = False
# Set the alpha size, this will be 0 on the Raspberry Pi and 8 on all other
# platforms, so SDL2 works without X11
c_options['kivy_sdl_gl_alpha_size'] = 8 if pi_version is None else 0
# now check if environ is changing the default values
for key in list(c_options.keys()):
ukey = key.upper()
if ukey in environ:
# kivy_sdl_gl_alpha_size should be an integer, the rest are booleans
value = int(environ[ukey])
if key != 'kivy_sdl_gl_alpha_size':
value = bool(value)
print('Environ change {0} -> {1}'.format(key, value))
c_options[key] = value
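# For instance, exporting USE_SDL2=0 before running setup.py forces
# c_options['use_sdl2'] to False, while KIVY_SDL_GL_ALPHA_SIZE keeps its integer value.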
use_embed_signature = environ.get('USE_EMBEDSIGNATURE', '0') == '1'
use_embed_signature = use_embed_signature or bool(
platform not in ('ios', 'android'))
# -----------------------------------------------------------------------------
# We want to be able to install kivy as a wheel without a dependency
# on cython, but we also want to use cython where possible as a setup
# time dependency through `pyproject.toml` if building from source.
# There are issues with using cython at all on some platforms;
# exclude them from using or declaring cython.
# This determines whether Cython specific functionality may be used.
can_use_cython = True
if platform in ('ios', 'android'):
# NEVER use or declare cython on these platforms
print('Not using cython on %s' % platform)
can_use_cython = False
# -----------------------------------------------------------------------------
# Setup classes
# the build path where kivy is being compiled
src_path = build_path = dirname(__file__)
print("Current directory is: {}".format(os.getcwd()))
print("Source and initial build directory is: {}".format(src_path))
# __version__ is set by the exec() call below; declaring it here keeps the linter from complaining
__version__ = None
with open(join(src_path, 'kivy', '_version.py'), encoding="utf-8") as f:
exec(f.read())
class KivyBuildExt(build_ext, object):
def __new__(cls, *a, **kw):
# Note how this class is declared as a subclass of distutils
# build_ext as the Cython version may not be available in the
# environment it is initially started in. However, if Cython
# can be used, setuptools will bring Cython into the environment
# thus its version of build_ext will become available.
# The reason why this is done as a __new__ rather than through a
# factory function is because there are distutils functions that check
        # the values provided by cmdclass with issubclass, and so it would
# result in an exception.
# The following essentially supply a dynamically generated subclass
# that mix in the cython version of build_ext so that the
# functionality provided will also be executed.
if can_use_cython:
from Cython.Distutils import build_ext as cython_build_ext
build_ext_cls = type(
'KivyBuildExt', (KivyBuildExt, cython_build_ext), {})
return super(KivyBuildExt, cls).__new__(build_ext_cls)
else:
return super(KivyBuildExt, cls).__new__(cls)
def finalize_options(self):
retval = super(KivyBuildExt, self).finalize_options()
# Build the extensions in parallel if the options has not been set
if hasattr(self, 'parallel') and self.parallel is None:
# Use a maximum of 4 cores. If cpu_count returns None, then parallel
# build will be disabled
self.parallel = min(4, os.cpu_count() or 0)
if self.parallel:
print('Building extensions in parallel using {} cores'.format(
self.parallel))
global build_path
if (self.build_lib is not None and exists(self.build_lib) and
not self.inplace):
build_path = self.build_lib
print("Updated build directory to: {}".format(build_path))
return retval
def build_extensions(self):
# build files
config_h_fn = ('include', 'config.h')
config_pxi_fn = ('include', 'config.pxi')
config_py_fn = ('setupconfig.py', )
# generate headers
config_h = '// Autogenerated file for Kivy C configuration\n'
config_h += '#define __PY3 1\n'
config_pxi = '# Autogenerated file for Kivy Cython configuration\n'
config_pxi += 'DEF PY3 = 1\n'
config_py = '# Autogenerated file for Kivy configuration\n'
config_py += 'PY3 = 1\n'
config_py += 'CYTHON_MIN = {0}\nCYTHON_MAX = {1}\n'.format(
repr(MIN_CYTHON_STRING), repr(MAX_CYTHON_STRING))
config_py += 'CYTHON_BAD = {0}\n'.format(repr(', '.join(map(
str, CYTHON_UNSUPPORTED))))
# generate content
print('Build configuration is:')
for opt, value in c_options.items():
# kivy_sdl_gl_alpha_size is already an integer
if opt != 'kivy_sdl_gl_alpha_size':
value = int(bool(value))
print(' * {0} = {1}'.format(opt, value))
opt = opt.upper()
config_h += '#define __{0} {1}\n'.format(opt, value)
config_pxi += 'DEF {0} = {1}\n'.format(opt, value)
config_py += '{0} = {1}\n'.format(opt, value)
debug = bool(self.debug)
print(' * debug = {0}'.format(debug))
config_pxi += 'DEF DEBUG = {0}\n'.format(debug)
config_py += 'DEBUG = {0}\n'.format(debug)
config_pxi += 'DEF PLATFORM = "{0}"\n'.format(platform)
config_py += 'PLATFORM = "{0}"\n'.format(platform)
for fn, content in (
(config_h_fn, config_h), (config_pxi_fn, config_pxi),
(config_py_fn, config_py)):
build_fn = expand(build_path, *fn)
if self.update_if_changed(build_fn, content):
print('Updated {}'.format(build_fn))
src_fn = expand(src_path, *fn)
if src_fn != build_fn and self.update_if_changed(src_fn, content):
print('Updated {}'.format(src_fn))
c = self.compiler.compiler_type
print('Detected compiler is {}'.format(c))
if c != 'msvc':
for e in self.extensions:
e.extra_link_args += ['-lm']
super(KivyBuildExt, self).build_extensions()
def update_if_changed(self, fn, content):
need_update = True
if exists(fn):
with open(fn) as fd:
need_update = fd.read() != content
if need_update:
directory_name = dirname(fn)
if not exists(directory_name):
makedirs(directory_name)
with open(fn, 'w') as fd:
fd.write(content)
return need_update
def _check_and_fix_sdl2_mixer(f_path):
    # Between SDL_mixer 2.0.1 and 2.0.4 the included frameworks changed:
    # smpeg2 has been replaced with mpg123, but that needs no fix.
smpeg2_path = ("{}/Versions/A/Frameworks/smpeg2.framework"
"/Versions/A/smpeg2").format(f_path)
if not exists(smpeg2_path):
return
print("Check if SDL2_mixer smpeg2 have an @executable_path")
rpath_from = ("@executable_path/../Frameworks/SDL2.framework"
"/Versions/A/SDL2")
rpath_to = "@rpath/../../../../SDL2.framework/Versions/A/SDL2"
output = getoutput(("otool -L '{}'").format(smpeg2_path)).decode('utf-8')
if "@executable_path" not in output:
return
print("WARNING: Your SDL2_mixer version is invalid")
print("WARNING: The smpeg2 framework embedded in SDL2_mixer contains a")
print("WARNING: reference to @executable_path that will fail the")
print("WARNING: execution of your application.")
print("WARNING: We are going to change:")
print("WARNING: from: {}".format(rpath_from))
print("WARNING: to: {}".format(rpath_to))
getoutput("install_name_tool -change {} {} {}".format(
rpath_from, rpath_to, smpeg2_path))
output = getoutput(("otool -L '{}'").format(smpeg2_path))
if b"@executable_path" not in output:
print("WARNING: Change successfully applied!")
print("WARNING: You'll never see this message again.")
else:
print("WARNING: Unable to apply the changes, sorry.")
# -----------------------------------------------------------------------------
print("Python path is:\n{}\n".format('\n'.join(sys.path)))
# extract version (simulate doc generation, kivy will not be imported)
environ['KIVY_DOC_INCLUDE'] = '1'
import kivy
# Cython check
# on python-for-android and kivy-ios, cython usage is external
from kivy.tools.packaging.cython_cfg import get_cython_versions, get_cython_msg
CYTHON_REQUIRES_STRING, MIN_CYTHON_STRING, MAX_CYTHON_STRING, \
CYTHON_UNSUPPORTED = get_cython_versions()
cython_min_msg, cython_max_msg, cython_unsupported_msg = get_cython_msg()
if can_use_cython:
import Cython
print('\nFound Cython at', Cython.__file__)
cy_version_str = Cython.__version__
cy_ver = LooseVersion(cy_version_str)
print('Detected supported Cython version {}'.format(cy_version_str))
if cy_ver < LooseVersion(MIN_CYTHON_STRING):
print(cython_min_msg)
elif cy_ver in CYTHON_UNSUPPORTED:
print(cython_unsupported_msg)
elif cy_ver > LooseVersion(MAX_CYTHON_STRING):
print(cython_max_msg)
sleep(1)
# extra build commands go in the cmdclass dict {'command-name': CommandClass}
# see tools.packaging.{platform}.build.py for custom build commands for
# portable packages. Also e.g. we use build_ext command from cython if its
# installed for c extensions.
from kivy.tools.packaging.factory import FactoryBuild
cmdclass = {
'build_factory': FactoryBuild,
'build_ext': KivyBuildExt}
try:
# add build rules for portable packages to cmdclass
if platform == 'win32':
from kivy.tools.packaging.win32.build import WindowsPortableBuild
cmdclass['build_portable'] = WindowsPortableBuild
elif platform == 'darwin':
from kivy.tools.packaging.osx.build import OSXPortableBuild
cmdclass['build_portable'] = OSXPortableBuild
except ImportError:
print('User distribution detected, avoid portable command.')
# Detect which opengl version headers to use
if platform in ('android', 'darwin', 'ios', 'rpi', 'mali', 'vc'):
c_options['use_opengl_es2'] = True
elif c_options['use_opengl_es2'] is None:
c_options['use_opengl_es2'] = \
environ.get('KIVY_GRAPHICS', '').lower() == 'gles'
print('Using this graphics system: {}'.format(
['OpenGL', 'OpenGL ES 2'][int(c_options['use_opengl_es2'] or False)]))
# check if we are in a kivy-ios build
if platform == 'ios':
    print('Kivy-IOS project environment detected, using it.')
print('Kivy-IOS project located at {0}'.format(kivy_ios_root))
c_options['use_ios'] = True
c_options['use_sdl2'] = True
elif platform == 'android':
c_options['use_android'] = True
elif platform == 'darwin':
if c_options['use_osx_frameworks']:
if osx_arch == "i386":
print("Warning: building with frameworks fail on i386")
else:
print("OSX framework used, force to x86_64 only")
environ["ARCHFLAGS"] = environ.get("ARCHFLAGS", "-arch x86_64")
print("OSX ARCHFLAGS are: {}".format(environ["ARCHFLAGS"]))
# detect gstreamer, only on desktop
# works if we forced the options or in autodetection
if platform not in ('ios', 'android') and (c_options['use_gstreamer']
in (None, True)):
gstreamer_valid = False
if c_options['use_osx_frameworks'] and platform == 'darwin':
# check the existence of frameworks
f_path = '/Library/Frameworks/GStreamer.framework'
if not exists(f_path):
c_options['use_gstreamer'] = False
print('GStreamer framework not found, fallback on pkg-config')
else:
print('GStreamer framework found')
gstreamer_valid = True
c_options['use_gstreamer'] = True
gst_flags = {
'extra_link_args': [
'-F/Library/Frameworks',
'-Xlinker', '-rpath',
'-Xlinker', '/Library/Frameworks',
'-Xlinker', '-headerpad',
'-Xlinker', '190',
'-framework', 'GStreamer'],
'include_dirs': [join(f_path, 'Headers')]}
elif platform == 'win32':
gst_flags = pkgconfig('gstreamer-1.0')
if 'libraries' in gst_flags:
print('GStreamer found via pkg-config')
gstreamer_valid = True
c_options['use_gstreamer'] = True
else:
_includes = get_isolated_env_paths()[0] + [get_paths()['include']]
for include_dir in _includes:
if exists(join(include_dir, 'gst', 'gst.h')):
print('GStreamer found via gst.h')
gstreamer_valid = True
c_options['use_gstreamer'] = True
gst_flags = {
'libraries':
['gstreamer-1.0', 'glib-2.0', 'gobject-2.0']}
break
if not gstreamer_valid:
# use pkg-config approach instead
gst_flags = pkgconfig('gstreamer-1.0')
if 'libraries' in gst_flags:
print('GStreamer found via pkg-config')
c_options['use_gstreamer'] = True
# detect SDL2, only on desktop and iOS, or android if explicitly enabled
# works if we forced the options or in autodetection
sdl2_flags = {}
if platform == 'win32' and c_options['use_sdl2'] is None:
c_options['use_sdl2'] = True
if c_options['use_sdl2'] or (
platform not in ('android',) and c_options['use_sdl2'] is None):
sdl2_valid = False
if c_options['use_osx_frameworks'] and platform == 'darwin':
# check the existence of frameworks
sdl2_valid = True
sdl2_flags = {
'extra_link_args': [
'-F/Library/Frameworks',
'-Xlinker', '-rpath',
'-Xlinker', '/Library/Frameworks',
'-Xlinker', '-headerpad',
'-Xlinker', '190'],
'include_dirs': [],
'extra_compile_args': ['-F/Library/Frameworks']
}
for name in ('SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer'):
f_path = '/Library/Frameworks/{}.framework'.format(name)
if not exists(f_path):
print('Missing framework {}'.format(f_path))
sdl2_valid = False
continue
sdl2_flags['extra_link_args'] += ['-framework', name]
sdl2_flags['include_dirs'] += [join(f_path, 'Headers')]
print('Found sdl2 frameworks: {}'.format(f_path))
if name == 'SDL2_mixer':
_check_and_fix_sdl2_mixer(f_path)
if not sdl2_valid:
c_options['use_sdl2'] = False
print('SDL2 frameworks not found, fallback on pkg-config')
else:
c_options['use_sdl2'] = True
print('Activate SDL2 compilation')
if not sdl2_valid and platform != "ios":
# use pkg-config approach instead
sdl2_flags = pkgconfig('sdl2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer')
if 'libraries' in sdl2_flags:
print('SDL2 found via pkg-config')
c_options['use_sdl2'] = True
# -----------------------------------------------------------------------------
# declare flags
def get_modulename_from_file(filename):
filename = filename.replace(sep, '/')
pyx = '.'.join(filename.split('.')[:-1])
pyxl = pyx.split('/')
while pyxl[0] != 'kivy':
pyxl.pop(0)
if pyxl[1] == 'kivy':
pyxl.pop(0)
return '.'.join(pyxl)
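# Illustrative call: get_modulename_from_file('kivy/graphics/vbo.pyx')
# returns 'kivy.graphics.vbo' (everything before the leading 'kivy' component is dropped).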
def expand(root, *args):
return join(root, 'kivy', *args)
class CythonExtension(Extension):
def __init__(self, *args, **kwargs):
Extension.__init__(self, *args, **kwargs)
self.cython_directives = {
'c_string_encoding': 'utf-8',
'profile': 'USE_PROFILE' in environ,
'embedsignature': use_embed_signature,
'language_level': 3,
'unraisable_tracebacks': True,
}
        # XXX with pip, setuptools is imported before distutils and changes
        # our .pyx to .c, so cythonize doesn't happen. Force our sources
        # again.
self.sources = args[1]
def merge(d1, *args):
d1 = deepcopy(d1)
for d2 in args:
for key, value in d2.items():
value = deepcopy(value)
if key in d1:
d1[key].extend(value)
else:
d1[key] = value
return d1
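# Illustrative merge of two flag dicts (lists for shared keys are concatenated,
# values are deep-copied so the inputs are left untouched):
#   merge({'libraries': ['GL']}, {'libraries': ['m'], 'include_dirs': ['/usr/include']})
#   -> {'libraries': ['GL', 'm'], 'include_dirs': ['/usr/include']}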
def determine_base_flags():
includes, libs = get_isolated_env_paths()
flags = {
'libraries': [],
'include_dirs': [join(src_path, 'kivy', 'include')] + includes,
'library_dirs': [] + libs,
'extra_link_args': [],
'extra_compile_args': []}
if c_options['use_ios']:
sysroot = environ.get('IOSSDKROOT', environ.get('SDKROOT'))
if not sysroot:
raise Exception('IOSSDKROOT is not set')
flags['include_dirs'] += [sysroot]
flags['extra_compile_args'] += ['-isysroot', sysroot]
flags['extra_link_args'] += ['-isysroot', sysroot]
elif platform.startswith('freebsd'):
flags['include_dirs'] += [join(
environ.get('LOCALBASE', '/usr/local'), 'include')]
flags['library_dirs'] += [join(
environ.get('LOCALBASE', '/usr/local'), 'lib')]
elif platform == 'darwin':
v = os.uname()
if v[2] >= '13.0.0':
# use xcode-select to search on the right Xcode path
# XXX use the best SDK available instead of a specific one
import platform as _platform
xcode_dev = getoutput('xcode-select -p').splitlines()[0]
sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2])
print('Xcode detected at {}, and using OS X{} sdk'.format(
xcode_dev, sdk_mac_ver))
sysroot = join(
xcode_dev.decode('utf-8'),
'Platforms/MacOSX.platform/Developer/SDKs',
'MacOSX{}.sdk'.format(sdk_mac_ver),
'System/Library/Frameworks')
else:
sysroot = ('/System/Library/Frameworks/'
'ApplicationServices.framework/Frameworks')
flags['extra_compile_args'] += ['-F%s' % sysroot]
flags['extra_link_args'] += ['-F%s' % sysroot]
elif platform == 'win32':
flags['include_dirs'] += [get_python_inc(prefix=sys.prefix)]
flags['library_dirs'] += [join(sys.prefix, "libs")]
return flags
def determine_gl_flags():
kivy_graphics_include = join(src_path, 'kivy', 'include')
flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}
base_flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}
cross_sysroot = environ.get('KIVY_CROSS_SYSROOT')
if c_options['use_opengl_mock']:
return flags, base_flags
if platform == 'win32':
flags['libraries'] = ['opengl32', 'glew32']
elif platform == 'ios':
flags['libraries'] = ['GLESv2']
flags['extra_link_args'] = ['-framework', 'OpenGLES']
elif platform == 'darwin':
flags['extra_link_args'] = ['-framework', 'OpenGL', '-arch', osx_arch]
flags['extra_compile_args'] = ['-arch', osx_arch]
elif platform.startswith('freebsd'):
flags['libraries'] = ['GL']
elif platform.startswith('openbsd'):
flags['include_dirs'] = ['/usr/X11R6/include']
flags['library_dirs'] = ['/usr/X11R6/lib']
flags['libraries'] = ['GL']
elif platform == 'android':
flags['include_dirs'] = [join(ndkplatform, 'usr', 'include')]
flags['library_dirs'] = [join(ndkplatform, 'usr', 'lib')]
flags['libraries'] = ['GLESv2']
elif platform == 'rpi':
if not cross_sysroot:
flags['include_dirs'] = [
'/opt/vc/include',
'/opt/vc/include/interface/vcos/pthreads',
'/opt/vc/include/interface/vmcs_host/linux']
flags['library_dirs'] = ['/opt/vc/lib']
brcm_lib_files = (
'/opt/vc/lib/libbrcmEGL.so',
'/opt/vc/lib/libbrcmGLESv2.so')
else:
print("KIVY_CROSS_SYSROOT: " + cross_sysroot)
flags['include_dirs'] = [
cross_sysroot + '/usr/include',
cross_sysroot + '/usr/include/interface/vcos/pthreads',
cross_sysroot + '/usr/include/interface/vmcs_host/linux']
flags['library_dirs'] = [cross_sysroot + '/usr/lib']
brcm_lib_files = (
cross_sysroot + '/usr/lib/libbrcmEGL.so',
cross_sysroot + '/usr/lib/libbrcmGLESv2.so')
if all((exists(lib) for lib in brcm_lib_files)):
print('Found brcmEGL and brcmGLES library files '
'for rpi platform at ' + dirname(brcm_lib_files[0]))
gl_libs = ['brcmEGL', 'brcmGLESv2']
else:
print(
'Failed to find brcmEGL and brcmGLESv2 library files '
'for rpi platform, falling back to EGL and GLESv2.')
gl_libs = ['EGL', 'GLESv2']
flags['libraries'] = ['bcm_host'] + gl_libs
elif platform in ['mali', 'vc']:
flags['include_dirs'] = ['/usr/include/']
flags['library_dirs'] = ['/usr/lib/arm-linux-gnueabihf']
flags['libraries'] = ['GLESv2']
c_options['use_x11'] = True
c_options['use_egl'] = True
else:
flags['libraries'] = ['GL']
return flags, base_flags
def determine_sdl2():
flags = {}
if not c_options['use_sdl2']:
return flags
sdl2_path = environ.get('KIVY_SDL2_PATH', None)
if sdl2_flags and not sdl2_path and platform == 'darwin':
return sdl2_flags
includes, _ = get_isolated_env_paths()
# no pkgconfig info, or we want to use a specific sdl2 path, so perform
# manual configuration
flags['libraries'] = ['SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer']
split_chr = ';' if platform == 'win32' else ':'
sdl2_paths = sdl2_path.split(split_chr) if sdl2_path else []
if not sdl2_paths:
sdl2_paths = []
for include in includes + [join(sys.prefix, 'include')]:
sdl_inc = join(include, 'SDL2')
if isdir(sdl_inc):
sdl2_paths.append(sdl_inc)
sdl2_paths.extend(['/usr/local/include/SDL2', '/usr/include/SDL2'])
flags['include_dirs'] = sdl2_paths
flags['extra_link_args'] = []
flags['extra_compile_args'] = []
flags['library_dirs'] = (
sdl2_paths if sdl2_paths else
['/usr/local/lib/'])
if sdl2_flags:
flags = merge(flags, sdl2_flags)
# ensure headers for all the SDL2 and sub libraries are available
libs_to_check = ['SDL', 'SDL_mixer', 'SDL_ttf', 'SDL_image']
can_compile = True
for lib in libs_to_check:
found = False
for d in flags['include_dirs']:
fn = join(d, '{}.h'.format(lib))
if exists(fn):
found = True
print('SDL2: found {} header at {}'.format(lib, fn))
break
if not found:
print('SDL2: missing sub library {}'.format(lib))
can_compile = False
if not can_compile:
c_options['use_sdl2'] = False
return {}
return flags
base_flags = determine_base_flags()
gl_flags, gl_flags_base = determine_gl_flags()
# -----------------------------------------------------------------------------
# sources to compile
# all the dependencies have been found manually with:
# grep -inr -E '(cimport|include)' kivy/graphics/context_instructions.{pxd,pyx}
graphics_dependencies = {
'buffer.pyx': ['common.pxi'],
'context.pxd': ['instructions.pxd', 'texture.pxd', 'vbo.pxd', 'cgl.pxd'],
'cgl.pxd': ['common.pxi', 'config.pxi', 'gl_redirect.h'],
'compiler.pxd': ['instructions.pxd'],
'compiler.pyx': ['context_instructions.pxd'],
'cgl.pyx': ['cgl.pxd'],
'cgl_mock.pyx': ['cgl.pxd'],
'cgl_sdl2.pyx': ['cgl.pxd'],
'cgl_gl.pyx': ['cgl.pxd'],
'cgl_glew.pyx': ['cgl.pxd'],
'context_instructions.pxd': [
'transformation.pxd', 'instructions.pxd', 'texture.pxd'],
'fbo.pxd': ['cgl.pxd', 'instructions.pxd', 'texture.pxd'],
'fbo.pyx': [
'config.pxi', 'opcodes.pxi', 'transformation.pxd', 'context.pxd'],
'gl_instructions.pyx': [
'config.pxi', 'opcodes.pxi', 'cgl.pxd', 'instructions.pxd'],
'instructions.pxd': [
'vbo.pxd', 'context_instructions.pxd', 'compiler.pxd', 'shader.pxd',
'texture.pxd', '../_event.pxd'],
'instructions.pyx': [
'config.pxi', 'opcodes.pxi', 'cgl.pxd',
'context.pxd', 'common.pxi', 'vertex.pxd', 'transformation.pxd'],
'opengl.pyx': [
'config.pxi', 'common.pxi', 'cgl.pxd', 'gl_redirect.h'],
'opengl_utils.pyx': [
'opengl_utils_def.pxi', 'cgl.pxd', ],
'shader.pxd': ['cgl.pxd', 'transformation.pxd', 'vertex.pxd'],
'shader.pyx': [
'config.pxi', 'common.pxi', 'cgl.pxd',
'vertex.pxd', 'transformation.pxd', 'context.pxd',
'gl_debug_logger.pxi'],
'stencil_instructions.pxd': ['instructions.pxd'],
'stencil_instructions.pyx': [
'config.pxi', 'opcodes.pxi', 'cgl.pxd',
'gl_debug_logger.pxi'],
'scissor_instructions.pyx': [
'config.pxi', 'opcodes.pxi', 'cgl.pxd'],
'svg.pyx': ['config.pxi', 'common.pxi', 'texture.pxd', 'instructions.pxd',
'vertex_instructions.pxd', 'tesselator.pxd'],
'texture.pxd': ['cgl.pxd'],
'texture.pyx': [
'config.pxi', 'common.pxi', 'opengl_utils_def.pxi', 'context.pxd',
'cgl.pxd', 'opengl_utils.pxd',
'img_tools.pxi', 'gl_debug_logger.pxi'],
'vbo.pxd': ['buffer.pxd', 'cgl.pxd', 'vertex.pxd'],
'vbo.pyx': [
'config.pxi', 'common.pxi', 'context.pxd',
'instructions.pxd', 'shader.pxd', 'gl_debug_logger.pxi'],
'vertex.pxd': ['cgl.pxd'],
'vertex.pyx': ['config.pxi', 'common.pxi'],
'vertex_instructions.pyx': [
'config.pxi', 'common.pxi', 'vbo.pxd', 'vertex.pxd',
'instructions.pxd', 'vertex_instructions.pxd',
'cgl.pxd', 'texture.pxd', 'vertex_instructions_line.pxi'],
'vertex_instructions_line.pxi': ['stencil_instructions.pxd']}
sources = {
'_event.pyx': merge(base_flags, {'depends': ['properties.pxd']}),
'_clock.pyx': {},
'weakproxy.pyx': {},
'properties.pyx': merge(base_flags, {'depends': ['_event.pxd']}),
'graphics/buffer.pyx': merge(base_flags, gl_flags_base),
'graphics/context.pyx': merge(base_flags, gl_flags_base),
'graphics/compiler.pyx': merge(base_flags, gl_flags_base),
'graphics/context_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/fbo.pyx': merge(base_flags, gl_flags_base),
'graphics/gl_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/opengl.pyx': merge(base_flags, gl_flags_base),
'graphics/opengl_utils.pyx': merge(base_flags, gl_flags_base),
'graphics/shader.pyx': merge(base_flags, gl_flags_base),
'graphics/stencil_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/scissor_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/texture.pyx': merge(base_flags, gl_flags_base),
'graphics/transformation.pyx': merge(base_flags, gl_flags_base),
'graphics/vbo.pyx': merge(base_flags, gl_flags_base),
'graphics/vertex.pyx': merge(base_flags, gl_flags_base),
'graphics/vertex_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/cgl.pyx': merge(base_flags, gl_flags_base),
'graphics/cgl_backend/cgl_mock.pyx': merge(base_flags, gl_flags_base),
'graphics/cgl_backend/cgl_gl.pyx': merge(base_flags, gl_flags),
'graphics/cgl_backend/cgl_glew.pyx': merge(base_flags, gl_flags),
'graphics/cgl_backend/cgl_sdl2.pyx': merge(base_flags, gl_flags_base),
'graphics/cgl_backend/cgl_debug.pyx': merge(base_flags, gl_flags_base),
'core/text/text_layout.pyx': base_flags,
'core/window/window_info.pyx': base_flags,
'graphics/tesselator.pyx': merge(base_flags, {
'include_dirs': ['kivy/lib/libtess2/Include'],
'c_depends': [
'lib/libtess2/Source/bucketalloc.c',
'lib/libtess2/Source/dict.c',
'lib/libtess2/Source/geom.c',
'lib/libtess2/Source/mesh.c',
'lib/libtess2/Source/priorityq.c',
'lib/libtess2/Source/sweep.c',
'lib/libtess2/Source/tess.c'
]
}),
'graphics/svg.pyx': merge(base_flags, gl_flags_base)
}
if c_options["use_sdl2"]:
sdl2_flags = determine_sdl2()
if c_options['use_sdl2'] and sdl2_flags:
sources['graphics/cgl_backend/cgl_sdl2.pyx'] = merge(
sources['graphics/cgl_backend/cgl_sdl2.pyx'], sdl2_flags)
sdl2_depends = {'depends': ['lib/sdl2.pxi']}
for source_file in ('core/window/_window_sdl2.pyx',
'core/image/_img_sdl2.pyx',
'core/text/_text_sdl2.pyx',
'core/audio/audio_sdl2.pyx',
'core/clipboard/_clipboard_sdl2.pyx'):
sources[source_file] = merge(
base_flags, sdl2_flags, sdl2_depends)
if c_options['use_pangoft2'] in (None, True) and platform not in (
'android', 'ios', 'win32'):
pango_flags = pkgconfig('pangoft2')
if pango_flags and 'libraries' in pango_flags:
print('Pango: pangoft2 found via pkg-config')
c_options['use_pangoft2'] = True
pango_depends = {'depends': [
'lib/pango/pangoft2.pxi',
'lib/pango/pangoft2.h']}
sources['core/text/_text_pango.pyx'] = merge(
base_flags, pango_flags, pango_depends)
print(sources['core/text/_text_pango.pyx'])
if platform in ('darwin', 'ios'):
# activate ImageIO provider for our core image
if platform == 'ios':
osx_flags = {'extra_link_args': [
'-framework', 'Foundation',
'-framework', 'UIKit',
'-framework', 'AudioToolbox',
'-framework', 'CoreGraphics',
'-framework', 'QuartzCore',
'-framework', 'ImageIO',
'-framework', 'Accelerate']}
else:
osx_flags = {'extra_link_args': [
'-framework', 'ApplicationServices']}
sources['core/image/img_imageio.pyx'] = merge(
base_flags, osx_flags)
if c_options['use_avfoundation']:
import platform as _platform
mac_ver = [int(x) for x in _platform.mac_ver()[0].split('.')[:2]]
if mac_ver >= [10, 7] or platform == 'ios':
osx_flags = {
'extra_link_args': ['-framework', 'AVFoundation'],
'extra_compile_args': ['-ObjC++'],
'depends': ['core/camera/camera_avfoundation_implem.m']}
sources['core/camera/camera_avfoundation.pyx'] = merge(
base_flags, osx_flags)
else:
print('AVFoundation cannot be used, OSX >= 10.7 is required')
if c_options['use_rpi']:
sources['lib/vidcore_lite/egl.pyx'] = merge(
base_flags, gl_flags)
sources['lib/vidcore_lite/bcm.pyx'] = merge(
base_flags, gl_flags)
if c_options['use_x11']:
libs = ['Xrender', 'X11']
if c_options['use_egl']:
libs += ['EGL']
else:
libs += ['GL']
sources['core/window/window_x11.pyx'] = merge(
base_flags, gl_flags, {
# FIXME add an option to depend on them but not compile them
# cause keytab is included in core, and core is included in
# window_x11
#
# 'depends': [
# 'core/window/window_x11_keytab.c',
# 'core/window/window_x11_core.c'],
'libraries': libs})
if c_options['use_gstreamer']:
sources['lib/gstplayer/_gstplayer.pyx'] = merge(
base_flags, gst_flags, {
'depends': ['lib/gstplayer/_gstplayer.h']})
# -----------------------------------------------------------------------------
# extension modules
def get_dependencies(name, deps=None):
if deps is None:
deps = []
for dep in graphics_dependencies.get(name, []):
if dep not in deps:
deps.append(dep)
get_dependencies(dep, deps)
return deps
def resolve_dependencies(fn, depends):
fn = basename(fn)
deps = []
get_dependencies(fn, deps)
get_dependencies(fn.replace('.pyx', '.pxd'), deps)
deps_final = []
paths_to_test = ['graphics', 'include']
for dep in deps:
found = False
for path in paths_to_test:
filename = expand(src_path, path, dep)
if exists(filename):
deps_final.append(filename)
found = True
break
if not found:
print('ERROR: Dependency for {} not resolved: {}'.format(
fn, dep
))
return deps_final
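# Illustrative helper (a sketch; it is never called during the build): shows
# how the recursive lookup above expands one entry of graphics_dependencies.
def _example_expand_dependencies():
    deps = get_dependencies('vbo.pyx')
    # deps now holds the transitive closure of the table entries for
    # 'vbo.pyx': 'config.pxi', 'common.pxi', 'context.pxd', 'instructions.pxd',
    # 'shader.pxd', 'gl_debug_logger.pxi', plus what those files pull in
    # recursively.
    return deps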
def get_extensions_from_sources(sources):
ext_modules = []
if environ.get('KIVY_FAKE_BUILDEXT'):
print('Fake build_ext asked, will generate only .h/.c')
return ext_modules
for pyx, flags in sources.items():
is_graphics = pyx.startswith('graphics')
pyx = expand(src_path, pyx)
depends = [expand(src_path, x) for x in flags.pop('depends', [])]
c_depends = [expand(src_path, x) for x in flags.pop('c_depends', [])]
if not can_use_cython:
# can't use cython, so use the .c files instead.
pyx = '%s.c' % pyx[:-4]
if is_graphics:
depends = resolve_dependencies(pyx, depends)
f_depends = [x for x in depends if x.rsplit('.', 1)[-1] in (
'c', 'cpp', 'm')]
module_name = get_modulename_from_file(pyx)
flags_clean = {'depends': depends}
for key, value in flags.items():
if len(value):
flags_clean[key] = value
ext_modules.append(CythonExtension(
module_name, [pyx] + f_depends + c_depends, **flags_clean))
return ext_modules
ext_modules = get_extensions_from_sources(sources)
# -----------------------------------------------------------------------------
# automatically detect data files
split_examples = int(environ.get('KIVY_SPLIT_EXAMPLES', '0'))
data_file_prefix = 'share/kivy-'
examples = {}
examples_allowed_ext = ('readme', 'py', 'wav', 'png', 'jpg', 'svg', 'json',
'avi', 'gif', 'txt', 'ttf', 'obj', 'mtl', 'kv', 'mpg',
'glsl', 'zip')
for root, subFolders, files in walk('examples'):
for fn in files:
ext = fn.split('.')[-1].lower()
if ext not in examples_allowed_ext:
continue
filename = join(root, fn)
directory = '%s%s' % (data_file_prefix, dirname(filename))
if directory not in examples:
examples[directory] = []
examples[directory].append(filename)
binary_deps = []
binary_deps_path = join(src_path, 'kivy', 'binary_deps')
if isdir(binary_deps_path):
for root, dirnames, filenames in walk(binary_deps_path):
for fname in filenames:
binary_deps.append(
join(root.replace(binary_deps_path, 'binary_deps'), fname))
def glob_paths(*patterns, excludes=('.pyc', )):
files = []
base = Path(join(src_path, 'kivy'))
for pat in patterns:
for f in base.glob(pat):
if f.suffix in excludes:
continue
files.append(str(f.relative_to(base)))
return files
# -----------------------------------------------------------------------------
# setup !
if not build_examples:
setup(
name='Kivy',
version=__version__,
author='Kivy Team and other contributors',
author_email='kivy-dev@googlegroups.com',
url='http://kivy.org',
license='MIT',
description=(
'A software library for rapid development of '
'hardware-accelerated multitouch applications.'),
long_description=get_description(),
long_description_content_type='text/markdown',
ext_modules=ext_modules,
cmdclass=cmdclass,
packages=find_packages(include=['kivy*']),
package_dir={'kivy': 'kivy'},
package_data={
'kivy':
glob_paths('*.pxd', '*.pxi') +
glob_paths('**/*.pxd', '**/*.pxi') +
glob_paths('data/**/*.*') +
glob_paths('include/**/*.*') +
glob_paths('tools/**/*.*', excludes=('.pyc', '.enc')) +
glob_paths('graphics/**/*.h') +
glob_paths('tests/**/*.*') +
[
'setupconfig.py',
] + binary_deps
},
data_files=[] if split_examples else list(examples.items()),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Artistic Software',
'Topic :: Games/Entertainment',
'Topic :: Multimedia :: Graphics :: 3D Rendering',
'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera',
'Topic :: Multimedia :: Graphics :: Presentation',
'Topic :: Multimedia :: Graphics :: Viewers',
'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',
'Topic :: Multimedia :: Video :: Display',
'Topic :: Scientific/Engineering :: Human Machine Interfaces',
'Topic :: Scientific/Engineering :: Visualization',
('Topic :: Software Development :: Libraries :: '
'Application Frameworks'),
'Topic :: Software Development :: User Interfaces'])
else:
setup(
name='Kivy-examples',
version=__version__,
author='Kivy Team and other contributors',
author_email='kivy-dev@googlegroups.com',
url='http://kivy.org',
license='MIT',
description=('Kivy examples.'),
long_description_content_type='text/markdown',
long_description=get_description(),
data_files=list(examples.items()))
| 38.97188
| 80
| 0.597903
|
4a004a2f778bea3aa393169ae71473e2609d3c09
| 1,638
|
py
|
Python
|
tools/draw_ft_epoch_ablation.py
|
twangnh/Calibration_mrcnn
|
e5f3076cefbe35297a403a753bb57e11503db818
|
[
"Apache-2.0"
] | 87
|
2020-07-24T01:28:39.000Z
|
2021-08-29T08:40:18.000Z
|
tools/draw_ft_epoch_ablation.py
|
twangnh/Calibration_mrcnn
|
e5f3076cefbe35297a403a753bb57e11503db818
|
[
"Apache-2.0"
] | 3
|
2020-09-27T12:59:28.000Z
|
2022-01-06T13:14:08.000Z
|
tools/draw_ft_epoch_ablation.py
|
twangnh/Calibration_mrcnn
|
e5f3076cefbe35297a403a753bb57e11503db818
|
[
"Apache-2.0"
] | 20
|
2020-09-05T04:37:19.000Z
|
2021-12-13T02:25:48.000Z
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import math
from matplotlib.ticker import FormatStrFormatter
from matplotlib import scale as mscale
from matplotlib import transforms as mtransforms
# z = [0,0.1,0.3,0.9,1,2,5]
# z = list(range(0, 30000, 1000))
# with open('./ft_cat_epoch_ablation_for_drawing.txt', 'r') as f:
with open('./ft_cal_epoch_ablation_for_drawing_compose.txt', 'r') as f:
epoch_results = f.readlines()
epoch_results = [i.strip().split(' ') for i in epoch_results]
epoch_results_array = np.array(epoch_results).astype(float)  # np.float is a removed NumPy alias; plain float is equivalent
z = [0,1,2,3,4,5,6,7,8,9,10,11,13,15,20,25,30,35]
# z = [0,1,2,3,4,5,6,7,8,9]
eAP = epoch_results_array[:, :4].mean(axis=1).tolist()
bin1 = epoch_results_array[:, 0].tolist()
bin2 = epoch_results_array[:, 1].tolist()
bin3 = epoch_results_array[:, 2].tolist()
bin4 = epoch_results_array[:, 3].tolist()
fig = plt.figure(figsize=(8,5))
ax1 = fig.add_subplot(111)
matplotlib.rcParams.update({'font.size': 14})
ax1.plot(z, bin4, marker='o', linewidth=2, label='AP of class bin [1000, -)')
ax1.plot(z, bin3, marker='o', linewidth=2, label='AP of class bin [100, 1000)')
ax1.plot(z, bin2, marker='o', linewidth=2, label='AP of class bin [10, 100)')
ax1.plot(z, bin1, marker='o', linewidth=2, label='AP of class bin (0, 10)')
ax1.plot(z, eAP, linestyle='-', marker='o', linewidth=2, label='bAP')
# ax1.plot([0],[15.4], 'D', color = 'green')
plt.xlabel('calibration steps (k)', size=16)
plt.ylabel('AP or bAP', size=16)
# ax1.set_xscale('log')
plt.legend( loc='best')
plt.grid()
plt.savefig('ablation_cal_steps.eps', format='eps', dpi=1000)
plt.show()
| 32.117647
| 80
| 0.689255
|
4a004a3cd542e16ed45339082491236231b63d13
| 1,293
|
py
|
Python
|
EXOSIMS/SimulatedUniverse/DulzPlavchanUniverse.py
|
jaysonfig/EXOSIMS
|
d81436c2b17cdb779cad519b1932d3e3ad49b55e
|
[
"BSD-3-Clause"
] | 16
|
2016-01-17T03:57:36.000Z
|
2021-09-17T16:37:54.000Z
|
EXOSIMS/SimulatedUniverse/DulzPlavchanUniverse.py
|
jaysonfig/EXOSIMS
|
d81436c2b17cdb779cad519b1932d3e3ad49b55e
|
[
"BSD-3-Clause"
] | 271
|
2015-12-09T00:48:08.000Z
|
2022-01-12T22:28:27.000Z
|
EXOSIMS/SimulatedUniverse/DulzPlavchanUniverse.py
|
jaysonfig/EXOSIMS
|
d81436c2b17cdb779cad519b1932d3e3ad49b55e
|
[
"BSD-3-Clause"
] | 28
|
2015-07-31T01:18:34.000Z
|
2022-02-20T20:16:37.000Z
|
from EXOSIMS.Prototypes.SimulatedUniverse import SimulatedUniverse
import numpy as np
class DulzPlavchanUniverse(SimulatedUniverse):
"""Simulated Universe module based on Dulz and Plavchan occurrence rates.
"""
def __init__(self, **specs):
SimulatedUniverse.__init__(self, **specs)
def gen_physical_properties(self, **specs):
"""Generating universe based on Dulz and Plavchan occurrence rate tables.
"""
PPop = self.PlanetPopulation
TL = self.TargetList
# treat eta as the rate parameter of a Poisson distribution
targetSystems = np.random.poisson(lam=PPop.eta, size=TL.nStars)
plan2star = []
for j, n in enumerate(targetSystems):
plan2star = np.hstack((plan2star, [j] * n))
self.plan2star = plan2star.astype(int)
self.sInds = np.unique(self.plan2star)
self.nPlans = len(self.plan2star)
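        # Worked example (illustrative numbers only): with eta = 0.5 and three
        # target stars, np.random.poisson(lam=0.5, size=3) might return
        # [1, 0, 2]; stacking each star index n times then gives
        # plan2star = [0, 2, 2], i.e. star 0 hosts one planet and star 2 two.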
# sample all of the orbital and physical parameters
self.I, self.O, self.w = PPop.gen_angles(self.nPlans)
self.a, self.e, self.p, self.Rp = PPop.gen_plan_params(self.nPlans)
if PPop.scaleOrbits:
self.a *= np.sqrt(TL.L[self.plan2star])
self.gen_M0() # initial mean anomaly
self.Mp = PPop.MfromRp(self.Rp) # mass
| 34.945946
| 81
| 0.655839
|
4a004b2eac1ad18f555dfeb26bceb0c0841a75c3
| 3,856
|
py
|
Python
|
preprocessing.py
|
krup18/image_caption
|
a897364725a3852562e719ff81128cf70c2c4221
|
[
"Apache-2.0"
] | null | null | null |
preprocessing.py
|
krup18/image_caption
|
a897364725a3852562e719ff81128cf70c2c4221
|
[
"Apache-2.0"
] | null | null | null |
preprocessing.py
|
krup18/image_caption
|
a897364725a3852562e719ff81128cf70c2c4221
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 21:45:13 2019
@author: krups
"""
import random
import numpy as np
import pandas as pd
from collections import Counter
from tensorflow import set_random_seed  # TensorFlow 1.x API; TensorFlow 2.x exposes tf.random.set_seed instead
import string
def set_randomseed_value(initial=123):
np.random.seed(initial)
random.seed(initial)
set_random_seed(initial)
def load_doc(filename):
file = open(filename, 'r')
text = file.read()
file.close()
return text
def load_descriptions(doc_text):
textmap = []
for l in doc_text.split('\n'):
c = l.split('\t')
if len(c) < 2:
continue
w = c[0].split("#")
textmap.append(w + [c[1].lower()])
return textmap
def word_data_frame(text_dataframe):
vocabulary = []
for t in text_dataframe.Caption.values:
vocabulary.extend(t.split())
count = Counter(vocabulary)
a1 = []
a2 = []
for i in count.keys():
a1.append(i)
for j in count.values():
a2.append(j)
data = {"word":a1 , "count":a2}
word_dataframe = pd.DataFrame(data)
word_dataframe = word_dataframe.sort_values(by='count', ascending=False)
word_dataframe = word_dataframe.reset_index()[["word","count"]]
return(word_dataframe)
def clean_captions(original_caption):
translated = str.maketrans('', '', string.punctuation)
caption_wo_punctuation = original_caption.translate(translated)
words_not_single_character = ""
for word in caption_wo_punctuation.split():
if len(word) > 1:
words_not_single_character += " " + word
words_not_numeric = ""
for word in words_not_single_character.split():
alpha = word.isalpha()
if alpha:
words_not_numeric += " " + word
return(words_not_numeric)
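# Illustrative example of the cleaning above (a made-up caption, not from the
# Flickr8k data):
#   clean_captions("A dog's 22 legs!")  ->  " dogs legs"
# punctuation is stripped first, then one-character words ("A") and purely
# numeric tokens ("22") are dropped; note the leading space in the result.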
def add_tokens(captions):
new_captions = []
for t in captions:
t = 'start_seq ' + t + ' end_seq'
new_captions.append(t)
return(new_captions)
'''
jpg_dir = "Flickr8k_Dataset/Flicker8k_Dataset/"
text_dir = "Flickr8k_text/Flickr8k.token.txt"
doc_text=load_doc(text_dir)
textmap = load_descriptions(doc_text)
text_dataframe = pd.DataFrame(textmap,columns=["Name", "Index", "Caption"])
word_dataframe = word_data_frame(text_dataframe)
for i, caption in enumerate(text_dataframe.Caption.values):
cleaned_caption = clean_captions(caption)
text_dataframe["Caption"].iloc[i] = cleaned_caption
word_dataframe = word_data_frame(text_dataframe)
text1_dataframe = copy(text_dataframe)
text1_dataframe["Caption"] = add_tokens(text_dataframe["Caption"])
vgg = VGG16(include_top=True,weights=None)
vgg.load_weights("vgg16_weights_tf_dim_ordering_tf_kernels.h5")
vgg.summary()
vgg.layers.pop()
vgg = models.Model(inputs=vgg.inputs, outputs=vgg.layers[-1].output)
#vgg.summary()
images = OrderedDict()
pixels = 224
t_size = (pixels,pixels,3)
pics = os.listdir(jpg_dir)
data = np.zeros((len(pics),pixels,pixels,3))
for index, name in enumerate(pics):
file_name = jpg_dir + '/' + name
img = load_img(file_name, target_size=t_size)
img = img_to_array(img)
nimg = preprocess_input(img)
y_prediction = vgg.predict(nimg.reshape( (1,) + nimg.shape[:3]))
images[name] = y_prediction.flatten()
images_data, index = [],[]
text1_dataframe = text1_dataframe.loc[text1_dataframe["Index"].values == "0",: ]
for i, name in enumerate(text1_dataframe.Name):
if name in images.keys():
images_data.append(images[name])
index.append(i)
filenames = text1_dataframe["Name"].iloc[index].values
captions_data = text1_dataframe["Caption"].iloc[index].values
images_data = np.array(images_data)
nb_words = 8000
tokenizer = Tokenizer(nb_words=nb_words)
tokenizer.fit_on_texts(captions_data)
vocabulary_size = len(tokenizer.word_index) + 1
texts_data = tokenizer.texts_to_sequences(captions_data)
'''
| 28.145985
| 80
| 0.691909
|
4a004b6ecefca31504ccd42443e9374b550e21a6
| 2,755
|
py
|
Python
|
proj/scrapeApiTest.py
|
hp5441/web-scraper-service
|
bad1531d3238e6f7dd12974ae2a70f156bb1461d
|
[
"MIT"
] | null | null | null |
proj/scrapeApiTest.py
|
hp5441/web-scraper-service
|
bad1531d3238e6f7dd12974ae2a70f156bb1461d
|
[
"MIT"
] | null | null | null |
proj/scrapeApiTest.py
|
hp5441/web-scraper-service
|
bad1531d3238e6f7dd12974ae2a70f156bb1461d
|
[
"MIT"
] | null | null | null |
from time import perf_counter, sleep
import requests
import json
import aiohttp
import asyncio
import sys
import websocket
from .celery import app
class async_client():
def __init__(self):
self.session = None
self.count = 0
def getUrl(self, scrip):
return f"https://json.bselivefeeds.indiatimes.com/ET_Community/currenttick?scripcode={scrip}EQ&exchangeid=50&directions=current&callback=serviceHit.autoLoadResultCallback&scripcodetype=company"
async def fetch(self, scrip):
if not self.session:
self.session = aiohttp.ClientSession()
await self.session.get(self.getUrl(scrip))
else:
async with self.session.get(self.getUrl(scrip)) as response:
json_data = await response.text()
                # strip the JSONP wrapper "serviceHit.autoLoadResultCallback(...)"
                # so that only the JSON payload between the parentheses remains
                json_data = json_data.strip()[34:-1]
pydict = json.loads(json_data)
parametervalues = pydict['query']['parametervalues']
stockdata = pydict['query']['results']['quote'][0]
stockstatus = pydict['query']['marketstatus']['currentMarketStatus']
self.count += 1
return parametervalues['scripcode'], stockdata['Close'], stockstatus, stockdata['DateTemp'], self.count, stockdata
print(self.session)
def getNseUrl(scrip):
return f"https://www.nseindia.com/api/quote-equity?symbol={scrip}"
async def main(client, scrips):
tasks = [asyncio.create_task(client.fetch(scrip)) for scrip in scrips]
results = await asyncio.gather(*tasks)
return results
async def run_test(client, company):
try:
return await main(client, company)
    except Exception as e:
        print('scrape loop aborted:', e)
async def run_test_counter(count, client, company):
results = await asyncio.gather(*(run_test(client, company) for _ in range(count)))
return results
async def run_test_counter_periodic(total, interval, count, client, company, final):
await client.fetch('MBLINFRA')
while total > 0:
start = perf_counter()
results = await run_test_counter(count, client, company)
print(results)
print(perf_counter() - start)
sleep(interval)
total -= 1
final[0] = results
await client.session.close()
async def closeClientSession(client):
await client.session.close()
@app.task
def scrape(*companies):
client = async_client()
scrapedResults = [0]
asyncio.run(run_test_counter_periodic(
1, 0, 1, client, companies, scrapedResults))
#socket = websocket.WebSocket()
# socket.connect("ws://localhost:8000/ws/stock/")
# socket.send(json.dumps({
# 'message': {'name': 'RELIANCE', 'data': scrapedResults[0]}
# }))
return scrapedResults[0]
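# Illustrative usage (comments only): the Celery task above can be invoked
# directly as scrape('RELIANCE', 'MBLINFRA') or queued with
# scrape.delay('RELIANCE', 'MBLINFRA'); either way it returns the last batch
# of (scripcode, close, market status, timestamp, counter, raw quote) tuples.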
| 30.955056
| 201
| 0.658076
|
4a004bc3a73e8358820775523e1844215245e2a1
| 4,040
|
py
|
Python
|
image_processing.py
|
milanzongor/unihack_2020
|
12d8a46eafa3ac3e21214697e6afc1ff22e87733
|
[
"MIT"
] | 7
|
2020-05-03T15:15:33.000Z
|
2020-09-21T16:35:41.000Z
|
image_processing.py
|
milanzongor/unihack_2020
|
12d8a46eafa3ac3e21214697e6afc1ff22e87733
|
[
"MIT"
] | 2
|
2020-05-07T12:34:31.000Z
|
2020-05-07T12:34:42.000Z
|
image_processing.py
|
milanzongor/unihack_2020
|
12d8a46eafa3ac3e21214697e6afc1ff22e87733
|
[
"MIT"
] | 3
|
2020-05-03T14:01:17.000Z
|
2020-05-14T12:53:38.000Z
|
from imutils import contours
from imutils.perspective import four_point_transform
import cv2
import imutils
import numpy as np
from pdf2image import convert_from_path
# TODO: make the header parsing scalable to other page sizes and scan resolutions
def get_results(pdf_path):
rectangle_elements, gray_cropped = parse_header_elements(pdf_path)
student_id_table = four_point_transform(gray_cropped, rectangle_elements[0].reshape(4, 2))
student_id_table = threshold_image(student_id_table)
student_id_table = delete_outher_box(student_id_table)
student_id = get_number(student_id_table, 6, 10)
exam_points_table = four_point_transform(gray_cropped, rectangle_elements[1].reshape(4, 2))
exam_points_table = threshold_image(exam_points_table)
exam_points_table = delete_outher_box(exam_points_table)
exam_points = get_number(exam_points_table, rows=2, cols=10)
grade_table = four_point_transform(gray_cropped, rectangle_elements[2].reshape(4, 2))
grade_table = threshold_image(grade_table)
grade_table = delete_outher_box(grade_table)
grade = get_number(grade_table, rows=1, cols=6)
return [student_id, exam_points, grade]
def pdf2opencv(path_pdf):
pil_images = convert_from_path(path_pdf, dpi=200)
for pil_image in pil_images:
open_cv_image = np.array(pil_image)
# Convert RGB to BGR
return open_cv_image[:, :, ::-1].copy()
def delete_outher_box(thresh):
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
if len(cnts) == 1:
cv2.drawContours(thresh, cnts, -1, (0, 0, 0), 3)
return thresh
def get_contours(image):
cnts = cv2.findContours(image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
return imutils.grab_contours(cnts)
def get_recognized_circles(cropped_header):
cnts = get_contours(cropped_header)
questionCnts = []
for c in cnts:
(x, y, w, h) = cv2.boundingRect(c)
ar = w / float(h)
if w >= 20 and h >= 20 and ar >= 0.9 and ar <= 1.1:
questionCnts.append(c)
# top to bottom sort
questionCnts = contours.sort_contours(questionCnts, method="top-to-bottom")[0]
return questionCnts
def threshold_image(image):
return cv2.threshold(image, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
def get_number(thresh, rows, cols):
number = 0
questionCnts = get_recognized_circles(thresh)
for (q, i) in enumerate(np.arange(0, len(questionCnts), cols)):
cnts = contours.sort_contours(questionCnts[i:i + cols])[0]
bubbled = None
for (j, c) in enumerate(cnts):
mask = np.zeros(thresh.shape, dtype="uint8")
cv2.drawContours(mask, [c], -1, 255, -1)
mask = cv2.bitwise_and(thresh, thresh, mask=mask)
total = cv2.countNonZero(mask)
if bubbled is None or total > bubbled[0]:
bubbled = (total, j)
number += bubbled[1] * pow(10, rows - 1 - q)
return number
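# Worked example (hypothetical answer sheet): with rows=2 and cols=10, if the
# darkest bubble in the first row is column 4 and in the second row column 7,
# the digits are weighted by powers of ten and get_number returns
# 4 * 10**1 + 7 * 10**0 == 47.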
def check_header(pdf_path):
rectangle_elements, _ = parse_header_elements(pdf_path)
if len(rectangle_elements) >= 3:
print('Header is ok')
return True
else:
return False
def parse_header_elements(pdf_path):
image = pdf2opencv(pdf_path)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(blurred, 75, 200)
# get dimensions of the page
(x, y, w, h) = cv2.boundingRect(edged)
edged_cropped = edged[y:y + h // 5, :]
gray_cropped = gray[y:y + h // 5, :]
cnts = get_contours(edged_cropped)
docCnt = None
docCntArr = []
if len(cnts) > 0:
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
for c in cnts:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * peri, True)
if len(approx) == 4 and cv2.contourArea(c) > 9000:
                print('Header rectangle candidate found')
docCntArr.append(approx)
return docCntArr, gray_cropped
| 30.37594
| 95
| 0.667079
|
4a004c919d8545848bbba08f5a6fbaa31d94da7c
| 26
|
py
|
Python
|
ACTINN/Classifier/__init__.py
|
odavalos/ACTINN-PyTorch
|
1bc8b127968c2aec9e3d52c949f923434a4d189a
|
[
"MIT"
] | 68
|
2020-08-26T00:50:45.000Z
|
2022-03-04T05:31:44.000Z
|
ACTINN/Classifier/__init__.py
|
odavalos/ACTINN-PyTorch
|
1bc8b127968c2aec9e3d52c949f923434a4d189a
|
[
"MIT"
] | 6
|
2021-05-07T03:28:18.000Z
|
2021-06-03T22:18:03.000Z
|
ACTINN/Classifier/__init__.py
|
odavalos/ACTINN-PyTorch
|
1bc8b127968c2aec9e3d52c949f923434a4d189a
|
[
"MIT"
] | 19
|
2020-08-26T11:36:42.000Z
|
2021-11-04T02:02:28.000Z
|
from .classifier import *
| 13
| 25
| 0.769231
|
4a004d9fd94615ab1592bff576397a99c4c90234
| 1,326
|
py
|
Python
|
adaptive/tests/unit/test_learnernd.py
|
Davide-sd/adaptive
|
ef3b1c51dbfbd01e1d99e285e822cdafc0085358
|
[
"BSD-3-Clause"
] | 720
|
2017-12-10T14:25:31.000Z
|
2022-03-31T02:30:44.000Z
|
adaptive/tests/unit/test_learnernd.py
|
Davide-sd/adaptive
|
ef3b1c51dbfbd01e1d99e285e822cdafc0085358
|
[
"BSD-3-Clause"
] | 305
|
2018-01-22T11:57:02.000Z
|
2022-03-01T08:58:37.000Z
|
adaptive/tests/unit/test_learnernd.py
|
Davide-sd/adaptive
|
ef3b1c51dbfbd01e1d99e285e822cdafc0085358
|
[
"BSD-3-Clause"
] | 51
|
2018-01-25T08:30:28.000Z
|
2022-03-08T08:11:45.000Z
|
import math
import numpy as np
import pytest
from scipy.spatial import ConvexHull
from adaptive.learner.base_learner import uses_nth_neighbors
from adaptive.learner.learnerND import LearnerND, curvature_loss_function
def ring_of_fire(xy):
a = 0.2
d = 0.7
x, y = xy
return x + math.exp(-((x ** 2 + y ** 2 - d ** 2) ** 2) / a ** 4)
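# Quick sanity check of the test function above: on the ring of radius d = 0.7
# the exponential term equals 1, so ring_of_fire((0.7, 0)) == 1.7, while far
# from the ring the term decays towards 0 and the function approaches x.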
def test_learnerND_inits_loss_depends_on_neighbors_correctly():
learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)])
assert learner.nth_neighbors == 0
def test_learnerND_curvature_inits_loss_depends_on_neighbors_correctly():
loss = curvature_loss_function()
assert loss.nth_neighbors == 1
learner = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)], loss_per_simplex=loss)
assert learner.nth_neighbors == 1
def test_learnerND_accepts_ConvexHull_as_input():
triangle = ConvexHull([(0, 1), (2, 0), (0, 0)])
learner = LearnerND(ring_of_fire, bounds=triangle)
assert learner.nth_neighbors == 0
assert np.allclose(learner._bbox, [(0, 2), (0, 1)])
def test_learnerND_raises_if_too_many_neigbors():
@uses_nth_neighbors(2)
def loss(*args):
return 0
assert loss.nth_neighbors == 2
with pytest.raises(NotImplementedError):
LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)], loss_per_simplex=loss)
| 29.466667
| 87
| 0.702112
|
4a004eb8268e8fd1434f7557dc63c1e58d2e1a2f
| 7,791
|
py
|
Python
|
official/vision/beta/modeling/layers/roi_generator_test.py
|
TUDelftHao/models
|
faf0c2dc442ceaa8425aff73abd00f92f3137b7b
|
[
"Apache-2.0"
] | null | null | null |
official/vision/beta/modeling/layers/roi_generator_test.py
|
TUDelftHao/models
|
faf0c2dc442ceaa8425aff73abd00f92f3137b7b
|
[
"Apache-2.0"
] | null | null | null |
official/vision/beta/modeling/layers/roi_generator_test.py
|
TUDelftHao/models
|
faf0c2dc442ceaa8425aff73abd00f92f3137b7b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for roi_generator.py."""
# Import libraries
import numpy as np
import tensorflow as tf
from official.vision.beta.modeling.layers import roi_generator
class MultilevelProposeRoisTest(tf.test.TestCase):
def test_multilevel_propose_rois_single_level(self):
rpn_boxes_np = np.array(
[[[[0, 0, 10, 10], [0.01, 0.01, 9.9, 9.9]],
[[5, 5, 10, 10], [2, 2, 8, 8]]],
[[[2, 2, 4, 4], [3, 3, 6, 6]],
[[3.1, 3.1, 6.1, 6.1], [1, 1, 8, 8]]]])
rpn_boxes = {
'2': tf.constant(rpn_boxes_np, dtype=tf.float32)
}
rpn_scores_np = np.array(
[[[[0.6], [0.9]], [[0.2], [0.3]]], [[[0.1], [0.8]], [[0.3], [0.5]]]])
rpn_scores = {
'2': tf.constant(rpn_scores_np, dtype=tf.float32)
}
anchor_boxes_np = np.array(
[[[[0, 0, 10, 10], [0.01, 0.01, 9.9, 9.9]],
[[5, 5, 10, 10], [2, 2, 8, 8]]],
[[[2, 2, 4, 4], [3, 3, 6, 6]],
[[3.1, 3.1, 6.1, 6.1], [1, 1, 8, 8]]]])
anchor_boxes = {
'2': tf.constant(anchor_boxes_np, dtype=tf.float32)
}
image_shape = tf.constant([[20, 20], [20, 20]], dtype=tf.int32)
selected_rois_np = np.array(
[[[0.01, 0.01, 9.9, 9.9], [2, 2, 8, 8], [5, 5, 10, 10], [0, 0, 0, 0]],
[[3, 3, 6, 6], [1, 1, 8, 8], [2, 2, 4, 4], [0, 0, 0, 0]]])
selected_roi_scores_np = np.array(
[[0.9, 0.3, 0.2, 0], [0.8, 0.5, 0.1, 0]])
# Runs on TPU.
strategy = tf.distribute.experimental.TPUStrategy()
with strategy.scope():
selected_rois_tpu, selected_roi_scores_tpu = (
roi_generator._multilevel_propose_rois(
rpn_boxes,
rpn_scores,
anchor_boxes=anchor_boxes,
image_shape=image_shape,
pre_nms_top_k=4,
pre_nms_score_threshold=0.0,
pre_nms_min_size_threshold=0.0,
nms_iou_threshold=0.5,
num_proposals=4,
use_batched_nms=False,
decode_boxes=False,
clip_boxes=False,
apply_sigmoid_to_score=False))
# Runs on CPU.
selected_rois_cpu, selected_roi_scores_cpu = (
roi_generator._multilevel_propose_rois(
rpn_boxes,
rpn_scores,
anchor_boxes=anchor_boxes,
image_shape=image_shape,
pre_nms_top_k=4,
pre_nms_score_threshold=0.0,
pre_nms_min_size_threshold=0.0,
nms_iou_threshold=0.5,
num_proposals=4,
use_batched_nms=False,
decode_boxes=False,
clip_boxes=False,
apply_sigmoid_to_score=False))
self.assertNDArrayNear(
selected_rois_tpu.numpy(), selected_rois_cpu.numpy(), 1e-5)
self.assertNDArrayNear(
selected_roi_scores_tpu.numpy(), selected_roi_scores_cpu.numpy(), 1e-5)
self.assertNDArrayNear(
selected_rois_tpu.numpy(), selected_rois_np, 1e-5)
self.assertNDArrayNear(
selected_roi_scores_tpu.numpy(), selected_roi_scores_np, 1e-5)
def test_multilevel_propose_rois_two_levels(self):
rpn_boxes_1_np = np.array(
[[[[0, 0, 10, 10], [0.01, 0.01, 9.99, 9.99]],
[[5, 5, 10, 10], [2, 2, 8, 8]]],
[[[2, 2, 2.5, 2.5], [3, 3, 6, 6]],
[[3.1, 3.1, 6.1, 6.1], [1, 1, 8, 8]]]])
rpn_boxes_2_np = np.array(
[[[[0, 0, 10.01, 10.01]]], [[[2, 2, 4.5, 4.5]]]])
rpn_boxes = {
'2': tf.constant(rpn_boxes_1_np, dtype=tf.float32),
'3': tf.constant(rpn_boxes_2_np, dtype=tf.float32),
}
rpn_scores_1_np = np.array(
[[[[0.6], [0.9]], [[0.2], [0.3]]], [[[0.1], [0.8]], [[0.3], [0.5]]]])
rpn_scores_2_np = np.array([[[[0.95]]], [[[0.99]]]])
rpn_scores = {
'2': tf.constant(rpn_scores_1_np, dtype=tf.float32),
'3': tf.constant(rpn_scores_2_np, dtype=tf.float32),
}
anchor_boxes_1_np = np.array(
[[[[0, 0, 10, 10], [0.01, 0.01, 9.99, 9.99]],
[[5, 5, 10, 10], [2, 2, 8, 8]]],
[[[2, 2, 2.5, 2.5], [3, 3, 6, 6]],
[[3.1, 3.1, 6.1, 6.1], [1, 1, 8, 8]]]])
anchor_boxes_2_np = np.array(
[[[[0, 0, 10.01, 10.01]]], [[[2, 2, 4.5, 4.5]]]])
anchor_boxes = {
'2': tf.constant(anchor_boxes_1_np, dtype=tf.float32),
'3': tf.constant(anchor_boxes_2_np, dtype=tf.float32),
}
image_shape = tf.constant([[20, 20], [20, 20]], dtype=tf.int32)
selected_rois_np = np.array(
[[[0, 0, 10.01, 10.01], [0.01, 0.01, 9.99, 9.99]],
[[2, 2, 4.5, 4.5], [3, 3, 6, 6]]])
selected_roi_scores_np = np.array([[0.95, 0.9], [0.99, 0.8]])
# Runs on TPU.
strategy = tf.distribute.experimental.TPUStrategy()
with strategy.scope():
selected_rois_tpu, selected_roi_scores_tpu = (
roi_generator._multilevel_propose_rois(
rpn_boxes,
rpn_scores,
anchor_boxes=anchor_boxes,
image_shape=image_shape,
pre_nms_top_k=4,
pre_nms_score_threshold=0.0,
pre_nms_min_size_threshold=0.0,
nms_iou_threshold=0.5,
num_proposals=2,
use_batched_nms=False,
decode_boxes=False,
clip_boxes=False,
apply_sigmoid_to_score=False))
# Runs on CPU.
selected_rois_cpu, selected_roi_scores_cpu = (
roi_generator._multilevel_propose_rois(
rpn_boxes,
rpn_scores,
anchor_boxes=anchor_boxes,
image_shape=image_shape,
pre_nms_top_k=4,
pre_nms_score_threshold=0.0,
pre_nms_min_size_threshold=0.0,
nms_iou_threshold=0.5,
num_proposals=2,
use_batched_nms=False,
decode_boxes=False,
clip_boxes=False,
apply_sigmoid_to_score=False))
self.assertNDArrayNear(
selected_rois_tpu.numpy(), selected_rois_cpu.numpy(), 1e-5)
self.assertNDArrayNear(
selected_roi_scores_tpu.numpy(), selected_roi_scores_cpu.numpy(), 1e-5)
self.assertNDArrayNear(
selected_rois_tpu.numpy(), selected_rois_np, 1e-5)
self.assertNDArrayNear(
selected_roi_scores_tpu.numpy(), selected_roi_scores_np, 1e-5)
class MultilevelROIGeneratorTest(tf.test.TestCase):
def test_serialize_deserialize(self):
kwargs = dict(
pre_nms_top_k=2000,
pre_nms_score_threshold=0.0,
pre_nms_min_size_threshold=0.0,
nms_iou_threshold=0.7,
num_proposals=1000,
test_pre_nms_top_k=1000,
test_pre_nms_score_threshold=0.0,
test_pre_nms_min_size_threshold=0.0,
test_nms_iou_threshold=0.7,
test_num_proposals=1000,
use_batched_nms=False,
)
generator = roi_generator.MultilevelROIGenerator(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(generator.get_config(), expected_config)
new_generator = roi_generator.MultilevelROIGenerator.from_config(
generator.get_config())
self.assertAllEqual(generator.get_config(), new_generator.get_config())
if __name__ == '__main__':
tf.test.main()
| 36.406542
| 80
| 0.581825
|
4a004f0240d04c2a17695fc3119aea5ac4c6ae25
| 797
|
py
|
Python
|
cookbook.py
|
uberkinder/Robusta-AutoML
|
9faee4c17ad9f37b09760f9fffea715cdbf2d1fb
|
[
"MIT"
] | 2
|
2019-04-26T19:40:31.000Z
|
2019-10-12T15:18:29.000Z
|
cookbook.py
|
uberkinder/Robusta-AutoML
|
9faee4c17ad9f37b09760f9fffea715cdbf2d1fb
|
[
"MIT"
] | null | null | null |
cookbook.py
|
uberkinder/Robusta-AutoML
|
9faee4c17ad9f37b09760f9fffea715cdbf2d1fb
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from .preprocessing.category import *
from .preprocessing.numeric import *
from .preprocessing.base import *
from .pipeline import *
mem_reduce_pipe = FeatureUnion([
('numeric', make_pipeline(
TypeSelector(np.number),
DowncastTransformer(),
)),
('category', make_pipeline(
TypeSelector('object'),
)),
])
prep_pipe = make_pipeline(
FeatureUnion([
("numeric", make_pipeline(
TypeSelector(np.number),
SimpleImputer("median"),
GaussRankTransformer(),
ColumnRenamer(prefix='gr_'),
)),
("category", make_pipeline(
TypeSelector("object"),
LabelEncoder(),
ColumnRenamer(prefix='le_'),
)),
])
)
| 20.435897
| 40
| 0.589711
|
4a00504cfcc21310f5e1ff2581db5f00854c5f3a
| 799
|
py
|
Python
|
resc/__init__.py
|
tatsuya4649/resc
|
7f74461b028d49e6230ed2c07dece65d76e1ee89
|
[
"MIT"
] | null | null | null |
resc/__init__.py
|
tatsuya4649/resc
|
7f74461b028d49e6230ed2c07dece65d76e1ee89
|
[
"MIT"
] | null | null | null |
resc/__init__.py
|
tatsuya4649/resc
|
7f74461b028d49e6230ed2c07dece65d76e1ee89
|
[
"MIT"
] | null | null | null |
from .rescerr import RescTypeError
from ._resc import Resc
from .ssh import SSH
from .resclog import RescLogFlag, RescLogHeader, RescLogEmergeHeader, \
RescLogSFlag, RescLog, RescLogFormat, RescLogOver, \
RescLogAnalyze, RescLogPathError, RescLogTypeError, \
RescLogKeyError, RescLogUnMatchError, start_server
__all__ = [
RescTypeError.__name__,
Resc.__name__,
SSH.__name__,
RescLogFlag.__name__,
RescLogHeader.__name__,
RescLogEmergeHeader.__name__,
RescLogSFlag.__name__,
RescLog.__name__,
RescLogFormat.__name__,
RescLogOver.__name__,
RescLogAnalyze.__name__,
RescLogPathError.__name__,
RescLogTypeError.__name__,
RescLogKeyError.__name__,
RescLogUnMatchError.__name__,
start_server.__name__,
]
__version__ = "0.1.0"
| 26.633333
| 71
| 0.7597
|
4a0050868d92ae88107b25ad0d5acc5f16f4d956
| 22,204
|
py
|
Python
|
homeassistant/components/group/__init__.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 1
|
2020-12-17T19:05:31.000Z
|
2020-12-17T19:05:31.000Z
|
homeassistant/components/group/__init__.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 52
|
2020-07-14T14:12:26.000Z
|
2022-03-31T06:24:02.000Z
|
homeassistant/components/group/__init__.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 1
|
2021-04-30T01:57:13.000Z
|
2021-04-30T01:57:13.000Z
|
"""Provide the functionality to group entities."""
import asyncio
from contextvars import ContextVar
import logging
from typing import Any, Dict, Iterable, List, Optional, Set, cast
import voluptuous as vol
from homeassistant import core as ha
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_NAME,
CONF_ICON,
CONF_NAME,
ENTITY_MATCH_ALL,
ENTITY_MATCH_NONE,
EVENT_HOMEASSISTANT_START,
SERVICE_RELOAD,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import CoreState, callback, split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.integration_platform import (
async_process_integration_platforms,
)
from homeassistant.helpers.reload import async_reload_integration_platforms
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.loader import bind_hass
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
DOMAIN = "group"
GROUP_ORDER = "group_order"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
CONF_ENTITIES = "entities"
CONF_ALL = "all"
ATTR_ADD_ENTITIES = "add_entities"
ATTR_AUTO = "auto"
ATTR_ENTITIES = "entities"
ATTR_OBJECT_ID = "object_id"
ATTR_ORDER = "order"
ATTR_ALL = "all"
SERVICE_SET = "set"
SERVICE_REMOVE = "remove"
PLATFORMS = ["light", "cover", "notify"]
REG_KEY = f"{DOMAIN}_registry"
_LOGGER = logging.getLogger(__name__)
current_domain: ContextVar[str] = ContextVar("current_domain")
def _conf_preprocess(value):
"""Preprocess alternative configuration formats."""
if not isinstance(value, dict):
value = {CONF_ENTITIES: value}
return value
GROUP_SCHEMA = vol.All(
vol.Schema(
{
vol.Optional(CONF_ENTITIES): vol.Any(cv.entity_ids, None),
CONF_NAME: cv.string,
CONF_ICON: cv.icon,
CONF_ALL: cv.boolean,
}
)
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({cv.match_all: vol.All(_conf_preprocess, GROUP_SCHEMA)})},
extra=vol.ALLOW_EXTRA,
)
class GroupIntegrationRegistry:
"""Class to hold a registry of integrations."""
on_off_mapping: Dict[str, str] = {STATE_ON: STATE_OFF}
off_on_mapping: Dict[str, str] = {STATE_OFF: STATE_ON}
on_states_by_domain: Dict[str, Set] = {}
exclude_domains: Set = set()
def exclude_domain(self) -> None:
"""Exclude the current domain."""
self.exclude_domains.add(current_domain.get())
def on_off_states(self, on_states: Set, off_state: str) -> None:
"""Register on and off states for the current domain."""
for on_state in on_states:
if on_state not in self.on_off_mapping:
self.on_off_mapping[on_state] = off_state
if len(on_states) == 1 and off_state not in self.off_on_mapping:
self.off_on_mapping[off_state] = list(on_states)[0]
self.on_states_by_domain[current_domain.get()] = set(on_states)
@bind_hass
def is_on(hass, entity_id):
"""Test if the group state is in its ON-state."""
if REG_KEY not in hass.data:
# Integration not setup yet, it cannot be on
return False
state = hass.states.get(entity_id)
if state is not None:
return state.state in hass.data[REG_KEY].on_off_mapping
return False
@bind_hass
def expand_entity_ids(hass: HomeAssistantType, entity_ids: Iterable[Any]) -> List[str]:
"""Return entity_ids with group entity ids replaced by their members.
Async friendly.
"""
found_ids: List[str] = []
for entity_id in entity_ids:
if not isinstance(entity_id, str) or entity_id in (
ENTITY_MATCH_NONE,
ENTITY_MATCH_ALL,
):
continue
entity_id = entity_id.lower()
try:
# If entity_id points at a group, expand it
domain, _ = ha.split_entity_id(entity_id)
if domain == DOMAIN:
child_entities = get_entity_ids(hass, entity_id)
if entity_id in child_entities:
child_entities = list(child_entities)
child_entities.remove(entity_id)
found_ids.extend(
ent_id
for ent_id in expand_entity_ids(hass, child_entities)
if ent_id not in found_ids
)
else:
if entity_id not in found_ids:
found_ids.append(entity_id)
except AttributeError:
# Raised by split_entity_id if entity_id is not a string
pass
return found_ids
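# Illustrative expansion (hypothetical entity ids): if group.living_room
# tracks light.lamp and switch.fan, then
#   expand_entity_ids(hass, ["group.living_room", "sensor.door"])
# returns ["light.lamp", "switch.fan", "sensor.door"]; nested groups are
# flattened recursively and duplicates are skipped.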
@bind_hass
def get_entity_ids(
hass: HomeAssistantType, entity_id: str, domain_filter: Optional[str] = None
) -> List[str]:
"""Get members of this group.
Async friendly.
"""
group = hass.states.get(entity_id)
if not group or ATTR_ENTITY_ID not in group.attributes:
return []
entity_ids = group.attributes[ATTR_ENTITY_ID]
if not domain_filter:
return cast(List[str], entity_ids)
domain_filter = f"{domain_filter.lower()}."
return [ent_id for ent_id in entity_ids if ent_id.startswith(domain_filter)]
@bind_hass
def groups_with_entity(hass: HomeAssistantType, entity_id: str) -> List[str]:
"""Get all groups that contain this entity.
Async friendly.
"""
if DOMAIN not in hass.data:
return []
groups = []
for group in hass.data[DOMAIN].entities:
if entity_id in group.tracking:
groups.append(group.entity_id)
return groups
async def async_setup(hass, config):
"""Set up all groups found defined in the configuration."""
component = hass.data.get(DOMAIN)
if component is None:
component = hass.data[DOMAIN] = EntityComponent(_LOGGER, DOMAIN, hass)
hass.data[REG_KEY] = GroupIntegrationRegistry()
await async_process_integration_platforms(hass, DOMAIN, _process_group_platform)
await _async_process_config(hass, config, component)
async def reload_service_handler(service):
"""Remove all user-defined groups and load new ones from config."""
auto = list(filter(lambda e: not e.user_defined, component.entities))
conf = await component.async_prepare_reload()
if conf is None:
return
await _async_process_config(hass, conf, component)
await component.async_add_entities(auto)
await async_reload_integration_platforms(hass, DOMAIN, PLATFORMS)
hass.services.async_register(
DOMAIN, SERVICE_RELOAD, reload_service_handler, schema=vol.Schema({})
)
service_lock = asyncio.Lock()
async def locked_service_handler(service):
"""Handle a service with an async lock."""
async with service_lock:
await groups_service_handler(service)
async def groups_service_handler(service):
"""Handle dynamic group service functions."""
object_id = service.data[ATTR_OBJECT_ID]
entity_id = f"{DOMAIN}.{object_id}"
group = component.get_entity(entity_id)
# new group
if service.service == SERVICE_SET and group is None:
entity_ids = (
service.data.get(ATTR_ENTITIES)
or service.data.get(ATTR_ADD_ENTITIES)
or None
)
extra_arg = {
attr: service.data[attr]
for attr in (ATTR_ICON,)
if service.data.get(attr) is not None
}
await Group.async_create_group(
hass,
service.data.get(ATTR_NAME, object_id),
object_id=object_id,
entity_ids=entity_ids,
user_defined=False,
mode=service.data.get(ATTR_ALL),
**extra_arg,
)
return
if group is None:
_LOGGER.warning("%s:Group '%s' doesn't exist!", service.service, object_id)
return
# update group
if service.service == SERVICE_SET:
need_update = False
if ATTR_ADD_ENTITIES in service.data:
delta = service.data[ATTR_ADD_ENTITIES]
entity_ids = set(group.tracking) | set(delta)
await group.async_update_tracked_entity_ids(entity_ids)
if ATTR_ENTITIES in service.data:
entity_ids = service.data[ATTR_ENTITIES]
await group.async_update_tracked_entity_ids(entity_ids)
if ATTR_NAME in service.data:
group.name = service.data[ATTR_NAME]
need_update = True
if ATTR_ICON in service.data:
group.icon = service.data[ATTR_ICON]
need_update = True
if ATTR_ALL in service.data:
group.mode = all if service.data[ATTR_ALL] else any
need_update = True
if need_update:
group.async_write_ha_state()
return
# remove group
if service.service == SERVICE_REMOVE:
await component.async_remove_entity(entity_id)
hass.services.async_register(
DOMAIN,
SERVICE_SET,
locked_service_handler,
schema=vol.All(
vol.Schema(
{
vol.Required(ATTR_OBJECT_ID): cv.slug,
vol.Optional(ATTR_NAME): cv.string,
vol.Optional(ATTR_ICON): cv.string,
vol.Optional(ATTR_ALL): cv.boolean,
vol.Exclusive(ATTR_ENTITIES, "entities"): cv.entity_ids,
vol.Exclusive(ATTR_ADD_ENTITIES, "entities"): cv.entity_ids,
}
)
),
)
hass.services.async_register(
DOMAIN,
SERVICE_REMOVE,
groups_service_handler,
schema=vol.Schema({vol.Required(ATTR_OBJECT_ID): cv.slug}),
)
return True
async def _process_group_platform(hass, domain, platform):
"""Process a group platform."""
current_domain.set(domain)
platform.async_describe_on_off_states(hass, hass.data[REG_KEY])
async def _async_process_config(hass, config, component):
"""Process group configuration."""
hass.data.setdefault(GROUP_ORDER, 0)
tasks = []
for object_id, conf in config.get(DOMAIN, {}).items():
name = conf.get(CONF_NAME, object_id)
entity_ids = conf.get(CONF_ENTITIES) or []
icon = conf.get(CONF_ICON)
mode = conf.get(CONF_ALL)
        # Keep track of the order while creating the tasks, using the same
        # scheme as async_create_group, so that groups created concurrently
        # still end up with a consistent ordering.
tasks.append(
Group.async_create_group(
hass,
name,
entity_ids,
icon=icon,
object_id=object_id,
mode=mode,
order=hass.data[GROUP_ORDER],
)
)
# Keep track of the group order without iterating
# every state in the state machine every time
        # we set up a new group
hass.data[GROUP_ORDER] += 1
await asyncio.gather(*tasks)
class GroupEntity(Entity):
"""Representation of a Group of entities."""
@property
def should_poll(self) -> bool:
"""Disable polling for group."""
return False
async def async_added_to_hass(self) -> None:
"""Register listeners."""
assert self.hass is not None
async def _update_at_start(_):
await self.async_update_ha_state(True)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _update_at_start)
async def async_defer_or_update_ha_state(self) -> None:
"""Only update once at start."""
assert self.hass is not None
if self.hass.state != CoreState.running:
return
await self.async_update_ha_state(True)
class Group(Entity):
"""Track a group of entity ids."""
def __init__(
self,
hass,
name,
order=None,
icon=None,
user_defined=True,
entity_ids=None,
mode=None,
):
"""Initialize a group.
        This object has a factory function for creation.
"""
self.hass = hass
self._name = name
self._state = None
self._icon = icon
self._set_tracked(entity_ids)
self._on_off = None
self._assumed = None
self._on_states = None
self.user_defined = user_defined
self.mode = any
if mode:
self.mode = all
self._order = order
self._assumed_state = False
self._async_unsub_state_changed = None
@staticmethod
def create_group(
hass,
name,
entity_ids=None,
user_defined=True,
icon=None,
object_id=None,
mode=None,
order=None,
):
"""Initialize a group."""
return asyncio.run_coroutine_threadsafe(
Group.async_create_group(
hass, name, entity_ids, user_defined, icon, object_id, mode, order
),
hass.loop,
).result()
@staticmethod
async def async_create_group(
hass,
name,
entity_ids=None,
user_defined=True,
icon=None,
object_id=None,
mode=None,
order=None,
):
"""Initialize a group.
This method must be run in the event loop.
"""
if order is None:
hass.data.setdefault(GROUP_ORDER, 0)
order = hass.data[GROUP_ORDER]
# Keep track of the group order without iterating
# every state in the state machine every time
            # we set up a new group
hass.data[GROUP_ORDER] += 1
group = Group(
hass,
name,
order=order,
icon=icon,
user_defined=user_defined,
entity_ids=entity_ids,
mode=mode,
)
group.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, object_id or name, hass=hass
)
# If called before the platform async_setup is called (test cases)
component = hass.data.get(DOMAIN)
if component is None:
component = hass.data[DOMAIN] = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([group])
return group
@property
def should_poll(self):
"""No need to poll because groups will update themselves."""
return False
@property
def name(self):
"""Return the name of the group."""
return self._name
@name.setter
def name(self, value):
"""Set Group name."""
self._name = value
@property
def state(self):
"""Return the state of the group."""
return self._state
@property
def icon(self):
"""Return the icon of the group."""
return self._icon
@icon.setter
def icon(self, value):
"""Set Icon for group."""
self._icon = value
@property
def state_attributes(self):
"""Return the state attributes for the group."""
data = {ATTR_ENTITY_ID: self.tracking, ATTR_ORDER: self._order}
if not self.user_defined:
data[ATTR_AUTO] = True
return data
@property
def assumed_state(self):
"""Test if any member has an assumed state."""
return self._assumed_state
def update_tracked_entity_ids(self, entity_ids):
"""Update the member entity IDs."""
asyncio.run_coroutine_threadsafe(
self.async_update_tracked_entity_ids(entity_ids), self.hass.loop
).result()
async def async_update_tracked_entity_ids(self, entity_ids):
"""Update the member entity IDs.
This method must be run in the event loop.
"""
self._async_stop()
self._set_tracked(entity_ids)
self._reset_tracked_state()
self._async_start()
def _set_tracked(self, entity_ids):
"""Tuple of entities to be tracked."""
# tracking are the entities we want to track
# trackable are the entities we actually watch
if not entity_ids:
self.tracking = ()
self.trackable = ()
return
excluded_domains = self.hass.data[REG_KEY].exclude_domains
tracking = []
trackable = []
for ent_id in entity_ids:
ent_id_lower = ent_id.lower()
domain = split_entity_id(ent_id_lower)[0]
tracking.append(ent_id_lower)
if domain not in excluded_domains:
trackable.append(ent_id_lower)
self.trackable = tuple(trackable)
self.tracking = tuple(tracking)
@callback
def _async_start(self, *_):
"""Start tracking members and write state."""
self._reset_tracked_state()
self._async_start_tracking()
self.async_write_ha_state()
@callback
def _async_start_tracking(self):
"""Start tracking members.
This method must be run in the event loop.
"""
if self.trackable and self._async_unsub_state_changed is None:
self._async_unsub_state_changed = async_track_state_change_event(
self.hass, self.trackable, self._async_state_changed_listener
)
self._async_update_group_state()
@callback
def _async_stop(self):
"""Unregister the group from Home Assistant.
This method must be run in the event loop.
"""
if self._async_unsub_state_changed:
self._async_unsub_state_changed()
self._async_unsub_state_changed = None
async def async_update(self):
"""Query all members and determine current group state."""
self._state = None
self._async_update_group_state()
async def async_added_to_hass(self):
"""Handle addition to Home Assistant."""
if self.hass.state != CoreState.running:
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, self._async_start
)
return
if self.tracking:
self._reset_tracked_state()
self._async_start_tracking()
async def async_will_remove_from_hass(self):
"""Handle removal from Home Assistant."""
self._async_stop()
async def _async_state_changed_listener(self, event):
"""Respond to a member state changing.
This method must be run in the event loop.
"""
        # The state-change listener was already removed; ignore the event.
if self._async_unsub_state_changed is None:
return
self.async_set_context(event.context)
new_state = event.data.get("new_state")
if new_state is None:
# The state was removed from the state machine
self._reset_tracked_state()
self._async_update_group_state(new_state)
self.async_write_ha_state()
def _reset_tracked_state(self):
"""Reset tracked state."""
self._on_off = {}
self._assumed = {}
self._on_states = set()
for entity_id in self.trackable:
state = self.hass.states.get(entity_id)
if state is not None:
self._see_state(state)
def _see_state(self, new_state):
"""Keep track of the the state."""
entity_id = new_state.entity_id
domain = new_state.domain
state = new_state.state
registry = self.hass.data[REG_KEY]
self._assumed[entity_id] = new_state.attributes.get(ATTR_ASSUMED_STATE)
if domain not in registry.on_states_by_domain:
# Handle the group of a group case
if state in registry.on_off_mapping:
self._on_states.add(state)
elif state in registry.off_on_mapping:
self._on_states.add(registry.off_on_mapping[state])
self._on_off[entity_id] = state in registry.on_off_mapping
else:
entity_on_state = registry.on_states_by_domain[domain]
if domain in self.hass.data[REG_KEY].on_states_by_domain:
self._on_states.update(entity_on_state)
self._on_off[entity_id] = state in entity_on_state
@callback
def _async_update_group_state(self, tr_state=None):
"""Update group state.
        Optionally you can provide the only state that changed since the last
        update, allowing this method to take shortcuts.
This method must be run in the event loop.
"""
# To store current states of group entities. Might not be needed.
if tr_state:
self._see_state(tr_state)
if not self._on_off:
return
if (
tr_state is None
or self._assumed_state
and not tr_state.attributes.get(ATTR_ASSUMED_STATE)
):
self._assumed_state = self.mode(self._assumed.values())
elif tr_state.attributes.get(ATTR_ASSUMED_STATE):
self._assumed_state = True
num_on_states = len(self._on_states)
# If all the entity domains we are tracking
# have the same on state we use this state
# and its hass.data[REG_KEY].on_off_mapping to off
if num_on_states == 1:
on_state = list(self._on_states)[0]
# If we do not have an on state for any domains
# we use None (which will be STATE_UNKNOWN)
elif num_on_states == 0:
self._state = None
return
# If the entity domains have more than one
# on state, we use STATE_ON/STATE_OFF
else:
on_state = STATE_ON
group_is_on = self.mode(self._on_off.values())
if group_is_on:
self._state = on_state
else:
self._state = self.hass.data[REG_KEY].on_off_mapping[on_state]
| 29.72423
| 87
| 0.617501
|
4a00508aa16ae8bdd6d3481cae734fa59680522f
| 2,046
|
py
|
Python
|
Day18/hirst_painting_project/main.py
|
CodePuzzler/100-Days-Of-Code-Python
|
4f6da9dabc73f747266ce0e66057d10754ecc54e
|
[
"MIT"
] | null | null | null |
Day18/hirst_painting_project/main.py
|
CodePuzzler/100-Days-Of-Code-Python
|
4f6da9dabc73f747266ce0e66057d10754ecc54e
|
[
"MIT"
] | null | null | null |
Day18/hirst_painting_project/main.py
|
CodePuzzler/100-Days-Of-Code-Python
|
4f6da9dabc73f747266ce0e66057d10754ecc54e
|
[
"MIT"
] | null | null | null |
# Day18 of my 100DaysOfCode Challenge
# Extracted colors from an image
# import colorgram
#
# rgb_colors = []
# colors = colorgram.extract('image.jpg', 30)
# for color in colors:
# rgb_colors.append(color.rgb)
#
# print(rgb_colors)
# Output of above code [Rgb(r=245, g=243, b=238), Rgb(r=246, g=242, b=244), Rgb(r=202, g=164, b=110), Rgb(r=240,
# g=245, b=241), Rgb(r=236, g=239, b=243), Rgb(r=149, g=75, b=50), Rgb(r=222, g=201, b=136), Rgb(r=53, g=93, b=123),
# Rgb(r=170, g=154, b=41), Rgb(r=138, g=31, b=20), Rgb(r=134, g=163, b=184), Rgb(r=197, g=92, b=73), Rgb(r=47, g=121,
# b=86), Rgb(r=73, g=43, b=35), Rgb(r=145, g=178, b=149), Rgb(r=14, g=98, b=70), Rgb(r=232, g=176, b=165), Rgb(r=160,
# g=142, b=158), Rgb(r=54, g=45, b=50), Rgb(r=101, g=75, b=77), Rgb(r=183, g=205, b=171), Rgb(r=36, g=60, b=74),
# Rgb(r=19, g=86, b=89), Rgb(r=82, g=148, b=129), Rgb(r=147, g=17, b=19), Rgb(r=27, g=68, b=102), Rgb(r=12, g=70,
# b=64), Rgb(r=107, g=127, b=153), Rgb(r=176, g=192, b=208), Rgb(r=168, g=99, b=102)]
import turtle as turtle_module
import random
turtle_module.colormode(255)
groot = turtle_module.Turtle()
groot.speed("fastest")
groot.penup()
groot.hideturtle()
color_list = [(202, 164, 109), (238, 240, 245), (150, 75, 49), (223, 201, 135), (52, 93, 124), (172, 154, 40), (140, 30, 19), (133, 163, 185), (198, 91, 71), (46, 122, 86), (72, 43, 35), (145, 178, 148), (13, 99, 71), (233, 175, 164), (161, 142, 158), (105, 74, 77), (55, 46, 50), (183, 205, 171), (36, 60, 74), (18, 86, 90), (81, 148, 129), (148, 17, 20), (14, 70, 64), (30, 68, 100), (107, 127, 153), (174, 94, 97), (176, 192, 209)]
groot.setheading(225)
groot.forward(300)
groot.setheading(0)
number_of_dots = 100
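# Paint a 10 x 10 grid of dots: 50 px between dots, and after every 10th dot
# move up one row and return to the left edge before continuing.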
for dot_count in range(1, number_of_dots + 1):
groot.dot(20, random.choice(color_list))
groot.forward(50)
if dot_count % 10 == 0:
groot.setheading(90)
groot.forward(50)
groot.setheading(180)
groot.forward(500)
groot.setheading(0)
screen = turtle_module.Screen()
screen.exitonclick()
| 34.677966
| 434
| 0.599707
|
4a0051cca28f06df1b6d5c4d581b5bca68153689
| 258
|
py
|
Python
|
cclib/scripts/__init__.py
|
pstjohn/cclib
|
e0990eb7291b255fcb2ce24c91506ec13ad64b50
|
[
"BSD-3-Clause"
] | 224
|
2015-01-26T10:04:22.000Z
|
2022-03-28T18:20:40.000Z
|
cclib/scripts/__init__.py
|
pstjohn/cclib
|
e0990eb7291b255fcb2ce24c91506ec13ad64b50
|
[
"BSD-3-Clause"
] | 824
|
2015-01-01T17:55:29.000Z
|
2022-03-31T20:32:51.000Z
|
cclib/scripts/__init__.py
|
pstjohn/cclib
|
e0990eb7291b255fcb2ce24c91506ec13ad64b50
|
[
"BSD-3-Clause"
] | 169
|
2015-01-14T20:29:31.000Z
|
2022-03-01T09:16:02.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
from . import ccget
from . import ccwrite
from . import cda
| 23.454545
| 78
| 0.70155
|
4a0051f3e308cf76833f07ff6064235b15e37525
| 13,239
|
py
|
Python
|
api.py
|
BobCashStory/docker-pure-ftpd
|
ce69eb0b93e197baffccc6dafad5e0e686f9875a
|
[
"MIT"
] | 3
|
2020-04-17T00:15:00.000Z
|
2022-01-17T18:57:22.000Z
|
api.py
|
BobCashStory/docker-pure-ftpd
|
ce69eb0b93e197baffccc6dafad5e0e686f9875a
|
[
"MIT"
] | null | null | null |
api.py
|
BobCashStory/docker-pure-ftpd
|
ce69eb0b93e197baffccc6dafad5e0e686f9875a
|
[
"MIT"
] | null | null | null |
#!/bin/python3
from gevent.pywsgi import WSGIServer
from flask import Flask, request, jsonify
import os
import sys
import subprocess
from os import urandom
from escapism import escape
import string
import logging
from logging.handlers import RotatingFileHandler
from time import strftime
import datetime
import time
import traceback
app = Flask(__name__)
_docker_safe_chars = set(string.ascii_letters + string.digits + "-")
_docker_escape_char = "_"
apiKey = urandom(30).hex()
if os.getenv('X_API_KEY') is not None:
apiKey = os.environ['X_API_KEY']
else:
logging.info("Your X-Api-Key is: " + apiKey)
def _escape(s):
"""Escape a string to docker-safe characters"""
return escape(
s,
safe=_docker_safe_chars,
escape_char=_docker_escape_char,
)
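# Illustrative example (exact escape codes depend on the escapism library):
# _escape("john.doe") would yield something like "john_2edoe", since "." is
# not in _docker_safe_chars and gets replaced by "_" plus its hex code.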
def commandPass(password):
return ["echo", "-e", confirmPass(password)]
def commandList():
return ["cat", "/etc/pure-ftpd/pureftpd.passwd"]
def commandPureFtp(cmd, username, options):
cmd = ["pure-pw", cmd, username]
cmd.extend(options)
return cmd
def deleteUserFolder(username):
username = username.replace('.', '').replace('/', '')
# disable path injection in username
path = "/home/ftpusers/" + username
cmd = ["rm", "-rf", path]
return cmd
def cleanError(output):
if output.startswith('Error'):
return output
cleanErr = output.split('Error')
cleanErr[0] = ''
return 'Error'.join(cleanErr).replace('\n', ' ').strip()
def confirmPass(password):
return password + "\n" + password + "\n"
def printPass(password):
return "echo -e \"" + password + "\n" + password + "\""
def goodApiKey(headers):
    return headers.get("X-Api-Key") == apiKey
def parseLine(line):
arrLine = line.split(':')
return [item.strip() for item in arrLine]
def parseInfo(res):
arrRes = res.split('\n')
result = {}
for line in arrRes:
parsedLine = parseLine(line)
if (len(parsedLine) > 1):
result[parsedLine[0].replace(' ', '_')] = parsedLine[1]
pass
return result
def parseListInfo(res):
arrRes = res.split('\n')
result = []
for line in arrRes:
if (len(line) > 1):
parsedLine = line.split(':')[0]
result.append(parsedLine)
pass
return result
def jsonToCommandArr(json):
# print("json", json)
# print("json", request.json, file=sys.stderr)
command = []
if json.get('chroot') is not None and json.get('chroot') == False:
command.append('-D')
else:
command.append('-d')
if json.get('directory') is not None:
command.append("/home/ftpusers" + json.get('directory'))
else:
username = json.get('username').lower()
folderName = _escape(username)
command.append("/home/ftpusers/" + folderName)
if json.get('download_bandwidth') is not None:
command.append("-t")
command.append(json.get('download_bandwidth'))
if json.get('upload_bandwidth') is not None:
command.append("-T")
command.append(json.get('upload_bandwidth'))
if json.get('max_files_number') is not None:
command.append("-n")
command.append(json.get('max_files_number'))
if json.get('max_files_Mbytes') is not None:
command.append("-N")
command.append(json.get('max_files_Mbytes'))
if json.get('upload_ratio') is not None:
command.append("-q")
command.append(json.get('upload_ratio'))
if json.get('download_ratio') is not None:
command.append("-Q")
command.append(json.get('download_ratio'))
if json.get('allow_client_ip') is not None:
command.append("-r")
command.append(json.get('allow_client_ip'))
if json.get('deny_client_ip') is not None:
command.append("-R")
command.append(json.get('deny_client_ip'))
if json.get('allow_local_ip') is not None:
command.append("-i")
command.append(json.get('allow_local_ip'))
if json.get('deny_local_ip') is not None:
command.append("-I")
command.append(json.get('deny_local_ip'))
if json.get('max_concurrent_sessions') is not None:
command.append("-y")
command.append(json.get('max_concurrent_sessions'))
if json.get('max_concurrent_login_attempts') is not None:
command.append("-C")
command.append(json.get('max_concurrent_login_attempts'))
if json.get('memory_reserve_password_hashing') is not None:
command.append("-M")
command.append(json.get('memory_reserve_password_hashing'))
if json.get('allowed_range_day') is not None:
command.append("-z")
command.append(json.get('allowed_range_day'))
# force commit changes
command.append("-m")
# -D/-d < home directory >
# [-c < gecos > ]
# [-t < download bandwidth >] [-T < upload bandwidth > ]
# [-n < max number of files >] [-N < max Mbytes > ]
# [-q < upload ratio >] [-Q < download ratio > ]
# [-r < allow client ip > / < mask >] [-R < deny client ip > / < mask > ]
# [-i < allow local ip > / < mask >] [-I < deny local ip > / < mask > ]
# [-y < max number of concurrent sessions > ]
# [-C < max number of concurrent login attempts > ]
# [-M < total memory (in MB) to reserve for password hashing > ]
# [-z < hhmm > - < hhmm > ] [-m]
return command
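# Illustrative payload for jsonToCommandArr (field values are examples only):
#   {"username": "alice", "chroot": true, "download_bandwidth": "1024"}
# would produce:
#   ['-d', '/home/ftpusers/alice', '-t', '1024', '-m']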
@app.errorhandler(404)
def not_found(error):
return jsonify({'error': 'Not found'}), 404
@app.errorhandler(405)
def not_allowed(error):
return jsonify({'error': 'Method not allowed. The method is not allowed for the requested URL.'}), 405
@app.errorhandler(500)
def not_working(error):
    return jsonify({'error': 'Something went wrong'}), 500
@app.route('/')
def statusRun():
return jsonify({'status': 'ready'}), 200
@app.route('/user/del', methods=['POST'])
def delUser():
if goodApiKey(request.headers):
if (request.json.get('username') is None):
return jsonify({"message": "ERROR: missing username"}), 401
username = request.json.get('username').lower()
options = jsonToCommandArr(request.json)
pureCmd = commandPureFtp('userdel', username, options)
try:
subprocess.check_output(
pureCmd, universal_newlines=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logging.error("Your error: " +
cleanError(e.output), file=sys.stderr)
return jsonify({"message": "ERROR: command", "code": e.returncode, "err": cleanError(e.output)}), 400
archive = request.json.get('archive')
if (archive is None or archive == 'false'):
delCmd = deleteUserFolder(username)
logging.info("Delete cmd: " + ' '.join(delCmd), file=sys.stderr)
try:
subprocess.check_output(
delCmd, universal_newlines=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logging.error("Your error: " +
cleanError(e.output), file=sys.stderr)
return jsonify({"message": "ERROR: command", "code": e.returncode, "err": cleanError(e.output)}), 400
return jsonify({"message": "OK: Deleted"}), 200
else:
return jsonify({"message": "ERROR: Unauthorized"}), 401
@app.route('/user/edit', methods=['PUT'])
def editUser():
if goodApiKey(request.headers):
if (request.json.get('username') is None):
return jsonify({"message": "ERROR: missing username/password"}), 401
username = request.json.get('username').lower()
options = jsonToCommandArr(request.json)
pureCmd = commandPureFtp('usermod', username, options)
try:
subprocess.check_output(
pureCmd, universal_newlines=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logging.error("Your error: " +
cleanError(e.output), file=sys.stderr)
return jsonify({"message": "ERROR: command", "code": e.returncode, "err": cleanError(e.output)}), 400
return jsonify({"message": "OK: Edited"}), 200
else:
return jsonify({"message": "ERROR: Unauthorized"}), 401
@app.route('/user/password', methods=['POST'])
def setUserPwd():
if goodApiKey(request.headers):
if (request.json.get('username') is None or request.json.get('password') is None):
return jsonify({"message": "ERROR: missing username/password"}), 401
password = request.json.get('password')
username = request.json.get('username').lower()
pureCmd = commandPureFtp('passwd', username, ['-m'])
passpass = confirmPass(password)
try:
subprocess.check_output(
pureCmd, universal_newlines=True, input=passpass, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logging.error("Your error: " +
cleanError(e.output), file=sys.stderr)
return jsonify({"message": "ERROR: command", "code": e.returncode, "err": cleanError(cleanError(e.output))}), 400
return jsonify({"message": "OK: password updated"}), 200
else:
return jsonify({"message": "ERROR: Unauthorized"}), 401
@app.route('/user/info', methods=['GET'])
def getUser():
if goodApiKey(request.headers):
if (request.json.get('username') is None):
return jsonify({"message": "ERROR: missing username"}), 401
username = request.json.get('username').lower()
pureCmd = commandPureFtp(
'show', username, [])
try:
result = subprocess.check_output(
pureCmd, universal_newlines=True, stderr=subprocess.STDOUT)
jsonResult = parseInfo(result)
return jsonify(jsonResult), 200
except subprocess.CalledProcessError as e:
logging.error("Your error: " +
cleanError(e.output), file=sys.stderr)
return jsonify({"message": "ERROR: command", "code": e.returncode, "err": cleanError(e.output)}), 400
else:
return jsonify({"message": "ERROR: Unauthorized"}), 401
@app.route('/user/list', methods=['GET'])
def getUsers():
if goodApiKey(request.headers):
listCmd = commandList()
try:
result = subprocess.check_output(
listCmd, universal_newlines=True, stderr=subprocess.STDOUT)
jsonResult = parseListInfo(result)
return jsonify(jsonResult), 200
except subprocess.CalledProcessError as e:
logging.error("Your error: " +
cleanError(e.output), file=sys.stderr)
return jsonify({"message": "ERROR: command", "code": e.returncode, "err": cleanError(e.output)}), 400
else:
return jsonify({"message": "ERROR: Unauthorized"}), 401
@app.route('/user/add', methods=['POST'])
def addUser():
if goodApiKey(request.headers):
if (request.json.get('username') is None or request.json.get('password') is None):
return jsonify({"message": "ERROR: missing username/password"}), 401
password = request.json.get('password')
username = request.json.get('username').lower()
options = jsonToCommandArr(request.json)
options.append('-u')
options.append('ftp')
pureCmd = commandPureFtp('useradd', username, options)
passpass = confirmPass(password)
try:
subprocess.check_output(
pureCmd, universal_newlines=True, input=passpass, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logging.error("Your error: " +
cleanError(e.output), file=sys.stderr)
return jsonify({"message": "ERROR: command", "code": e.returncode, "err": cleanError(e.output)}), 400
return jsonify({"message": "OK: Created"}), 200
else:
return jsonify({"message": "ERROR: Unauthorized"}), 401
@app.after_request
def after_request(response):
timestamp = strftime('[%Y-%b-%d %H:%M]')
logger.error('%s %s %s %s %s %s', timestamp, request.remote_addr,
request.method, request.scheme, request.full_path, response.status)
return response
@app.errorhandler(Exception)
def exceptions(e):
tb = traceback.format_exc()
timestamp = strftime('[%Y-%b-%d %H:%M]')
logger.error('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\n%s', timestamp,
request.remote_addr, request.method, request.scheme, request.full_path, tb)
    return jsonify({'error': 'Something went wrong'}), 500
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
handler = RotatingFileHandler('app.log', maxBytes=100000, backupCount=3)
logger = logging.getLogger('tdm')
logger.addHandler(handler)
# Debug/Development
# app.run(debug=True, host="0.0.0.0", port="5000")
# Production
logging.info(f'==> start API {time.time()}\n')
http_server = WSGIServer(('', 5000), app)
http_server.serve_forever()
| 36.172131
| 125
| 0.616889
|
4a00520e8fb952581e8c873fc19b1caa0893400b
| 653
|
py
|
Python
|
deepq_kvi/__init__.py
|
manantomar/Multi-step-Greedy-Reinforcement-Leaarning-Algorithms
|
f37a4502ef84b939885fe79c82d7cc6c5faa6daf
|
[
"MIT"
] | 2
|
2020-12-04T11:22:47.000Z
|
2021-04-16T07:41:30.000Z
|
deepq_kvi/__init__.py
|
manantomar/Multi-step-Greedy-Reinforcement-Leaarning-Algorithms
|
f37a4502ef84b939885fe79c82d7cc6c5faa6daf
|
[
"MIT"
] | null | null | null |
deepq_kvi/__init__.py
|
manantomar/Multi-step-Greedy-Reinforcement-Leaarning-Algorithms
|
f37a4502ef84b939885fe79c82d7cc6c5faa6daf
|
[
"MIT"
] | 1
|
2021-04-01T01:57:25.000Z
|
2021-04-01T01:57:25.000Z
|
from stable_baselines.deepq_kvi.policies import MlpPolicy, CnnPolicy, LnMlpPolicy, LnCnnPolicy
from stable_baselines.deepq_kvi.build_graph import build_act, build_train # noqa
from stable_baselines.deepq_kvi.dqn import DQN
from stable_baselines.deepq_kvi.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer # noqa
def wrap_atari_dqn(env):
"""
wrap the environment in atari wrappers for DQN
:param env: (Gym Environment) the environment
:return: (Gym Environment) the wrapped environment
"""
from stable_baselines.common.atari_wrappers import wrap_deepmind
return wrap_deepmind(env, frame_stack=True, scale=False)
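# Illustrative usage sketch (assumes gym and its Atari extras are installed;
# the environment id is only an example):
#   import gym
#   env = wrap_atari_dqn(gym.make("BreakoutNoFrameskip-v4"))
#   model = DQN(CnnPolicy, env, verbose=1)
#   model.learn(total_timesteps=10000)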
| 40.8125
| 98
| 0.800919
|
4a005379fc28f7bf8692a504af3c3bb5cd15b37e
| 23,898
|
py
|
Python
|
src/tests/test_db_create_handler.py
|
orosz-usgs/aqts-capture-ecosystem-switch
|
14202aa2fe709ad56982b3503e1073ce36e80f77
|
[
"CC0-1.0"
] | null | null | null |
src/tests/test_db_create_handler.py
|
orosz-usgs/aqts-capture-ecosystem-switch
|
14202aa2fe709ad56982b3503e1073ce36e80f77
|
[
"CC0-1.0"
] | null | null | null |
src/tests/test_db_create_handler.py
|
orosz-usgs/aqts-capture-ecosystem-switch
|
14202aa2fe709ad56982b3503e1073ce36e80f77
|
[
"CC0-1.0"
] | null | null | null |
import json
import os
import datetime
from unittest import TestCase, mock
from src import db_create_handler
from src.db_create_handler import _get_observation_snapshot_identifier, _get_date_string
from src.db_resize_handler import BIG_DB_SIZE
from src.handler import DEFAULT_DB_INSTANCE_IDENTIFIER, \
DEFAULT_DB_CLUSTER_IDENTIFIER
from src.utils import CAPTURE_INSTANCE_TAGS, OBSERVATION_INSTANCE_TAGS
class TestDbCreateHandler(TestCase):
queue_url = 'https://sqs.us-south-10.amazonaws.com/887501/some-queue-name'
sns_arn = 'arn:aws:sns:us-south-23:5746521541:fake-notification'
region = 'us-south-10'
mock_env_vars = {
'AWS_DEPLOYMENT_REGION': region
}
mock_db_cluster_identifiers = {'nwcapture-test', 'nwcapture-qa'}
mock_db_clusters = {
'Marker': 'string',
'DBClusters': [
{
'DBClusterIdentifier': 'string',
'Status': 'string',
},
]
}
mock_event_source_mapping = {
'NextMarker': 'string',
'EventSourceMappings': [
{
'UUID': 'string',
},
]
}
def setUp(self):
self.initial_execution_arn = 'arn:aws:states:us-south-10:98877654311:blah:a17h83j-p84321'
self.state_machine_start_input = {
'Record': {'eventVersion': '2.1', 'eventSource': 'aws:s3'}
}
self.initial_event = {'executionArn': self.initial_execution_arn, 'startInput': self.state_machine_start_input}
self.context = {'element': 'lithium'}
@mock.patch('src.db_create_handler.disable_lambda_trigger', autospec=True)
@mock.patch('src.db_create_handler.rds_client')
def test_delete_capture_db(self, mock_rds, mock_triggers):
os.environ['STAGE'] = 'QA'
os.environ['CAN_DELETE_DB'] = 'true'
        mock_triggers.return_value = True
        db_create_handler.delete_capture_db({}, {})
mock_rds.delete_db_instance.assert_called_once_with(
DBInstanceIdentifier=DEFAULT_DB_INSTANCE_IDENTIFIER,
SkipFinalSnapshot=True)
mock_rds.delete_db_cluster.assert_called_once_with(
DBClusterIdentifier=DEFAULT_DB_CLUSTER_IDENTIFIER,
SkipFinalSnapshot=True)
@mock.patch('src.db_create_handler.disable_lambda_trigger', autospec=True)
@mock.patch('src.db_create_handler.rds_client')
def test_delete_capture_db_invalid_tier(self, mock_rds, mock_triggers):
os.environ['STAGE'] = 'QA'
os.environ['CAN_DELETE_DB'] = 'false'
with self.assertRaises(Exception) as context:
db_create_handler.delete_capture_db({}, {})
mock_triggers.return_value = True
mock_rds.delete_db_instance.assert_not_called()
mock_rds.delete_db_cluster.assert_not_called()
@mock.patch('src.db_create_handler.rds_client')
def test_create_db_instance_default(self, mock_rds):
os.environ['STAGE'] = 'TEST'
os.environ['CAN_DELETE_DB'] = 'true'
db_create_handler.create_db_instance({}, {})
mock_rds.create_db_instance.assert_called_once_with(
DBInstanceIdentifier=DEFAULT_DB_INSTANCE_IDENTIFIER,
DBInstanceClass=BIG_DB_SIZE,
DBClusterIdentifier=DEFAULT_DB_CLUSTER_IDENTIFIER,
Engine='aurora-postgresql',
Tags=CAPTURE_INSTANCE_TAGS
)
@mock.patch('src.db_create_handler.rds_client')
def test_create_db_instance_default_invalid_tier(self, mock_rds):
os.environ['STAGE'] = 'QA'
os.environ['CAN_DELETE_DB'] = 'false'
with self.assertRaises(Exception) as context:
db_create_handler.create_db_instance({}, {})
mock_rds.create_db_instance.assert_not_called()
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_modify_postgres_password(self, mock_rds, mock_secrets_client):
os.environ['STAGE'] = 'QA'
os.environ['CAN_DELETE_DB'] = 'true'
my_secret_string = json.dumps(
{
"POSTGRES_PASSWORD": "Password123"
}
)
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
db_create_handler.modify_postgres_password({}, {})
mock_rds.modify_db_cluster.assert_called_once_with(
DBClusterIdentifier=DEFAULT_DB_CLUSTER_IDENTIFIER,
ApplyImmediately=True,
MasterUserPassword='Password123')
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_modify_postgres_password_invalid_tier(self, mock_rds, mock_secrets_client):
os.environ['STAGE'] = 'QA'
os.environ['CAN_DELETE_DB'] = 'false'
my_secret_string = json.dumps({"POSTGRES_PASSWORD": "Password123"})
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
with self.assertRaises(Exception) as context:
db_create_handler.modify_postgres_password({}, {})
mock_rds.modify_db_cluster.assert_not_called()
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_restore_db_cluster(self, mock_rds, mock_secrets_client):
os.environ['STAGE'] = 'QA'
os.environ['CAN_DELETE_DB'] = 'true'
my_secret_string = json.dumps(
{
"KMS_KEY_ID": "kms",
"DB_SUBGROUP_NAME": "subgroup",
"VPC_SECURITY_GROUP_ID": "vpc_id"
}
)
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
db_create_handler.restore_db_cluster({}, {})
mock_rds.restore_db_cluster_from_snapshot.assert_called_once_with(
DBClusterIdentifier=DEFAULT_DB_CLUSTER_IDENTIFIER,
SnapshotIdentifier=db_create_handler.get_snapshot_identifier(),
Engine='aurora-postgresql',
EngineVersion='11.7',
Port=5432,
DBSubnetGroupName='subgroup',
DatabaseName='nwcapture-qa',
EnableIAMDatabaseAuthentication=False,
EngineMode='provisioned',
DBClusterParameterGroupName='aqts-capture',
DeletionProtection=False,
CopyTagsToSnapshot=False,
KmsKeyId='kms',
VpcSecurityGroupIds=['vpc_id'],
Tags=[{'Key': 'Name', 'Value': 'NWISWEB-CAPTURE-RDS-AURORA-TEST'},
{'Key': 'wma:applicationId', 'Value': 'NWISWEB-CAPTURE'},
{'Key': 'wma:contact', 'Value': 'tbd'},
{'Key': 'wma:costCenter', 'Value': 'tbd'},
{'Key': 'wma:criticality', 'Value': 'tbd'},
{'Key': 'wma:environment', 'Value': 'qa'},
{'Key': 'wma:operationalHours', 'Value': 'tbd'},
{'Key': 'wma:organization', 'Value': 'IOW'},
{'Key': 'wma:role', 'Value': 'database'},
{'Key': 'wma:system', 'Value': 'NWIS'},
{'Key': 'wma:subSystem', 'Value': 'NWISWeb-Capture'},
{'Key': 'taggingVersion', 'Value': '0.0.1'}]
)
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_restore_db_cluster_invalid_tier(self, mock_rds, mock_secrets_client):
os.environ['STAGE'] = 'QA'
os.environ['CAN_DELETE_DB'] = 'false'
my_secret_string = json.dumps(
{
"KMS_KEY_ID": "kms",
"DB_SUBGROUP_NAME": "subgroup",
"VPC_SECURITY_GROUP_ID": "vpc_id"
}
)
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
with self.assertRaises(Exception) as context:
db_create_handler.restore_db_cluster({}, {})
mock_rds.restore_db_cluster_from_snapshot.assert_not_called()
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_restore_db_cluster_invalid_secrets(self, mock_rds, mock_secrets_client):
os.environ['STAGE'] = 'QA'
os.environ['CAN_DELETE_DB'] = 'false'
my_secret_string = json.dumps(
{
}
)
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
with self.assertRaises(Exception) as context:
db_create_handler.restore_db_cluster({}, {})
mock_rds.restore_db_cluster_from_snapshot.assert_not_called()
@mock.patch('src.db_create_handler.enable_lambda_trigger', autospec=True)
@mock.patch('src.db_create_handler.RDS', autospec=True)
@mock.patch('src.db_create_handler.sqs_client')
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_modify_schema_owner_password(self, mock_rds, mock_secrets_client, mock_sqs_client,
mock_db, mock_triggers):
os.environ['CAN_DELETE_DB'] = 'true'
os.environ['STAGE'] = 'QA'
mock_triggers.return_value = True
my_secret_string = json.dumps(
{
"DATABASE_ADDRESS": "address",
"DATABASE_NAME": "name",
"VPC_SECURITY_GROUP_ID": "vpc_id",
"POSTGRES_PASSWORD": "Password123",
"SCHEMA_OWNER_PASSWORD": "Password123"
}
)
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
db_create_handler.modify_schema_owner_password({}, {})
self.assertEqual(mock_sqs_client.purge_queue.call_count, 2)
@mock.patch('src.db_create_handler.enable_lambda_trigger', autospec=True)
@mock.patch('src.db_create_handler.RDS', autospec=True)
@mock.patch('src.db_create_handler.sqs_client')
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_modify_schema_owner_password_invalid_tier(self, mock_rds, mock_secrets_client, mock_sqs_client,
mock_db, mock_triggers):
os.environ['CAN_DELETE_DB'] = 'false'
os.environ['STAGE'] = 'QA'
mock_triggers.return_value = True
my_secret_string = json.dumps(
{
"DATABASE_ADDRESS": "address",
"DATABASE_NAME": "name",
"VPC_SECURITY_GROUP_ID": "vpc_id",
"POSTGRES_PASSWORD": "Password123",
"SCHEMA_OWNER_PASSWORD": "Password123"
}
)
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
with self.assertRaises(Exception) as context:
db_create_handler.modify_schema_owner_password({}, {})
self.assertEqual(mock_sqs_client.purge_queue.call_count, 0)
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_copy_observation_db_snapshot(self, mock_rds, mock_secrets_client):
os.environ['CAN_DELETE_DB'] = 'true'
os.environ['STAGE'] = 'TEST'
my_secret_string = json.dumps(
{
"KMS_KEY_ID": "kms",
"DB_SUBGROUP_NAME": "subgroup",
"VPC_SECURITY_GROUP_ID": "vpc_id"
}
)
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
two_days_ago = datetime.datetime.now() - datetime.timedelta(2)
date_str = _get_date_string(two_days_ago)
mock_rds.describe_db_snapshots.return_value = {
'DBSnapshots': [
{
"DBInstanceIdentifier": 'observations-prod-external-2',
"DBSnapshotIdentifier": f"rds:observations-prod-external-2-{date_str}"
}
]
}
db_create_handler.copy_observation_db_snapshot({}, {})
mock_rds.copy_db_snapshot.assert_called_once_with(
SourceDBSnapshotIdentifier=_get_observation_snapshot_identifier(),
TargetDBSnapshotIdentifier=f"observationSnapshotTESTTemp",
KmsKeyId='kms')
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_copy_observation_db_snapshot_invalid_tier(self, mock_rds, mock_secrets_client):
os.environ['CAN_DELETE_DB'] = 'false'
os.environ['STAGE'] = 'TEST'
my_secret_string = json.dumps(
{
"KMS_KEY_ID": "kms",
"DB_SUBGROUP_NAME": "subgroup",
"VPC_SECURITY_GROUP_ID": "vpc_id"
}
)
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
two_days_ago = datetime.datetime.now() - datetime.timedelta(2)
date_str = _get_date_string(two_days_ago)
mock_rds.describe_db_snapshots.return_value = {
'DBSnapshots': [
{
"DBInstanceIdentifier": 'observations-prod-external-2',
"DBSnapshotIdentifier": f"rds:observations-prod-external-2-{date_str}"
}
]
}
with self.assertRaises(Exception) as context:
db_create_handler.copy_observation_db_snapshot({}, {})
mock_rds.copy_db_snapshot.assert_not_called()
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_create_observation_db(self, mock_rds, mock_secrets_client):
os.environ['STAGE'] = 'TEST'
os.environ['CAN_DELETE_DB'] = 'true'
my_secret_string = json.dumps(
{
"KMS_KEY_ID": "kms",
"DB_SUBGROUP_NAME": "subgroup",
"VPC_SECURITY_GROUP_ID": "vpc_id"
}
)
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
two_days_ago = datetime.datetime.now() - datetime.timedelta(2)
date_str = _get_date_string(two_days_ago)
mock_rds.describe_db_snapshots.return_value = {
'DBSnapshots': [
{
"DBInstanceIdentifier": 'observations-prod-external-2',
"DBSnapshotIdentifier": f"rds:observations-prod-external-2-{date_str}"
}
]
}
db_create_handler.create_observation_db({}, {})
mock_rds.restore_db_instance_from_db_snapshot.assert_called_once_with(
DBInstanceIdentifier='observations-test',
DBSnapshotIdentifier=f"observationSnapshotTESTTemp", DBInstanceClass='db.r5.2xlarge',
Port=5432, DBSubnetGroupName='subgroup', Iops=0, MultiAZ=False, Engine='postgres',
VpcSecurityGroupIds=['vpc_id'], Tags=OBSERVATION_INSTANCE_TAGS
)
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_create_observation_db_invalid_tier(self, mock_rds, mock_secrets_client):
os.environ['STAGE'] = 'TEST'
os.environ['CAN_DELETE_DB'] = 'false'
my_secret_string = json.dumps(
{
"KMS_KEY_ID": "kms",
"DB_SUBGROUP_NAME": "subgroup",
"VPC_SECURITY_GROUP_ID": "vpc_id"
}
)
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
two_days_ago = datetime.datetime.now() - datetime.timedelta(2)
date_str = _get_date_string(two_days_ago)
mock_rds.describe_db_snapshots.return_value = {
'DBSnapshots': [
{
"DBInstanceIdentifier": 'observations-prod-external-2',
"DBSnapshotIdentifier": f"rds:observations-prod-external-2-{date_str}"
}
]
}
with self.assertRaises(Exception) as context:
db_create_handler.create_observation_db({}, {})
mock_rds.restore_db_instance_from_db_snapshot.assert_not_called()
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_delete_observation_db(self, mock_rds, mock_secrets_client):
os.environ['STAGE'] = 'QA'
os.environ['CAN_DELETE_DB'] = 'true'
my_secret_string = json.dumps(
{
"KMS_KEY_ID": "kms",
"DB_SUBGROUP_NAME": "subgroup",
"VPC_SECURITY_GROUP_ID": "vpc_id"
}
)
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
db_create_handler.delete_observation_db({}, {})
mock_rds.delete_db_instance.assert_called_once_with(
DBInstanceIdentifier='observations-test',
SkipFinalSnapshot=True
)
mock_rds.delete_db_snapshot.assert_called_once_with(
DBSnapshotIdentifier=f"observationSnapshotTESTTemp"
)
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_delete_observation_db_invalid_tier(self, mock_rds, mock_secrets_client):
os.environ['STAGE'] = 'QA'
os.environ['CAN_DELETE_DB'] = 'false'
my_secret_string = json.dumps(
{
"KMS_KEY_ID": "kms",
"DB_SUBGROUP_NAME": "subgroup",
"VPC_SECURITY_GROUP_ID": "vpc_id"
}
)
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
with self.assertRaises(Exception) as context:
db_create_handler.delete_observation_db({}, {})
mock_rds.delete_db_instance.assert_not_called()
mock_rds.delete_db_snapshot.assert_not_called()
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_modify_observation_postgres_password(self, mock_rds, mock_secrets_client):
os.environ['STAGE'] = 'TEST'
os.environ['CAN_DELETE_DB'] = 'true'
my_secret_string = json.dumps(
{
"POSTGRES_PASSWORD": "Password123"
}
)
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
db_create_handler.modify_observation_postgres_password({}, {})
mock_rds.modify_db_instance.assert_called_once_with(
DBInstanceIdentifier='observations-test', ApplyImmediately=True, MasterUserPassword='Password123'
)
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_modify_observation_postgres_password_invalid_tier(self, mock_rds, mock_secrets_client):
os.environ['STAGE'] = 'TEST'
os.environ['CAN_DELETE_DB'] = 'false'
my_secret_string = json.dumps(
{
"POSTGRES_PASSWORD": "Password123"
}
)
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
with self.assertRaises(Exception) as context:
db_create_handler.modify_observation_postgres_password({}, {})
mock_rds.modify_db_instance.assert_not_called()
@mock.patch('src.db_create_handler.RDS', autospec=True)
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_modify_observation_passwords(self, mock_rds, mock_secrets_client,
mock_db):
os.environ['STAGE'] = 'TEST'
os.environ['CAN_DELETE_DB'] = 'true'
my_secret_string = json.dumps(
{
"WQP_SCHEMA_OWNER_USERNAME": "name",
"WQP_SCHEMA_OWNER_PASSWORD": "Password123",
"WQP_READ_ONLY_USERNAME": "name",
"WQP_READ_ONLY_PASSWORD": "Password123",
"ARS_SCHEMA_OWNER_USERNAME": "name",
"ARS_SCHEMA_OWNER_PASSWORD": "Password123",
"EPA_SCHEMA_OWNER_USERNAME": "name",
"EPA_SCHEMA_OWNER_PASSWORD": "Password123",
"NWIS_SCHEMA_OWNER_USERNAME": "name",
"NWIS_SCHEMA_OWNER_PASSWORD": "Password123",
"DB_OWNER_USERNAME": "name",
"DB_OWNER_PASSWORD": "Password123",
"WDFN_DB_READ_ONLY_USERNAME": "name",
"WDFN_DB_READ_ONLY_PASSWORD": "Password123",
"DATABASE_ADDRESS": "blah",
"DATABASE_NAME": "blah",
"POSTGRES_PASSWORD": "blah"
}
)
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
result = db_create_handler.modify_observation_passwords({}, {})
assert result is True
@mock.patch('src.db_create_handler.RDS', autospec=True)
@mock.patch('src.db_create_handler.secrets_client')
@mock.patch('src.db_create_handler.rds_client')
def test_modify_observation_passwords_invalid_tier(self, mock_rds, mock_secrets_client,
mock_db):
os.environ['STAGE'] = 'TEST'
os.environ['CAN_DELETE_DB'] = 'false'
my_secret_string = json.dumps(
{
"WQP_SCHEMA_OWNER_USERNAME": "name",
"WQP_SCHEMA_OWNER_PASSWORD": "Password123",
"WQP_READ_ONLY_USERNAME": "name",
"WQP_READ_ONLY_PASSWORD": "Password123",
"ARS_SCHEMA_OWNER_USERNAME": "name",
"ARS_SCHEMA_OWNER_PASSWORD": "Password123",
"EPA_SCHEMA_OWNER_USERNAME": "name",
"EPA_SCHEMA_OWNER_PASSWORD": "Password123",
"NWIS_SCHEMA_OWNER_USERNAME": "name",
"NWIS_SCHEMA_OWNER_PASSWORD": "Password123",
"DB_OWNER_USERNAME": "name",
"DB_OWNER_PASSWORD": "Password123",
"WDFN_DB_READ_ONLY_USERNAME": "name",
"WDFN_DB_READ_ONLY_PASSWORD": "Password123",
"DATABASE_ADDRESS": "blah",
"DATABASE_NAME": "blah",
"POSTGRES_PASSWORD": "blah"
}
)
mock_secret_payload = {
"SecretString": my_secret_string
}
mock_secrets_client.get_secret_value.return_value = mock_secret_payload
with self.assertRaises(Exception) as context:
db_create_handler.modify_observation_passwords({}, {})
def test_get_date_string(self):
jan_1 = datetime.datetime(2020, 1, 1)
date_str = db_create_handler._get_date_string(jan_1)
assert date_str == "2020-01-01"
| 42.447602
| 119
| 0.627584
|
4a0053a3c8263f441e846a6e11d3f6d784402916
| 401
|
py
|
Python
|
office_games/asgi.py
|
KLorentzen/office-games
|
7f08891892818621c7bf30f6ebed79065cce0237
|
[
"MIT"
] | null | null | null |
office_games/asgi.py
|
KLorentzen/office-games
|
7f08891892818621c7bf30f6ebed79065cce0237
|
[
"MIT"
] | 3
|
2021-04-08T19:02:42.000Z
|
2021-09-22T18:19:21.000Z
|
office_games/asgi.py
|
KLorentzen/office-games
|
7f08891892818621c7bf30f6ebed79065cce0237
|
[
"MIT"
] | null | null | null |
"""
ASGI config for office_games project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'office_games.settings')
application = get_asgi_application()
| 23.588235
| 78
| 0.790524
|
4a0053cc50d5e5165286f54f68ba941279cbee91
| 617
|
py
|
Python
|
swifter/__init__.py
|
vishthemenon/swifter
|
3800503f1997277089cdb3b0d93cb9c199ac7b2a
|
[
"MIT"
] | null | null | null |
swifter/__init__.py
|
vishthemenon/swifter
|
3800503f1997277089cdb3b0d93cb9c199ac7b2a
|
[
"MIT"
] | null | null | null |
swifter/__init__.py
|
vishthemenon/swifter
|
3800503f1997277089cdb3b0d93cb9c199ac7b2a
|
[
"MIT"
] | null | null | null |
# flake8: noqa
import sys
import warnings
from logging import config
from .swifter import SeriesAccessor, DataFrameAccessor
from .parallel_accessor import register_parallel_dataframe_accessor, register_parallel_series_accessor, register_modin
config.dictConfig({"version": 1, "disable_existing_loggers": True})
warnings.filterwarnings("ignore", category=FutureWarning)
if "modin.pandas" in sys.modules:
register_modin()
__all__ = [
"SeriesAccessor",
"DataFrameAccessor",
"register_parallel_dataframe_accessor",
"register_parallel_series_accessor",
"register_modin",
]
__version__ = "1.0.7"
| 28.045455
| 118
| 0.792545
|
4a005427c1bad99cf3b5fbce36614b0d9a644756
| 2,055
|
py
|
Python
|
1M/W4/12.py
|
allenalvin333/Hackerrank_Prep
|
26ed5b874daba4775d006824d36f9e82ea5ff1ea
|
[
"MIT"
] | 2
|
2021-11-25T13:38:36.000Z
|
2021-11-25T13:42:56.000Z
|
1M/W4/12.py
|
allenalvin333/Hackerrank_Prep
|
26ed5b874daba4775d006824d36f9e82ea5ff1ea
|
[
"MIT"
] | null | null | null |
1M/W4/12.py
|
allenalvin333/Hackerrank_Prep
|
26ed5b874daba4775d006824d36f9e82ea5ff1ea
|
[
"MIT"
] | 1
|
2021-11-25T13:38:43.000Z
|
2021-11-25T13:38:43.000Z
|
# https://www.hackerrank.com/challenges/one-month-preparation-kit-tree-huffman-decoding/problem
import queue as Queue
cntr = 0
class Node:
def __init__(self, freq, data):
self.freq = freq
self.data = data
self.left = None
self.right = None
global cntr
self._count = cntr
cntr = cntr + 1
def __lt__(self, other):
if self.freq != other.freq:
return self.freq < other.freq
return self._count < other._count
def huffman_hidden():  # builds the tree and returns the root
q = Queue.PriorityQueue()
for key in freq:
q.put((freq[key], key, Node(freq[key], key) ))
while q.qsize() != 1:
a = q.get()
b = q.get()
obj = Node(a[0] + b[0], '\0' )
obj.left = a[2]
obj.right = b[2]
q.put((obj.freq, obj.data, obj ))
root = q.get()
root = root[2]#contains root object
return root
def dfs_hidden(obj, already):
if(obj == None):
return
elif(obj.data != '\0'):
code_hidden[obj.data] = already
dfs_hidden(obj.right, already + "1")
dfs_hidden(obj.left, already + "0")
"""
class Node:
def __init__(self, freq,data):
self.freq= freq
self.data=data
self.left = None
self.right = None
"""
def decodeHuff(root, s):
n = root
for z in s:
n = n.left if(z=='0') else n.right
        if(n.left==n.right):  # leaf node: both children are None
print(n.data, end='')
n = root
ip = input()
freq = {} #maps each character to its frequency
cntr = 0
for ch in ip:
if(freq.get(ch) == None):
freq[ch] = 1
else:
freq[ch]+=1
root = huffman_hidden() #contains root of huffman tree
code_hidden = {} #contains code for each object
dfs_hidden(root, "")
if len(code_hidden) == 1: #if there is only one character in the i/p
for key in code_hidden:
code_hidden[key] = "0"
toBeDecoded = ""
for ch in ip:
toBeDecoded += code_hidden[ch]
decodeHuff(root, toBeDecoded)
| 22.096774
| 95
| 0.558151
|
4a0055e11cd6fefd375eadd5189ab2dc49b49ed9
| 3,117
|
py
|
Python
|
tests/pytests/unit/modules/test_zypperpkg.py
|
ifraixedes/saltstack-salt
|
b54becb8b43cc9b7c00b2c0bc637ac534dc62896
|
[
"Apache-2.0"
] | 9,425
|
2015-01-01T05:59:24.000Z
|
2022-03-31T20:44:05.000Z
|
tests/pytests/unit/modules/test_zypperpkg.py
|
ifraixedes/saltstack-salt
|
b54becb8b43cc9b7c00b2c0bc637ac534dc62896
|
[
"Apache-2.0"
] | 33,507
|
2015-01-01T00:19:56.000Z
|
2022-03-31T23:48:20.000Z
|
tests/pytests/unit/modules/test_zypperpkg.py
|
ifraixedes/saltstack-salt
|
b54becb8b43cc9b7c00b2c0bc637ac534dc62896
|
[
"Apache-2.0"
] | 5,810
|
2015-01-01T19:11:45.000Z
|
2022-03-31T02:37:20.000Z
|
"""
:codeauthor: Gareth J. Greenaway <ggreenaway@vmware.com>
"""
import os
import pytest
import salt.modules.pkg_resource as pkg_resource
import salt.modules.zypperpkg as zypper
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {zypper: {"rpm": None}, pkg_resource: {}}
def test_list_pkgs_no_context():
"""
Test packages listing.
:return:
"""
def _add_data(data, key, value):
data.setdefault(key, []).append(value)
rpm_out = [
"protobuf-java_|-(none)_|-2.6.1_|-3.1.develHead_|-noarch_|-(none)_|-1499257756",
"yast2-ftp-server_|-(none)_|-3.1.8_|-8.1_|-x86_64_|-(none)_|-1499257798",
"jose4j_|-(none)_|-0.4.4_|-2.1.develHead_|-noarch_|-(none)_|-1499257756",
"apache-commons-cli_|-(none)_|-1.2_|-1.233_|-noarch_|-(none)_|-1498636510",
"jakarta-commons-discovery_|-(none)_|-0.4_|-129.686_|-noarch_|-(none)_|-1498636511",
"susemanager-build-keys-web_|-(none)_|-12.0_|-5.1.develHead_|-noarch_|-(none)_|-1498636510",
"gpg-pubkey_|-(none)_|-39db7c82_|-5847eb1f_|-(none)_|-(none)_|-1519203802",
"gpg-pubkey_|-(none)_|-8a7c64f9_|-5aaa93ca_|-(none)_|-(none)_|-1529925595",
"kernel-default_|-(none)_|-4.4.138_|-94.39.1_|-x86_64_|-(none)_|-1529936067",
"kernel-default_|-(none)_|-4.4.73_|-5.1_|-x86_64_|-(none)_|-1503572639",
"perseus-dummy_|-(none)_|-1.1_|-1.1_|-i586_|-(none)_|-1529936062",
]
with patch.dict(zypper.__grains__, {"osarch": "x86_64"}), patch.dict(
zypper.__salt__,
{"cmd.run": MagicMock(return_value=os.linesep.join(rpm_out))},
), patch.dict(zypper.__salt__, {"pkg_resource.add_pkg": _add_data}), patch.dict(
zypper.__salt__,
{"pkg_resource.format_pkg_list": pkg_resource.format_pkg_list},
), patch.dict(
zypper.__salt__, {"pkg_resource.stringify": MagicMock()}
), patch.object(
zypper, "_list_pkgs_from_context"
) as list_pkgs_context_mock:
pkgs = zypper.list_pkgs(versions_as_list=True, use_context=False)
list_pkgs_context_mock.assert_not_called()
list_pkgs_context_mock.reset_mock()
pkgs = zypper.list_pkgs(versions_as_list=True, use_context=False)
list_pkgs_context_mock.assert_not_called()
list_pkgs_context_mock.reset_mock()
def test_normalize_name():
"""
Test that package is normalized only when it should be
"""
with patch.dict(zypper.__grains__, {"osarch": "x86_64"}):
result = zypper.normalize_name("foo")
assert result == "foo", result
result = zypper.normalize_name("foo.x86_64")
assert result == "foo", result
result = zypper.normalize_name("foo.noarch")
assert result == "foo", result
with patch.dict(zypper.__grains__, {"osarch": "aarch64"}):
result = zypper.normalize_name("foo")
assert result == "foo", result
result = zypper.normalize_name("foo.aarch64")
assert result == "foo", result
result = zypper.normalize_name("foo.noarch")
assert result == "foo", result
| 38.481481
| 100
| 0.652871
|
4a00565c73d1c97aa8a7f7af4418112a586a5b93
| 1,478
|
py
|
Python
|
tt.py
|
wangfengfighting/Caoliu-master
|
9025b1871ec7c2322463d97650ccd5b47f784cbb
|
[
"Apache-2.0"
] | null | null | null |
tt.py
|
wangfengfighting/Caoliu-master
|
9025b1871ec7c2322463d97650ccd5b47f784cbb
|
[
"Apache-2.0"
] | null | null | null |
tt.py
|
wangfengfighting/Caoliu-master
|
9025b1871ec7c2322463d97650ccd5b47f784cbb
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
__author__ = 'Administrator'
# import urllib.request
# path = "D:\\Download"
# url = "http://img.picuphost.com/img/upload/image/20151130/113000016301.jpeg"
# name ="D:\\download\\2.jpeg"
# # When saving, make sure the file type matches: if the image is a jpeg, the output file name must end in .jpeg/.jpg, otherwise the saved image will be invalid
# conn = urllib.request.urlopen(url)
# f = open(name,'wb')
# f.write(conn.read())
# f.close()
# print('Pic Saved!')
import whyspider
# Initialize the spider object
my_spider = whyspider.WhySpider()
# # Simulate a GET request
# path="G:\PostgraduatePROJECT\Caoliu-master"
# fname='22.jpeg'
# path2 = path+'\\'+fname
# name='G:\\PostgraduatePROJECT\\Caoliu-master\\down\\22.jpeg'
# f = open(name,'wb')
# data= my_spider.send_get('http://img.picuphost.com/img/upload/image/20151130/113000016301.jpeg')
# f.write(data)
# f.close()
# # Simulate a POST request
# print my_spider.send_post('http://3.apitool.sinaapp.com/','why=PostString2333')
#
# # Simulate a GET request
# print my_spider.send_get('http://www.baidu.com/')
#
# # Switch to mobile mode
#my_spider.set_mobile()
#
# # Simulate a GET request
# print my_spider.send_get('http://www.baidu.com/')
# import time
# time1= time.time()
#
# time2= time1+3
# print(time2-time1)
import urllib2
import whyspider
request = urllib2.Request('http://ipoock.com/img/g4/201512242250036siyu.jpeg')
request.add_header('User-Agent', 'fake-client')
#response = urllib2.urlopen(request,timeout=10)
response = urllib2.urlopen('http://ipoock.com/img/g4/201512242250036siyu.jpeg', timeout=10)
print(response)
f=open('J:\\caoliu\\ff.jpeg','wb')
f.write(response.read())  # read the response body bytes before writing
f.close()
| 27.886792
| 98
| 0.713126
|
4a0056c72b69da45a0fdea537b83f1a097796e38
| 3,999
|
py
|
Python
|
benchmark/startQiskit_noisy2596.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy2596.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy2596.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=30
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=14
prog.x(input_qubit[3]) # number=15
prog.rx(1.8001325905069514,input_qubit[3]) # number=18
prog.cx(input_qubit[0],input_qubit[3]) # number=16
prog.h(input_qubit[1]) # number=22
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
prog.x(input_qubit[3]) # number=24
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.x(input_qubit[1]) # number=25
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.z(input_qubit[1]) # number=21
prog.h(input_qubit[0]) # number=9
prog.cx(input_qubit[2],input_qubit[0]) # number=10
prog.cx(input_qubit[0],input_qubit[1]) # number=27
prog.x(input_qubit[1]) # number=28
prog.cx(input_qubit[0],input_qubit[1]) # number=29
prog.cx(input_qubit[2],input_qubit[0]) # number=11
prog.y(input_qubit[0]) # number=12
prog.y(input_qubit[0]) # number=13
prog.z(input_qubit[2]) # number=26
prog.cx(input_qubit[2],input_qubit[1]) # number=23
prog.x(input_qubit[0]) # number=19
prog.x(input_qubit[0]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy2596.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 34.179487
| 140
| 0.653413
|
4a00574da5ff1fcb85a4410c5dc6c89885657a32
| 6,424
|
py
|
Python
|
apps/incidents/tests/test_views.py
|
seanlefevre/openduty
|
34ab21117f114ccc808d8b0aa2cb801c819bdb86
|
[
"MIT"
] | 145
|
2016-04-11T06:53:13.000Z
|
2022-03-22T05:15:49.000Z
|
apps/incidents/tests/test_views.py
|
seanlefevre/openduty
|
34ab21117f114ccc808d8b0aa2cb801c819bdb86
|
[
"MIT"
] | 78
|
2017-09-24T10:59:49.000Z
|
2022-02-12T07:36:27.000Z
|
apps/incidents/tests/test_views.py
|
seanlefevre/openduty
|
34ab21117f114ccc808d8b0aa2cb801c819bdb86
|
[
"MIT"
] | 30
|
2016-04-11T06:53:16.000Z
|
2021-12-29T11:39:26.000Z
|
import pytest
from unittest.mock import patch
from rest_framework import status
from django.urls import reverse
from apps.incidents.models import Incident, IncidentSilenced, Service
from django_dynamic_fixture import G
from apps.commons.tests.fixtures import authenticated_client, base_user, other_user
@pytest.mark.django_db
def test_get_incident_detail(authenticated_client):
c = authenticated_client
incident = G(Incident)
response = c.get(reverse('IncidentDetailView', args=[incident.id]))
assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_get_incident_detail_with_incident_silenced(authenticated_client):
c = authenticated_client
incident = G(Incident)
incident_silenced = G(IncidentSilenced, incident=incident)
response = c.get(reverse('IncidentDetailView', args=[incident.id]))
assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_get_incidents_list_on_call(authenticated_client):
c = authenticated_client
incident = G(Incident)
response = c.get(reverse('OnCallIncidentsListView'))
assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_get_incidents_list(authenticated_client):
c = authenticated_client
incident = G(Incident)
response = c.get(reverse('IncidentsListView'))
assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_post_update_type(authenticated_client):
c = authenticated_client
incident = G(Incident, event_type=Incident.ACKNOWLEDGE)
new_event_type = Incident.RESOLVE
data = {
'id': str(incident.id),
'url': str(reverse('IncidentDetailView', args=[incident.id])),
'event_type': new_event_type
}
url = reverse('incidents_update_type')
response = c.post(
url, data, format='json'
)
assert response.status_code == status.HTTP_302_FOUND
incident.refresh_from_db()
assert incident.event_type == new_event_type
@pytest.mark.django_db
def test_update_type(base_user):
service = G(Service)
incident = G(Incident, event_type=Incident.ACKNOWLEDGE, service_to_escalate_to=service)
new_event_type = Incident.RESOLVE
from apps.incidents.views import _update_type
_update_type(user=base_user, ids=[incident.id], event_type=new_event_type)
incident.refresh_from_db()
assert incident.event_type == new_event_type
@pytest.mark.django_db
@patch('django.contrib.messages.error')
def test_post_update_type_event_type_is_none(mocked_messages_error, authenticated_client):
c = authenticated_client
incident = G(Incident, event_type=Incident.ACKNOWLEDGE)
new_event_type = Incident.RESOLVE
data = {
'id': str(incident.id),
'url': str(reverse('IncidentDetailView', args=[incident.id])),
# 'event_type':
}
url = reverse('incidents_update_type')
response = c.post(
url, data, format='json'
)
assert response.status_code == status.HTTP_302_FOUND
incident.refresh_from_db()
assert incident.event_type == Incident.ACKNOWLEDGE
assert mocked_messages_error.called
@pytest.mark.django_db
@patch('django.contrib.messages.error')
def test_post_update_type_incident_not_found(mocked_messages_error, authenticated_client):
c = authenticated_client
incident = G(Incident, event_type=Incident.ACKNOWLEDGE)
new_event_type = Incident.RESOLVE
data = {
'id': str(incident.id + 1),
'url': str(reverse('IncidentDetailView', args=[incident.id])),
'event_type': new_event_type
}
url = reverse('incidents_update_type')
response = c.post(
url, data, format='json'
)
assert response.status_code == status.HTTP_302_FOUND
incident.refresh_from_db()
assert incident.event_type == Incident.ACKNOWLEDGE
assert mocked_messages_error.called
@pytest.mark.django_db
@patch('django.contrib.messages.error')
def test_post_update_type_from_resolved_back_acknowledge(mocked_messages_error, authenticated_client):
c = authenticated_client
new_event_type = Incident.RESOLVE
incident = G(Incident, event_type=new_event_type)
data = {
'id': str(incident.id),
'url': str(reverse('IncidentDetailView', args=[incident.id])),
'event_type': Incident.ACKNOWLEDGE
}
url = reverse('incidents_update_type')
response = c.post(
url, data, format='json'
)
assert response.status_code == status.HTTP_302_FOUND
incident.refresh_from_db()
assert incident.event_type == new_event_type
assert mocked_messages_error.called
@pytest.mark.django_db
def test_post_forward_incident(authenticated_client, base_user):
c = authenticated_client
incident = G(Incident, event_type=Incident.ACKNOWLEDGE)
data = {
'id': str(incident.id),
'url': str(reverse('IncidentDetailView', args=[incident.id])),
"user_id": base_user.id
}
url = reverse('incidents_forward_incident')
response = c.post(url, data, format='json')
assert response.status_code == status.HTTP_302_FOUND
assert incident.eventlog_set.count() == 1
@pytest.mark.django_db
@patch('django.contrib.messages.error')
def test_post_forward_incident_user_not_found(mocked_messages_error, authenticated_client):
c = authenticated_client
incident = G(Incident, event_type=Incident.ACKNOWLEDGE)
data = {
'id': str(incident.id),
'url': str(reverse('IncidentDetailView', args=[incident.id])),
"user_id": 0
}
url = reverse('incidents_forward_incident')
response = c.post(url, data, format='json')
assert response.status_code == status.HTTP_302_FOUND
assert incident.eventlog_set.count() == 0
assert mocked_messages_error.called
@pytest.mark.django_db
@patch('django.contrib.messages.error')
def test_post_forward_incident_incident_not_found(mocked_messages_error, authenticated_client, base_user):
c = authenticated_client
incident = G(Incident, event_type=Incident.ACKNOWLEDGE)
data = {
'id': str(incident.id + 1),
'url': str(reverse('IncidentDetailView', args=[incident.id])),
"user_id": base_user.id
}
url = reverse('incidents_forward_incident')
response = c.post(url, data, format='json')
assert response.status_code == status.HTTP_302_FOUND
assert incident.eventlog_set.count() == 0
assert mocked_messages_error.called
| 35.103825
| 106
| 0.732254
|
4a00588e17e91f42a792e6522d40ca6d4b214b8f
| 2,036
|
py
|
Python
|
questions/longest-increasing-path-in-a-matrix/Solution.py
|
marcus-aurelianus/leetcode-solutions
|
8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6
|
[
"MIT"
] | 141
|
2017-12-12T21:45:53.000Z
|
2022-03-25T07:03:39.000Z
|
questions/longest-increasing-path-in-a-matrix/Solution.py
|
marcus-aurelianus/leetcode-solutions
|
8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6
|
[
"MIT"
] | 32
|
2015-10-05T14:09:52.000Z
|
2021-05-30T10:28:41.000Z
|
questions/longest-increasing-path-in-a-matrix/Solution.py
|
marcus-aurelianus/leetcode-solutions
|
8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6
|
[
"MIT"
] | 56
|
2015-09-30T05:23:28.000Z
|
2022-03-08T07:57:11.000Z
|
"""
Given an m x n integers matrix, return the length of the longest increasing path in matrix.
From each cell, you can either move in four directions: left, right, up, or down. You may not move diagonally or move outside the boundary (i.e., wrap-around is not allowed).
Example 1:
Input: matrix = [[9,9,4],[6,6,8],[2,1,1]]
Output: 4
Explanation: The longest increasing path is [1, 2, 6, 9].
Example 2:
Input: matrix = [[3,4,5],[3,2,6],[2,2,1]]
Output: 4
Explanation: The longest increasing path is [3, 4, 5, 6]. Moving diagonally is not allowed.
Example 3:
Input: matrix = [[1]]
Output: 1
Constraints:
m == matrix.length
n == matrix[i].length
1 <= m, n <= 200
0 <= matrix[i][j] <= 2^31 - 1
"""
from typing import List


class Solution:
def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
def find_neighbors(matrix, y, x):
curr = matrix[y][x]
deltas = [(-1, 0), (0, -1), (0, 1), (1, 0)]
for yd, xd in deltas:
yn, xn = y + yd, x + xd
if not (0 <= yn < len(matrix)):
continue
if not (0 <= xn < len(matrix[0])):
continue
if matrix[yn][xn] > curr:
yield (yn, xn)
def find_longest_increasing_path(matrix, i, j, track):
if (i, j) in track:
return track[(i, j)]
curr_len = 1
for neighbor in find_neighbors(matrix, i, j):
curr_len = max(curr_len, find_longest_increasing_path(matrix, neighbor[0], neighbor[1], track) + 1)
            # Caching the result (memoization) is essential for performance here
track[(i, j)] = curr_len
return curr_len
if not matrix or not matrix[0]:
return 0
mm = 0
track = {}
for i, row in enumerate(matrix):
for j, elem in enumerate(row):
curr_len = find_longest_increasing_path(matrix, i, j, track)
mm = max(mm, curr_len)
return mm
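# Example from the problem statement above:
#   Solution().longestIncreasingPath([[9, 9, 4], [6, 6, 8], [2, 1, 1]])  # -> 4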
| 29.507246
| 174
| 0.539784
|
4a0058afae7dfe223bfe4cb12bec6a8c6a97cbd3
| 420
|
py
|
Python
|
src/algorithmic_toolbox/week3/money_change.py
|
aniket-kr/Techniques-of-Designing-Algorithms
|
6aa87fd58eafef41afc2c9d49bd83d706de88f1d
|
[
"MIT"
] | null | null | null |
src/algorithmic_toolbox/week3/money_change.py
|
aniket-kr/Techniques-of-Designing-Algorithms
|
6aa87fd58eafef41afc2c9d49bd83d706de88f1d
|
[
"MIT"
] | null | null | null |
src/algorithmic_toolbox/week3/money_change.py
|
aniket-kr/Techniques-of-Designing-Algorithms
|
6aa87fd58eafef41afc2c9d49bd83d706de88f1d
|
[
"MIT"
] | null | null | null |
from typing import List
DENOMINATIONS = [10, 5, 1]
def count_coins(money: int, denominations: List[int]) -> int:
total_coins = 0
for coin_value in denominations:
if money == 0:
break
total_coins += money // coin_value
money = money % coin_value
return total_coins
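# Example: count_coins(28, DENOMINATIONS) == 6  (two 10s, one 5, three 1s)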
if __name__ == '__main__':
money = int(input())
print(count_coins(money, DENOMINATIONS))
| 20
| 61
| 0.62619
|
4a0058d8229a811c0f3aca3645eca83a1cc30ed5
| 288
|
py
|
Python
|
dkr-py310/docker-student-portal-310/course_files/begin_advanced/py_files_1.py
|
pbarton666/virtual_classroom
|
a9d0dc2eb16ebc4d2fd451c3a3e6f96e37c87675
|
[
"MIT"
] | null | null | null |
dkr-py310/docker-student-portal-310/course_files/begin_advanced/py_files_1.py
|
pbarton666/virtual_classroom
|
a9d0dc2eb16ebc4d2fd451c3a3e6f96e37c87675
|
[
"MIT"
] | null | null | null |
dkr-py310/docker-student-portal-310/course_files/begin_advanced/py_files_1.py
|
pbarton666/virtual_classroom
|
a9d0dc2eb16ebc4d2fd451c3a3e6f96e37c87675
|
[
"MIT"
] | null | null | null |
#py_files_1.py
f = open('afile', 'w')
f.write("Hello afile!")
print("What's up with {}?\n".format(f.name))
print("readable?", f.readable())
print("writable?", f.writable())
print("encoding?", f.encoding)
print("closed?", f.closed)
print("Closing now!")
print("closed?", f.closed)
| 18
| 44
| 0.638889
|